From 8b265ede15801387faf6b8fa9bd2be4047eef9a0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 12:01:18 -0400 Subject: [PATCH] fix(deps): update module github.com/prometheus/common to v0.64.0 (main) (#16750) Signed-off-by: Paul Rogers <129207811+paul1r@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Paul Rogers Co-authored-by: Paul Rogers <129207811+paul1r@users.noreply.github.com> --- clients/pkg/promtail/promtail_test.go | 2 +- clients/pkg/promtail/wal/wal.go | 3 +- go.mod | 54 +- go.sum | 181 +- pkg/chunkenc/symbols.go | 6 +- pkg/compactor/deletion/job_runner_test.go | 3 +- pkg/distributor/distributor.go | 18 +- pkg/distributor/distributor_test.go | 10 +- pkg/ingester/checkpoint.go | 3 +- pkg/ingester/wal.go | 3 +- pkg/loghttp/push/otlp.go | 3 +- .../queryrangebase/results_cache.go | 6 +- pkg/ruler/base/api_test.go | 84 +- pkg/ruler/base/manager.go | 14 +- pkg/ruler/base/mapper_test.go | 110 +- pkg/ruler/compat.go | 34 +- pkg/ruler/compat_test.go | 38 +- pkg/ruler/grouploader.go | 6 +- pkg/ruler/grouploader_test.go | 14 +- pkg/ruler/rulespb/compat.go | 26 +- .../bucketclient/bucket_client_test.go | 2 +- pkg/ruler/rulestore/local/local_test.go | 6 +- pkg/ruler/storage/instance/instance.go | 6 +- pkg/ruler/storage/wal/wal.go | 5 +- .../shipper/indexshipper/tsdb/head_wal.go | 3 +- pkg/tool/commands/rules.go | 10 +- pkg/tool/commands/rules_test.go | 21 +- pkg/tool/rules/compare.go | 8 +- pkg/tool/rules/compare_test.go | 129 +- pkg/tool/rules/parser_test.go | 8 +- pkg/tool/rules/rules.go | 42 +- pkg/tool/rules/rules_test.go | 78 +- tools/lambda-promtail/go.mod | 14 +- tools/lambda-promtail/go.sum | 28 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 14 + .../sdk/azcore/internal/exported/exported.go | 7 +- .../sdk/azcore/internal/shared/constants.go | 2 +- .../sdk/azcore/runtime/policy_bearer_token.go | 14 +- .../sdk/azidentity/CHANGELOG.md | 12 + .../azure-sdk-for-go/sdk/azidentity/README.md | 18 +- .../sdk/azidentity/TOKEN_CACHING.MD | 11 +- .../sdk/azidentity/assets.json | 2 +- .../sdk/azidentity/azidentity.go | 5 + .../sdk/azidentity/confidential_client.go | 2 +- .../sdk/azidentity/environment_credential.go | 7 +- .../azure-sdk-for-go/sdk/azidentity/go.work | 2 +- .../azidentity/managed-identity-matrix.json | 2 +- .../sdk/azidentity/managed_identity_client.go | 433 +---- .../azidentity/managed_identity_credential.go | 28 +- .../sdk/azidentity/public_client.go | 2 +- .../sdk/azidentity/test-resources-post.ps1 | 4 +- .../username_password_credential.go | 12 +- .../sdk/azidentity/version.go | 2 +- .../azure-sdk-for-go/sdk/internal/log/log.go | 2 +- .../sdk/internal/temporal/resource.go | 51 +- .../apps/confidential/confidential.go | 11 +- .../apps/errors/errors.go | 9 + .../apps/internal/base/base.go | 61 +- .../base/{internal => }/storage/items.go | 7 +- .../storage/partitioned_storage.go | 3 +- .../base/{internal => }/storage/storage.go | 8 +- .../apps/internal/exported/exported.go | 2 + .../apps/internal/local/server.go | 3 +- .../apps/internal/oauth/oauth.go | 20 +- .../oauth/ops/accesstokens/accesstokens.go | 55 +- .../internal/oauth/ops/accesstokens/tokens.go | 66 +- .../internal/oauth/ops/internal/comm/comm.go | 4 +- .../apps/internal/version/version.go | 2 +- .../apps/managedidentity/azure_ml.go | 28 + .../apps/managedidentity/cloud_shell.go | 37 + .../apps/managedidentity/managedidentity.go | 717 ++++++++ 
.../apps/managedidentity/servicefabric.go | 25 + .../apps/public/public.go | 13 +- .../github.com/digitalocean/godo/CHANGELOG.md | 67 + .../github.com/digitalocean/godo/apps.gen.go | 4 +- .../digitalocean/godo/apps_accessors.go | 8 + .../github.com/digitalocean/godo/databases.go | 76 + vendor/github.com/digitalocean/godo/godo.go | 6 +- .../digitalocean/godo/kubernetes.go | 134 +- .../digitalocean/godo/load_balancers.go | 82 + .../godo/partner_network_connect.go | 415 +++++ .../github.com/digitalocean/godo/snapshots.go | 8 + .../digitalocean/godo/spaces_keys.go | 186 ++ .../go-viper/mapstructure/v2/.editorconfig | 18 + .../go-viper/mapstructure/v2/.envrc | 4 + .../go-viper/mapstructure/v2/.gitignore | 6 + .../go-viper/mapstructure/v2/.golangci.yaml | 23 + .../go-viper/mapstructure/v2/CHANGELOG.md | 104 ++ .../go-viper/mapstructure/v2/LICENSE | 21 + .../go-viper/mapstructure/v2/README.md | 80 + .../go-viper/mapstructure/v2/decode_hooks.go | 630 +++++++ .../go-viper/mapstructure/v2/flake.lock | 472 +++++ .../go-viper/mapstructure/v2/flake.nix | 39 + .../mapstructure/v2/internal/errors/errors.go | 11 + .../mapstructure/v2/internal/errors/join.go | 9 + .../v2/internal/errors/join_go1_19.go | 61 + .../go-viper/mapstructure/v2/mapstructure.go | 1620 +++++++++++++++++ .../mapstructure/v2/reflect_go1_19.go | 44 + .../mapstructure/v2/reflect_go1_20.go | 10 + .../gophercloud/gophercloud/v2/CHANGELOG.md | 26 + .../gophercloud/v2/endpoint_search.go | 61 +- .../gophercloud/v2/openstack/client.go | 16 +- .../openstack/compute/v2/servers/requests.go | 3 + .../v2/openstack/endpoint_location.go | 6 +- .../extensions/layer3/floatingips/requests.go | 20 +- .../extensions/layer3/floatingips/results.go | 3 + .../openstack/networking/v2/ports/requests.go | 1 + .../gophercloud/v2/provider_client.go | 2 +- .../gophercloud/gophercloud/v2/results.go | 18 + .../gophercloud/v2/service_client.go | 10 +- .../hashicorp/go-version/CHANGELOG.md | 64 + .../github.com/hashicorp/go-version/LICENSE | 356 ++++ .../github.com/hashicorp/go-version/README.md | 66 + .../hashicorp/go-version/constraint.go | 298 +++ .../hashicorp/go-version/version.go | 441 +++++ .../go-version/version_collection.go | 20 + vendor/github.com/knadh/koanf/maps/LICENSE | 21 + vendor/github.com/knadh/koanf/maps/maps.go | 303 +++ .../knadh/koanf/providers/confmap/LICENSE | 21 + .../knadh/koanf/providers/confmap/confmap.go | 37 + vendor/github.com/knadh/koanf/v2/.gitignore | 4 + vendor/github.com/knadh/koanf/v2/LICENSE | 21 + vendor/github.com/knadh/koanf/v2/README.md | 714 ++++++++ vendor/github.com/knadh/koanf/v2/getters.go | 649 +++++++ vendor/github.com/knadh/koanf/v2/go.work | 31 + vendor/github.com/knadh/koanf/v2/go.work.sum | 154 ++ .../github.com/knadh/koanf/v2/interfaces.go | 20 + vendor/github.com/knadh/koanf/v2/koanf.go | 577 ++++++ vendor/github.com/knadh/koanf/v2/options.go | 33 + vendor/github.com/miekg/dns/README.md | 5 +- vendor/github.com/miekg/dns/edns.go | 35 +- vendor/github.com/miekg/dns/scan.go | 2 + vendor/github.com/miekg/dns/scan_rr.go | 10 + vendor/github.com/miekg/dns/svcb.go | 12 +- vendor/github.com/miekg/dns/types.go | 25 +- vendor/github.com/miekg/dns/udp.go | 4 +- .../dns/{udp_windows.go => udp_no_control.go} | 6 +- vendor/github.com/miekg/dns/update.go | 7 + vendor/github.com/miekg/dns/version.go | 2 +- vendor/github.com/miekg/dns/zduplicate.go | 17 + vendor/github.com/miekg/dns/zmsg.go | 19 + vendor/github.com/miekg/dns/ztypes.go | 15 + vendor/github.com/oklog/ulid/v2/.gitignore | 29 + 
vendor/github.com/oklog/ulid/v2/AUTHORS.md | 2 + vendor/github.com/oklog/ulid/v2/CHANGELOG.md | 33 + .../github.com/oklog/ulid/v2/CONTRIBUTING.md | 17 + vendor/github.com/oklog/ulid/v2/LICENSE | 201 ++ vendor/github.com/oklog/ulid/v2/README.md | 234 +++ vendor/github.com/oklog/ulid/v2/ulid.go | 696 +++++++ .../internal/exp/metrics/identity/metric.go | 23 +- .../internal/exp/metrics/identity/resource.go | 5 + .../internal/exp/metrics/identity/scope.go | 5 + .../internal/exp/metrics/identity/stream.go | 19 +- .../internal/exp/metrics/identity/strings.go | 24 - .../exp/metrics/staleness/priority_queue.go | 111 -- .../exp/metrics/staleness/staleness.go | 134 -- .../internal/exp/metrics/streams/streams.go | 81 - .../pkg/pdatautil/hash.go | 5 +- .../pkg/pdatautil/metadata.yaml | 1 + .../deltatocumulativeprocessor/config.go | 3 +- .../documentation.md | 48 +- .../deltatocumulativeprocessor/factory.go | 4 +- .../internal/data/add.go | 133 +- .../internal/data/data.go | 26 - .../internal/data/expo/merge.go | 9 + .../internal/data/expo/scale.go | 34 +- .../internal/data/expo/zero.go | 4 +- .../internal/delta/delta.go | 43 +- .../internal/maps/map.go | 116 ++ .../internal/metadata/generated_telemetry.go | 117 +- .../internal/telemetry/metrics.go | 19 +- .../deltatocumulativeprocessor/metadata.yaml | 44 +- .../deltatocumulativeprocessor/processor.go | 180 +- .../prometheus/common/config/headers.go | 6 +- .../prometheus/common/config/http_config.go | 16 +- .../prometheus/common/expfmt/text_parse.go | 4 +- .../prometheus/common/model/alert.go | 2 +- .../prometheus/common/model/labels.go | 5 +- .../prometheus/common/model/metric.go | 28 +- .../prometheus/common/promslog/slog.go | 223 ++- .../exporter-toolkit/web/landing_page.go | 15 +- .../exporter-toolkit/web/landing_page.html | 10 +- .../prometheus/otlptranslator/.golangci.yml | 188 +- .../metric_name_builder.go | 48 +- .../prometheus/otlptranslator/metric_namer.go | 331 ---- .../prometheus/otlptranslator/metric_type.go | 36 - .../otlptranslator/normalize_label.go | 14 +- .../prometheus/prometheus/config/config.go | 118 +- .../prometheus/prometheus/config/reload.go | 5 +- .../prometheus/discovery/aws/ec2.go | 4 +- .../prometheus/discovery/aws/lightsail.go | 2 +- .../prometheus/discovery/azure/azure.go | 8 +- .../prometheus/discovery/consul/metrics.go | 2 +- .../discovery/digitalocean/digitalocean.go | 2 +- .../prometheus/discovery/file/metrics.go | 2 +- .../prometheus/discovery/gce/gce.go | 2 +- .../discovery/kubernetes/endpointslice.go | 128 +- .../kubernetes/endpointslice_adaptor.go | 190 -- .../discovery/kubernetes/ingress.go | 55 +- .../discovery/kubernetes/ingress_adaptor.go | 90 - .../discovery/kubernetes/kubernetes.go | 8 +- .../discovery/kubernetes/metrics.go | 2 +- .../prometheus/discovery/manager.go | 6 +- .../prometheus/discovery/marathon/marathon.go | 2 +- .../prometheus/discovery/moby/docker.go | 24 +- .../prometheus/discovery/moby/dockerswarm.go | 2 +- .../discovery/openstack/hypervisor.go | 2 +- .../discovery/openstack/instance.go | 2 +- .../discovery/openstack/loadbalancer.go | 7 - .../discovery/openstack/openstack.go | 2 +- .../prometheus/discovery/registry.go | 3 +- .../prometheus/discovery/triton/triton.go | 2 +- .../discovery/zookeeper/zookeeper.go | 4 +- .../prometheus/model/labels/labels.go | 10 +- .../prometheus/model/labels/labels_common.go | 4 +- .../prometheus/model/labels/regexp.go | 4 +- .../prometheus/model/relabel/relabel.go | 5 - .../prometheus/model/rulefmt/rulefmt.go | 69 +- .../prometheus/model/textparse/interface.go | 
17 +- .../prometheus/model/textparse/nhcbparse.go | 22 +- .../model/textparse/openmetricsparse.go | 26 +- .../prometheus/model/textparse/promparse.go | 21 +- .../model/textparse/protobufparse.go | 409 +++-- .../prometheus/prometheus/notifier/alert.go | 91 + .../prometheus/notifier/alertmanager.go | 90 + .../prometheus/notifier/alertmanagerset.go | 128 ++ .../notifier/{notifier.go => manager.go} | 386 +--- .../prometheus/prometheus/notifier/metric.go | 94 + .../prometheus/prometheus/notifier/util.go | 49 + .../prometheus/prometheus/prompb/buf.gen.yaml | 5 + .../prometheus/prometheus/prompb/buf.lock | 6 +- .../prometheus/prometheus/prompb/codec.go | 2 + .../prompb/io/prometheus/client/decoder.go | 780 ++++++++ .../prompb/io/prometheus/write/v2/codec.go | 3 + .../prompb/io/prometheus/write/v2/types.pb.go | 5 +- .../prometheus/prometheus/prompb/types.pb.go | 284 +-- .../prometheus/prometheus/prompb/types.proto | 4 + .../prometheus/prometheus/promql/durations.go | 136 ++ .../prometheus/prometheus/promql/engine.go | 137 +- .../prometheus/prometheus/promql/functions.go | 506 ++--- .../prometheus/prometheus/promql/info.go | 2 +- .../prometheus/promql/parser/ast.go | 45 +- .../promql/parser/generated_parser.y | 194 +- .../promql/parser/generated_parser.y.go | 1058 ++++++----- .../prometheus/promql/parser/lex.go | 73 +- .../prometheus/promql/parser/parse.go | 59 +- .../prometheus/promql/parser/prettier.go | 16 + .../prometheus/promql/parser/printer.go | 57 +- .../prometheus/prometheus/promql/quantile.go | 78 + .../prometheus/prometheus/rules/group.go | 55 +- .../prometheus/prometheus/rules/manager.go | 14 +- .../prometheus/scrape/clientprotobuf.go | 1 - .../prometheus/prometheus/scrape/manager.go | 2 +- .../prometheus/prometheus/scrape/scrape.go | 114 +- .../prometheus/storage/interface.go | 1 - .../prometheus/prometheus/storage/merge.go | 2 +- .../storage/remote/azuread/azuread.go | 8 +- .../prometheus/storage/remote/client.go | 29 +- .../prometheus/storage/remote/intern.go | 6 +- .../prometheus/normalize_label.go | 48 - .../otlptranslator/prometheus/unit_to_ucum.go | 102 -- .../prometheusremotewrite/helper.go | 39 +- .../prometheusremotewrite/histograms.go | 197 +- .../prometheusremotewrite/metrics_to_prw.go | 40 +- .../number_data_points.go | 6 +- .../otlp_to_openmetrics_metadata.go | 15 + .../storage/remote/queue_manager.go | 101 +- .../prometheus/storage/remote/storage.go | 4 +- .../prometheus/storage/remote/write.go | 8 +- .../storage/remote/write_handler.go | 81 +- .../prometheus/prometheus/storage/series.go | 2 +- .../prometheus/template/template.go | 3 +- .../prometheus/prometheus/tsdb/block.go | 5 +- .../prometheus/prometheus/tsdb/blockwriter.go | 2 +- .../prometheus/tsdb/chunkenc/histogram.go | 42 +- .../prometheus/prometheus/tsdb/compact.go | 15 +- .../prometheus/prometheus/tsdb/db.go | 58 +- .../prometheus/tsdb/errors/errors.go | 2 +- .../prometheus/tsdb/fileutil/dir.go | 2 +- .../prometheus/tsdb/fileutil/fileutil.go | 2 +- .../prometheus/prometheus/tsdb/head.go | 155 +- .../prometheus/prometheus/tsdb/head_append.go | 54 +- .../prometheus/prometheus/tsdb/head_other.go | 2 +- .../prometheus/prometheus/tsdb/head_read.go | 14 +- .../prometheus/prometheus/tsdb/head_wal.go | 211 ++- .../prometheus/prometheus/tsdb/index/index.go | 2 +- .../prometheus/prometheus/tsdb/isolation.go | 7 +- .../prometheus/prometheus/tsdb/ooo_head.go | 2 +- .../prometheus/tsdb/ooo_head_read.go | 10 +- .../prometheus/prometheus/tsdb/querier.go | 6 +- .../prometheus/tsdb/record/record.go | 2 +- 
.../prometheus/prometheus/tsdb/testutil.go | 4 +- .../tsdb/tsdbutil/dir_locker_testutil.go | 8 +- .../prometheus/tsdb/wlog/checkpoint.go | 6 +- .../prometheus/tsdb/wlog/live_reader.go | 64 +- .../prometheus/prometheus/tsdb/wlog/reader.go | 57 +- .../prometheus/tsdb/wlog/watcher.go | 15 +- .../prometheus/prometheus/tsdb/wlog/wlog.go | 251 ++- .../util/annotations/annotations.go | 12 + .../prometheus/util/compression/buffers.go | 142 ++ .../util/compression/compression.go | 122 ++ .../prometheus/util/httputil/compression.go | 12 +- .../prometheus/util/logging/file.go | 2 +- .../prometheus/prometheus/web/api/v1/api.go | 41 +- .../github.com/puzpuzpuz/xsync/v3/.gitignore | 15 + .../puzpuzpuz/xsync/v3/BENCHMARKS.md | 133 ++ vendor/github.com/puzpuzpuz/xsync/v3/LICENSE | 201 ++ .../github.com/puzpuzpuz/xsync/v3/README.md | 195 ++ .../github.com/puzpuzpuz/xsync/v3/counter.go | 99 + vendor/github.com/puzpuzpuz/xsync/v3/map.go | 917 ++++++++++ vendor/github.com/puzpuzpuz/xsync/v3/mapof.go | 738 ++++++++ .../puzpuzpuz/xsync/v3/mpmcqueue.go | 125 ++ .../puzpuzpuz/xsync/v3/mpmcqueueof.go | 138 ++ .../github.com/puzpuzpuz/xsync/v3/rbmutex.go | 188 ++ .../puzpuzpuz/xsync/v3/spscqueue.go | 92 + .../puzpuzpuz/xsync/v3/spscqueueof.go | 96 + vendor/github.com/puzpuzpuz/xsync/v3/util.go | 66 + .../puzpuzpuz/xsync/v3/util_hash.go | 77 + .../collector/component/component.go | 31 +- .../collector/component/config.go | 84 +- .../collector/component/identifiable.go | 20 +- .../collector/component/telemetry.go | 27 +- .../config/configtelemetry/configtelemetry.go | 73 - .../collector/config/configtelemetry/doc.go | 47 - .../configtelemetry => confmap}/LICENSE | 0 .../collector/confmap/Makefile | 1 + .../collector/confmap/README.md | 284 +++ .../collector/confmap/confmap.go | 583 ++++++ .../collector/confmap/converter.go | 38 + .../collector/confmap/expand.go | 237 +++ .../confmap/internal/mapstructure/encoder.go | 261 +++ .../collector/confmap/merge.go | 71 + .../collector/confmap/metadata.yaml | 11 + .../collector/confmap/provider.go | 261 +++ .../collector/confmap/resolver.go | 285 +++ .../collector/confmap/xconfmap/LICENSE | 202 ++ .../xconfmap}/Makefile | 0 .../collector/confmap/xconfmap/config.go | 199 ++ .../collector/consumer/logs.go | 3 +- .../collector/consumer/metrics.go | 3 +- .../collector/consumer/traces.go | 3 +- .../collector/featuregate/LICENSE | 202 ++ .../collector/featuregate/Makefile | 1 + .../collector/featuregate/README.md | 77 + .../collector/featuregate/flag.go | 71 + .../collector/featuregate/gate.go | 58 + .../collector/featuregate/registry.go | 211 +++ .../collector/featuregate/stage.go | 44 + .../collector/internal/telemetry/LICENSE | 202 ++ .../collector/internal/telemetry/Makefile | 1 + .../telemetry/componentattribute/attribute.go | 25 + .../componentattribute/logger_provider.go | 37 + .../componentattribute/logger_zap.go | 146 ++ .../componentattribute/meter_provider.go | 37 + .../componentattribute/tracer_provider.go | 60 + .../collector/internal/telemetry/telemetry.go | 64 + .../collector/pipeline/pipeline.go | 20 +- .../collector/processor/internal/err.go | 14 + .../processor/internal/obsmetrics.go | 13 + .../collector/processor/processor.go | 85 +- .../contrib/bridges/otelzap/LICENSE | 201 ++ .../contrib/bridges/otelzap/README.md | 3 + .../contrib/bridges/otelzap/convert.go | 123 ++ .../contrib/bridges/otelzap/core.go | 262 +++ .../contrib/bridges/otelzap/encoder.go | 274 +++ .../contrib/bridges/otelzap/gen.go | 8 + vendor/go.opentelemetry.io/otel/log/DESIGN.md | 634 +++++++ 
vendor/go.opentelemetry.io/otel/log/LICENSE | 201 ++ vendor/go.opentelemetry.io/otel/log/README.md | 3 + vendor/go.opentelemetry.io/otel/log/doc.go | 76 + .../otel/log/embedded/README.md | 3 + .../otel/log/embedded/embedded.go | 36 + .../otel/log/global/README.md | 3 + .../otel/log/global/log.go | 49 + .../otel/log/internal/global/log.go | 107 ++ .../otel/log/internal/global/state.go | 53 + .../go.opentelemetry.io/otel/log/keyvalue.go | 443 +++++ .../otel/log/kind_string.go | 30 + vendor/go.opentelemetry.io/otel/log/logger.go | 140 ++ .../go.opentelemetry.io/otel/log/provider.go | 37 + vendor/go.opentelemetry.io/otel/log/record.go | 144 ++ .../go.opentelemetry.io/otel/log/severity.go | 64 + .../otel/log/severity_string.go | 47 + vendor/modules.txt | 127 +- 384 files changed, 26713 insertions(+), 5583 deletions(-) rename vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/{internal => }/storage/items.go (95%) rename vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/{internal => }/storage/partitioned_storage.go (99%) rename vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/{internal => }/storage/storage.go (98%) create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go create mode 100644 vendor/github.com/digitalocean/godo/partner_network_connect.go create mode 100644 vendor/github.com/digitalocean/godo/spaces_keys.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.editorconfig create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.envrc create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.gitignore create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml create mode 100644 vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md create mode 100644 vendor/github.com/go-viper/mapstructure/v2/LICENSE create mode 100644 vendor/github.com/go-viper/mapstructure/v2/README.md create mode 100644 vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.lock create mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.nix create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/mapstructure.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go create mode 100644 vendor/github.com/hashicorp/go-version/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE create mode 100644 vendor/github.com/hashicorp/go-version/README.md create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go create mode 100644 vendor/github.com/hashicorp/go-version/version.go create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go create mode 100644 
vendor/github.com/knadh/koanf/maps/LICENSE create mode 100644 vendor/github.com/knadh/koanf/maps/maps.go create mode 100644 vendor/github.com/knadh/koanf/providers/confmap/LICENSE create mode 100644 vendor/github.com/knadh/koanf/providers/confmap/confmap.go create mode 100644 vendor/github.com/knadh/koanf/v2/.gitignore create mode 100644 vendor/github.com/knadh/koanf/v2/LICENSE create mode 100644 vendor/github.com/knadh/koanf/v2/README.md create mode 100644 vendor/github.com/knadh/koanf/v2/getters.go create mode 100644 vendor/github.com/knadh/koanf/v2/go.work create mode 100644 vendor/github.com/knadh/koanf/v2/go.work.sum create mode 100644 vendor/github.com/knadh/koanf/v2/interfaces.go create mode 100644 vendor/github.com/knadh/koanf/v2/koanf.go create mode 100644 vendor/github.com/knadh/koanf/v2/options.go rename vendor/github.com/miekg/dns/{udp_windows.go => udp_no_control.go} (85%) create mode 100644 vendor/github.com/oklog/ulid/v2/.gitignore create mode 100644 vendor/github.com/oklog/ulid/v2/AUTHORS.md create mode 100644 vendor/github.com/oklog/ulid/v2/CHANGELOG.md create mode 100644 vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md create mode 100644 vendor/github.com/oklog/ulid/v2/LICENSE create mode 100644 vendor/github.com/oklog/ulid/v2/README.md create mode 100644 vendor/github.com/oklog/ulid/v2/ulid.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity/strings.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/priority_queue.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness/staleness.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go rename vendor/github.com/prometheus/{prometheus/storage/remote/otlptranslator/prometheus => otlptranslator}/metric_name_builder.go (89%) delete mode 100644 vendor/github.com/prometheus/otlptranslator/metric_namer.go delete mode 100644 vendor/github.com/prometheus/otlptranslator/metric_type.go delete mode 100644 vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go delete mode 100644 vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go create mode 100644 vendor/github.com/prometheus/prometheus/notifier/alert.go create mode 100644 vendor/github.com/prometheus/prometheus/notifier/alertmanager.go create mode 100644 vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go rename vendor/github.com/prometheus/prometheus/notifier/{notifier.go => manager.go} (54%) create mode 100644 vendor/github.com/prometheus/prometheus/notifier/metric.go create mode 100644 vendor/github.com/prometheus/prometheus/notifier/util.go create mode 100644 vendor/github.com/prometheus/prometheus/prompb/buf.gen.yaml create mode 100644 vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go create mode 100644 vendor/github.com/prometheus/prometheus/promql/durations.go delete mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go delete mode 100644 
vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go create mode 100644 vendor/github.com/prometheus/prometheus/util/compression/buffers.go create mode 100644 vendor/github.com/prometheus/prometheus/util/compression/compression.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/.gitignore create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/LICENSE create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/README.md create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/counter.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/map.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/mapof.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/util.go create mode 100644 vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go delete mode 100644 vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go delete mode 100644 vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go rename vendor/go.opentelemetry.io/collector/{config/configtelemetry => confmap}/LICENSE (100%) create mode 100644 vendor/go.opentelemetry.io/collector/confmap/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/confmap/README.md create mode 100644 vendor/go.opentelemetry.io/collector/confmap/confmap.go create mode 100644 vendor/go.opentelemetry.io/collector/confmap/converter.go create mode 100644 vendor/go.opentelemetry.io/collector/confmap/expand.go create mode 100644 vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go create mode 100644 vendor/go.opentelemetry.io/collector/confmap/merge.go create mode 100644 vendor/go.opentelemetry.io/collector/confmap/metadata.yaml create mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider.go create mode 100644 vendor/go.opentelemetry.io/collector/confmap/resolver.go create mode 100644 vendor/go.opentelemetry.io/collector/confmap/xconfmap/LICENSE rename vendor/go.opentelemetry.io/collector/{config/configtelemetry => confmap/xconfmap}/Makefile (100%) create mode 100644 vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/README.md create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/flag.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/gate.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/registry.go create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/stage.go create mode 100644 vendor/go.opentelemetry.io/collector/internal/telemetry/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/internal/telemetry/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go create mode 100644 vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_provider.go create mode 100644 
vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go create mode 100644 vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go create mode 100644 vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go create mode 100644 vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go create mode 100644 vendor/go.opentelemetry.io/collector/processor/internal/err.go create mode 100644 vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go create mode 100644 vendor/go.opentelemetry.io/otel/log/DESIGN.md create mode 100644 vendor/go.opentelemetry.io/otel/log/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/log/README.md create mode 100644 vendor/go.opentelemetry.io/otel/log/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/log/embedded/README.md create mode 100644 vendor/go.opentelemetry.io/otel/log/embedded/embedded.go create mode 100644 vendor/go.opentelemetry.io/otel/log/global/README.md create mode 100644 vendor/go.opentelemetry.io/otel/log/global/log.go create mode 100644 vendor/go.opentelemetry.io/otel/log/internal/global/log.go create mode 100644 vendor/go.opentelemetry.io/otel/log/internal/global/state.go create mode 100644 vendor/go.opentelemetry.io/otel/log/keyvalue.go create mode 100644 vendor/go.opentelemetry.io/otel/log/kind_string.go create mode 100644 vendor/go.opentelemetry.io/otel/log/logger.go create mode 100644 vendor/go.opentelemetry.io/otel/log/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/log/record.go create mode 100644 vendor/go.opentelemetry.io/otel/log/severity.go create mode 100644 vendor/go.opentelemetry.io/otel/log/severity_string.go diff --git a/clients/pkg/promtail/promtail_test.go b/clients/pkg/promtail/promtail_test.go index d85781bd05..3a12aa4da7 100644 --- a/clients/pkg/promtail/promtail_test.go +++ b/clients/pkg/promtail/promtail_test.go @@ -536,7 +536,7 @@ func parsePromMetrics(t *testing.T, bytes []byte, contentType string, metricName case textparse.EntrySeries: var res labels.Labels _, _, v := pr.Series() - pr.Metric(&res) + pr.Labels(&res) switch res.Get(labels.MetricName) { case metricName: rb[res.Get(label)] = v diff --git a/clients/pkg/promtail/wal/wal.go b/clients/pkg/promtail/wal/wal.go index adf7eeb45e..1dc1f054b9 100644 --- a/clients/pkg/promtail/wal/wal.go +++ b/clients/pkg/promtail/wal/wal.go @@ -8,6 +8,7 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" "github.com/grafana/loki/v3/pkg/ingester/wal" util_log "github.com/grafana/loki/v3/pkg/util/log" @@ -38,7 +39,7 @@ type wrapper struct { func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (WAL, error) { // TODO: We should fine-tune the WAL instantiated here to allow some buffering of written entries, but not written to disk // yet. This will attest for the lack of buffering in the channel Writer exposes. 
- tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(log), registerer, cfg.Dir, wlog.DefaultSegmentSize, wlog.CompressionNone) + tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(log), registerer, cfg.Dir, wlog.DefaultSegmentSize, compression.None) if err != nil { return nil, fmt.Errorf("failde to create tsdb WAL: %w", err) } diff --git a/go.mod b/go.mod index 0c624eb58d..a511c530ff 100644 --- a/go.mod +++ b/go.mod @@ -84,8 +84,8 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.22.0 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.62.0 - github.com/prometheus/prometheus v0.302.1 + github.com/prometheus/common v0.64.0 + github.com/prometheus/prometheus v0.304.1 github.com/redis/go-redis/v9 v9.10.0 github.com/segmentio/fasthash v1.0.3 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c @@ -138,7 +138,7 @@ require ( github.com/parquet-go/parquet-go v0.25.1 github.com/prometheus/alertmanager v0.28.1 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea + github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb github.com/prometheus/sigv4 v0.1.2 github.com/richardartoul/molecule v1.0.0 github.com/schollz/progressbar/v3 v3.18.0 @@ -195,15 +195,20 @@ require ( github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-redsync/redsync/v4 v4.13.0 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/google/flatbuffers v25.2.10+incompatible // indirect - github.com/gophercloud/gophercloud/v2 v2.4.0 // indirect + github.com/gophercloud/gophercloud/v2 v2.7.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/grafana/otel-profiling-go v0.5.1 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/jaegertracing/jaeger-idl v0.5.0 // indirect github.com/kamstrup/intmap v0.5.1 // indirect + github.com/knadh/koanf/maps v0.1.2 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.2 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/mattn/go-localereader v0.0.1 // indirect @@ -220,14 +225,16 @@ require ( github.com/muesli/termenv v0.16.0 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/ncw/swift v1.0.53 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 // indirect + github.com/oklog/ulid/v2 v2.1.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/xattr v0.4.10 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect 
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/sahilm/fuzzy v0.1.1 // indirect @@ -243,15 +250,20 @@ require ( github.com/zeebo/errs v1.4.0 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/collector/component v0.118.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect - go.opentelemetry.io/collector/consumer v1.24.0 // indirect - go.opentelemetry.io/collector/pipeline v0.118.0 // indirect - go.opentelemetry.io/collector/processor v0.118.0 // indirect + go.opentelemetry.io/collector/component v1.30.0 // indirect + go.opentelemetry.io/collector/confmap v1.30.0 // indirect + go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect + go.opentelemetry.io/collector/consumer v1.30.0 // indirect + go.opentelemetry.io/collector/featuregate v1.30.0 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.124.0 // indirect + go.opentelemetry.io/collector/pipeline v0.124.0 // indirect + go.opentelemetry.io/collector/processor v1.30.0 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 // indirect go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect + go.opentelemetry.io/otel/log v0.11.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect @@ -268,9 +280,9 @@ require ( cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect cloud.google.com/go/longrunning v0.6.7 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 // indirect @@ -280,7 +292,7 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect github.com/Code-Hex/go-generics-cache v1.5.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect @@ -309,7 +321,7 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/digitalocean/godo v1.132.0 // indirect + github.com/digitalocean/godo 
v1.144.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-connections v0.5.0 // indirect @@ -379,7 +391,7 @@ require ( github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.63 // indirect + github.com/miekg/dns v1.1.65 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -393,7 +405,7 @@ require ( github.com/oschwald/maxminddb-golang v1.13.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/exporter-toolkit v0.13.2 // indirect + github.com/prometheus/exporter-toolkit v0.14.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rs/xid v1.6.0 // indirect @@ -413,7 +425,7 @@ require ( go.etcd.io/etcd/client/v3 v3.5.4 // indirect go.mongodb.org/mongo-driver v1.17.2 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.124.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 go.opentelemetry.io/otel v1.36.0 @@ -431,7 +443,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect k8s.io/api v0.32.3 // indirect - k8s.io/client-go v0.32.1 // indirect + k8s.io/client-go v0.32.3 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect rsc.io/binaryregexp v0.2.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect diff --git a/go.sum b/go.sum index 3394af3858..cc53f59ef2 100644 --- a/go.sum +++ b/go.sum @@ -66,14 +66,14 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0/go.mod h1:kUjrAo8bgEwLeZ/CmHqNl3Z/kPm7y6FKfxxK0izYUg4= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= 
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= @@ -113,8 +113,8 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4= -github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= @@ -355,8 +355,8 @@ github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsY github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ= -github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.144.0 h1:rDCsmpwcDe5egFQ3Ae45HTde685/GzX037mWRMPufW0= +github.com/digitalocean/godo v1.144.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -513,8 +513,8 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA= github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ= -github.com/go-resty/resty/v2 v2.16.3 
h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E= -github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= +github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= +github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= @@ -647,8 +647,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= -github.com/gophercloud/gophercloud/v2 v2.4.0 h1:XhP5tVEH3ni66NSNK1+0iSO6kaGPH/6srtx6Cr+8eCg= -github.com/gophercloud/gophercloud/v2 v2.4.0/go.mod h1:uJWNpTgJPSl2gyzJqcU/pIAhFUWvIkp8eE8M15n9rs4= +github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E= +github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -695,8 +695,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -763,8 +763,8 @@ github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY= github.com/heroku/x v0.4.3 h1:HF1P4Mu79BKDVk4pt+oRDpcOSTRTpHq28RYAOkuJmds= github.com/heroku/x v0.4.3/go.mod h1:htQnSDQPP7rNbrOQ8rczL7tbdNtQHXCPoSxYomu+eI8= -github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOydS7Ju4BERB4Q= -github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA= +github.com/hetznercloud/hcloud-go/v2 v2.21.0 h1:wUpQT+fgAxIcdMtFvuCJ78ziqc/VARubpOQPQyj4Q84= +github.com/hetznercloud/hcloud-go/v2 v2.21.0/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc= github.com/hexops/gotextdiff 
v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -781,8 +781,8 @@ github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68Z github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= github.com/influxdata/telegraf v1.34.1 h1:BWnIm52buIBv1hPRoMFNBE/wuoSZ0Yeny4EP0ngMSbE= github.com/influxdata/telegraf v1.34.1/go.mod h1:F/4F/nmAKRZlDNhrD5aIQi+AaiHaiNKku0kJFsF6iag= -github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k= -github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= +github.com/ionos-cloud/sdk-go/v6 v6.3.3 h1:q33Sw1ZqsvqDkFaKG53dGk7BCOvPCPbGZpYqsF6tdjw= +github.com/ionos-cloud/sdk-go/v6 v6.3.3/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48= github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE= github.com/jaegertracing/jaeger-idl v0.5.0/go.mod h1:ON90zFo9eoyXrt9F/KN8YeF3zxcnujaisMweFY/rg5k= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= @@ -825,8 +825,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kamstrup/intmap v0.5.1 h1:ENGAowczZA+PJPYYlreoqJvWgQVtAmX1l899WfYFVK0= github.com/kamstrup/intmap v0.5.1/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -839,8 +839,8 @@ github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2 github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= -github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= @@ -870,8 +870,8 @@ github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39 github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod 
h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU= -github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk= +github.com/linode/linodego v1.49.0 h1:MNd3qwvQzbXB5mCpvdCqlUIu1RPA9oC+50LyB9kK+GQ= +github.com/linode/linodego v1.49.0/go.mod h1:B+HAM3//4w1wOS0BwdaQBKwBxlfe6kYJ7bSC6jJ/xtc= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= @@ -906,8 +906,8 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY= -github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= +github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc= +github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= @@ -990,6 +990,8 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= +github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1001,14 +1003,14 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 
h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 h1:jOG1ceAx+IATloKXHsE2Cy88XTgqPB/hiXicOrxENx8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1/go.mod h1:mtNCoy09iO1f2zy5bEqkyRfRPaNKea57yK63cfHixts= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1 h1:G2daAIXiQhAwQSz9RK71QsBH9rmH/m/vdkFuGIEPfS4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1/go.mod h1:/WAA1PKvHNz7E5SrtGg2KfAWl/PrmS0FVYOanoGxk0I= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 h1:mMVzpkpy6rKL1Q/xXNogZVtWebIlxTRzhsgp3b9ioCM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1/go.mod h1:jM8Gsd0fIiwRzWrzd7Gm6PZYi5AgHPRkz0625Rtqyxo= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 h1:gmmzhgewk2fU0Md0vmaDEFgfRycfCfjgPvMA4SEdKiU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1/go.mod h1:AsQJBuUUY1/yqK2c87hv4deeteaKwktwLIfQCN2OGk4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -1035,14 +1037,15 @@ github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo= github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU= github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o= -github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= -github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw= +github.com/ovh/go-ovh v1.7.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo= github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod 
h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= @@ -1101,14 +1104,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= +github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= -github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g= -github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea h1:NacrTIqDsM6iOtfex6OAFvVmtxjbiLC2a34/ba6nM9Q= -github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea/go.mod h1:v1PzmPjSnNkmZSDvKJ9OmsWcmWMEF5+JdllEcXrRfzM= +github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= +github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= +github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb h1:wuS7VydG/rDWTbYMp07paPv3R1hiPC9WgingWs+xgi0= +github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb/go.mod h1:M7gjuJF83qnpgElJIPfhiK+YAHlvot5epcAV+Rie7eo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1119,10 +1122,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.302.1 h1:xqVdrwrB4WNpdgJqxsz5loqFWNUZitsK8myqLuSZ6Ag= -github.com/prometheus/prometheus v0.302.1/go.mod h1:YcyCoTbUR/TM8rY3Aoeqr0AWTu/pu1Ehh+trpX3eRzg= +github.com/prometheus/prometheus v0.304.1 h1:e4kpJMb2Vh/PcR6LInake+ofcvFYHT+bCfmBvOkaZbY= +github.com/prometheus/prometheus v0.304.1/go.mod h1:ioGx2SGKTY+fLnJSQCdTHqARVldGNS8OlIe3kvp98so= github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8= github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod 
h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1149,8 +1154,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk= github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -1312,38 +1317,44 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= -go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= -go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= -go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= -go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= -go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= -go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= -go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w= -go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= -go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= -go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= -go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= -go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= -go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod 
h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/component v1.30.0 h1:HXjqBHaQ47/EEuWdnkjr4Y3kRWvmyWIDvqa1Q262Fls= +go.opentelemetry.io/collector/component v1.30.0/go.mod h1:vfM9kN+BM6oHBXWibquiprz8CVawxd4/aYy3nbhme3E= +go.opentelemetry.io/collector/component/componentstatus v0.124.0 h1:0WHaANNktxLIk+lN+CtgPBESI1MJBrfVW/LvNCbnMQ4= +go.opentelemetry.io/collector/component/componentstatus v0.124.0/go.mod h1:a/wa8nxJGWOGuLwCN8gHCzFHCaUVZ+VyUYuKz9Yaq38= +go.opentelemetry.io/collector/component/componenttest v0.124.0 h1:Wsc+DmDrWTFs/aEyjDA3slNwV+h/0NOyIR5Aywvr6Zw= +go.opentelemetry.io/collector/component/componenttest v0.124.0/go.mod h1:NQ4ATOzMFc7QA06B993tq8o27DR0cu/JR/zK7slGJ3E= +go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7YVnyAEN2dAcM= +go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA= +go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8= +go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0= +go.opentelemetry.io/collector/consumer v1.30.0 h1:Nn6kFTH+EJbv13E0W+sNvWrTgbiFCRv8f6DaA2F1DQs= +go.opentelemetry.io/collector/consumer v1.30.0/go.mod h1:edRyfk61ugdhCQ93PBLRZfYMVWjdMPpKP8z5QLyESf0= +go.opentelemetry.io/collector/consumer/consumertest v0.124.0 h1:2arChG4RPrHW3lfVWlK/KDF7Y7qkUm/YAiBXh8oTue0= +go.opentelemetry.io/collector/consumer/consumertest v0.124.0/go.mod h1:Hlu+EXbINHxVAyIT1baKO2d0j5odR3fLlLAiaP+JqQg= +go.opentelemetry.io/collector/consumer/xconsumer v0.124.0 h1:/cut96EWVNoz6lIeGI9+EzS6UClMtnZkx5YIpkD0Xe0= +go.opentelemetry.io/collector/consumer/xconsumer v0.124.0/go.mod h1:fHH/MpzFCRNk/4foiYE6BoXQCAMf5sJTO35uvzVrrd4= +go.opentelemetry.io/collector/featuregate v1.30.0 h1:mx7+iP/FQnY7KO8qw/xE3Qd1MQkWcU8VgcqLNrJ8EU8= +go.opentelemetry.io/collector/featuregate v1.30.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= +go.opentelemetry.io/collector/internal/telemetry v0.124.0 h1:kzd1/ZYhLj4bt2pDB529mL4rIRrRacemXodFNxfhdWk= +go.opentelemetry.io/collector/internal/telemetry v0.124.0/go.mod h1:ZjXjqV0dJ+6D4XGhTOxg/WHjnhdmXsmwmUSgALea66Y= go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8= go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI= -go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= -go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= -go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= -go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= -go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= -go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= -go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= -go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= -go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= -go.opentelemetry.io/collector/processor/xprocessor v0.118.0 
h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= -go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= -go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= -go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/pdata/pprofile v0.124.0 h1:ZjL9wKqzP4BHj0/F1jfGxs1Va8B7xmYayipZeNVoWJE= +go.opentelemetry.io/collector/pdata/pprofile v0.124.0/go.mod h1:1EN3Gw5LSI4fSVma/Yfv/6nqeuYgRTm1/kmG5nE5Oyo= +go.opentelemetry.io/collector/pdata/testdata v0.124.0 h1:vY+pWG7CQfzzGSB5+zGYHQOltRQr59Ek9QiPe+rI+NY= +go.opentelemetry.io/collector/pdata/testdata v0.124.0/go.mod h1:lNH48lGhGv4CYk27fJecpsR1zYHmZjKgNrAprwjym0o= +go.opentelemetry.io/collector/pipeline v0.124.0 h1:hKvhDyH2GPnNO8LGL34ugf36sY7EOXPjBvlrvBhsOdw= +go.opentelemetry.io/collector/pipeline v0.124.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= +go.opentelemetry.io/collector/processor v1.30.0 h1:dxmu+sO6MzQydyrf2CON5Hm1KU7yV4ofH1stmreUtPk= +go.opentelemetry.io/collector/processor v1.30.0/go.mod h1:DjXAgelT8rfIWCTJP5kiPpxPqz4JLE1mJwsE2kJMTk8= +go.opentelemetry.io/collector/processor/processortest v0.124.0 h1:qcyo0dSWmgpNFxjObsKk3Rd/wWV8CkMevd+jApkTQWE= +go.opentelemetry.io/collector/processor/processortest v0.124.0/go.mod h1:1YDTxd4c/uVU3Ui1+AzvYW94mo5DbhNmB1xSof6zvD0= +go.opentelemetry.io/collector/processor/xprocessor v0.124.0 h1:KAe8gIje8TcB8varZ4PDy0HV5xX5rNdaQ7q46BE915w= +go.opentelemetry.io/collector/processor/xprocessor v0.124.0/go.mod h1:ItJBBlR6/141vg1v4iRrcsBrGjPCgmXAztxS2x2YkdI= +go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA= +go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= +go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU= +go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0= go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= @@ -1361,12 +1372,14 @@ go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 
h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= +go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y= +go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= @@ -1850,8 +1863,8 @@ k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= diff --git a/pkg/chunkenc/symbols.go b/pkg/chunkenc/symbols.go index cf1fe25256..28700cca28 100644 --- a/pkg/chunkenc/symbols.go +++ b/pkg/chunkenc/symbols.go @@ -45,14 +45,12 @@ type symbolizer struct { readOnly bool // Runtime-only map to track which symbols are label names and have been normalized normalizedNames map[uint32]string - normalizer *otlptranslator.LabelNamer } func newSymbolizer() *symbolizer { return &symbolizer{ symbolsMap: map[string]uint32{}, normalizedNames: map[uint32]string{}, - normalizer: &otlptranslator.LabelNamer{}, } } @@ -125,7 +123,7 @@ func (s *symbolizer) Lookup(syms symbols, buf *log.BufferedLabelsBuilder) labels } else { // If we haven't seen this name before, look it up and normalize it name = s.lookup(symbol.Name) - normalized := s.normalizer.Build(name) + normalized := otlptranslator.NormalizeLabel(name) s.mtx.Lock() s.normalizedNames[symbol.Name] = normalized s.mtx.Unlock() @@ -340,7 +338,6 @@ func symbolizerFromCheckpoint(b []byte) *symbolizer { // Labels are key-value pairs, preallocate to half the number to store just the keys, // likely less memory than the exponential growth Go will do. 
normalizedNames: make(map[uint32]string, numLabels/2), - normalizer: &otlptranslator.LabelNamer{}, } for i := 0; i < numLabels; i++ { @@ -371,7 +368,6 @@ func symbolizerFromEnc(b []byte, pool compression.ReaderPool) (*symbolizer, erro labels: make([]string, 0, numLabels), // Same as symbolizerFromCheckpoint normalizedNames: make(map[uint32]string, numLabels/2), - normalizer: &otlptranslator.LabelNamer{}, compressedSize: len(b), readOnly: true, } diff --git a/pkg/compactor/deletion/job_runner_test.go b/pkg/compactor/deletion/job_runner_test.go index 0ec39714f4..aa352836a4 100644 --- a/pkg/compactor/deletion/job_runner_test.go +++ b/pkg/compactor/deletion/job_runner_test.go @@ -11,7 +11,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" - "github.com/grafana/loki/pkg/push" "github.com/grafana/loki/v3/pkg/chunkenc" "github.com/grafana/loki/v3/pkg/compactor/jobqueue" "github.com/grafana/loki/v3/pkg/compactor/retention" @@ -20,6 +19,8 @@ import ( "github.com/grafana/loki/v3/pkg/logql/syntax" "github.com/grafana/loki/v3/pkg/storage/chunk" "github.com/grafana/loki/v3/pkg/storage/chunk/client" + + "github.com/grafana/loki/pkg/push" ) type mockChunkClient struct { diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 6786495887..15f75a9b95 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -15,9 +15,16 @@ import ( "time" "unicode/utf8" + otlptranslate "github.com/prometheus/otlptranslator" + "go.opentelemetry.io/otel/trace" + "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gogo/status" + "github.com/prometheus/prometheus/model/labels" + "github.com/twmb/franz-go/pkg/kgo" + "google.golang.org/grpc/codes" + "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/limiter" @@ -29,12 +36,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/otlptranslator" - "github.com/prometheus/prometheus/model/labels" - "github.com/twmb/franz-go/pkg/kgo" - "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" - "google.golang.org/grpc/codes" "github.com/grafana/loki/v3/pkg/analytics" "github.com/grafana/loki/v3/pkg/compactor/retention" @@ -211,9 +213,6 @@ type Distributor struct { kafkaWriteBytesTotal prometheus.Counter kafkaWriteLatency prometheus.Histogram kafkaRecordsPerRequest prometheus.Histogram - - // OTLP Label Normalizer - normalizer *otlptranslator.LabelNamer } // New a distributor creates. 
@@ -374,7 +373,6 @@ func New( partitionRing: partitionRing, ingestLimits: newIngestLimits(limitsFrontendClient, registerer), numMetadataPartitions: numMetadataPartitions, - normalizer: &otlptranslator.LabelNamer{}, } if overrides.IngestionRateStrategy() == validation.GlobalIngestionRateStrategy { @@ -648,7 +646,7 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe var normalized string structuredMetadata := logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata) for i := range entry.StructuredMetadata { - normalized = d.normalizer.Build(structuredMetadata[i].Name) + normalized = otlptranslate.NormalizeLabel(structuredMetadata[i].Name) if normalized != structuredMetadata[i].Name { structuredMetadata[i].Name = normalized d.tenantPushSanitizedStructuredMetadata.WithLabelValues(tenantID).Inc() diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 0df163e4de..0df0a6c344 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -14,6 +14,10 @@ import ( "time" "unicode/utf8" + "github.com/prometheus/client_golang/prometheus/testutil" + + otlptranslate "github.com/prometheus/otlptranslator" + "github.com/c2h5oh/datasize" "github.com/go-kit/log" "github.com/grafana/dskit/flagext" @@ -26,9 +30,7 @@ import ( "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" - "github.com/prometheus/otlptranslator" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2067,8 +2069,6 @@ func (i *mockIngester) Push(_ context.Context, in *logproto.PushRequest, _ ...gr time.Sleep(i.succeedAfter) } - normalizer := &otlptranslator.LabelNamer{} - i.mu.Lock() defer i.mu.Unlock() for _, s := range in.Streams { @@ -2077,7 +2077,7 @@ func (i *mockIngester) Push(_ context.Context, in *logproto.PushRequest, _ ...gr if strings.ContainsRune(sm.Value, utf8.RuneError) { return nil, fmt.Errorf("sm value was not sanitized before being pushed to ignester, invalid utf 8 rune %d", utf8.RuneError) } - if sm.Name != normalizer.Build(sm.Name) { + if sm.Name != otlptranslate.NormalizeLabel(sm.Name) { return nil, fmt.Errorf("sm name was not sanitized before being sent to ingester, contained characters %s", sm.Name) } diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go index b8c3d39e2f..73b40f0857 100644 --- a/pkg/ingester/checkpoint.go +++ b/pkg/ingester/checkpoint.go @@ -18,6 +18,7 @@ import ( tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" prompool "github.com/prometheus/prometheus/util/pool" "github.com/grafana/loki/v3/pkg/chunkenc" @@ -348,7 +349,7 @@ func (w *WALCheckpointWriter) Advance() (bool, error) { return false, fmt.Errorf("create checkpoint dir: %w", err) } - checkpoint, err := wlog.NewSize(util_log.SlogFromGoKit(log.With(util_log.Logger, "component", "checkpoint_wal")), nil, checkpointDirTemp, walSegmentSize, wlog.CompressionNone) + checkpoint, err := wlog.NewSize(util_log.SlogFromGoKit(log.With(util_log.Logger, "component", "checkpoint_wal")), nil, checkpointDirTemp, walSegmentSize, compression.None) if err != nil { return false, fmt.Errorf("open checkpoint: %w", err) } diff --git a/pkg/ingester/wal.go 
b/pkg/ingester/wal.go index 06bdd2cb21..429f50388f 100644 --- a/pkg/ingester/wal.go +++ b/pkg/ingester/wal.go @@ -10,6 +10,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" "github.com/grafana/loki/v3/pkg/ingester/wal" "github.com/grafana/loki/v3/pkg/util/flagext" @@ -82,7 +83,7 @@ func newWAL(cfg WALConfig, registerer prometheus.Registerer, metrics *ingesterMe return noopWAL{}, nil } - tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(util_log.Logger), registerer, cfg.Dir, walSegmentSize, wlog.CompressionNone) + tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(util_log.Logger), registerer, cfg.Dir, walSegmentSize, compression.None) if err != nil { return nil, err } diff --git a/pkg/loghttp/push/otlp.go b/pkg/loghttp/push/otlp.go index b68d1b93aa..70c8d92cd3 100644 --- a/pkg/loghttp/push/otlp.go +++ b/pkg/loghttp/push/otlp.go @@ -522,13 +522,12 @@ func attributesToLabels(attrs pcommon.Map, prefix string) push.LabelsAdapter { func attributeToLabels(k string, v pcommon.Value, prefix string) push.LabelsAdapter { var labelsAdapter push.LabelsAdapter - normalizer := &otlptranslator.LabelNamer{} keyWithPrefix := k if prefix != "" { keyWithPrefix = prefix + "_" + k } - keyWithPrefix = normalizer.Build(keyWithPrefix) + keyWithPrefix = otlptranslator.NormalizeLabel(keyWithPrefix) typ := v.Type() if typ == pcommon.ValueTypeMap { diff --git a/pkg/querier/queryrange/queryrangebase/results_cache.go b/pkg/querier/queryrange/queryrangebase/results_cache.go index 214b4124c7..644621d2c5 100644 --- a/pkg/querier/queryrange/queryrangebase/results_cache.go +++ b/pkg/querier/queryrange/queryrangebase/results_cache.go @@ -267,7 +267,11 @@ func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool { } // This resolves the start() and end() used with the @ modifier. 
- expr = promql.PreprocessExpr(expr, r.GetStart(), r.GetEnd()) + expr, err = promql.PreprocessExpr(expr, r.GetStart(), r.GetEnd()) + if err != nil { + level.Warn(s.logger).Log("msg", "failed to preprocess query, considering @ modifier as not cachable", "query", query, "err", err) + return false + } end := r.GetEnd().UnixMilli() atModCachable := true diff --git a/pkg/ruler/base/api_test.go b/pkg/ruler/base/api_test.go index df5a9d74ae..574bd0def8 100644 --- a/pkg/ruler/base/api_test.go +++ b/pkg/ruler/base/api_test.go @@ -654,55 +654,19 @@ func TestRuler_GetRulesLabelFilter(t *testing.T) { "test": { { Name: "group1", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{ - Value: "UP_RULE", - Tag: "!!str", - Kind: 8, - Line: 5, - Column: 19, - }, - Expr: yaml.Node{ - Value: "up", - Tag: "!!str", - Kind: 8, - Line: 6, - Column: 17, - }, + Record: "UP_RULE", + Expr: "up", }, { - Alert: yaml.Node{ - Value: "UP_ALERT", - Tag: "!!str", - Kind: 8, - Line: 7, - Column: 18, - }, - Expr: yaml.Node{ - Value: "up < 1", - Tag: "!!str", - Kind: 8, - Line: 8, - Column: 17, - }, + Alert: "UP_ALERT", + Expr: "up < 1", Labels: map[string]string{"foo": "bar"}, }, { - Alert: yaml.Node{ - Value: "DOWN_ALERT", - Tag: "!!str", - Kind: 8, - Line: 11, - Column: 18, - }, - Expr: yaml.Node{ - Value: "down < 1", - Tag: "!!str", - Kind: 8, - Line: 12, - Column: 17, - }, + Alert: "DOWN_ALERT", + Expr: "down < 1", Labels: map[string]string{"namespace": "delta"}, }, }, @@ -714,39 +678,15 @@ func TestRuler_GetRulesLabelFilter(t *testing.T) { "test": { { Name: "group1", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{ - Value: "UP_ALERT", - Tag: "!!str", - Kind: 8, - Line: 5, - Column: 18, - }, - Expr: yaml.Node{ - Value: "up < 1", - Tag: "!!str", - Kind: 8, - Line: 6, - Column: 17, - }, + Alert: "UP_ALERT", + Expr: "up < 1", Labels: map[string]string{"foo": "bar"}, }, { - Alert: yaml.Node{ - Value: "DOWN_ALERT", - Tag: "!!str", - Kind: 8, - Line: 9, - Column: 18, - }, - Expr: yaml.Node{ - Value: "down < 1", - Tag: "!!str", - Kind: 8, - Line: 10, - Column: 17, - }, + Alert: "DOWN_ALERT", + Expr: "down < 1", Labels: map[string]string{"namespace": "delta"}, }, }, diff --git a/pkg/ruler/base/manager.go b/pkg/ruler/base/manager.go index 7b3ab42178..df21d55ae3 100644 --- a/pkg/ruler/base/manager.go +++ b/pkg/ruler/base/manager.go @@ -20,6 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "golang.org/x/net/context/ctxhttp" + "gopkg.in/yaml.v3" "github.com/grafana/loki/v3/pkg/ruler/rulespb" ) @@ -300,12 +301,17 @@ func (*DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []error } for i, r := range g.Rules { - for _, err := range r.Validate() { + ruleNode := rulefmt.RuleNode{ + Record: yaml.Node{Value: r.Record}, + Alert: yaml.Node{Value: r.Alert}, + Expr: yaml.Node{Value: r.Expr}, + } + for _, err := range r.Validate(ruleNode) { var ruleName string - if r.Alert.Value != "" { - ruleName = r.Alert.Value + if r.Alert != "" { + ruleName = r.Alert } else { - ruleName = r.Record.Value + ruleName = r.Record } errs = append(errs, &rulefmt.Error{ Group: g.Name, diff --git a/pkg/ruler/base/mapper_test.go b/pkg/ruler/base/mapper_test.go index a5519e1448..4eed3cbe94 100644 --- a/pkg/ruler/base/mapper_test.go +++ b/pkg/ruler/base/mapper_test.go @@ -10,7 +10,6 @@ import ( "github.com/prometheus/prometheus/model/rulefmt" "github.com/spf13/afero" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" ) var ( @@ -36,31 +35,28 @@ var ( ) func 
setupRuleSets() { - recordNode := yaml.Node{} - recordNode.SetString("example_rule") - exprNode := yaml.Node{} - exprNode.SetString("example_expr") - recordNodeUpdated := yaml.Node{} - recordNodeUpdated.SetString("example_ruleupdated") - exprNodeUpdated := yaml.Node{} - exprNodeUpdated.SetString("example_exprupdated") + + record := "example_rule" + expr := "example_expr" + recordUpdated := "example_ruleupdated" + exprUpdated := "example_exprupdated" initialRuleSet = map[string][]rulefmt.RuleGroup{ "file /one": { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, { Name: "rulegroup_two", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, @@ -70,19 +66,19 @@ func setupRuleSets() { "file /one": { { Name: "rulegroup_two", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, @@ -92,28 +88,28 @@ func setupRuleSets() { "file /one": { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, { Name: "rulegroup_two", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, { Name: "rulegroup_three", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, @@ -123,19 +119,19 @@ func setupRuleSets() { "file /one": { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, { Name: "rulegroup_two", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, @@ -143,10 +139,10 @@ func setupRuleSets() { "file /two": { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, @@ -156,19 +152,19 @@ func setupRuleSets() { "file /one": { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, { Name: "rulegroup_two", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, @@ -176,10 +172,10 @@ func setupRuleSets() { "file /two": { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNodeUpdated, - Expr: exprNodeUpdated, + Record: recordUpdated, + Expr: exprUpdated, }, }, }, @@ -189,19 +185,19 @@ func setupRuleSets() { "file /one": { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, { Name: "rulegroup_two", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, @@ -211,10 +207,10 @@ func setupRuleSets() { specialCharFile: { { Name: "rulegroup_one", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: 
recordNode, - Expr: exprNode, + Record: record, + Expr: expr, }, }, }, diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index a2bd951b73..d5f7ad4a84 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -236,7 +236,7 @@ func ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) { set[g.Name] = struct{}{} for _, r := range g.Rules { - if err := validateRuleNode(&r, g.Name); err != nil { + if err := validateRule(&r, g.Name); err != nil { errs = append(errs, err) } } @@ -245,38 +245,38 @@ func ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) { return errs } -func validateRuleNode(r *rulefmt.RuleNode, groupName string) error { - if r.Record.Value != "" && r.Alert.Value != "" { +func validateRule(r *rulefmt.Rule, groupName string) error { + if r.Record != "" && r.Alert != "" { return errors.Errorf("only one of 'record' and 'alert' must be set") } - if r.Record.Value == "" && r.Alert.Value == "" { + if r.Record == "" && r.Alert == "" { return errors.Errorf("one of 'record' or 'alert' must be set") } - if r.Expr.Value == "" { + if r.Expr == "" { return errors.Errorf("field 'expr' must be set in rule") - } else if _, err := syntax.ParseExpr(r.Expr.Value); err != nil { - if r.Record.Value != "" { - return errors.Wrapf(err, "could not parse expression for record '%s' in group '%s'", r.Record.Value, groupName) + } else if _, err := syntax.ParseExpr(r.Expr); err != nil { + if r.Record != "" { + return errors.Wrapf(err, "could not parse expression for record '%s' in group '%s'", r.Record, groupName) } - return errors.Wrapf(err, "could not parse expression for alert '%s' in group '%s'", r.Alert.Value, groupName) + return errors.Wrapf(err, "could not parse expression for alert '%s' in group '%s'", r.Alert, groupName) } - if r.Record.Value != "" { + if r.Record != "" { if len(r.Annotations) > 0 { return errors.Errorf("invalid field 'annotations' in recording rule") } if r.For != 0 { return errors.Errorf("invalid field 'for' in recording rule") } - if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { - return errors.Errorf("invalid recording rule name: %s", r.Record.Value) + if !model.IsValidLegacyMetricName(r.Record) { + return errors.Errorf("invalid recording rule name: %s", r.Record) } } for k, v := range r.Labels { - if !model.LabelName(k).IsValid() || k == model.MetricNameLabel { + if !model.LabelName(k).IsValidLegacy() || k == model.MetricNameLabel { return errors.Errorf("invalid label name: %s", k) } @@ -286,7 +286,7 @@ func validateRuleNode(r *rulefmt.RuleNode, groupName string) error { } for k := range r.Annotations { - if !model.LabelName(k).IsValid() { + if !model.LabelName(k).IsValidLegacy() { return errors.Errorf("invalid annotation name: %s", k) } } @@ -300,8 +300,8 @@ func validateRuleNode(r *rulefmt.RuleNode, groupName string) error { // testTemplateParsing checks if the templates used in labels and annotations // of the alerting rules are parsed correctly. -func testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) { - if rl.Alert.Value == "" { +func testTemplateParsing(rl *rulefmt.Rule) (errs []error) { + if rl.Alert == "" { // Not an alerting rule. 
return errs } @@ -317,7 +317,7 @@ func testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) { tmpl := template.NewTemplateExpander( context.TODO(), strings.Join(append(defs, text), ""), - "__alert_"+rl.Alert.Value, + "__alert_"+rl.Alert, tmplData, model.Time(timestamp.FromTime(time.Now())), nil, diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index a699da5062..0387731b94 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" - "github.com/grafana/loki/v3/pkg/iter" "github.com/grafana/loki/v3/pkg/logql" rulerbase "github.com/grafana/loki/v3/pkg/ruler/base" @@ -23,14 +21,14 @@ import ( func TestInvalidRuleGroup(t *testing.T) { ruleGroupValid := rulefmt.RuleGroup{ Name: "test", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{Value: "alert-1-name"}, - Expr: yaml.Node{Value: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)"}, + Alert: "alert-1-name", + Expr: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)", }, { - Alert: yaml.Node{Value: "record-1-name"}, - Expr: yaml.Node{Value: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)"}, + Alert: "record-1-name", + Expr: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)", }, }, } @@ -38,14 +36,14 @@ func TestInvalidRuleGroup(t *testing.T) { ruleGroupInValid := rulefmt.RuleGroup{ Name: "test", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{Value: "alert-1-name"}, - Expr: yaml.Node{Value: "bad_value"}, + Alert: "alert-1-name", + Expr: "bad_value", }, { - Record: yaml.Node{Value: "record-1-name"}, - Expr: yaml.Node{Value: "bad_value"}, + Record: "record-1-name", + Expr: "bad_value", }, }, } @@ -56,21 +54,21 @@ func TestInvalidRuleGroup(t *testing.T) { // TestInvalidRuleExprParsing tests that a validation error is raised when rule expression is invalid func TestInvalidRuleExprParsing(t *testing.T) { expectedAlertErrorMsg := "could not parse expression for alert 'alert-1-name' in group 'test': parse error" - alertRuleExprInvalid := &rulefmt.RuleNode{ - Alert: yaml.Node{Value: "alert-1-name"}, - Expr: yaml.Node{Value: "bad_value"}, + alertRuleExprInvalid := &rulefmt.Rule{ + Alert: "alert-1-name", + Expr: "bad_value", } - alertErr := validateRuleNode(alertRuleExprInvalid, "test") + alertErr := validateRule(alertRuleExprInvalid, "test") assert.Containsf(t, alertErr.Error(), expectedAlertErrorMsg, "expected error containing '%s', got '%s'", expectedAlertErrorMsg, alertErr) expectedRecordErrorMsg := "could not parse expression for record 'record-1-name' in group 'test': parse error" - recordRuleExprInvalid := &rulefmt.RuleNode{ - Record: yaml.Node{Value: "record-1-name"}, - Expr: yaml.Node{Value: "bad_value"}, + recordRuleExprInvalid := &rulefmt.Rule{ + Record: "record-1-name", + Expr: "bad_value", } - recordErr := validateRuleNode(recordRuleExprInvalid, "test") + recordErr := validateRule(recordRuleExprInvalid, "test") assert.Containsf(t, recordErr.Error(), expectedRecordErrorMsg, "expected error containing '%s', got '%s'", expectedRecordErrorMsg, recordErr) } diff --git a/pkg/ruler/grouploader.go b/pkg/ruler/grouploader.go index 37dfde3ecc..58383e7c7b 100644 --- a/pkg/ruler/grouploader.go +++ b/pkg/ruler/grouploader.go @@ -109,9 +109,9 @@ func (l *CachingGroupLoader) AlertingRules() []rulefmt.Rule { for _, g := range group.Groups { for _, rule := range g.Rules { rules = append(rules, rulefmt.Rule{ - Record: rule.Record.Value, - Alert: 
rule.Alert.Value, - Expr: rule.Expr.Value, + Record: rule.Record, + Alert: rule.Alert, + Expr: rule.Expr, For: rule.For, Labels: rule.Labels, Annotations: rule.Annotations, diff --git a/pkg/ruler/grouploader_test.go b/pkg/ruler/grouploader_test.go index 7685677ddf..06634003c3 100644 --- a/pkg/ruler/grouploader_test.go +++ b/pkg/ruler/grouploader_test.go @@ -7,17 +7,11 @@ import ( "testing" "github.com/pkg/errors" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/promql/parser" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" ) -func init() { - model.NameValidationScheme = model.LegacyValidation -} - func Test_GroupLoader(t *testing.T) { for _, tc := range []struct { desc string @@ -364,8 +358,8 @@ var ( ruleGroup1 = &rulefmt.RuleGroups{ Groups: []rulefmt.RuleGroup{ { - Rules: []rulefmt.RuleNode{ - {Alert: yaml.Node{Value: "alert-1-name"}}, + Rules: []rulefmt.Rule{ + {Alert: "alert-1-name"}, }, }, }, @@ -373,8 +367,8 @@ var ( ruleGroup2 = &rulefmt.RuleGroups{ Groups: []rulefmt.RuleGroup{ { - Rules: []rulefmt.RuleNode{ - {Alert: yaml.Node{Value: "alert-2-name"}}, + Rules: []rulefmt.Rule{ + {Alert: "alert-2-name"}, }, }, }, diff --git a/pkg/ruler/rulespb/compat.go b/pkg/ruler/rulespb/compat.go index 0c9de4185a..dc1f4dd4a3 100644 --- a/pkg/ruler/rulespb/compat.go +++ b/pkg/ruler/rulespb/compat.go @@ -6,7 +6,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" - "gopkg.in/yaml.v3" "github.com/grafana/loki/v3/pkg/logproto" //lint:ignore faillint allowed to import other protobuf ) @@ -24,13 +23,13 @@ func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc return &rg } -func formattedRuleToProto(rls []rulefmt.RuleNode) []*RuleDesc { +func formattedRuleToProto(rls []rulefmt.Rule) []*RuleDesc { rules := make([]*RuleDesc, len(rls)) for i := range rls { rules[i] = &RuleDesc{ - Expr: rls[i].Expr.Value, - Record: rls[i].Record.Value, - Alert: rls[i].Alert.Value, + Expr: rls[i].Expr, + Record: rls[i].Record, + Alert: rls[i].Alert, For: time.Duration(rls[i].For), Labels: logproto.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Labels)), Annotations: logproto.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Annotations)), @@ -45,29 +44,24 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup { formattedRuleGroup := rulefmt.RuleGroup{ Name: rg.GetName(), Interval: model.Duration(rg.Interval), - Rules: make([]rulefmt.RuleNode, len(rg.GetRules())), + Rules: make([]rulefmt.Rule, len(rg.GetRules())), Limit: int(rg.GetLimit()), } for i, rl := range rg.GetRules() { - exprNode := yaml.Node{} - exprNode.SetString(rl.GetExpr()) + expr := rl.GetExpr() - newRule := rulefmt.RuleNode{ - Expr: exprNode, + newRule := rulefmt.Rule{ + Expr: expr, Labels: logproto.FromLabelAdaptersToLabels(rl.Labels).Map(), Annotations: logproto.FromLabelAdaptersToLabels(rl.Annotations).Map(), For: model.Duration(rl.GetFor()), } if rl.GetRecord() != "" { - recordNode := yaml.Node{} - recordNode.SetString(rl.GetRecord()) - newRule.Record = recordNode + newRule.Record = rl.GetRecord() } else { - alertNode := yaml.Node{} - alertNode.SetString(rl.GetAlert()) - newRule.Alert = alertNode + newRule.Alert = rl.GetAlert() } formattedRuleGroup.Rules[i] = newRule diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go index 0644238b21..b70fc9fbb4 100644 --- 
a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go +++ b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go @@ -106,7 +106,7 @@ func TestListRules(t *testing.T) { func TestLoadRules(t *testing.T) { runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ interface{}) { groups := []testGroup{ - {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.RuleNode{{ + {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.Rule{{ For: model.Duration(5 * time.Minute), Labels: map[string]string{"label1": "value1"}, }}, Limit: 10}}, diff --git a/pkg/ruler/rulestore/local/local_test.go b/pkg/ruler/rulestore/local/local_test.go index ee6abc5b8e..6a2e35e287 100644 --- a/pkg/ruler/rulestore/local/local_test.go +++ b/pkg/ruler/rulestore/local/local_test.go @@ -30,10 +30,10 @@ func TestClient_LoadAllRuleGroups(t *testing.T) { { Name: "rule", Interval: model.Duration(100 * time.Second), - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Kind: yaml.ScalarNode, Value: "test_rule"}, - Expr: yaml.Node{Kind: yaml.ScalarNode, Value: "up"}, + Record: "test_rule", + Expr: "up", }, }, }, diff --git a/pkg/ruler/storage/instance/instance.go b/pkg/ruler/storage/instance/instance.go index 25501f9a51..eeaa2243be 100644 --- a/pkg/ruler/storage/instance/instance.go +++ b/pkg/ruler/storage/instance/instance.go @@ -284,6 +284,10 @@ func (n noopScrapeManager) Get() (*scrape.Manager, error) { return nil, errors.New("No-op Scrape manager not ready") } +func (n noopScrapeManager) Ready() bool { + return false +} + // initialize sets up the various Prometheus components with their initial // settings. initialize will be called each time the Instance is run. Prometheus // components cannot be reused after they are stopped so we need to recreate them @@ -304,7 +308,7 @@ func (i *Instance) initialize(_ context.Context, reg prometheus.Registerer, cfg // Setup the remote storage remoteLogger := log.With(i.logger, "component", "remote") - i.remoteStore = remote.NewStorage(util_log.SlogFromGoKit(remoteLogger), reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, noopScrapeManager{}, false) + i.remoteStore = remote.NewStorage(util_log.SlogFromGoKit(remoteLogger), reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, noopScrapeManager{}) err = i.remoteStore.ApplyConfig(&config.Config{ RemoteWriteConfigs: cfg.RemoteWrite, }) diff --git a/pkg/ruler/storage/wal/wal.go b/pkg/ruler/storage/wal/wal.go index 38a2a07134..7ed7d0ba39 100644 --- a/pkg/ruler/storage/wal/wal.go +++ b/pkg/ruler/storage/wal/wal.go @@ -28,6 +28,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" "go.uber.org/atomic" util_log "github.com/grafana/loki/v3/pkg/util/log" @@ -70,7 +71,7 @@ type Storage struct { // NewStorage makes a new Storage. 
func NewStorage(logger log.Logger, metrics *Metrics, registerer prometheus.Registerer, path string, enableReplay bool) (*Storage, error) { - w, err := wlog.NewSize(util_log.SlogFromGoKit(logger), registerer, SubDirectory(path), wlog.DefaultSegmentSize, wlog.CompressionSnappy) + w, err := wlog.NewSize(util_log.SlogFromGoKit(logger), registerer, SubDirectory(path), wlog.DefaultSegmentSize, compression.Snappy) if err != nil { return nil, err } @@ -373,7 +374,7 @@ func (w *Storage) Truncate(mint int64) error { return nil } - keep := func(id chunks.HeadSeriesRef) bool { + keep := func(id chunks.HeadSeriesRef, _ int) bool { if w.series.getByID(id) != nil { return true } diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go b/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go index 66c18a5189..3a77f744fe 100644 --- a/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go +++ b/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go @@ -7,6 +7,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" "github.com/grafana/loki/v3/pkg/util/encoding" @@ -205,7 +206,7 @@ func newHeadWAL(log log.Logger, dir string, t time.Time) (*headWAL, error) { // NB: if we use a non-nil Prometheus Registerer, ensure // that the underlying metrics won't conflict with existing WAL metrics in the ingester. // Likely, this can be done by adding extra label(s) - wal, err := wlog.NewSize(util_log.SlogFromGoKit(log), nil, dir, walSegmentSize, wlog.CompressionNone) + wal, err := wlog.NewSize(util_log.SlogFromGoKit(log), nil, dir, walSegmentSize, compression.None) if err != nil { return nil, err } diff --git a/pkg/tool/commands/rules.go b/pkg/tool/commands/rules.go index b91da0f324..57a03a7ba6 100644 --- a/pkg/tool/commands/rules.go +++ b/pkg/tool/commands/rules.go @@ -628,7 +628,7 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error { } // Do not apply the aggregation label to excluded rule groups. 
- applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool { + applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool { _, excluded := r.aggregationLabelExcludedRuleGroupsList[group.Name] return !excluded } @@ -749,11 +749,11 @@ func checkDuplicates(groups []rwrulefmt.RuleGroup) []compareRuleType { return duplicates } -func ruleMetric(rule rulefmt.RuleNode) string { - if rule.Alert.Value != "" { - return rule.Alert.Value +func ruleMetric(rule rulefmt.Rule) string { + if rule.Alert != "" { + return rule.Alert } - return rule.Record.Value + return rule.Record } // End taken from https://github.com/prometheus/prometheus/blob/8c8de46003d1800c9d40121b4a5e5de8582ef6e1/cmd/promtool/main.go#L403 diff --git a/pkg/tool/commands/rules_test.go b/pkg/tool/commands/rules_test.go index fe27da35f9..3abfca6596 100644 --- a/pkg/tool/commands/rules_test.go +++ b/pkg/tool/commands/rules_test.go @@ -5,7 +5,6 @@ import ( "github.com/prometheus/prometheus/model/rulefmt" "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v3" "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt" ) @@ -21,14 +20,14 @@ func TestCheckDuplicates(t *testing.T) { in: []rwrulefmt.RuleGroup{{ RuleGroup: rulefmt.RuleGroup{ Name: "rulegroup", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "up"}, - Expr: yaml.Node{Value: "up==1"}, + Record: "up", + Expr: "up==1", }, { - Record: yaml.Node{Value: "down"}, - Expr: yaml.Node{Value: "up==0"}, + Record: "down", + Expr: "up==0", }, }, }, @@ -41,14 +40,14 @@ func TestCheckDuplicates(t *testing.T) { in: []rwrulefmt.RuleGroup{{ RuleGroup: rulefmt.RuleGroup{ Name: "rulegroup", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "up"}, - Expr: yaml.Node{Value: "up==1"}, + Record: "up", + Expr: "up==1", }, { - Record: yaml.Node{Value: "up"}, - Expr: yaml.Node{Value: "up==0"}, + Record: "up", + Expr: "up==0", }, }, }, diff --git a/pkg/tool/rules/compare.go b/pkg/tool/rules/compare.go index 78e105c347..de9493bf26 100644 --- a/pkg/tool/rules/compare.go +++ b/pkg/tool/rules/compare.go @@ -102,10 +102,10 @@ func CompareGroups(groupOne, groupTwo rwrulefmt.RuleGroup) error { return nil } -func rulesEqual(a, b *rulefmt.RuleNode) bool { - if a.Alert.Value != b.Alert.Value || - a.Record.Value != b.Record.Value || - a.Expr.Value != b.Expr.Value || +func rulesEqual(a, b *rulefmt.Rule) bool { + if a.Alert != b.Alert || + a.Record != b.Record || + a.Expr != b.Expr || a.For != b.For { return false } diff --git a/pkg/tool/rules/compare_test.go b/pkg/tool/rules/compare_test.go index 4df1aa2ee6..7e3cae7e4f 100644 --- a/pkg/tool/rules/compare_test.go +++ b/pkg/tool/rules/compare_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/prometheus/prometheus/model/rulefmt" - yaml "gopkg.in/yaml.v3" "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt" ) @@ -12,21 +11,21 @@ import ( func Test_rulesEqual(t *testing.T) { tests := []struct { name string - a *rulefmt.RuleNode - b *rulefmt.RuleNode + a *rulefmt.Rule + b *rulefmt.Rule want bool }{ { name: "rule_node_identical", - a: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + a: &rulefmt.Rule{ + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, - b: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + b: &rulefmt.Rule{ + Record: "one", + Expr: "up", Annotations: map[string]string{"c": "d", "a": "b"}, Labels: nil, }, @@ -34,53 +33,53 @@ func Test_rulesEqual(t *testing.T) { 
}, { name: "rule_node_diff", - a: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + a: &rulefmt.Rule{ + Record: "one", + Expr: "up", }, - b: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "two"}, - Expr: yaml.Node{Value: "up"}, + b: &rulefmt.Rule{ + Record: "two", + Expr: "up", }, want: false, }, { name: "rule_node_annotations_diff", - a: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + a: &rulefmt.Rule{ + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b"}, }, - b: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one", Column: 10}, - Expr: yaml.Node{Value: "up"}, + b: &rulefmt.Rule{ + Record: "one", + Expr: "up", Annotations: map[string]string{"c": "d"}, }, want: false, }, { name: "rule_node_annotations_nil_diff", - a: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + a: &rulefmt.Rule{ + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b"}, }, - b: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one", Column: 10}, - Expr: yaml.Node{Value: "up"}, + b: &rulefmt.Rule{ + Record: "one", + Expr: "up", Annotations: nil, }, want: false, }, { name: "rule_node_yaml_diff", - a: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + a: &rulefmt.Rule{ + Record: "one", + Expr: "up", }, - b: &rulefmt.RuleNode{ - Record: yaml.Node{Value: "one", Column: 10}, - Expr: yaml.Node{Value: "up"}, + b: &rulefmt.Rule{ + Record: "one", + Expr: "up", }, want: true, }, @@ -106,10 +105,10 @@ func TestCompareGroups(t *testing.T) { groupOne: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -119,10 +118,10 @@ func TestCompareGroups(t *testing.T) { groupTwo: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -136,10 +135,10 @@ func TestCompareGroups(t *testing.T) { groupOne: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -149,16 +148,16 @@ func TestCompareGroups(t *testing.T) { groupTwo: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -172,10 +171,10 @@ func TestCompareGroups(t *testing.T) { groupOne: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ 
-188,10 +187,10 @@ func TestCompareGroups(t *testing.T) { groupTwo: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -208,10 +207,10 @@ func TestCompareGroups(t *testing.T) { groupOne: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -224,10 +223,10 @@ func TestCompareGroups(t *testing.T) { groupTwo: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -245,10 +244,10 @@ func TestCompareGroups(t *testing.T) { groupOne: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, @@ -261,10 +260,10 @@ func TestCompareGroups(t *testing.T) { groupTwo: rwrulefmt.RuleGroup{ RuleGroup: rulefmt.RuleGroup{ Name: "example_group", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: "one"}, - Expr: yaml.Node{Value: "up"}, + Record: "one", + Expr: "up", Annotations: map[string]string{"a": "b", "c": "d"}, Labels: nil, }, diff --git a/pkg/tool/rules/parser_test.go b/pkg/tool/rules/parser_test.go index 35db097486..7348f39197 100644 --- a/pkg/tool/rules/parser_test.go +++ b/pkg/tool/rules/parser_test.go @@ -28,7 +28,7 @@ func TestParseFiles(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "testgrp2", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { // currently the tests only check length }, @@ -51,7 +51,7 @@ func TestParseFiles(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "testgrp2", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { // currently the tests only check length }, @@ -81,7 +81,7 @@ func TestParseFiles(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "testgrp2", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { // currently the tests only check length }, @@ -96,7 +96,7 @@ func TestParseFiles(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "other_testgrp2", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { // currently the tests only check length }, diff --git a/pkg/tool/rules/rules.go b/pkg/tool/rules/rules.go index 4ac84f7da9..e05e994a29 100644 --- a/pkg/tool/rules/rules.go +++ b/pkg/tool/rules/rules.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/promql/parser" log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" logql "github.com/grafana/loki/v3/pkg/logql/syntax" @@ -40,13 +41,13 @@ func (r RuleNamespace) LintExpressions() (int, int, error) { for i, group := range r.Groups { for j, rule := range group.Rules { log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("linting %s", queryLanguage) - exp, err := parseFn(rule.Expr.Value) + exp, err := parseFn(rule.Expr) if err != nil { 
return count, mod, err } count++ - if rule.Expr.Value != exp.String() { + if rule.Expr != exp.String() { log.WithFields(log.Fields{ "rule": getRuleName(rule), "currentExpr": rule.Expr, @@ -54,7 +55,7 @@ func (r RuleNamespace) LintExpressions() (int, int, error) { }).Debugf("expression differs") mod++ - r.Groups[i].Rules[j].Expr.Value = exp.String() + r.Groups[i].Rules[j].Expr = exp.String() } } } @@ -75,10 +76,10 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int { for _, group := range r.Groups { for _, rule := range group.Rules { // Assume if there is a rule.Record that this is a recording rule. - if rule.Record.Value == "" { + if rule.Record == "" { continue } - name = rule.Record.Value + name = rule.Record log.WithFields(log.Fields{"rule": name}).Debugf("linting recording rule name") chunks := strings.Split(name, ":") if len(chunks) < reqChunks { @@ -98,7 +99,7 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int { // AggregateBy modifies the aggregation rules in groups to include a given Label. // If the applyTo function is provided, the aggregation is applied only to rules // for which the applyTo function returns true. -func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool) (int, int, error) { +func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool) (int, int, error) { // `count` represents the number of rules we evaluated. // `mod` represents the number of rules we modified - a modification can either be a lint or adding the // label in the aggregation. @@ -118,7 +119,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru } log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("evaluating...") - exp, err := parser.ParseExpr(rule.Expr.Value) + exp, err := parser.ParseExpr(rule.Expr) if err != nil { return count, mod, err } @@ -130,14 +131,14 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru parser.Inspect(exp, f) // Only modify the ones that actually changed. - if rule.Expr.Value != exp.String() { + if rule.Expr != exp.String() { log.WithFields(log.Fields{ "rule": getRuleName(rule), "currentExpr": rule.Expr, "afterExpr": exp.String(), }).Debugf("expression differs") mod++ - r.Groups[i].Rules[j].Expr.Value = exp.String() + r.Groups[i].Rules[j].Expr = exp.String() } } } @@ -147,7 +148,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru // exprNodeInspectorFunc returns a PromQL inspector. // It modifies most PromQL expressions to include a given label. 
-func exprNodeInspectorFunc(rule rulefmt.RuleNode, label string) func(node parser.Node, path []parser.Node) error { +func exprNodeInspectorFunc(rule rulefmt.Rule, label string) func(node parser.Node, path []parser.Node) error { return func(node parser.Node, _ []parser.Node) error { var err error switch n := node.(type) { @@ -239,12 +240,17 @@ func (r RuleNamespace) Validate() []error { func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error { var errs []error for i, r := range g.Rules { - for _, err := range r.Validate() { + ruleNode := rulefmt.RuleNode{ + Record: yaml.Node{Value: r.Record}, + Alert: yaml.Node{Value: r.Alert}, + Expr: yaml.Node{Value: r.Expr}, + } + for _, err := range r.Validate(ruleNode) { var ruleName string - if r.Alert.Value != "" { - ruleName = r.Alert.Value + if r.Alert != "" { + ruleName = r.Alert } else { - ruleName = r.Record.Value + ruleName = r.Record } errs = append(errs, &rulefmt.Error{ Group: g.Name, @@ -258,10 +264,10 @@ func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error { return errs } -func getRuleName(r rulefmt.RuleNode) string { - if r.Record.Value != "" { - return r.Record.Value +func getRuleName(r rulefmt.Rule) string { + if r.Record != "" { + return r.Record } - return r.Alert.Value + return r.Alert } diff --git a/pkg/tool/rules/rules_test.go b/pkg/tool/rules/rules_test.go index 8c24a7d8ab..b06463b786 100644 --- a/pkg/tool/rules/rules_test.go +++ b/pkg/tool/rules/rules_test.go @@ -5,7 +5,6 @@ import ( "github.com/prometheus/prometheus/model/rulefmt" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" "gotest.tools/assert" "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt" @@ -15,7 +14,7 @@ func TestAggregateBy(t *testing.T) { tt := []struct { name string rn RuleNamespace - applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool + applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool expectedExpr []string count, modified int expect error @@ -31,8 +30,8 @@ func TestAggregateBy(t *testing.T) { Groups: []rwrulefmt.RuleGroup{ { RuleGroup: rulefmt.RuleGroup{ - Name: "WithoutAggregation", Rules: []rulefmt.RuleNode{ - {Alert: yaml.Node{Value: "WithoutAggregation"}, Expr: yaml.Node{Value: "up != 1"}}, + Name: "WithoutAggregation", Rules: []rulefmt.Rule{ + {Alert: "WithoutAggregation", Expr: "up != 1"}, }, }, }, @@ -48,11 +47,10 @@ func TestAggregateBy(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "SkipWithout", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{Value: "SkipWithout"}, - Expr: yaml.Node{ - Value: ` + Alert: "SkipWithout", + Expr: ` min without (alertmanager) ( rate(prometheus_notifications_errors_total{job="default/prometheus"}[5m]) / @@ -60,7 +58,6 @@ func TestAggregateBy(t *testing.T) { ) * 100 > 3`, - }, }, }, }, @@ -77,16 +74,14 @@ func TestAggregateBy(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "WithAggregation", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{Value: "WithAggregation"}, - Expr: yaml.Node{ - Value: ` + Alert: "WithAggregation", + Expr: ` sum(rate(cortex_prometheus_rule_evaluation_failures_total[1m])) by (namespace, job) / sum(rate(cortex_prometheus_rule_evaluations_total[1m])) by (namespace, job) > 0.01`, - }, }, }, }, @@ -103,15 +98,11 @@ func TestAggregateBy(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "CountAggregation", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{ - Value: "CountAggregation", - }, - Expr: yaml.Node{ - Value: ` + Alert: "CountAggregation", + Expr: ` count(count by 
(gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1`, - }, }, }, }, @@ -128,10 +119,10 @@ func TestAggregateBy(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "BinaryExpressions", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{Value: "VectorMatching"}, - Expr: yaml.Node{Value: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`}, + Alert: "VectorMatching", + Expr: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`, }, }, }, @@ -148,35 +139,27 @@ func TestAggregateBy(t *testing.T) { { RuleGroup: rulefmt.RuleGroup{ Name: "CountAggregation", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{ - Value: "CountAggregation", - }, - Expr: yaml.Node{ - Value: `count by (namespace) (test_series) > 1`, - }, + Alert: "CountAggregation", + Expr: `count by (namespace) (test_series) > 1`, }, }, }, }, { RuleGroup: rulefmt.RuleGroup{ Name: "CountSkipped", - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{ - Value: "CountSkipped", - }, - Expr: yaml.Node{ - Value: `count by (namespace) (test_series) > 1`, - }, + Alert: "CountSkipped", + Expr: `count by (namespace) (test_series) > 1`, }, }, }, }, }, }, - applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool { + applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool { return group.Name != "CountSkipped" }, expectedExpr: []string{`count by (namespace, cluster) (test_series) > 1`, `count by (namespace) (test_series) > 1`}, @@ -196,7 +179,7 @@ func TestAggregateBy(t *testing.T) { expectedIdx := 0 for _, g := range tc.rn.Groups { for _, r := range g.Rules { - require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr.Value) + require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr) expectedIdx++ } } @@ -255,10 +238,10 @@ func TestLintExpressions(t *testing.T) { r := RuleNamespace{Groups: []rwrulefmt.RuleGroup{ { RuleGroup: rulefmt.RuleGroup{ - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Alert: yaml.Node{Value: "AName"}, - Expr: yaml.Node{Value: tc.expr}, + Alert: "AName", + Expr: tc.expr, }, }, }, @@ -267,7 +250,7 @@ func TestLintExpressions(t *testing.T) { } c, m, err := r.LintExpressions() - rexpr := r.Groups[0].Rules[0].Expr.Value + rexpr := r.Groups[0].Rules[0].Expr require.Equal(t, tc.count, c) require.Equal(t, tc.modified, m) @@ -325,10 +308,11 @@ func TestCheckRecordingRules(t *testing.T) { Groups: []rwrulefmt.RuleGroup{ { RuleGroup: rulefmt.RuleGroup{ - Rules: []rulefmt.RuleNode{ + Rules: []rulefmt.Rule{ { - Record: yaml.Node{Value: tc.ruleName}, - Expr: yaml.Node{Value: "rate(some_metric_total)[5m]"}}, + Record: tc.ruleName, + Expr: "rate(some_metric_total)[5m]", + }, }, }, }, diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index 988de1695d..e5a9937c06 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -106,7 +106,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.21.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect 
github.com/prometheus/exporter-toolkit v0.13.2 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/redis/go-redis/v9 v9.7.3 // indirect @@ -131,14 +131,14 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.38.0 // indirect golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.14.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.25.0 // indirect golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index bc303f1bd8..492eb7238e 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -416,8 +416,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -531,8 +531,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -573,13 +573,13 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -588,8 +588,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -622,16 +622,16 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index cf422304e7..926ed3882c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,19 @@ # Release History +## 1.18.0 (2025-04-03) + +### Features Added + +* Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token + + +## 1.17.1 (2025-03-20) + +### Other Changes + +* Upgraded to Go 1.23 +* Upgraded dependencies + ## 1.17.0 (2025-01-07) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index f2b296b6dc..460170034a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -47,8 +47,13 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool { // AccessToken represents an Azure service bearer access token with expiry information. // Exported as azcore.AccessToken. type AccessToken struct { - Token string + // Token is the access token + Token string + // ExpiresOn indicates when the token expires ExpiresOn time.Time + // RefreshOn is a suggested time to refresh the token. + // Clients should ignore this value when it's zero. + RefreshOn time.Time } // TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 44ab00d400..85514db3b8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. 
- Version = "v1.17.0" + Version = "v1.18.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index b26db920b0..1950a2e5b3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -51,6 +51,15 @@ func acquire(state acquiringResourceState) (newResource exported.AccessToken, ne return tk, tk.ExpiresOn, nil } +// shouldRefresh determines whether the token should be refreshed. It's a variable so tests can replace it. +var shouldRefresh = func(tk exported.AccessToken, _ acquiringResourceState) bool { + if tk.RefreshOn.IsZero() { + return tk.ExpiresOn.Add(-5 * time.Minute).Before(time.Now()) + } + // no offset in this case because the authority suggested a refresh window--between RefreshOn and ExpiresOn + return tk.RefreshOn.Before(time.Now()) +} + // NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. // cred: an azcore.TokenCredential implementation such as a credential object from azidentity // scopes: the list of permission scopes required for the token. @@ -69,11 +78,14 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * return authNZ(policy.TokenRequestOptions{Scopes: scopes}) } } + mr := temporal.NewResourceWithOptions(acquire, temporal.ResourceOptions[exported.AccessToken, acquiringResourceState]{ + ShouldRefresh: shouldRefresh, + }) return &BearerTokenPolicy{ authzHandler: ah, cred: cred, scopes: scopes, - mainResource: temporal.NewResource(acquire), + mainResource: mr, allowHTTP: opts.InsecureAllowCredentialWithHTTP, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 11c64eb294..485224197e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,17 @@ # Release History +## 1.9.0 (2025-04-08) + +### Features Added +* `GetToken()` sets `AccessToken.RefreshOn` when the token provider specifies a value + +### Other Changes +* `NewManagedIdentityCredential` logs the configured user-assigned identity, if any +* Deprecated `UsernamePasswordCredential` because it can't support multifactor + authentication (MFA), which Microsoft Entra ID requires for most tenants. See + https://aka.ms/azsdk/identity/mfa for migration guidance. 
+* Updated dependencies + ## 1.8.2 (2025-02-12) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 5cc64c08f2..069bc688d5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -21,7 +21,7 @@ go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity ## Prerequisites - an [Azure subscription](https://azure.microsoft.com/free/) -- Go 1.18 +- [Supported](https://aka.ms/azsdk/go/supported-versions) version of Go ### Authenticating during local development @@ -146,7 +146,6 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |-|- |[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser |[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI -|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password ### Authenticating via Development Tools @@ -159,7 +158,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) `DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables: -#### Service principal with secret +### Service principal with secret |variable name|value |-|- @@ -167,7 +166,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant |`AZURE_CLIENT_SECRET`|one of the application's client secrets -#### Service principal with certificate +### Service principal with certificate |variable name|value |-|- @@ -176,16 +175,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key |`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any -#### Username and password - -|variable name|value -|-|- -|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application -|`AZURE_USERNAME`|a username (usually an email address) -|`AZURE_PASSWORD`|that user's password - -Configuration is attempted in the above order. For example, if values for a -client secret and certificate are both present, the client secret will be used. +Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used. 
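As an illustration of the configuration order described above, here is a minimal Go sketch (not part of the vendored README) showing `EnvironmentCredential` picking up the service principal variables from the environment; the ARM scope is only an example, and real applications would normally hand the credential to an SDK client instead of calling `GetToken` directly:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Reads AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET (or the
	// certificate variables) from the environment, in the documented order.
	cred, err := azidentity.NewEnvironmentCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Example scope only; shown here just to demonstrate that the credential works.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires on:", tk.ExpiresOn)
}
```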
## Token caching diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index 8fc7c64aa3..dd3f8e5b21 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -22,12 +22,11 @@ Some credential types support opt-in persistent token caching (see [the below ta Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -| Operating system | Encryption facility | -| ---------------- | ---------------------------------------------- | -| Linux | kernel key retention service (keyctl) | -| macOS | Keychain (requires cgo and native build tools) | -| Windows | Data Protection API (DPAPI) | - +| Operating system | Encryption facility | Limitations | +| ---------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. | +| macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). | +| Windows | Data Protection API (DPAPI) | No specific limitations. | Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example]. 
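A hedged sketch of how an application might handle the constructor error mentioned above, assuming the `azidentity/cache` package and the credential options' `Cache` field (neither of which appears in this patch); when the persistent cache can't be created, the zero-value cache simply leaves token caching in memory:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func newCredential() (*azidentity.DeviceCodeCredential, error) {
	// cache.New is assumed to return an error when the OS encryption facility is
	// unavailable (for example keyctl after logout, or Keychain without a GUI session).
	persistent, err := cache.New(nil)
	if err != nil {
		log.Printf("persistent token cache unavailable, falling back to in-memory caching: %v", err)
	}
	// A zero-value Cache disables persistence; authentication still works,
	// the app just reauthenticates the next time it runs.
	return azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		Cache: persistent,
	})
}
```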
### Credentials supporting token caching diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 045f87acd5..4118f99ef2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_c55452bbf6" + "Tag": "go/azidentity_191110b0dd" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 40a94154c6..bd196ddd32 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -208,6 +209,10 @@ type msalConfidentialClient interface { AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error) } +type msalManagedIdentityClient interface { + AcquireToken(context.Context, string, ...managedidentity.AcquireTokenOption) (managedidentity.AuthResult, error) +} + // enables fakes for test scenarios type msalPublicClient interface { AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 92f508094d..58c4b585c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -118,7 +118,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", ")) log.Write(EventAuthentication, msg) } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err } func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index b30f5474f5..ec1eab05c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -60,7 +60,10 @@ type EnvironmentCredentialOptions struct { // Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this // function isn't able to parse your certificate, use [ClientCertificateCredential] instead. 
// -// # User with username and password +// # Deprecated: User with username and password +// +// User password authentication is deprecated because it can't support multifactor authentication. See +// [Entra ID documentation] for migration guidance. // // AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". // @@ -75,6 +78,8 @@ type EnvironmentCredentialOptions struct { // To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants // the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set // AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant. +// +// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa type EnvironmentCredential struct { cred azcore.TokenCredential } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work index 04ea962b42..6dd5b3d64d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work @@ -1,4 +1,4 @@ -go 1.18 +go 1.23.0 use ( . diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json index 1c3791777a..edd56f9d57 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json @@ -9,7 +9,7 @@ } }, "GoVersion": [ - "1.22.1" + "env:GO_VERSION_PREVIOUS" ], "IDENTITY_IMDS_AVAILABLE": "1" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index cc07fd7015..b3a0f85883 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -8,24 +8,18 @@ package azidentity import ( "context" - "encoding/json" "errors" "fmt" "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "strconv" "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity" ) const ( @@ -41,59 +35,20 @@ const ( msiResID = "msi_res_id" msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" - azureArcAPIVersion = "2019-08-15" + azureArcAPIVersion = "2020-06-01" qpClientID = "client_id" serviceFabricAPIVersion = "2019-07-01-preview" ) var imdsProbeTimeout = time.Second -type msiType int - -const ( - msiTypeAppService msiType = iota - msiTypeAzureArc - msiTypeAzureML - msiTypeCloudShell - msiTypeIMDS - msiTypeServiceFabric -) - type managedIdentityClient struct { - azClient *azcore.Client - endpoint string - id ManagedIDKind - msiType msiType - probeIMDS bool + azClient *azcore.Client + imds, probeIMDS, userAssigned bool // chained indicates whether the client is part 
of a credential chain. If true, the client will return // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response. - chained bool -} - -// arcKeyDirectory returns the directory expected to contain Azure Arc keys -var arcKeyDirectory = func() (string, error) { - switch runtime.GOOS { - case "linux": - return "/var/opt/azcmagent/tokens", nil - case "windows": - pd := os.Getenv("ProgramData") - if pd == "" { - return "", errors.New("environment variable ProgramData has no value") - } - return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil - default: - return "", fmt.Errorf("unsupported OS %q", runtime.GOOS) - } -} - -type wrappedNumber json.Number - -func (n *wrappedNumber) UnmarshalJSON(b []byte) error { - c := string(b) - if c == "\"\"" { - return nil - } - return json.Unmarshal(b, (*json.Number)(n)) + chained bool + msalClient msalManagedIdentityClient } // setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS @@ -141,51 +96,20 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag options = &ManagedIdentityCredentialOptions{} } cp := options.ClientOptions - c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} - env := "IMDS" - if endpoint, ok := os.LookupEnv(identityEndpoint); ok { - if _, ok := os.LookupEnv(identityHeader); ok { - if _, ok := os.LookupEnv(identityServerThumbprint); ok { - if options.ID != nil { - return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi") - } - env = "Service Fabric" - c.endpoint = endpoint - c.msiType = msiTypeServiceFabric - } else { - env = "App Service" - c.endpoint = endpoint - c.msiType = msiTypeAppService - } - } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { - if options.ID != nil { - return nil, errors.New("the Azure Arc API doesn't support specifying a user-assigned managed identity at runtime") - } - env = "Azure Arc" - c.endpoint = endpoint - c.msiType = msiTypeAzureArc - } - } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { - c.endpoint = endpoint - if _, ok := os.LookupEnv(msiSecret); ok { - if options.ID != nil && options.ID.idKind() != miClientID { - return nil, errors.New("the Azure ML API supports specifying a user-assigned managed identity by client ID only") - } - env = "Azure ML" - c.msiType = msiTypeAzureML - } else { - if options.ID != nil { - return nil, errors.New("the Cloud Shell API doesn't support user-assigned managed identities") - } - env = "Cloud Shell" - c.msiType = msiTypeCloudShell - } - } else { + c := managedIdentityClient{} + source, err := managedidentity.GetSource() + if err != nil { + return nil, err + } + env := string(source) + if source == managedidentity.DefaultToIMDS { + env = "IMDS" + c.imds = true c.probeIMDS = options.dac setIMDSRetryOptionDefaults(&cp.Retry) } - client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{ + c.azClient, err = azcore.NewClient(module, version, azruntime.PipelineOptions{ Tracing: azruntime.TracingOptions{ Namespace: traceNamespace, }, @@ -193,28 +117,53 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if err != nil { return nil, err } - c.azClient = client + + id := managedidentity.SystemAssigned() + if options.ID != nil { + c.userAssigned = true + switch s := options.ID.String(); options.ID.idKind() { + 
case miClientID: + id = managedidentity.UserAssignedClientID(s) + case miObjectID: + id = managedidentity.UserAssignedObjectID(s) + case miResourceID: + id = managedidentity.UserAssignedResourceID(s) + } + } + msalClient, err := managedidentity.New(id, managedidentity.WithHTTPClient(&c), managedidentity.WithRetryPolicyDisabled()) + if err != nil { + return nil, err + } + c.msalClient = &msalClient if log.Should(EventAuthentication) { - log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + msg := fmt.Sprintf("%s will use %s managed identity", credNameManagedIdentity, env) + if options.ID != nil { + kind := "client" + switch options.ID.(type) { + case ObjectID: + kind = "object" + case ResourceID: + kind = "resource" + } + msg += fmt.Sprintf(" with %s ID %q", kind, options.ID.String()) + } + log.Write(EventAuthentication, msg) } return &c, nil } -// provideToken acquires a token for MSAL's confidential.Client, which caches the token -func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { - result := confidential.TokenProviderResult{} - tk, err := c.authenticate(ctx, c.id, params.Scopes) - if err == nil { - result.AccessToken = tk.Token - result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) - } - return result, err +func (*managedIdentityClient) CloseIdleConnections() { + // do nothing +} + +func (c *managedIdentityClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(c.azClient, r) } // authenticate acquires an access token -func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { +func (c *managedIdentityClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block if c.probeIMDS { @@ -222,7 +171,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) - req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) + req, err := azruntime.NewRequest(cx, http.MethodGet, imdsEndpoint) if err != nil { return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) } @@ -237,32 +186,26 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi c.probeIMDS = false } - msg, err := c.createAuthRequest(ctx, id, scopes) - if err != nil { - return azcore.AccessToken{}, err - } - - resp, err := c.azClient.Pipeline().Do(msg) - if err != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil) - } - - if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { - tk, err := c.createAccessToken(resp) - if err != nil && c.chained && c.msiType == msiTypeIMDS { - // failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at + ar, err := c.msalClient.AcquireToken(ctx, tro.Scopes[0], managedidentity.WithClaims(tro.Claims)) + if err == nil { + msg := fmt.Sprintf(scopeLogFmt, credNameManagedIdentity, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) 
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err + } + if c.imds { + var ije msalerrors.InvalidJsonErr + if c.chained && errors.As(err, &ije) { + // an unmarshaling error implies the response is from something other than IMDS such as a proxy listening at // the same address. Return a credentialUnavailableError so credential chains continue to their next credential - err = newCredentialUnavailableError(credNameManagedIdentity, err.Error()) + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error()) + } + resp := getResponseFromError(err) + if resp == nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err) } - return tk, err - } - - if c.msiType == msiTypeIMDS { switch resp.StatusCode { case http.StatusBadRequest: - if id != nil { - // return authenticationFailedError, halting any encompassing credential chain, - // because the explicit user-assigned identity implies the developer expected this to work + if c.userAssigned { return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" @@ -278,237 +221,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) } } - if c.chained { - // the response may be from something other than IMDS, for example a proxy returning - // 404. Return credentialUnavailableError so credential chains continue to their - // next credential, include the response in the error message to help debugging - err = newAuthenticationFailedError(credNameManagedIdentity, "", resp) - return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error()) - } - } - - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp) -} - -func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { - value := struct { - // these are the only fields that we use - Token string `json:"access_token,omitempty"` - RefreshToken string `json:"refresh_token,omitempty"` - ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid - ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string - }{} - if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res) - } - if value.ExpiresIn != "" { - expiresIn, err := json.Number(value.ExpiresIn).Int64() - if err != nil { - return azcore.AccessToken{}, err - } - return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil - } - switch v := value.ExpiresOn.(type) { - case float64: - return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil - case string: - if expiresOn, err := strconv.Atoi(v); err == nil { - return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil - } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, 
res) - default: - msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res) - } -} - -func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - switch c.msiType { - case msiTypeIMDS: - return c.createIMDSAuthRequest(ctx, id, scopes) - case msiTypeAppService: - return c.createAppServiceAuthRequest(ctx, id, scopes) - case msiTypeAzureArc: - // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service - key, err := c.getAzureArcSecretKey(ctx, scopes) - if err != nil { - msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) - return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil) - } - return c.createAzureArcAuthRequest(ctx, scopes, key) - case msiTypeAzureML: - return c.createAzureMLAuthRequest(ctx, id, scopes) - case msiTypeServiceFabric: - return c.createServiceFabricAuthRequest(ctx, scopes) - case msiTypeCloudShell: - return c.createCloudShellAuthRequest(ctx, scopes) - default: - return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") - } -} - -func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set(headerMetadata, "true") - q := request.Raw().URL.Query() - q.Set("api-version", imdsAPIVersion) - q.Set("resource", strings.Join(scopes, " ")) - if id != nil { - switch id.idKind() { - case miClientID: - q.Set(qpClientID, id.String()) - case miObjectID: - q.Set("object_id", id.String()) - case miResourceID: - q.Set(msiResID, id.String()) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) - q := request.Raw().URL.Query() - q.Set("api-version", "2019-08-01") - q.Set("resource", scopes[0]) - if id != nil { - switch id.idKind() { - case miClientID: - q.Set(qpClientID, id.String()) - case miObjectID: - q.Set("principal_id", id.String()) - case miResourceID: - q.Set(miResID, id.String()) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set("secret", os.Getenv(msiSecret)) - q := request.Raw().URL.Query() - q.Set("api-version", "2017-09-01") - q.Set("resource", strings.Join(scopes, " ")) - q.Set("clientid", os.Getenv(defaultIdentityClientID)) - if id != nil { - switch id.idKind() { - case miClientID: - q.Set("clientid", id.String()) - case miObjectID: - return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by object ID", nil) - case miResourceID: - return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure 
ML doesn't support specifying a managed identity by resource ID", nil) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - q := request.Raw().URL.Query() - request.Raw().Header.Set("Accept", "application/json") - request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) - q.Set("api-version", serviceFabricAPIVersion) - q.Set("resource", strings.Join(scopes, " ")) - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { - // create the request to retreive the secret key challenge provided by the HIMDS service - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return "", err - } - request.Raw().Header.Set(headerMetadata, "true") - q := request.Raw().URL.Query() - q.Set("api-version", azureArcAPIVersion) - q.Set("resource", strings.Join(resources, " ")) - request.Raw().URL.RawQuery = q.Encode() - // send the initial request to get the short-lived secret key - response, err := c.azClient.Pipeline().Do(request) - if err != nil { - return "", err - } - // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location - // of the secret key file. Any other status code indicates an error in the request. - if response.StatusCode != 401 { - msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) - return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response) - } - header := response.Header.Get("WWW-Authenticate") - if len(header) == 0 { - return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil) - } - // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key - _, p, found := strings.Cut(header, "=") - if !found { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil) - } - expected, err := arcKeyDirectory() - if err != nil { - return "", err - } - if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil) - } - f, err := os.Stat(p) - if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil) - } - if s := f.Size(); s > 4096 { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil) - } - key, err := os.ReadFile(p) - if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil) - } - return string(key), nil -} - -func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, resources []string, key string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set(headerMetadata, "true") - request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) - q := request.Raw().URL.Query() - q.Set("api-version", azureArcAPIVersion) - q.Set("resource", 
strings.Join(resources, " ")) - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set(headerMetadata, "true") - data := url.Values{} - data.Set("resource", strings.Join(scopes, " ")) - dataEncoded := data.Encode() - body := streaming.NopCloser(strings.NewReader(dataEncoded)) - if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { - return nil, err } - return request, nil + err = newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err) + return azcore.AccessToken{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 1d53579cf3..11b686ccda 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -14,7 +14,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) const credNameManagedIdentity = "ManagedIdentityCredential" @@ -110,8 +109,7 @@ type ManagedIdentityCredentialOptions struct { // // [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { - client *confidentialClient - mic *managedIdentityClient + mic *managedIdentityClient } // NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. @@ -123,38 +121,22 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M if err != nil { return nil, err } - cred := confidential.NewCredFromTokenProvider(mic.provideToken) - - // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. - // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. - clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" - if options.ID != nil { - clientID = options.ID.String() - } - // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value - c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{ - ClientOptions: options.ClientOptions, - }) - if err != nil { - return nil, err - } - return &ManagedIdentityCredential{client: c, mic: mic}, nil + return &ManagedIdentityCredential{mic: mic}, nil } // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. 
func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { var err error - ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.mic.azClient.Tracer(), nil) defer func() { endSpan(err) }() if len(opts.Scopes) != 1 { err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) return azcore.AccessToken{}, err } - // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + // managed identity endpoints require a v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - tk, err := c.client.GetToken(ctx, opts) - return tk, err + return c.mic.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index ef5e4d7212..053d1785f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -243,7 +243,7 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke } else { err = newAuthenticationFailedErrorFromMSAL(p.name, err) } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err } // resolveTenant returns the correct WithTenantID() argument for a token request given the client's diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index efa8c6d3eb..67f97fbb2b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -72,6 +72,7 @@ az container create -g $rg -n $aciName --image $image ` --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` --cpu 1 ` + --ip-address Public ` --memory 1.0 ` --os-type Linux ` --role "Storage Blob Data Reader" ` @@ -82,7 +83,8 @@ az container create -g $rg -n $aciName --image $image ` AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) ` AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) ` FUNCTIONS_CUSTOMHANDLER_PORT=80 -Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" +$aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv +Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP" # Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip Write-Host "Deploying to Azure Functions" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 740abd4709..5791e7d224 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -17,6 +17,11 @@ import ( const credNameUserPassword = "UsernamePasswordCredential" // UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +// +// Deprecated: UsernamePasswordCredential is deprecated because it can't support multifactor +// authentication. See [Entra ID documentation] for migration guidance. +// +// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa type UsernamePasswordCredentialOptions struct { azcore.ClientOptions @@ -43,8 +48,13 @@ type UsernamePasswordCredentialOptions struct { // UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, // because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible -// with any form of multi-factor authentication, and the application must already have user or admin consent. +// with any form of multifactor authentication, and the application must already have user or admin consent. // This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +// +// Deprecated: this credential is deprecated because it can't support multifactor authentication. See [Entra ID documentation] +// for migration guidance. +// +// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa type UsernamePasswordCredential struct { client *publicClient } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index fec0419ca7..584aabe1cb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.8.2" + version = "v1.9.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go index 4f1dcf1b78..76dadf7d35 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -44,7 +44,7 @@ func Should(cls Event) bool { if log.lst == nil { return false } - if log.cls == nil || len(log.cls) == 0 { + if len(log.cls) == 0 { return true } for _, c := range log.cls { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go index 238ef42ed0..02aa1fb3bc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -11,9 +11,17 @@ import ( "time" ) +// backoff sets a minimum wait time between eager update attempts. It's a variable so tests can manipulate it. +var backoff = func(now, lastAttempt time.Time) bool { + return lastAttempt.Add(30 * time.Second).After(now) +} + // AcquireResource abstracts a method for refreshing a temporal resource. 
type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) +// ShouldRefresh abstracts a method for indicating whether a resource should be refreshed before expiration. +type ShouldRefresh[TResource, TState any] func(TResource, TState) bool + // Resource is a temporal resource (usually a credential) that requires periodic refreshing. type Resource[TResource, TState any] struct { // cond is used to synchronize access to the shared resource embodied by the remaining fields @@ -31,24 +39,43 @@ type Resource[TResource, TState any] struct { // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource lastAttempt time.Time + // shouldRefresh indicates whether the resource should be refreshed before expiration + shouldRefresh ShouldRefresh[TResource, TState] + // acquireResource is the callback function that actually acquires the resource acquireResource AcquireResource[TResource, TState] } // NewResource creates a new Resource that uses the specified AcquireResource for refreshing. func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { - return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} + r := &Resource[TResource, TState]{acquireResource: ar, cond: sync.NewCond(&sync.Mutex{})} + r.shouldRefresh = r.expiringSoon + return r +} + +// ResourceOptions contains optional configuration for Resource +type ResourceOptions[TResource, TState any] struct { + // ShouldRefresh indicates whether [Resource.Get] should acquire an updated resource despite + // the currently held resource not having expired. [Resource.Get] ignores all errors from + // refresh attempts triggered by ShouldRefresh returning true, and doesn't call ShouldRefresh + // when the resource has expired (it unconditionally updates expired resources). When + // ShouldRefresh is nil, [Resource.Get] refreshes the resource if it will expire within 5 + // minutes. + ShouldRefresh ShouldRefresh[TResource, TState] +} + +// NewResourceWithOptions creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResourceWithOptions[TResource, TState any](ar AcquireResource[TResource, TState], opts ResourceOptions[TResource, TState]) *Resource[TResource, TState] { + r := NewResource(ar) + if opts.ShouldRefresh != nil { + r.shouldRefresh = opts.ShouldRefresh + } + return r } // Get returns the underlying resource. // If the resource is fresh, no refresh is performed. func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { - // If the resource is expiring within this time window, update it eagerly. - // This allows other threads/goroutines to keep running by using the not-yet-expired - // resource value while one thread/goroutine updates the resource. 
- const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration - const backoff = 30 * time.Second // Minimum wait time between eager update attempts - now, acquire, expired := time.Now(), false, false // acquire exclusive lock @@ -65,9 +92,8 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { break } // Getting here means that this thread/goroutine will wait for the updated resource - } else if er.expiration.Add(-window).Before(now) { - // The resource is valid but is expiring within the time window - if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + } else if er.shouldRefresh(resource, state) { + if !(er.acquiring || backoff(now, er.lastAttempt)) { // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted // to do so within the last 30 seconds, this thread/goroutine will do it er.acquiring, acquire = true, true @@ -121,3 +147,8 @@ func (er *Resource[TResource, TState]) Expire() { // Reset the expiration as if we never got this resource to begin with er.expiration = time.Time{} } + +func (er *Resource[TResource, TState]) expiringSoon(TResource, TState) bool { + // call time.Now() instead of using Get's value so ShouldRefresh doesn't need a time.Time parameter + return er.expiration.Add(-5 * time.Minute).Before(time.Now()) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 22c17d2012..549d68ab99 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -65,6 +65,13 @@ type AuthenticationScheme = authority.AuthenticationScheme type Account = shared.Account +type TokenSource = base.TokenSource + +const ( + TokenSourceIdentityProvider = base.TokenSourceIdentityProvider + TokenSourceCache = base.TokenSourceCache +) + // CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file // must contain the public certificate and the private key. If a PEM block is encrypted and // password is not an empty string, it attempts to decrypt the PEM blocks using the password. @@ -639,7 +646,7 @@ func (cca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s if err != nil { return AuthResult{}, err } - return cca.base.AuthResultFromToken(ctx, authParams, token, true) + return cca.base.AuthResultFromToken(ctx, authParams, token) } // acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. 
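The ShouldRefresh hook added to sdk/internal/temporal above lets a caller refresh a resource proactively instead of relying on the default "refresh within 5 minutes of expiry" policy. A minimal sketch of how it could be wired up follows; temporal is an internal support module of the Azure SDK, and the fakeToken type, its RefreshOn field, and the acquire callback are illustrative assumptions, not part of this patch.

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)

// fakeToken stands in for a credential; RefreshOn carries the provider's suggested refresh time.
type fakeToken struct {
	Value     string
	RefreshOn time.Time
}

func main() {
	// acquire pretends to call an identity provider, returning the token and its expiration.
	acquire := func(state string) (fakeToken, time.Time, error) {
		now := time.Now()
		return fakeToken{Value: "token-for-" + state, RefreshOn: now.Add(30 * time.Minute)}, now.Add(time.Hour), nil
	}
	res := temporal.NewResourceWithOptions[fakeToken, string](acquire, temporal.ResourceOptions[fakeToken, string]{
		// Refresh once the provider's suggested refresh time has passed; Get ignores errors
		// from these eager refreshes until the resource actually expires.
		ShouldRefresh: func(tk fakeToken, _ string) bool {
			return time.Now().After(tk.RefreshOn)
		},
	})
	tk, err := res.Get("scope-A")
	if err != nil {
		panic(err)
	}
	fmt.Println(tk.Value)
}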
@@ -733,7 +740,7 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, if err != nil { return AuthResult{}, err } - return cca.base.AuthResultFromToken(ctx, authParams, token, true) + return cca.base.AuthResultFromToken(ctx, authParams, token) } // acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go index c9b8dbed08..b5cbb57217 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -64,11 +64,20 @@ type CallErr struct { Err error } +type InvalidJsonErr struct { + Err error +} + // Errors implements error.Error(). func (e CallErr) Error() string { return e.Err.Error() } +// Errors implements error.Error(). +func (e InvalidJsonErr) Error() string { + return e.Err.Error() +} + // Verbose prints a versbose error message with the request or response. func (e CallErr) Verbose() string { e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index e473d1267d..61c1c4cec1 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -5,16 +5,17 @@ package base import ( "context" - "errors" "fmt" "net/url" "reflect" "strings" "sync" + "sync/atomic" "time" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" @@ -94,6 +95,7 @@ type AuthResult struct { // AuthResultMetadata which contains meta data for the AuthResult type AuthResultMetadata struct { + RefreshOn time.Time TokenSource TokenSource } @@ -101,9 +103,8 @@ type TokenSource int // These are all the types of token flows. const ( - SourceUnknown TokenSource = 0 - IdentityProvider TokenSource = 1 - Cache TokenSource = 2 + TokenSourceIdentityProvider TokenSource = 0 + TokenSourceCache TokenSource = 1 ) // AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). 
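The AuthResultMetadata fields surfaced in the base package changes below (TokenSource and RefreshOn) let callers see whether MSAL served a token from its cache and when it suggests refreshing it. A short sketch of reading them through the confidential client follows; it assumes an already-constructed confidential.Client and a previously cached account, and is illustrative rather than code from this patch.

package example

import (
	"context"
	"log"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// logTokenSource reports where a silently acquired token came from and when MSAL
// suggests refreshing it.
func logTokenSource(ctx context.Context, client confidential.Client, account confidential.Account, scopes []string) error {
	ar, err := client.AcquireTokenSilent(ctx, scopes, confidential.WithSilentAccount(account))
	if err != nil {
		return err
	}
	if ar.Metadata.TokenSource == confidential.TokenSourceCache {
		log.Printf("token served from cache; suggested refresh time: %s", ar.Metadata.RefreshOn)
	} else {
		log.Printf("token acquired from the identity provider")
	}
	return nil
}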
@@ -111,7 +112,6 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu if err := storageTokenResponse.AccessToken.Validate(); err != nil { return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err) } - account := storageTokenResponse.Account accessToken := storageTokenResponse.AccessToken.Secret grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator) @@ -132,7 +132,8 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu GrantedScopes: grantedScopes, DeclinedScopes: nil, Metadata: AuthResultMetadata{ - TokenSource: Cache, + TokenSource: TokenSourceCache, + RefreshOn: storageTokenResponse.AccessToken.RefreshOn.T, }, }, nil } @@ -146,10 +147,11 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco Account: account, IDToken: tokenResponse.IDToken, AccessToken: tokenResponse.AccessToken, - ExpiresOn: tokenResponse.ExpiresOn.T, + ExpiresOn: tokenResponse.ExpiresOn, GrantedScopes: tokenResponse.GrantedScopes.Slice, Metadata: AuthResultMetadata{ - TokenSource: IdentityProvider, + TokenSource: TokenSourceIdentityProvider, + RefreshOn: tokenResponse.RefreshOn.T, }, }, nil } @@ -165,6 +167,8 @@ type Client struct { AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New(). cacheAccessor cache.ExportReplace cacheAccessorMu *sync.RWMutex + canRefresh map[string]*atomic.Value + canRefreshMu *sync.Mutex } // Option is an optional argument to the New constructor. @@ -241,6 +245,8 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O cacheAccessorMu: &sync.RWMutex{}, manager: storage.New(token), pmanager: storage.NewPartitionedManager(token), + canRefresh: make(map[string]*atomic.Value), + canRefreshMu: &sync.Mutex{}, } for _, o := range options { if err = o(&client); err != nil { @@ -345,6 +351,28 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen if silent.Claims == "" { ar, err = AuthResultFromStorage(storageTokenResponse) if err == nil { + if rt := storageTokenResponse.AccessToken.RefreshOn.T; !rt.IsZero() && Now().After(rt) { + b.canRefreshMu.Lock() + refreshValue, ok := b.canRefresh[tenant] + if !ok { + refreshValue = &atomic.Value{} + refreshValue.Store(false) + b.canRefresh[tenant] = refreshValue + } + b.canRefreshMu.Unlock() + if refreshValue.CompareAndSwap(false, true) { + defer refreshValue.Store(false) + // Added a check to see if the token is still same because there is a chance + // that the token is already refreshed by another thread. + // If the token is not same, we don't need to refresh it. + // Which means it refreshed. 
+ if str, err := m.Read(ctx, authParams); err == nil && str.AccessToken.Secret == ar.AccessToken { + if tr, er := b.Token.Credential(ctx, authParams, silent.Credential); er == nil { + return b.AuthResultFromToken(ctx, authParams, tr) + } + } + } + } ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) return ar, err } @@ -362,7 +390,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen if err != nil { return ar, err } - return b.AuthResultFromToken(ctx, authParams, token, true) + return b.AuthResultFromToken(ctx, authParams, token) } func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) { @@ -391,7 +419,7 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui return AuthResult{}, err } - return b.AuthResultFromToken(ctx, authParams, token, true) + return b.AuthResultFromToken(ctx, authParams, token) } // AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. @@ -420,15 +448,12 @@ func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams Acq authParams.UserAssertion = onBehalfOfParams.UserAssertion token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) if err == nil { - ar, err = b.AuthResultFromToken(ctx, authParams, token, true) + ar, err = b.AuthResultFromToken(ctx, authParams, token) } return ar, err } -func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { - if !cacheWrite { - return NewAuthResult(token, shared.Account{}) - } +func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) { var m manager = b.manager if authParams.AuthorizationType == authority.ATOnBehalfOf { m = b.pmanager @@ -458,6 +483,10 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au return ar, err } +// This function wraps time.Now() and is used for refreshing the application +// was created to test the function against refreshin +var Now = time.Now + func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) { if b.cacheAccessor != nil { b.cacheAccessorMu.RLock() diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go similarity index 95% rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go index f9be90276d..7379e2233c 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go @@ -72,6 +72,7 @@ type AccessToken struct { ClientID string `json:"client_id,omitempty"` Secret string `json:"secret,omitempty"` Scopes string `json:"target,omitempty"` + RefreshOn internalTime.Unix `json:"refresh_on,omitempty"` ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` CachedAt internalTime.Unix `json:"cached_at,omitempty"` @@ -83,7 +84,7 @@ type 
AccessToken struct { } // NewAccessToken is the constructor for AccessToken. -func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken { +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, refreshOn, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken { return AccessToken{ HomeAccountID: homeID, Environment: env, @@ -93,6 +94,7 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex Secret: token, Scopes: scopes, CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + RefreshOn: internalTime.Unix{T: refreshOn.UTC()}, ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, TokenType: tokenType, @@ -102,8 +104,9 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex // Key outputs the key that can be used to uniquely look up this entry in a map. func (a AccessToken) Key() string { + ks := []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes} key := strings.Join( - []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + ks, shared.CacheKeySeparator, ) // add token type to key for new access tokens types. skip for bearer token type to diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go similarity index 99% rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go index c093183306..ff07d4b5a4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go @@ -114,7 +114,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes realm, clientID, cachedAt, - tokenResponse.ExpiresOn.T, + tokenResponse.RefreshOn.T, + tokenResponse.ExpiresOn, tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go similarity index 98% rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go index 2221e60c43..84a234967f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go @@ -173,6 +173,7 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID + target := 
strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) cachedAt := time.Now() authnSchemeKeyID := authParameters.AuthnScheme.KeyID() @@ -193,7 +194,8 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces realm, clientID, cachedAt, - tokenResponse.ExpiresOn.T, + tokenResponse.RefreshOn.T, + tokenResponse.ExpiresOn, tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, @@ -265,6 +267,9 @@ func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo author } func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + if m.requests == nil { + return authority.InstanceDiscoveryMetadata{}, fmt.Errorf("httpclient in oauth instance for fetching metadata is nil") + } m.aadCacheMu.Lock() defer m.aadCacheMu.Unlock() discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) @@ -459,6 +464,7 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s func (m *Manager) writeAccount(account shared.Account) error { key := account.Key() + m.contractMu.Lock() defer m.contractMu.Unlock() m.contract.Accounts[key] = account diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go index 7b673e3fe1..de1bf381f4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -31,4 +31,6 @@ type TokenProviderResult struct { AccessToken string // ExpiresInSeconds is the lifetime of the token in seconds ExpiresInSeconds int + // RefreshInSeconds indicates the suggested time to refresh the token, if any + RefreshInSeconds int } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index fda5d7dd33..cda678e334 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -146,7 +146,8 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) { // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, // change this to s.error() and make s.error() write the failPage instead of an error code. 
_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) - s.putResult(Result{Err: fmt.Errorf(desc)}) + s.putResult(Result{Err: fmt.Errorf("%s", desc)}) + return } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index e065313444..738a29eb9d 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -111,7 +111,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams Scopes: scopes, TenantID: authParams.AuthorityInfo.Tenant, } - tr, err := cred.TokenProvider(ctx, params) + pr, err := cred.TokenProvider(ctx, params) if err != nil { if len(scopes) == 0 { err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) @@ -119,14 +119,18 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams } return accesstokens.TokenResponse{}, err } - return accesstokens.TokenResponse{ - TokenType: authParams.AuthnScheme.AccessTokenType(), - AccessToken: tr.AccessToken, - ExpiresOn: internalTime.DurationTime{ - T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), - }, + tr := accesstokens.TokenResponse{ + TokenType: authParams.AuthnScheme.AccessTokenType(), + AccessToken: pr.AccessToken, + ExpiresOn: now.Add(time.Duration(pr.ExpiresInSeconds) * time.Second), GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, - }, nil + } + if pr.RefreshInSeconds > 0 { + tr.RefreshOn = internalTime.DurationTime{ + T: now.Add(time.Duration(pr.RefreshInSeconds) * time.Second), + } + } + return tr, nil } if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go index a7b7b0742d..d738c7591e 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -17,6 +17,7 @@ import ( /* #nosec */ "crypto/sha1" + "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/json" @@ -68,7 +69,7 @@ type DeviceCodeResponse struct { UserCode string `json:"user_code"` DeviceCode string `json:"device_code"` - VerificationURL string `json:"verification_url"` + VerificationURL string `json:"verification_uri"` ExpiresIn int `json:"expires_in"` Interval int `json:"interval"` Message string `json:"message"` @@ -112,19 +113,31 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) ( } return c.AssertionCallback(ctx, options) } - - token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + claims := jwt.MapClaims{ "aud": authParams.Endpoints.TokenEndpoint, "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), "iss": authParams.ClientID, "jti": uuid.New().String(), "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), "sub": authParams.ClientID, - }) + } + + isADFSorDSTS := authParams.AuthorityInfo.AuthorityType == authority.ADFS || + authParams.AuthorityInfo.AuthorityType == authority.DSTS + + 
var signingMethod jwt.SigningMethod = jwt.SigningMethodPS256 + thumbprintKey := "x5t#S256" + + if isADFSorDSTS { + signingMethod = jwt.SigningMethodRS256 + thumbprintKey = "x5t" + } + + token := jwt.NewWithClaims(signingMethod, claims) token.Header = map[string]interface{}{ - "alg": "RS256", - "typ": "JWT", - "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + "alg": signingMethod.Alg(), + "typ": "JWT", + thumbprintKey: base64.StdEncoding.EncodeToString(thumbprint(c.Cert, signingMethod.Alg())), } if authParams.SendX5C { @@ -133,17 +146,23 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) ( assertion, err := token.SignedString(c.Key) if err != nil { - return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + return "", fmt.Errorf("unable to sign JWT token: %w", err) } + return assertion, nil } // thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. // https://tools.ietf.org/html/rfc7517#section-4.8 -func thumbprint(cert *x509.Certificate) []byte { - /* #nosec */ - a := sha1.Sum(cert.Raw) - return a[:] +func thumbprint(cert *x509.Certificate, alg string) []byte { + switch alg { + case jwt.SigningMethodRS256.Name: // identity providers like ADFS don't support SHA256 assertions, so need to support this + hash := sha1.Sum(cert.Raw) /* #nosec */ + return hash[:] + default: + hash := sha256.Sum256(cert.Raw) + return hash[:] + } } // Client represents the REST calls to get tokens from token generator backends. @@ -262,11 +281,7 @@ func (c Client) FromClientSecret(ctx context.Context, authParameters authority.A qv.Set(clientID, authParameters.ClientID) addScopeQueryParam(qv, authParameters) - token, err := c.doTokenResp(ctx, authParameters, qv) - if err != nil { - return token, fmt.Errorf("FromClientSecret(): %w", err) - } - return token, nil + return c.doTokenResp(ctx, authParameters, qv) } func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { @@ -281,11 +296,7 @@ func (c Client) FromAssertion(ctx context.Context, authParameters authority.Auth qv.Set(clientInfo, clientInfoVal) addScopeQueryParam(qv, authParameters) - token, err := c.doTokenResp(ctx, authParameters, qv) - if err != nil { - return token, fmt.Errorf("FromAssertion(): %w", err) - } - return token, nil + return c.doTokenResp(ctx, authParameters, qv) } func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go index 3107b45c11..32dde7b76b 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "reflect" + "strconv" "strings" "time" @@ -173,14 +174,75 @@ type TokenResponse struct { FamilyID string `json:"foci"` IDToken IDToken `json:"id_token"` ClientInfo ClientInfo `json:"client_info"` - ExpiresOn internalTime.DurationTime `json:"expires_in"` + RefreshOn internalTime.DurationTime `json:"refresh_in,omitempty"` + ExpiresOn time.Time `json:"-"` ExtExpiresOn internalTime.DurationTime 
`json:"ext_expires_in"` GrantedScopes Scopes `json:"scope"` DeclinedScopes []string // This is derived AdditionalFields map[string]interface{} + scopesComputed bool +} + +func (tr *TokenResponse) UnmarshalJSON(data []byte) error { + type Alias TokenResponse + aux := &struct { + ExpiresIn internalTime.DurationTime `json:"expires_in,omitempty"` + ExpiresOn any `json:"expires_on,omitempty"` + *Alias + }{ + Alias: (*Alias)(tr), + } + + // Unmarshal the JSON data into the aux struct + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Function to parse different date formats + // This is a workaround for the issue described here: + // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/4963 + parseExpiresOn := func(expiresOn string) (time.Time, error) { + var formats = []string{ + "01/02/2006 15:04:05", // MM/dd/yyyy HH:mm:ss + "2006-01-02 15:04:05", // yyyy-MM-dd HH:mm:ss + time.RFC3339Nano, // ISO 8601 (with nanosecond precision) + } + + for _, format := range formats { + if t, err := time.Parse(format, expiresOn); err == nil { + return t, nil + } + } + return time.Time{}, fmt.Errorf("invalid ExpiresOn format: %s", expiresOn) + } - scopesComputed bool + if expiresOnStr, ok := aux.ExpiresOn.(string); ok { + if ts, err := strconv.ParseInt(expiresOnStr, 10, 64); err == nil { + tr.ExpiresOn = time.Unix(ts, 0) + return nil + } + if expiresOnStr != "" { + if t, err := parseExpiresOn(expiresOnStr); err != nil { + return err + } else { + tr.ExpiresOn = t + return nil + } + } + } + + // Check if ExpiresOn is a number (Unix timestamp or ISO 8601) + if expiresOnNum, ok := aux.ExpiresOn.(float64); ok { + tr.ExpiresOn = time.Unix(int64(expiresOnNum), 0) + return nil + } + + if !aux.ExpiresIn.T.IsZero() { + tr.ExpiresOn = aux.ExpiresIn.T + return nil + } + return errors.New("expires_in and expires_on are both missing or invalid") } // ComputeScope computes the final scopes based on what was granted by the server and diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go index d62aac74eb..7906803669 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -98,7 +98,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea if resp != nil { if err := unmarshal(data, resp); err != nil { - return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) + return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))} } } return nil @@ -221,7 +221,7 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values } if resp != nil { if err := unmarshal(data, resp); err != nil { - return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))} } } return nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go index eb16b405c4..5e551abc83 100644 --- 
a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -5,4 +5,4 @@ package version // Version is the version of this client package that is communicated to the server. -const Version = "1.2.0" +const Version = "1.4.2" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go new file mode 100644 index 0000000000..d7cffc295e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package managedidentity + +import ( + "context" + "net/http" + "os" +) + +func createAzureMLAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(msiEndpointEnvVar), nil) + if err != nil { + return nil, err + } + + req.Header.Set("secret", os.Getenv(msiSecretEnvVar)) + q := req.URL.Query() + q.Set(apiVersionQueryParameterName, azureMLAPIVersion) + q.Set(resourceQueryParameterName, resource) + q.Set("clientid", os.Getenv("DEFAULT_IDENTITY_CLIENT_ID")) + if cid, ok := id.(UserAssignedClientID); ok { + q.Set("clientid", string(cid)) + } + req.URL.RawQuery = q.Encode() + return req, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go new file mode 100644 index 0000000000..be9a0bca38 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package managedidentity + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" +) + +func createCloudShellAuthRequest(ctx context.Context, resource string) (*http.Request, error) { + msiEndpoint := os.Getenv(msiEndpointEnvVar) + msiEndpointParsed, err := url.Parse(msiEndpoint) + if err != nil { + return nil, fmt.Errorf("couldn't parse %q: %s", msiEndpoint, err) + } + + data := url.Values{} + data.Set(resourceQueryParameterName, resource) + msiDataEncoded := data.Encode() + body := io.NopCloser(strings.NewReader(msiDataEncoded)) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, msiEndpointParsed.String(), body) + if err != nil { + return nil, fmt.Errorf("error creating http request %s", err) + } + + req.Header.Set(metaHTTPHeaderName, "true") + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go new file mode 100644 index 0000000000..ca3de4325f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go @@ -0,0 +1,717 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package managedidentity provides a client for retrieval of Managed Identity applications. 
+The Managed Identity Client is used to acquire a token for managed identity assigned to +an azure resource such as Azure function, app service, virtual machine, etc. to acquire a token +without using credentials. +*/ +package managedidentity + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type TokenSource = base.TokenSource + +const ( + TokenSourceIdentityProvider = base.TokenSourceIdentityProvider + TokenSourceCache = base.TokenSourceCache +) + +const ( + // DefaultToIMDS indicates that the source is defaulted to IMDS when no environment variables are set. + DefaultToIMDS Source = "DefaultToIMDS" + AzureArc Source = "AzureArc" + ServiceFabric Source = "ServiceFabric" + CloudShell Source = "CloudShell" + AzureML Source = "AzureML" + AppService Source = "AppService" + + // General request query parameter names + metaHTTPHeaderName = "Metadata" + apiVersionQueryParameterName = "api-version" + resourceQueryParameterName = "resource" + wwwAuthenticateHeaderName = "www-authenticate" + + // UAMI query parameter name + miQueryParameterClientId = "client_id" + miQueryParameterObjectId = "object_id" + miQueryParameterPrincipalId = "principal_id" + miQueryParameterResourceIdIMDS = "msi_res_id" + miQueryParameterResourceId = "mi_res_id" + + // IMDS + imdsDefaultEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + imdsAPIVersion = "2018-02-01" + systemAssignedManagedIdentity = "system_assigned_managed_identity" + + // Azure Arc + azureArcEndpoint = "http://127.0.0.1:40342/metadata/identity/oauth2/token" + azureArcAPIVersion = "2020-06-01" + azureArcFileExtension = ".key" + azureArcMaxFileSizeBytes int64 = 4096 + linuxTokenPath = "/var/opt/azcmagent/tokens" // #nosec G101 + linuxHimdsPath = "/opt/azcmagent/bin/himds" + azureConnectedMachine = "AzureConnectedMachineAgent" + himdsExecutableName = "himds.exe" + tokenName = "Tokens" + + // App Service + appServiceAPIVersion = "2019-08-01" + + // AzureML + azureMLAPIVersion = "2017-09-01" + // Service Fabric + serviceFabricAPIVersion = "2019-07-01-preview" + + // Environment Variables + identityEndpointEnvVar = "IDENTITY_ENDPOINT" + identityHeaderEnvVar = "IDENTITY_HEADER" + azurePodIdentityAuthorityHostEnvVar = "AZURE_POD_IDENTITY_AUTHORITY_HOST" + imdsEndVar = "IMDS_ENDPOINT" + msiEndpointEnvVar = "MSI_ENDPOINT" + msiSecretEnvVar = "MSI_SECRET" + identityServerThumbprintEnvVar = "IDENTITY_SERVER_THUMBPRINT" + + defaultRetryCount = 3 +) + +var retryCodesForIMDS = []int{ + http.StatusNotFound, // 404 + http.StatusGone, // 410 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + 
http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, // 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 +} + +var retryStatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 +} + +var getAzureArcPlatformPath = func(platform string) string { + switch platform { + case "windows": + return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, tokenName) + case "linux": + return linuxTokenPath + default: + return "" + } +} + +var getAzureArcHimdsFilePath = func(platform string) string { + switch platform { + case "windows": + return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, himdsExecutableName) + case "linux": + return linuxHimdsPath + default: + return "" + } +} + +type Source string + +type ID interface { + value() string +} + +type systemAssignedValue string // its private for a reason to make the input consistent. +type UserAssignedClientID string +type UserAssignedObjectID string +type UserAssignedResourceID string + +func (s systemAssignedValue) value() string { return string(s) } +func (c UserAssignedClientID) value() string { return string(c) } +func (o UserAssignedObjectID) value() string { return string(o) } +func (r UserAssignedResourceID) value() string { return string(r) } +func SystemAssigned() ID { + return systemAssignedValue(systemAssignedManagedIdentity) +} + +// cache never uses the client because instance discovery is always disabled. +var cacheManager *storage.Manager = storage.New(nil) + +type Client struct { + httpClient ops.HTTPClient + miType ID + source Source + authParams authority.AuthParams + retryPolicyEnabled bool + canRefresh *atomic.Value +} + +type AcquireTokenOptions struct { + claims string +} + +type ClientOption func(*Client) + +type AcquireTokenOption func(o *AcquireTokenOptions) + +// WithClaims sets additional claims to request for the token, such as those required by token revocation or conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +func WithClaims(claims string) AcquireTokenOption { + return func(o *AcquireTokenOptions) { + o.claims = claims + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) ClientOption { + return func(c *Client) { + c.httpClient = httpClient + } +} + +func WithRetryPolicyDisabled() ClientOption { + return func(c *Client) { + c.retryPolicyEnabled = false + } +} + +// Client to be used to acquire tokens for managed identity. 
+// ID: [SystemAssigned], [UserAssignedClientID], [UserAssignedResourceID], [UserAssignedObjectID] +// +// Options: [WithHTTPClient] +func New(id ID, options ...ClientOption) (Client, error) { + source, err := GetSource() + if err != nil { + return Client{}, err + } + + // Check for user-assigned restrictions based on the source + switch source { + case AzureArc: + switch id.(type) { + case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID: + return Client{}, errors.New("Azure Arc doesn't support user-assigned managed identities") + } + case AzureML: + switch id.(type) { + case UserAssignedObjectID, UserAssignedResourceID: + return Client{}, errors.New("Azure ML supports specifying a user-assigned managed identity by client ID only") + } + case CloudShell: + switch id.(type) { + case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID: + return Client{}, errors.New("Cloud Shell doesn't support user-assigned managed identities") + } + case ServiceFabric: + switch id.(type) { + case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID: + return Client{}, errors.New("Service Fabric API doesn't support specifying a user-assigned identity. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi") + } + } + + switch t := id.(type) { + case UserAssignedClientID: + if len(string(t)) == 0 { + return Client{}, fmt.Errorf("empty %T", t) + } + case UserAssignedResourceID: + if len(string(t)) == 0 { + return Client{}, fmt.Errorf("empty %T", t) + } + case UserAssignedObjectID: + if len(string(t)) == 0 { + return Client{}, fmt.Errorf("empty %T", t) + } + case systemAssignedValue: + default: + return Client{}, fmt.Errorf("unsupported type %T", id) + } + zero := atomic.Value{} + zero.Store(false) + client := Client{ + miType: id, + httpClient: shared.DefaultClient, + retryPolicyEnabled: true, + source: source, + canRefresh: &zero, + } + for _, option := range options { + option(&client) + } + fakeAuthInfo, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/managed_identity", false, true) + if err != nil { + return Client{}, err + } + client.authParams = authority.NewAuthParams(client.miType.value(), fakeAuthInfo) + return client, nil +} + +// GetSource detects and returns the managed identity source available in the environment. +func GetSource() (Source, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + identityHeader := os.Getenv(identityHeaderEnvVar) + identityServerThumbprint := os.Getenv(identityServerThumbprintEnvVar) + msiEndpoint := os.Getenv(msiEndpointEnvVar) + msiSecret := os.Getenv(msiSecretEnvVar) + imdsEndpoint := os.Getenv(imdsEndVar) + + if identityEndpoint != "" && identityHeader != "" { + if identityServerThumbprint != "" { + return ServiceFabric, nil + } + return AppService, nil + } else if msiEndpoint != "" { + if msiSecret != "" { + return AzureML, nil + } else { + return CloudShell, nil + } + } else if isAzureArcEnvironment(identityEndpoint, imdsEndpoint) { + return AzureArc, nil + } + + return DefaultToIMDS, nil +} + +// now wraps time.Now so tests can substitute a fake clock when exercising the +// proactive token refresh logic. +var now = time.Now + +// Acquires tokens from the configured managed identity on an Azure resource. 
+// +// Resource: scopes application is requesting access to +// Options: [WithClaims] +func (c Client) AcquireToken(ctx context.Context, resource string, options ...AcquireTokenOption) (AuthResult, error) { + resource = strings.TrimSuffix(resource, "/.default") + o := AcquireTokenOptions{} + for _, option := range options { + option(&o) + } + c.authParams.Scopes = []string{resource} + + // ignore cached access tokens when given claims + if o.claims == "" { + stResp, err := cacheManager.Read(ctx, c.authParams) + if err != nil { + return AuthResult{}, err + } + ar, err := base.AuthResultFromStorage(stResp) + if err == nil { + if !stResp.AccessToken.RefreshOn.T.IsZero() && !stResp.AccessToken.RefreshOn.T.After(now()) && c.canRefresh.CompareAndSwap(false, true) { + defer c.canRefresh.Store(false) + if tr, er := c.getToken(ctx, resource); er == nil { + return tr, nil + } + } + ar.AccessToken, err = c.authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) + return ar, err + } + } + return c.getToken(ctx, resource) +} + +func (c Client) getToken(ctx context.Context, resource string) (AuthResult, error) { + switch c.source { + case AzureArc: + return c.acquireTokenForAzureArc(ctx, resource) + case AzureML: + return c.acquireTokenForAzureML(ctx, resource) + case CloudShell: + return c.acquireTokenForCloudShell(ctx, resource) + case DefaultToIMDS: + return c.acquireTokenForIMDS(ctx, resource) + case AppService: + return c.acquireTokenForAppService(ctx, resource) + case ServiceFabric: + return c.acquireTokenForServiceFabric(ctx, resource) + default: + return AuthResult{}, fmt.Errorf("unsupported source %q", c.source) + } +} + +func (c Client) acquireTokenForAppService(ctx context.Context, resource string) (AuthResult, error) { + req, err := createAppServiceAuthRequest(ctx, c.miType, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForIMDS(ctx context.Context, resource string) (AuthResult, error) { + req, err := createIMDSAuthRequest(ctx, c.miType, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForCloudShell(ctx context.Context, resource string) (AuthResult, error) { + req, err := createCloudShellAuthRequest(ctx, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForAzureML(ctx context.Context, resource string) (AuthResult, error) { + req, err := createAzureMLAuthRequest(ctx, c.miType, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForServiceFabric(ctx context.Context, resource string) (AuthResult, error) { + req, err := createServiceFabricAuthRequest(ctx, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, 
tokenResponse) +} + +func (c Client) acquireTokenForAzureArc(ctx context.Context, resource string) (AuthResult, error) { + req, err := createAzureArcAuthRequest(ctx, resource, "") + if err != nil { + return AuthResult{}, err + } + + response, err := c.httpClient.Do(req) + if err != nil { + return AuthResult{}, err + } + defer response.Body.Close() + + if response.StatusCode != http.StatusUnauthorized { + return AuthResult{}, fmt.Errorf("expected a 401 response, received %d", response.StatusCode) + } + + secret, err := c.getAzureArcSecretKey(response, runtime.GOOS) + if err != nil { + return AuthResult{}, err + } + + secondRequest, err := createAzureArcAuthRequest(ctx, resource, string(secret)) + if err != nil { + return AuthResult{}, err + } + + tokenResponse, err := c.getTokenForRequest(secondRequest, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func authResultFromToken(authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) { + if cacheManager == nil { + return AuthResult{}, errors.New("cache instance is nil") + } + account, err := cacheManager.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + // if refreshOn is not set, set it to half of the time until expiry if expiry is more than 2 hours away + if token.RefreshOn.T.IsZero() { + if lifetime := time.Until(token.ExpiresOn); lifetime > 2*time.Hour { + token.RefreshOn.T = time.Now().Add(lifetime / 2) + } + } + ar, err := base.NewAuthResult(token, account) + if err != nil { + return AuthResult{}, err + } + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) + return ar, err +} + +// contains checks if the element is present in the list. +func contains[T comparable](list []T, element T) bool { + for _, v := range list { + if v == element { + return true + } + } + return false +} + +// retry performs an HTTP request with retries based on the provided options. 
+func (c Client) retry(maxRetries int, req *http.Request) (*http.Response, error) { + var resp *http.Response + var err error + for attempt := 0; attempt < maxRetries; attempt++ { + tryCtx, tryCancel := context.WithTimeout(req.Context(), time.Minute) + defer tryCancel() + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } + cloneReq := req.Clone(tryCtx) + resp, err = c.httpClient.Do(cloneReq) + retrylist := retryStatusCodes + if c.source == DefaultToIMDS { + retrylist = retryCodesForIMDS + } + if err == nil && !contains(retrylist, resp.StatusCode) { + return resp, nil + } + select { + case <-time.After(time.Second): + case <-req.Context().Done(): + err = req.Context().Err() + return resp, err + } + } + return resp, err +} + +func (c Client) getTokenForRequest(req *http.Request, resource string) (accesstokens.TokenResponse, error) { + r := accesstokens.TokenResponse{} + var resp *http.Response + var err error + + if c.retryPolicyEnabled { + resp, err = c.retry(defaultRetryCount, req) + } else { + resp, err = c.httpClient.Do(req) + } + if err != nil { + return r, err + } + responseBytes, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return r, err + } + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted: + default: + sd := strings.TrimSpace(string(responseBytes)) + if sd != "" { + return r, errors.CallErr{ + Req: req, + Resp: resp, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", + req.URL.String(), + req.Method, + resp.StatusCode, + sd), + } + } + return r, errors.CallErr{ + Req: req, + Resp: resp, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, resp.StatusCode), + } + } + + err = json.Unmarshal(responseBytes, &r) + if err != nil { + return r, errors.InvalidJsonErr{ + Err: fmt.Errorf("error parsing the json error: %s", err), + } + } + r.GrantedScopes.Slice = append(r.GrantedScopes.Slice, resource) + + return r, err +} + +func createAppServiceAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil) + if err != nil { + return nil, err + } + req.Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeaderEnvVar)) + q := req.URL.Query() + q.Set("api-version", appServiceAPIVersion) + q.Set("resource", resource) + switch t := id.(type) { + case UserAssignedClientID: + q.Set(miQueryParameterClientId, string(t)) + case UserAssignedResourceID: + q.Set(miQueryParameterResourceId, string(t)) + case UserAssignedObjectID: + q.Set(miQueryParameterObjectId, string(t)) + case systemAssignedValue: + default: + return nil, fmt.Errorf("unsupported type %T", id) + } + req.URL.RawQuery = q.Encode() + return req, nil +} + +func createIMDSAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) { + msiEndpoint, err := url.Parse(imdsDefaultEndpoint) + if err != nil { + return nil, fmt.Errorf("couldn't parse %q: %s", imdsDefaultEndpoint, err) + } + msiParameters := msiEndpoint.Query() + msiParameters.Set(apiVersionQueryParameterName, imdsAPIVersion) + msiParameters.Set(resourceQueryParameterName, resource) + + switch t := id.(type) { + case UserAssignedClientID: + msiParameters.Set(miQueryParameterClientId, string(t)) + case UserAssignedResourceID: + msiParameters.Set(miQueryParameterResourceIdIMDS, string(t)) + case UserAssignedObjectID: + 
msiParameters.Set(miQueryParameterObjectId, string(t)) + case systemAssignedValue: // not adding anything + default: + return nil, fmt.Errorf("unsupported type %T", id) + } + + msiEndpoint.RawQuery = msiParameters.Encode() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating http request %s", err) + } + req.Header.Set(metaHTTPHeaderName, "true") + return req, nil +} + +func createAzureArcAuthRequest(ctx context.Context, resource string, key string) (*http.Request, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + if identityEndpoint == "" { + identityEndpoint = azureArcEndpoint + } + msiEndpoint, parseErr := url.Parse(identityEndpoint) + + if parseErr != nil { + return nil, fmt.Errorf("couldn't parse %q: %s", identityEndpoint, parseErr) + } + + msiParameters := msiEndpoint.Query() + msiParameters.Set(apiVersionQueryParameterName, azureArcAPIVersion) + msiParameters.Set(resourceQueryParameterName, resource) + + msiEndpoint.RawQuery = msiParameters.Encode() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating http request %s", err) + } + req.Header.Set(metaHTTPHeaderName, "true") + + if key != "" { + req.Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + } + + return req, nil +} + +func isAzureArcEnvironment(identityEndpoint, imdsEndpoint string) bool { + if identityEndpoint != "" && imdsEndpoint != "" { + return true + } + himdsFilePath := getAzureArcHimdsFilePath(runtime.GOOS) + if himdsFilePath != "" { + if _, err := os.Stat(himdsFilePath); err == nil { + return true + } + } + return false +} + +func (c *Client) getAzureArcSecretKey(response *http.Response, platform string) (string, error) { + wwwAuthenticateHeader := response.Header.Get(wwwAuthenticateHeaderName) + + if len(wwwAuthenticateHeader) == 0 { + return "", errors.New("response has no www-authenticate header") + } + + // check if the platform is supported + expectedSecretFilePath := getAzureArcPlatformPath(platform) + if expectedSecretFilePath == "" { + return "", errors.New("platform not supported, expected linux or windows") + } + + parts := strings.Split(wwwAuthenticateHeader, "Basic realm=") + if len(parts) < 2 { + return "", fmt.Errorf("basic realm= not found in the string, instead found: %s", wwwAuthenticateHeader) + } + + secretFilePath := parts + + // check that the file in the file path is a .key file + fileName := filepath.Base(secretFilePath[1]) + if !strings.HasSuffix(fileName, azureArcFileExtension) { + return "", fmt.Errorf("invalid file extension, expected %s, got %s", azureArcFileExtension, filepath.Ext(fileName)) + } + + // check that file path from header matches the expected file path for the platform + if expectedSecretFilePath != filepath.Dir(secretFilePath[1]) { + return "", fmt.Errorf("invalid file path, expected %s, got %s", expectedSecretFilePath, filepath.Dir(secretFilePath[1])) + } + + fileInfo, err := os.Stat(secretFilePath[1]) + if err != nil { + return "", fmt.Errorf("failed to get metadata for %s due to error: %s", secretFilePath[1], err) + } + + // Throw an error if the secret file's size is greater than 4096 bytes + if s := fileInfo.Size(); s > azureArcMaxFileSizeBytes { + return "", fmt.Errorf("invalid secret file size, expected %d, file size was %d", azureArcMaxFileSizeBytes, s) + } + + // Attempt to read the contents of the secret file + secret, err := 
os.ReadFile(secretFilePath[1]) + if err != nil { + return "", fmt.Errorf("failed to read %q due to error: %s", secretFilePath[1], err) + } + + return string(secret), nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go new file mode 100644 index 0000000000..535065e9d9 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package managedidentity + +import ( + "context" + "net/http" + "os" +) + +func createServiceFabricAuthRequest(ctx context.Context, resource string) (*http.Request, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("Secret", os.Getenv(identityHeaderEnvVar)) + q := req.URL.Query() + q.Set("api-version", serviceFabricAPIVersion) + q.Set("resource", resource) + req.URL.RawQuery = q.Encode() + return req, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index 392e5e43f7..7beed26174 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -51,6 +51,13 @@ type AuthenticationScheme = authority.AuthenticationScheme type Account = shared.Account +type TokenSource = base.TokenSource + +const ( + TokenSourceIdentityProvider = base.TokenSourceIdentityProvider + TokenSourceCache = base.TokenSourceCache +) + var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid") // clientOptions configures the Client's behavior. 
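The hunk above exports `TokenSource` and its `TokenSourceCache`/`TokenSourceIdentityProvider` constants from the public client. A minimal sketch of how a caller might use them, assuming the returned `AuthResult` exposes the token origin through a `Metadata.TokenSource` field; the client ID, authority, and scopes are placeholders, not values from this patch:

```go
// Minimal sketch, not part of this patch. Assumes AuthResult reports where the
// token came from via Metadata.TokenSource; client ID, authority, and scopes
// below are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

func printTokenOrigin(ctx context.Context, client public.Client, account public.Account) error {
	res, err := client.AcquireTokenSilent(ctx,
		[]string{"https://graph.microsoft.com/.default"},
		public.WithSilentAccount(account),
	)
	if err != nil {
		return err
	}
	switch res.Metadata.TokenSource {
	case public.TokenSourceCache:
		fmt.Println("token served from the local cache")
	case public.TokenSourceIdentityProvider:
		fmt.Println("token freshly issued by the identity provider")
	}
	return nil
}

func main() {
	// "your-client-id" and the authority are placeholders.
	client, err := public.New("your-client-id",
		public.WithAuthority("https://login.microsoftonline.com/common"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	accounts, err := client.Accounts(ctx)
	if err != nil || len(accounts) == 0 {
		log.Fatal("no cached accounts available for a silent call")
	}
	if err := printTokenOrigin(ctx, client, accounts[0]); err != nil {
		log.Fatal(err)
	}
}
```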
@@ -387,7 +394,7 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s if err != nil { return AuthResult{}, err } - return pca.base.AuthResultFromToken(ctx, authParams, token, true) + return pca.base.AuthResultFromToken(ctx, authParams, token) } type DeviceCodeResult = accesstokens.DeviceCodeResult @@ -412,7 +419,7 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error if err != nil { return AuthResult{}, err } - return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true) + return d.client.base.AuthResultFromToken(ctx, d.authParams, token) } // acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode @@ -687,7 +694,7 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, return AuthResult{}, err } - return pca.base.AuthResultFromToken(ctx, authParams, token, true) + return pca.base.AuthResultFromToken(ctx, authParams, token) } type interactiveAuthResult struct { diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 64ae168931..70b50206e6 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,72 @@ # Change Log +## [v1.144.0] - 2025-04-24 + +- #818 - @dweinshenker - Support Valkey in DatabaseOptions + +## [v1.143.0] - 2025-04-22 + +- #815 - @StephenVarela - Support Load Balancers tls-cipher-policy + +## [v1.142.0] - 2025-03-27 + +- #813 - @lfundaro-do - partner-network-connect: fix typo +- #811 - @lfundaro-do - fix partner attachment rename +- #810 - @apinonformoso - VPC-4359: remove custom unmarshaler for PNCs +- #809 - @apinonformoso - hotfix: json field name +- #808 - @apinonformoso - fix partner network connect json tags +- #807 - @bentranter - Bump Go version to v1.23 + +## [v1.141.0] - 2025-03-20 + +- #805 - @singhsaubhikdo - BLOCK-4316: Adds region param in ListSnapshot for resource type volume +- #802 - @apinonformoso - VPC-4312: rename partner interconnect attachment to partner network connect +- #774 - @blesswinsamuel - APPS-10284 Remove "closed beta" note in archive feature to prep for GA release +- #797 - @kperath - add support for cluster status messages + +## [v1.140.0] - 2025-03-14 + +- #800 - @lee-aaron - support Spaces Keys GET by Access Key ID + +## [v1.139.0] - 2025-03-12 + +- #798 - @dylanrhysscott - Fix: Update godo to use simplified template response and provide consistent struct naming +- #796 - @apinonformoso - fix partner interconnect attachment json request response +- #795 - @dylanrhysscott - CON-11904 Ensure taints are correctly returned via node template endpoint +- #794 - @brunograsselli - Update partner interconnect attachment comments +- #793 - @apinonformoso - add auth_key field +- #789 - @guptado - [VPC-3917] Update get service key response model + +## [v1.138.0] - 2025-02-18 + +- #785 - @guptado - Support partner interconnect GetBgpAuthKey and RegenerateServiceKey operations +- #787 - @andrewsomething - ci: upgrade to actions/cache@v4 +- #786 - @m3co-code - add flags for doks routing-agent plugin +- #784 - @asaha2 - Support name and id filters for list op + +## [v1.137.0] - 2025-02-12 + +- #782 - @apinonformoso - fix partner interconnect json tag +- #781 - @dylanrhysscott - CON-11810 Implement GetNodePoolTemplate endpoint for DOKS godo client + +## [v1.136.0] - 2025-01-28 + +- #776 - @danaelhe - Databases: Support online-migrations +- #777 - @apinonformoso - update bgp to be a pointer + 
+## [v1.135.0] - 2025-01-27 +- #766 - @dhij - kubernetes: add cluster autoscaler config +- #775 - @jvasilevsky - LBASA-3620: add network_stack field to load balancers model +- #773 - @blesswinsamuel - Add field to customize the offline page during app maintenance + +## [v1.134.0] - 2025-01-15 +- #771 - @d-honeybadger - add ID field to KubernetesClusterUser response +- #768 - @lee-aaron - support Spaces Keys API + +## [v1.133.0] - 2025-01-10 +- #769 - @guptado - support partner interconnect attachment operations +- #767 - @loosla - [kubernetes]: make kubernetes maintenance_policy day case insensitive + ## [v1.132.0] - 2024-12-17 - #764 - @greeshmapill - APPS-9365: Add bitbucket source to App Spec diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go index 63457cda44..99fc2e90ce 100644 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -468,8 +468,10 @@ type AppLogDestinationSpecPapertrail struct { type AppMaintenanceSpec struct { // Indicates whether maintenance mode should be enabled for the app. Enabled bool `json:"enabled,omitempty"` - // Indicates whether the app should be archived. Setting this to true implies that enabled is set to true. Note that this feature is currently in closed beta. + // Indicates whether the app should be archived. Setting this to true implies that enabled is set to true. Archive bool `json:"archive,omitempty"` + // A custom offline page to display when maintenance mode is enabled or the app is archived. + OfflinePageURL string `json:"offline_page_url,omitempty"` } // AppRouteSpec struct for AppRouteSpec diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go index 4d9a214ae3..9a5bf60122 100644 --- a/vendor/github.com/digitalocean/godo/apps_accessors.go +++ b/vendor/github.com/digitalocean/godo/apps_accessors.go @@ -1453,6 +1453,14 @@ func (a *AppMaintenanceSpec) GetEnabled() bool { return a.Enabled } +// GetOfflinePageURL returns the OfflinePageURL field. +func (a *AppMaintenanceSpec) GetOfflinePageURL() string { + if a == nil { + return "" + } + return a.OfflinePageURL +} + // GetAppID returns the AppID field. func (a *AppProposeRequest) GetAppID() string { if a == nil { diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 1217ef05e8..3a09fd72ec 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -42,6 +42,8 @@ const ( databaseIndexPath = databaseBasePath + "/%s/indexes/%s" databaseLogsinkPath = databaseBasePath + "/%s/logsink/%s" databaseLogsinksPath = databaseBasePath + "/%s/logsink" + databaseOnlineMigrationsPath = databaseBasePath + "/%s/online-migration" + databaseOnlineMigrationPath = databaseBasePath + "/%s/online-migration/%s" ) // SQL Mode constants allow for MySQL-specific SQL flavor configuration. 
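The two path constants above back the online-migration endpoints that this patch adds to `DatabasesService` in the following hunks. A minimal sketch of how a godo caller might start a migration and check its status; the API token, cluster ID, and source-database details are placeholders, not values from this patch:

```go
// Minimal sketch, not part of this patch: starting an online migration with the
// methods added to DatabasesService. Token, cluster ID, and source credentials
// are illustrative placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("my-digitalocean-api-token")
	ctx := context.Background()
	clusterID := "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30" // target cluster (placeholder)

	status, _, err := client.Databases.StartOnlineMigration(ctx, clusterID, &godo.DatabaseStartOnlineMigrationRequest{
		Source: &godo.DatabaseOnlineMigrationConfig{
			Host:         "source-db.example.com",
			Port:         25060,
			DatabaseName: "defaultdb",
			Username:     "doadmin",
			Password:     "secret",
		},
		IgnoreDBs: []string{"scratch"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("migration", status.ID, "is", status.Status)

	// Later, poll the most recent migration's status for the cluster.
	current, _, err := client.Databases.GetOnlineMigrationStatus(ctx, clusterID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current status:", current.Status)
}
```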
@@ -179,6 +181,9 @@ type DatabasesService interface { ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) + StartOnlineMigration(ctx context.Context, databaseID string, onlineMigrationRequest *DatabaseStartOnlineMigrationRequest) (*DatabaseOnlineMigrationStatus, *Response, error) + StopOnlineMigration(ctx context.Context, databaseID, migrationID string) (*Response, error) + GetOnlineMigrationStatus(ctx context.Context, databaseID string) (*DatabaseOnlineMigrationStatus, *Response, error) } // DatabasesServiceOp handles communication with the Databases related methods @@ -366,6 +371,13 @@ type DatabaseLogsink struct { Config *DatabaseLogsinkConfig `json:"config,omitempty"` } +// DatabaseOnlineMigrationStatus represents an online migration status +type DatabaseOnlineMigrationStatus struct { + ID string `json:"id"` + Status string `json:"status"` + CreatedAt string `json:"created_at"` +} + // TopicPartition represents the state of a Kafka topic partition type TopicPartition struct { EarliestOffset uint64 `json:"earliest_offset,omitempty"` @@ -515,6 +527,13 @@ type DatabaseFirewallRule struct { CreatedAt time.Time `json:"created_at"` } +// DatabaseStartOnlineMigrationRequest is used to start an online migration for a database cluster +type DatabaseStartOnlineMigrationRequest struct { + Source *DatabaseOnlineMigrationConfig `json:"source"` + DisableSSL bool `json:"disable_ssl,omitempty"` + IgnoreDBs []string `json:"ignore_dbs,omitempty"` +} + // DatabaseCreateLogsinkRequest is used to create logsink for a database cluster type DatabaseCreateLogsinkRequest struct { Name string `json:"sink_name"` @@ -544,6 +563,15 @@ type DatabaseLogsinkConfig struct { Cert string `json:"cert,omitempty"` } +// DatabaseOnlineMigrationConfig represents the configuration options for database online migrations. +type DatabaseOnlineMigrationConfig struct { + Host string `json:"host,omitempty"` + Port int `json:"port,omitempty"` + DatabaseName string `json:"dbname,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` +} + // PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters. type PostgreSQLConfig struct { AutovacuumFreezeMaxAge *int `json:"autovacuum_freeze_max_age,omitempty"` @@ -871,6 +899,7 @@ type DatabaseOptions struct { RedisOptions DatabaseEngineOptions `json:"redis"` KafkaOptions DatabaseEngineOptions `json:"kafka"` OpensearchOptions DatabaseEngineOptions `json:"opensearch"` + ValkeyOptions DatabaseEngineOptions `json:"valkey"` } // DatabaseEngineOptions represents the configuration options that are available for a given database engine @@ -1975,3 +2004,50 @@ func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, lo } return resp, nil } + +// StartOnlineMigration starts an online migration for a database. Migrating a cluster establishes a connection with an existing cluster +// and replicates its contents to the target cluster. Online migration is only available for MySQL, PostgreSQL, and Redis clusters. 
+func (svc *DatabasesServiceOp) StartOnlineMigration(ctx context.Context, databaseID string, onlineMigration *DatabaseStartOnlineMigrationRequest) (*DatabaseOnlineMigrationStatus, *Response, error) { + path := fmt.Sprintf(databaseOnlineMigrationsPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, onlineMigration) + if err != nil { + return nil, nil, err + } + + root := new(DatabaseOnlineMigrationStatus) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root, resp, nil +} + +// GetOnlineMigrationStatus retrieves the status of the most recent online migration +func (svc *DatabasesServiceOp) GetOnlineMigrationStatus(ctx context.Context, databaseID string) (*DatabaseOnlineMigrationStatus, *Response, error) { + path := fmt.Sprintf(databaseOnlineMigrationsPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(DatabaseOnlineMigrationStatus) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root, resp, nil +} + +// StopOnlineMigration stops an online migration +func (svc *DatabasesServiceOp) StopOnlineMigration(ctx context.Context, databaseID, migrationID string) (*Response, error) { + path := fmt.Sprintf(databaseOnlineMigrationPath, databaseID, migrationID) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 2469c14a08..2aedbcdb62 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.132.0" + libraryVersion = "1.144.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" @@ -88,11 +88,13 @@ type Client struct { ReservedIPV6Actions ReservedIPV6ActionsService Sizes SizesService Snapshots SnapshotsService + SpacesKeys SpacesKeysService Storage StorageService StorageActions StorageActionsService Tags TagsService UptimeChecks UptimeChecksService VPCs VPCsService + PartnerAttachment PartnerAttachmentService // Optional function called after every successful request made to the DO APIs onRequestCompleted RequestCompletionCallback @@ -302,11 +304,13 @@ func NewClient(httpClient *http.Client) *Client { c.ReservedIPV6Actions = &ReservedIPV6ActionsServiceOp{client: c} c.Sizes = &SizesServiceOp{client: c} c.Snapshots = &SnapshotsServiceOp{client: c} + c.SpacesKeys = &SpacesKeysServiceOp{client: c} c.Storage = &StorageServiceOp{client: c} c.StorageActions = &StorageActionsServiceOp{client: c} c.Tags = &TagsServiceOp{client: c} c.UptimeChecks = &UptimeChecksServiceOp{client: c} c.VPCs = &VPCsServiceOp{client: c} + c.PartnerAttachment = &PartnerAttachmentServiceOp{client: c} c.headers = make(map[string]string) diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index 9b3bcfa1a6..9d97432172 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -40,6 +40,7 @@ type KubernetesService interface { CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error) 
GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error) + GetNodePoolTemplate(ctx context.Context, clusterID string, nodePoolName string) (*KubernetesNodePoolTemplate, *Response, error) ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) UpdateNodePool(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error) // RecycleNodePoolNodes is DEPRECATED please use DeleteNode @@ -54,6 +55,8 @@ type KubernetesService interface { RunClusterlint(ctx context.Context, clusterID string, req *KubernetesRunClusterlintRequest) (string, *Response, error) GetClusterlintResults(ctx context.Context, clusterID string, req *KubernetesGetClusterlintRequest) ([]*ClusterlintDiagnostic, *Response, error) + + GetClusterStatusMessages(ctx context.Context, clusterID string, req *KubernetesGetClusterStatusMessagesRequest) ([]*KubernetesClusterStatusMessage, *Response, error) } var _ KubernetesService = &KubernetesServiceOp{} @@ -78,20 +81,24 @@ type KubernetesClusterCreateRequest struct { NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"` - MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"` - AutoUpgrade bool `json:"auto_upgrade"` - SurgeUpgrade bool `json:"surge_upgrade"` - ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` + MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"` + AutoUpgrade bool `json:"auto_upgrade"` + SurgeUpgrade bool `json:"surge_upgrade"` + ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` + ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"` + RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"` } // KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster. 
type KubernetesClusterUpdateRequest struct { - Name string `json:"name,omitempty"` - Tags []string `json:"tags,omitempty"` - MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` - AutoUpgrade *bool `json:"auto_upgrade,omitempty"` - SurgeUpgrade bool `json:"surge_upgrade,omitempty"` - ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` + Name string `json:"name,omitempty"` + Tags []string `json:"tags,omitempty"` + MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` + AutoUpgrade *bool `json:"auto_upgrade,omitempty"` + SurgeUpgrade bool `json:"surge_upgrade,omitempty"` + ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` + ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"` + RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"` // Convert cluster to run highly available control plane HA *bool `json:"ha,omitempty"` @@ -187,6 +194,19 @@ type KubernetesGetClusterlintRequest struct { RunId string `json:"run_id"` } +type clusterStatusMessagesRoot struct { + Messages []*KubernetesClusterStatusMessage `json:"messages"` +} + +type KubernetesClusterStatusMessage struct { + Message string `json:"message"` + Timestamp time.Time `json:"timestamp"` +} + +type KubernetesGetClusterStatusMessagesRequest struct { + Since *time.Time `json:"since"` +} + // KubernetesCluster represents a Kubernetes cluster. type KubernetesCluster struct { ID string `json:"id,omitempty"` @@ -205,11 +225,13 @@ type KubernetesCluster struct { NodePools []*KubernetesNodePool `json:"node_pools,omitempty"` - MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` - AutoUpgrade bool `json:"auto_upgrade,omitempty"` - SurgeUpgrade bool `json:"surge_upgrade,omitempty"` - RegistryEnabled bool `json:"registry_enabled,omitempty"` - ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` + MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` + AutoUpgrade bool `json:"auto_upgrade,omitempty"` + SurgeUpgrade bool `json:"surge_upgrade,omitempty"` + RegistryEnabled bool `json:"registry_enabled,omitempty"` + ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"` + ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"` + RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"` Status *KubernetesClusterStatus `json:"status,omitempty"` CreatedAt time.Time `json:"created_at,omitempty"` @@ -223,6 +245,7 @@ func (kc KubernetesCluster) URN() string { // KubernetesClusterUser represents a Kubernetes cluster user. type KubernetesClusterUser struct { + ID string `json:"id,omitempty"` Username string `json:"username,omitempty"` Groups []string `json:"groups,omitempty"` } @@ -251,6 +274,17 @@ type KubernetesControlPlaneFirewall struct { AllowedAddresses []string `json:"allowed_addresses"` } +// KubernetesRoutingAgent represents information about the routing-agent cluster plugin. +type KubernetesRoutingAgent struct { + Enabled *bool `json:"enabled"` +} + +// KubernetesClusterAutoscalerConfiguration represents Kubernetes cluster autoscaler configuration. 
+type KubernetesClusterAutoscalerConfiguration struct { + ScaleDownUtilizationThreshold *float64 `json:"scale_down_utilization_threshold"` + ScaleDownUnneededTime *string `json:"scale_down_unneeded_time"` +} + // KubernetesMaintenancePolicyDay represents the possible days of a maintenance // window type KubernetesMaintenancePolicyDay int @@ -315,7 +349,7 @@ var ( // KubernetesMaintenanceToDay returns the appropriate KubernetesMaintenancePolicyDay for the given string. func KubernetesMaintenanceToDay(day string) (KubernetesMaintenancePolicyDay, error) { - d, ok := toDay[day] + d, ok := toDay[strings.ToLower(day)] if !ok { return 0, fmt.Errorf("unknown day: %q", day) } @@ -416,6 +450,20 @@ type KubernetesNodePool struct { Nodes []*KubernetesNode `json:"nodes,omitempty"` } +// KubernetesNodePool represents the node pool template data for a given pool. +type KubernetesNodePoolTemplate struct { + Template *KubernetesNodeTemplate +} + +// KubernetesNodePoolResources represents the resources within a given template for a node pool +// This follows https://pkg.go.dev/k8s.io/kubernetes@v1.32.1/pkg/scheduler/framework#Resource to represent +// node resources within the node object. +type KubernetesNodePoolResources struct { + CPU int64 `json:"cpu,omitempty"` + Memory string `json:"memory,omitempty"` + Pods int64 `json:"pods,omitempty"` +} + // KubernetesNode represents a Node in a node pool in a Kubernetes cluster. type KubernetesNode struct { ID string `json:"id,omitempty"` @@ -427,6 +475,17 @@ type KubernetesNode struct { UpdatedAt time.Time `json:"updated_at,omitempty"` } +// KubernetesNodeTemplate represents a template in a node pool in a Kubernetes cluster. +type KubernetesNodeTemplate struct { + ClusterUUID string `json:"cluster_uuid,omitempty"` + Name string `json:"name,omitempty"` + Slug string `json:"slug,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Taints []string `json:"taints,omitempty"` + Capacity *KubernetesNodePoolResources `json:"capacity,omitempty"` + Allocatable *KubernetesNodePoolResources `json:"allocatable,omitempty"` +} + // KubernetesNodeStatus represents the status of a particular Node in a Kubernetes cluster. type KubernetesNodeStatus struct { State string `json:"state,omitempty"` @@ -794,6 +853,24 @@ func (svc *KubernetesServiceOp) GetNodePool(ctx context.Context, clusterID, pool return root.NodePool, resp, nil } +// GetNodePoolTemplate retrieves the template used for a given node pool to scale up from zero. +func (svc *KubernetesServiceOp) GetNodePoolTemplate(ctx context.Context, clusterID string, nodePoolName string) (*KubernetesNodePoolTemplate, *Response, error) { + path, err := url.JoinPath(kubernetesClustersPath, clusterID, "node_pools_template", nodePoolName) + if err != nil { + return nil, nil, err + } + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(KubernetesNodePoolTemplate) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root, resp, nil +} + // ListNodePools lists all the node pools found in a Kubernetes cluster. 
func (svc *KubernetesServiceOp) ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) { path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID) @@ -980,3 +1057,28 @@ func (svc *KubernetesServiceOp) GetClusterlintResults(ctx context.Context, clust } return root.Diagnostics, resp, nil } + +func (svc *KubernetesServiceOp) GetClusterStatusMessages(ctx context.Context, clusterID string, req *KubernetesGetClusterStatusMessagesRequest) ([]*KubernetesClusterStatusMessage, *Response, error) { + path := fmt.Sprintf("%s/%s/status_messages", kubernetesClustersPath, clusterID) + + if req != nil { + v := make(url.Values) + if req.Since != nil { + v.Set("since", req.Since.Format(time.RFC3339)) + } + if query := v.Encode(); query != "" { + path = path + "?" + query + } + } + + request, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(clusterStatusMessagesRoot) + resp, err := svc.client.Do(ctx, request, root) + if err != nil { + return nil, resp, err + } + return root.Messages, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go index a12729dd63..11d8d35bc3 100644 --- a/vendor/github.com/digitalocean/godo/load_balancers.go +++ b/vendor/github.com/digitalocean/godo/load_balancers.go @@ -22,6 +22,14 @@ const ( // Load Balancer network types LoadBalancerNetworkTypeExternal = "EXTERNAL" LoadBalancerNetworkTypeInternal = "INTERNAL" + + // Load Balancer network_stack types + LoadBalancerNetworkStackIPv4 = "IPV4" + LoadBalancerNetworkStackDualstack = "DUALSTACK" + + // Supported TLS Cipher policies + LoadBalancerTLSCipherPolicyDefault = "DEFAULT" + LoadBalancerTLSCipherPolicyStrong = "STRONG" ) // LoadBalancersService is an interface for managing load balancers with the DigitalOcean API. @@ -29,6 +37,8 @@ const ( type LoadBalancersService interface { Get(context.Context, string) (*LoadBalancer, *Response, error) List(context.Context, *ListOptions) ([]LoadBalancer, *Response, error) + ListByNames(context.Context, []string, *ListOptions) ([]LoadBalancer, *Response, error) + ListByUUIDs(context.Context, []string, *ListOptions) ([]LoadBalancer, *Response, error) Create(context.Context, *LoadBalancerRequest) (*LoadBalancer, *Response, error) Update(ctx context.Context, lbID string, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) Delete(ctx context.Context, lbID string) (*Response, error) @@ -74,6 +84,8 @@ type LoadBalancer struct { GLBSettings *GLBSettings `json:"glb_settings,omitempty"` TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"` Network string `json:"network,omitempty"` + NetworkStack string `json:"network_stack,omitempty"` + TLSCipherPolicy string `json:"tls_cipher_policy,omitempty"` } // String creates a human-readable description of a LoadBalancer. 
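The hunk above adds the network-stack and TLS-cipher-policy constants, the `NetworkStack` and `TLSCipherPolicy` request fields, and the name/UUID list filters to the load-balancer API; their implementations follow in the next hunks. A minimal sketch of how they might be used together; the API token, certificate ID, and balancer settings are placeholders, not values from this patch:

```go
// Minimal sketch, not part of this patch: creating a load balancer with the new
// NetworkStack/TLSCipherPolicy fields and looking it up via ListByNames.
// Token, certificate ID, and settings are illustrative placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("my-digitalocean-api-token")
	ctx := context.Background()

	lb, _, err := client.LoadBalancers.Create(ctx, &godo.LoadBalancerRequest{
		Name:            "web-lb",
		Region:          "nyc3",
		NetworkStack:    godo.LoadBalancerNetworkStackDualstack,
		TLSCipherPolicy: godo.LoadBalancerTLSCipherPolicyStrong,
		ForwardingRules: []godo.ForwardingRule{{
			EntryProtocol:  "https",
			EntryPort:      443,
			TargetProtocol: "http",
			TargetPort:     80,
			CertificateID:  "example-certificate-id", // placeholder
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created load balancer", lb.ID)

	// Look the balancer up again by name using the new filtered list call.
	matches, _, err := client.LoadBalancers.ListByNames(ctx, []string{"web-lb"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("found", len(matches), "load balancer(s) named web-lb")
}
```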
@@ -108,6 +120,8 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest { HTTPIdleTimeoutSeconds: l.HTTPIdleTimeoutSeconds, TargetLoadBalancerIDs: append([]string(nil), l.TargetLoadBalancerIDs...), Network: l.Network, + NetworkStack: l.NetworkStack, + TLSCipherPolicy: l.TLSCipherPolicy, } if l.DisableLetsEncryptDNSRecords != nil { @@ -247,6 +261,8 @@ type LoadBalancerRequest struct { GLBSettings *GLBSettings `json:"glb_settings,omitempty"` TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"` Network string `json:"network,omitempty"` + NetworkStack string `json:"network_stack,omitempty"` + TLSCipherPolicy string `json:"tls_cipher_policy,omitempty"` } // String creates a human-readable description of a LoadBalancerRequest. @@ -396,6 +412,72 @@ func (l *LoadBalancersServiceOp) List(ctx context.Context, opt *ListOptions) ([] return root.LoadBalancers, resp, err } +// ListByNames lists load balancers filtered by resource names, with optional pagination. +func (l *LoadBalancersServiceOp) ListByNames(ctx context.Context, names []string, opt *ListOptions) ([]LoadBalancer, *Response, error) { + path, err := addOptions(loadBalancersBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + q := req.URL.Query() + for _, name := range names { + q.Add("names", name) + } + req.URL.RawQuery = q.Encode() + + root := new(loadBalancersRoot) + resp, err := l.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.LoadBalancers, resp, err +} + +// ListByUUIDs lists load balancers filtered by resource UUIDs, with optional pagination. +func (l *LoadBalancersServiceOp) ListByUUIDs(ctx context.Context, uuids []string, opt *ListOptions) ([]LoadBalancer, *Response, error) { + path, err := addOptions(loadBalancersBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + q := req.URL.Query() + for _, uuid := range uuids { + q.Add("uuids", uuid) + } + req.URL.RawQuery = q.Encode() + + root := new(loadBalancersRoot) + resp, err := l.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.LoadBalancers, resp, err +} + // Create a new load balancer with a given configuration. func (l *LoadBalancersServiceOp) Create(ctx context.Context, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) { req, err := l.client.NewRequest(ctx, http.MethodPost, loadBalancersBasePath, lbr) diff --git a/vendor/github.com/digitalocean/godo/partner_network_connect.go b/vendor/github.com/digitalocean/godo/partner_network_connect.go new file mode 100644 index 0000000000..37f508cc87 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/partner_network_connect.go @@ -0,0 +1,415 @@ +package godo + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" +) + +const partnerNetworkConnectBasePath = "/v2/partner_network_connect/attachments" + +// PartnerAttachmentService is an interface for managing Partner Attachments with the +// DigitalOcean API. 
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/PartnerNetworkConnect +type PartnerAttachmentService interface { + List(context.Context, *ListOptions) ([]*PartnerAttachment, *Response, error) + Create(context.Context, *PartnerAttachmentCreateRequest) (*PartnerAttachment, *Response, error) + Get(context.Context, string) (*PartnerAttachment, *Response, error) + Update(context.Context, string, *PartnerAttachmentUpdateRequest) (*PartnerAttachment, *Response, error) + Delete(context.Context, string) (*Response, error) + GetServiceKey(context.Context, string) (*ServiceKey, *Response, error) + SetRoutes(context.Context, string, *PartnerAttachmentSetRoutesRequest) (*PartnerAttachment, *Response, error) + ListRoutes(context.Context, string, *ListOptions) ([]*RemoteRoute, *Response, error) + GetBGPAuthKey(ctx context.Context, iaID string) (*BgpAuthKey, *Response, error) + RegenerateServiceKey(ctx context.Context, iaID string) (*RegenerateServiceKey, *Response, error) +} + +var _ PartnerAttachmentService = &PartnerAttachmentServiceOp{} + +// PartnerAttachmentServiceOp interfaces with the Partner Attachment endpoints in the DigitalOcean API. +type PartnerAttachmentServiceOp struct { + client *Client +} + +// PartnerAttachmentCreateRequest represents a request to create a Partner Attachment. +type PartnerAttachmentCreateRequest struct { + // Name is the name of the Partner Attachment + Name string `json:"name,omitempty"` + // ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps + ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"` + // Region is the region where the Partner Attachment is created + Region string `json:"region,omitempty"` + // NaaSProvider is the name of the Network as a Service provider + NaaSProvider string `json:"naas_provider,omitempty"` + // VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to + VPCIDs []string `json:"vpc_ids,omitempty"` + // BGP is the BGP configuration of the Partner Attachment + BGP BGP `json:"bgp,omitempty"` +} + +type partnerAttachmentRequestBody struct { + // Name is the name of the Partner Attachment + Name string `json:"name,omitempty"` + // ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps + ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"` + // Region is the region where the Partner Attachment is created + Region string `json:"region,omitempty"` + // NaaSProvider is the name of the Network as a Service provider + NaaSProvider string `json:"naas_provider,omitempty"` + // VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to + VPCIDs []string `json:"vpc_ids,omitempty"` + // BGP is the BGP configuration of the Partner Attachment + BGP *BGPInput `json:"bgp,omitempty"` +} + +func (req *PartnerAttachmentCreateRequest) buildReq() *partnerAttachmentRequestBody { + request := &partnerAttachmentRequestBody{ + Name: req.Name, + ConnectionBandwidthInMbps: req.ConnectionBandwidthInMbps, + Region: req.Region, + NaaSProvider: req.NaaSProvider, + VPCIDs: req.VPCIDs, + } + + if req.BGP != (BGP{}) { + request.BGP = &BGPInput{ + LocalASN: req.BGP.LocalASN, + LocalRouterIP: req.BGP.LocalRouterIP, + PeerASN: req.BGP.PeerASN, + PeerRouterIP: req.BGP.PeerRouterIP, + AuthKey: req.BGP.AuthKey, + } + } + + return request +} + +// PartnerAttachmentUpdateRequest represents a request to update a Partner Attachment. 
+type PartnerAttachmentUpdateRequest struct { + // Name is the name of the Partner Attachment + Name string `json:"name,omitempty"` + //VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to + VPCIDs []string `json:"vpc_ids,omitempty"` +} + +type PartnerAttachmentSetRoutesRequest struct { + // Routes is the list of routes to be used for the Partner Attachment + Routes []string `json:"routes,omitempty"` +} + +// BGP represents the BGP configuration of a Partner Attachment. +type BGP struct { + // LocalASN is the local ASN + LocalASN int `json:"local_asn,omitempty"` + // LocalRouterIP is the local router IP + LocalRouterIP string `json:"local_router_ip,omitempty"` + // PeerASN is the peer ASN + PeerASN int `json:"peer_asn,omitempty"` + // PeerRouterIP is the peer router IP + PeerRouterIP string `json:"peer_router_ip,omitempty"` + // AuthKey is the authentication key + AuthKey string `json:"auth_key,omitempty"` +} + +func (b *BGP) UnmarshalJSON(data []byte) error { + type Alias BGP + aux := &struct { + LocalASN *int `json:"local_asn,omitempty"` + LocalRouterASN *int `json:"local_router_asn,omitempty"` + PeerASN *int `json:"peer_asn,omitempty"` + PeerRouterASN *int `json:"peer_router_asn,omitempty"` + *Alias + }{ + Alias: (*Alias)(b), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if aux.LocalASN != nil { + b.LocalASN = *aux.LocalASN + } else if aux.LocalRouterASN != nil { + b.LocalASN = *aux.LocalRouterASN + } + + if aux.PeerASN != nil { + b.PeerASN = *aux.PeerASN + } else if aux.PeerRouterASN != nil { + b.PeerASN = *aux.PeerRouterASN + } + return nil +} + +// BGPInput represents the BGP configuration of a Partner Attachment. +type BGPInput struct { + // LocalASN is the local ASN + LocalASN int `json:"local_router_asn,omitempty"` + // LocalRouterIP is the local router IP + LocalRouterIP string `json:"local_router_ip,omitempty"` + // PeerASN is the peer ASN + PeerASN int `json:"peer_router_asn,omitempty"` + // PeerRouterIP is the peer router IP + PeerRouterIP string `json:"peer_router_ip,omitempty"` + // AuthKey is the authentication key + AuthKey string `json:"auth_key,omitempty"` +} + +// ServiceKey represents the service key of a Partner Attachment. +type ServiceKey struct { + Value string `json:"value,omitempty"` + State string `json:"state,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` +} + +// RemoteRoute represents a route for a Partner Attachment. +type RemoteRoute struct { + // ID is the generated ID of the Route + ID string `json:"id,omitempty"` + // Cidr is the CIDR of the route + Cidr string `json:"cidr,omitempty"` +} + +// PartnerAttachment represents a DigitalOcean Partner Attachment. 
+type PartnerAttachment struct { + // ID is the generated ID of the Partner Attachment + ID string `json:"id,omitempty"` + // Name is the name of the Partner Attachment + Name string `json:"name,omitempty"` + // State is the state of the Partner Attachment + State string `json:"state,omitempty"` + // ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps + ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"` + // Region is the region where the Partner Attachment is created + Region string `json:"region,omitempty"` + // NaaSProvider is the name of the Network as a Service provider + NaaSProvider string `json:"naas_provider,omitempty"` + // VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to + VPCIDs []string `json:"vpc_ids,omitempty"` + // BGP is the BGP configuration of the Partner Attachment + BGP BGP `json:"bgp,omitempty"` + // CreatedAt is time when this Partner Attachment was first created + CreatedAt time.Time `json:"created_at,omitempty"` +} + +type partnerNetworkConnectAttachmentRoot struct { + PartnerAttachment *PartnerAttachment `json:"partner_attachment"` +} + +type partnerNetworkConnectAttachmentsRoot struct { + PartnerAttachments []*PartnerAttachment `json:"partner_attachments"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type serviceKeyRoot struct { + ServiceKey *ServiceKey `json:"service_key"` +} + +type remoteRoutesRoot struct { + RemoteRoutes []*RemoteRoute `json:"remote_routes"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type BgpAuthKey struct { + Value string `json:"value"` +} + +type bgpAuthKeyRoot struct { + BgpAuthKey *BgpAuthKey `json:"bgp_auth_key"` +} + +type RegenerateServiceKey struct { +} + +type regenerateServiceKeyRoot struct { + RegenerateServiceKey *RegenerateServiceKey `json:"-"` +} + +// List returns a list of all Partner Attachment, with optional pagination. +func (s *PartnerAttachmentServiceOp) List(ctx context.Context, opt *ListOptions) ([]*PartnerAttachment, *Response, error) { + path, err := addOptions(partnerNetworkConnectBasePath, opt) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(partnerNetworkConnectAttachmentsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + return root.PartnerAttachments, resp, nil +} + +// Create creates a new Partner Attachment. +func (s *PartnerAttachmentServiceOp) Create(ctx context.Context, create *PartnerAttachmentCreateRequest) (*PartnerAttachment, *Response, error) { + path := partnerNetworkConnectBasePath + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, create.buildReq()) + if err != nil { + return nil, nil, err + } + + root := new(partnerNetworkConnectAttachmentRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.PartnerAttachment, resp, nil +} + +// Get returns the details of a Partner Attachment. 
+func (s *PartnerAttachmentServiceOp) Get(ctx context.Context, id string) (*PartnerAttachment, *Response, error) { + path := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(partnerNetworkConnectAttachmentRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.PartnerAttachment, resp, nil +} + +// Update updates a Partner Attachment properties. +func (s *PartnerAttachmentServiceOp) Update(ctx context.Context, id string, update *PartnerAttachmentUpdateRequest) (*PartnerAttachment, *Response, error) { + path := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id) + req, err := s.client.NewRequest(ctx, http.MethodPatch, path, update) + if err != nil { + return nil, nil, err + } + + root := new(partnerNetworkConnectAttachmentRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.PartnerAttachment, resp, nil +} + +// Delete deletes a Partner Attachment. +func (s *PartnerAttachmentServiceOp) Delete(ctx context.Context, id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id) + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +func (s *PartnerAttachmentServiceOp) GetServiceKey(ctx context.Context, id string) (*ServiceKey, *Response, error) { + path := fmt.Sprintf("%s/%s/service_key", partnerNetworkConnectBasePath, id) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(serviceKeyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.ServiceKey, resp, nil +} + +// ListRoutes lists all remote routes for a Partner Attachment. +func (s *PartnerAttachmentServiceOp) ListRoutes(ctx context.Context, id string, opt *ListOptions) ([]*RemoteRoute, *Response, error) { + path, err := addOptions(fmt.Sprintf("%s/%s/remote_routes", partnerNetworkConnectBasePath, id), opt) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(remoteRoutesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.RemoteRoutes, resp, nil +} + +// SetRoutes updates specific properties of a Partner Attachment. 
+func (s *PartnerAttachmentServiceOp) SetRoutes(ctx context.Context, id string, set *PartnerAttachmentSetRoutesRequest) (*PartnerAttachment, *Response, error) { + path := fmt.Sprintf("%s/%s/remote_routes", partnerNetworkConnectBasePath, id) + req, err := s.client.NewRequest(ctx, http.MethodPut, path, set) + if err != nil { + return nil, nil, err + } + + root := new(partnerNetworkConnectAttachmentRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.PartnerAttachment, resp, nil +} + +// GetBGPAuthKey returns Partner Attachment bgp auth key +func (s *PartnerAttachmentServiceOp) GetBGPAuthKey(ctx context.Context, iaID string) (*BgpAuthKey, *Response, error) { + path := fmt.Sprintf("%s/%s/bgp_auth_key", partnerNetworkConnectBasePath, iaID) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(bgpAuthKeyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.BgpAuthKey, resp, nil +} + +// RegenerateServiceKey regenerates the service key of a Partner Attachment. +func (s *PartnerAttachmentServiceOp) RegenerateServiceKey(ctx context.Context, iaID string) (*RegenerateServiceKey, *Response, error) { + path := fmt.Sprintf("%s/%s/service_key", partnerNetworkConnectBasePath, iaID) + req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(regenerateServiceKeyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.RegenerateServiceKey, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/snapshots.go b/vendor/github.com/digitalocean/godo/snapshots.go index 13a06ca316..31fd494c4c 100644 --- a/vendor/github.com/digitalocean/godo/snapshots.go +++ b/vendor/github.com/digitalocean/godo/snapshots.go @@ -14,6 +14,7 @@ const snapshotBasePath = "v2/snapshots" type SnapshotsService interface { List(context.Context, *ListOptions) ([]Snapshot, *Response, error) ListVolume(context.Context, *ListOptions) ([]Snapshot, *Response, error) + ListVolumeSnapshotByRegion(context.Context, string, *ListOptions) ([]Snapshot, *Response, error) ListDroplet(context.Context, *ListOptions) ([]Snapshot, *Response, error) Get(context.Context, string) (*Snapshot, *Response, error) Delete(context.Context, string) (*Response, error) @@ -52,6 +53,7 @@ type snapshotsRoot struct { type listSnapshotOptions struct { ResourceType string `url:"resource_type,omitempty"` + Region string `url:"region,omitempty"` } func (s Snapshot) String() string { @@ -75,6 +77,12 @@ func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) ( return s.list(ctx, opt, &listOpt) } +// ListVolumeSnapshotByRegion lists all the volume snapshot for given region +func (s *SnapshotsServiceOp) ListVolumeSnapshotByRegion(ctx context.Context, region string, opt *ListOptions) ([]Snapshot, *Response, error) { + listOpt := listSnapshotOptions{ResourceType: "volume", Region: region} + return s.list(ctx, opt, &listOpt) +} + // Get retrieves a snapshot by id. 
func (s *SnapshotsServiceOp) Get(ctx context.Context, snapshotID string) (*Snapshot, *Response, error) { return s.get(ctx, snapshotID) diff --git a/vendor/github.com/digitalocean/godo/spaces_keys.go b/vendor/github.com/digitalocean/godo/spaces_keys.go new file mode 100644 index 0000000000..8aee31dbba --- /dev/null +++ b/vendor/github.com/digitalocean/godo/spaces_keys.go @@ -0,0 +1,186 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const spacesKeysBasePath = "v2/spaces/keys" + +// SpacesKeysService is an interface for managing Spaces keys with the DigitalOcean API. +type SpacesKeysService interface { + List(context.Context, *ListOptions) ([]*SpacesKey, *Response, error) + Update(context.Context, string, *SpacesKeyUpdateRequest) (*SpacesKey, *Response, error) + Create(context.Context, *SpacesKeyCreateRequest) (*SpacesKey, *Response, error) + Delete(context.Context, string) (*Response, error) + Get(context.Context, string) (*SpacesKey, *Response, error) +} + +// SpacesKeysServiceOp handles communication with the Spaces key related methods of the +// DigitalOcean API. +type SpacesKeysServiceOp struct { + client *Client +} + +var _ SpacesKeysService = &SpacesKeysServiceOp{} + +// SpacesKeyPermission represents a permission for a Spaces grant +type SpacesKeyPermission string + +const ( + // SpacesKeyRead grants read-only access to the Spaces bucket + SpacesKeyRead SpacesKeyPermission = "read" + // SpacesKeyReadWrite grants read and write access to the Spaces bucket + SpacesKeyReadWrite SpacesKeyPermission = "readwrite" + // SpacesKeyFullAccess grants full access to the Spaces bucket + SpacesKeyFullAccess SpacesKeyPermission = "fullaccess" +) + +// Grant represents a Grant for a Spaces key +type Grant struct { + Bucket string `json:"bucket"` + Permission SpacesKeyPermission `json:"permission"` +} + +// SpacesKey represents a DigitalOcean Spaces key +type SpacesKey struct { + Name string `json:"name"` + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + Grants []*Grant `json:"grants"` + CreatedAt string `json:"created_at"` +} + +// SpacesKeyRoot represents a response from the DigitalOcean API +type spacesKeyRoot struct { + Key *SpacesKey `json:"key"` +} + +// SpacesKeyCreateRequest represents a request to create a Spaces key. +type SpacesKeyCreateRequest struct { + Name string `json:"name"` + Grants []*Grant `json:"grants"` +} + +// SpacesKeyUpdateRequest represents a request to update a Spaces key. +type SpacesKeyUpdateRequest struct { + Name string `json:"name"` + Grants []*Grant `json:"grants"` +} + +// spacesListKeysRoot represents a response from the DigitalOcean API +type spacesListKeysRoot struct { + Keys []*SpacesKey `json:"keys,omitempty"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta"` +} + +// Create creates a new Spaces key. +func (s *SpacesKeysServiceOp) Create(ctx context.Context, createRequest *SpacesKeyCreateRequest) (*SpacesKey, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + req, err := s.client.NewRequest(ctx, http.MethodPost, spacesKeysBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(spacesKeyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Key, resp, nil +} + +// Delete deletes a Spaces key. 
+func (s *SpacesKeysServiceOp) Delete(ctx context.Context, accessKey string) (*Response, error) { + if accessKey == "" { + return nil, NewArgError("accessKey", "cannot be empty") + } + + path := fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey) + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// Update updates a Spaces key. +func (s *SpacesKeysServiceOp) Update(ctx context.Context, accessKey string, updateRequest *SpacesKeyUpdateRequest) (*SpacesKey, *Response, error) { + if accessKey == "" { + return nil, nil, NewArgError("accessKey", "cannot be empty") + } + if updateRequest == nil { + return nil, nil, NewArgError("updateRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey) + req, err := s.client.NewRequest(ctx, http.MethodPut, path, updateRequest) + if err != nil { + return nil, nil, err + } + root := new(spacesKeyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Key, resp, nil +} + +// List returns a list of Spaces keys. +func (s *SpacesKeysServiceOp) List(ctx context.Context, opts *ListOptions) ([]*SpacesKey, *Response, error) { + path, err := addOptions(spacesKeysBasePath, opts) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(spacesListKeysRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + if root.Links != nil { + resp.Links = root.Links + } + if root.Meta != nil { + resp.Meta = root.Meta + } + + return root.Keys, resp, nil +} + +// Get retrieves a Spaces key. +func (s *SpacesKeysServiceOp) Get(ctx context.Context, accessKey string) (*SpacesKey, *Response, error) { + if accessKey == "" { + return nil, nil, NewArgError("accessKey", "cannot be empty") + } + + path := fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(spacesKeyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Key, resp, nil +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig new file mode 100644 index 0000000000..1f664d13a5 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 0000000000..2e0f9f5f71 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . 
--impure diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore new file mode 100644 index 0000000000..470e7ca2bd --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -0,0 +1,6 @@ +/.devenv/ +/.direnv/ +/.pre-commit-config.yaml +/bin/ +/build/ +/var/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml new file mode 100644 index 0000000000..763143aa77 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml @@ -0,0 +1,23 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/go-viper/mapstructure) + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/go-viper/maptstructure + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - goimports + - staticcheck + # - stylecheck diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md new file mode 100644 index 0000000000..afd44e5f5f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -0,0 +1,104 @@ +> [!WARNING] +> As of v2 of this library, change log can be found in GitHub releases. + +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. 
[GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md new file mode 100644 index 0000000000..dd5ec69ddf --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/README.md @@ -0,0 +1,80 @@ +# mapstructure + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) 
where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +```shell +go get github.com/go-viper/mapstructure/v2 +``` + +## Migrating from `github.com/mitchellh/mapstructure` + +[@mitchehllh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This is a repository achieved the "blessed fork" status. + +You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`. +The API is the same, so you don't need to change anything else. + +Here is a script that can help you with the migration: + +```shell +sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go') +``` + +If you need more time to migrate your code, that is absolutely fine. + +Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate: + +```shell +replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0 +``` + +## Usage & Example + +For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. + +## Credits + +Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh). +This is a maintained fork of the original library. + +Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349). + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go new file mode 100644 index 0000000000..1f3c69d4b8 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -0,0 +1,630 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "net/netip" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. 
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into a closure to be used directly +// if the type fails to convert we return a closure always erroring to keep the previous behaviour +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, c := range cached { + data, err = c(newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. 
+func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, c := range cached { + out, err = c(a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToURLHookFunc returns a DecodeHookFunc that converts +// strings to *url.URL. +func StringToURLHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(&url.URL{}) { + return data, nil + } + + // Convert it by parsing + return url.Parse(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}, +) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddr(data.(string)) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddrPort(data.(string)) + } +} + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. 
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), err + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), err + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. +func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), err + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), err + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. +func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), err + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), err + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. 
+func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseInt(data.(string), 0, 64) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseUint(data.(string), 0, 64) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), err + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), err + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. +func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), err + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseFloat(data.(string), 64) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. +func StringToBoolHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Bool { + return data, nil + } + + // Convert it by parsing + return strconv.ParseBool(data.(string)) + } +} + +// StringToByteHookFunc returns a DecodeHookFunc that converts +// strings to byte. +func StringToByteHookFunc() DecodeHookFunc { + return StringToUint8HookFunc() +} + +// StringToRuneHookFunc returns a DecodeHookFunc that converts +// strings to rune. +func StringToRuneHookFunc() DecodeHookFunc { + return StringToInt32HookFunc() +} + +// StringToComplex64HookFunc returns a DecodeHookFunc that converts +// strings to complex64. 
+func StringToComplex64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 64) + return complex64(c128), err + } +} + +// StringToComplex128HookFunc returns a DecodeHookFunc that converts +// strings to complex128. +func StringToComplex128HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseComplex(data.(string), 128) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 0000000000..4bea8154e0 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,472 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1717245169, + "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=", + "owner": "cachix", + "repo": "devenv", + "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1717285511, + "narHash": 
"sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1717284937, + "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=", + "type": "tarball", + "url": 
"https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1717112898, + "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": 
"da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 0000000000..4ed0f53311 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,39 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 0000000000..d1c15e474f --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 0000000000..d74e3a0b5a --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 0000000000..700b40229c --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. 
+func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. + // return unsafe.String(&b[0], len(b)) + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go new file mode 100644 index 0000000000..e77e63ba38 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -0,0 +1,1620 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// # Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// # Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// # Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. 
Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// # Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// # Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// # Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// # Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/go-viper/mapstructure/v2/internal/errors" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. 
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // The option of the value in the tag that indicates a field should + // be squashed. This defaults to "squash". 
+ SquashTagOption string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool + + // DecodeNil, if set to true, will cause the DecodeHook (if present) to run + // even if the input is nil. This can be used to provide default values. + DecodeNil bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig + cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error) +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. 
+func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.SquashTagOption == "" { + config.SquashTagOption = "squash" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + if config.DecodeHook != nil { + result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook) + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + + // Retain some of the original behavior when multiple errors ocurr + var joinedErr interface{ Unwrap() []error } + if errors.As(err, &joinedErr) { + return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err) + } + + return err +} + +// isNil returns true if the input is nil or a typed nil pointer. +func isNil(input interface{}) bool { + if input == nil { + return true + } + val := reflect.ValueOf(input) + return val.Kind() == reflect.Ptr && val.IsNil() +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var ( + inputVal = reflect.ValueOf(input) + outputKind = getKind(outVal) + decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil + ) + if isNil(input) { + // Typed nils won't match the "input == nil" below, so reset input. + input = nil + } + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + if !decodeNil { + return nil + } + } + if !inputVal.IsValid() { + if !decodeNil { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + // Hooks need a valid inputVal, so reset it to zero value of outVal type. + switch outputKind { + case reflect.Struct, reflect.Map: + var mapVal map[string]interface{} + inputVal = reflect.ValueOf(mapVal) // create nil map pointer + case reflect.Slice, reflect.Array: + var sliceVal []interface{} + inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer + default: + inputVal = reflect.Zero(outVal.Type()) + } + } + + if d.cachedDecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. 
+ var err error + input, err = d.cachedDecodeHook(inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %w", name, err) + } + } + if isNil(input) { + return nil + } + + var err error + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot 
parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'", + name, val, dataVal, data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error { + 
dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + dataVal := reflect.ValueOf(data) + + // Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + } + + // Check input type and based on the input type jump to the proper func + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + var errs []error + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errs = append(errs, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errs = append(errs, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. 
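+ // As a sketch of what this loop produces (the type is illustrative):
+ //
+ //	type Source struct {
+ //		Name   string `mapstructure:"name"`
+ //		Extra  string `mapstructure:",omitempty"`
+ //		hidden string
+ //	}
+ //
+ // yields a "name" key, an "Extra" key only when that field is non-empty,
+ // and nothing for the unexported "hidden" field, which is skipped just
+ // below.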
+ f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption) + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } else { + if strings.Index(tagValue[index+1:], "remain") != -1 { + if v.Kind() != reflect.Map { + return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type()) + } + + ptr := v.MapRange() + for ptr.Next() { + valMap.SetMapIndex(ptr.Key(), ptr.Value()) + } + continue + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. 
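+ // For example (a sketch):
+ //
+ //	var out struct{ S *string }
+ //	_ = Decode(map[string]interface{}{"s": nil}, &out) // out.S stays nil
+ //	_ = Decode(map[string]interface{}{"s": "x"}, &out) // *out.S == "x"
+ //
+ // Only the second call reaches the allocation below and decodes into the
+ // freshly created element.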
+ isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
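+ // For instance (a sketch): with a nil []string target,
+ //
+ //	Decode([]interface{}{"a", "b"}, &target)
+ //
+ // allocates make([]string, 2, 2) here and then decodes each element below;
+ // with WeaklyTypedInput, a bare "a" would already have been lifted into a
+ // one-element slice by the conversion branch above.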
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + var errs []error + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errs = append(errs, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + return errors.Join(errs...) +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). 
The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + + var errs []error + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. 
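+ // For example (a sketch; the types are illustrative):
+ //
+ //	type Base struct{ ID string }
+ //	type Wrapper struct {
+ //		Base `mapstructure:",squash"`
+ //		Name string
+ //	}
+ //
+ // lets the input keys "id" and "name" both decode straight into Wrapper,
+ // because Base is appended to the struct queue below and its fields are
+ // promoted into the same field list.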
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == d.config.SquashTagOption { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + switch fieldVal.Kind() { + case reflect.Struct: + structs = append(structs, fieldVal) + case reflect.Interface: + if !fieldVal.IsNil() { + structs = append(structs, fieldVal.Elem().Elem()) + } + default: + errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errs = append(errs, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
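+ // For example (a sketch; the type is illustrative):
+ //
+ //	type Config struct {
+ //		Name  string                 `mapstructure:"name"`
+ //		Other map[string]interface{} `mapstructure:",remain"`
+ //	}
+ //
+ // gathers every input key other than "name" into Other via this
+ // decodeMap call.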
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errs = append(errs, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errs = append(errs, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errs = append(errs, err) + } + + if err := errors.Join(errs...); err != nil { + return err + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go new file mode 100644 index 0000000000..d0913fff6c --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go @@ -0,0 +1,44 @@ +//go:build !go1.20 + +package mapstructure + +import "reflect" + +func 
isComparable(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Invalid: + return false + + case reflect.Array: + switch v.Type().Elem().Kind() { + case reflect.Interface, reflect.Array, reflect.Struct: + for i := 0; i < v.Type().Len(); i++ { + // if !v.Index(i).Comparable() { + if !isComparable(v.Index(i)) { + return false + } + } + return true + } + return v.Type().Comparable() + + case reflect.Interface: + // return v.Elem().Comparable() + return isComparable(v.Elem()) + + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + return false + + // if !v.Field(i).Comparable() { + if !isComparable(v.Field(i)) { + return false + } + } + return true + + default: + return v.Type().Comparable() + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go new file mode 100644 index 0000000000..f8255a1b17 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go @@ -0,0 +1,10 @@ +//go:build go1.20 + +package mapstructure + +import "reflect" + +// TODO: remove once we drop support for Go <1.20 +func isComparable(v reflect.Value) bool { + return v.Comparable() +} diff --git a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md index 859a950867..73fe513468 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md @@ -1,3 +1,29 @@ +## v2.7.0 (2025-04-03) + +* [GH-3306](https://github.com/gophercloud/gophercloud/pull/3306) [v2] identity: Add Get endpoint by ID +* [GH-3325](https://github.com/gophercloud/gophercloud/pull/3325) [v2] Switch to a version of gocovmerge compatible with go 1.22 +* [GH-3327](https://github.com/gophercloud/gophercloud/pull/3327) Merge pull request #3209 from shiftstack/proper-service-discovery +* [GH-3328](https://github.com/gophercloud/gophercloud/pull/3328) [v2] Improve support for `network standard-attr-*` extensions +* [GH-3330](https://github.com/gophercloud/gophercloud/pull/3330) [v2] Enhance Snapshot struct and add ListDetail function in V3 blockstorage +* [GH-3333](https://github.com/gophercloud/gophercloud/pull/3333) [v2] vpnaas: add support for more ciphers (auth, encryption, pfs modes) +* [GH-3334](https://github.com/gophercloud/gophercloud/pull/3334) [v2] Added support for VIF's in Baremetal +* [GH-3335](https://github.com/gophercloud/gophercloud/pull/3335) [v2] Baremetal virtual media Get API + +## v2.6.0 (2025-03-03) + +* [GH-3309](https://github.com/gophercloud/gophercloud/pull/3309) Backport: Added support for hypervisor_hostname to v2 + +## v2.5.0 (2025-02-11) + +* [GH-3278](https://github.com/gophercloud/gophercloud/pull/3278) [v2] test: Ensure that randomly created secgroup rules don't conflict +* [GH-3287](https://github.com/gophercloud/gophercloud/pull/3287) [v2] Fix panic in ExtractIntoStructPtr +* [GH-3288](https://github.com/gophercloud/gophercloud/pull/3288) [v2] Fix JSON field name hints in APIVersion structs +* [GH-3292](https://github.com/gophercloud/gophercloud/pull/3292) [v2] Add permissions to the label-issue workflow +* [GH-3294](https://github.com/gophercloud/gophercloud/pull/3294) [v2] Add support for zone sharing in DNS v2 +* [GH-3296](https://github.com/gophercloud/gophercloud/pull/3296) build(deps): bump golang.org/x/crypto from 0.30.0 to 0.31.0 +* [GH-3297](https://github.com/gophercloud/gophercloud/pull/3297) [v2] build(deps): bump golang.org/x/crypto from 0.31.0 to 
0.32.0 +* [GH-3298](https://github.com/gophercloud/gophercloud/pull/3298) [v2] build(deps): bump golang.org/x/crypto from 0.32.0 to 0.33.0 + ## v2.4.0 (2024-12-18) * [GH-3270](https://github.com/gophercloud/gophercloud/pull/3270) [v2] SG rules: implement bulk create diff --git a/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go index 2fbc3c97f1..8818e769b8 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go @@ -1,5 +1,7 @@ package gophercloud +import "slices" + // Availability indicates to whom a specific service endpoint is accessible: // the internet at large, internal networks only, or only to administrators. // Different identity services use different terminology for these. Identity v2 @@ -22,6 +24,31 @@ const ( AvailabilityInternal Availability = "internal" ) +// ServiceTypeAliases contains a mapping of service types to any aliases, as +// defined by the OpenStack Service Types Authority. Only service types that +// we support are included. +var ServiceTypeAliases = map[string][]string{ + "application-container": {"container"}, + "baremetal": {"bare-metal"}, + "baremetal-introspection": {}, + "block-storage": {"block-store", "volume", "volumev2", "volumev3"}, + "compute": {}, + "container-infrastructure-management": {"container-infrastructure", "container-infra"}, + "database": {}, + "dns": {}, + "identity": {}, + "image": {}, + "key-manager": {}, + "load-balancer": {}, + "message": {"messaging"}, + "networking": {}, + "object-store": {}, + "orchestration": {}, + "placement": {}, + "shared-file-system": {"sharev2", "share"}, + "workflow": {"workflowv2"}, +} + // EndpointOpts specifies search criteria used by queries against an // OpenStack service catalog. The options must contain enough information to // unambiguously identify one, and only one, endpoint within the catalog. @@ -30,8 +57,9 @@ const ( // package, like "openstack.NewComputeV2()". type EndpointOpts struct { // Type [required] is the service type for the client (e.g., "compute", - // "object-store"). Generally, this will be supplied by the service client - // function, but a user-given value will be honored if provided. + // "object-store"), as defined by the OpenStack Service Types Authority. + // This will generally be supplied by the service client function, but a + // user-given value will be honored if provided. Type string // Name [optional] is the service name for the client (e.g., "nova") as it @@ -39,6 +67,13 @@ type EndpointOpts struct { // different Name, which is why both Type and Name are sometimes needed. Name string + // Aliases [optional] is the set of aliases of the service type (e.g. + // "volumev2"/"volumev3", "volume" and "block-store" for the + // "block-storage" service type), as defined by the OpenStack Service Types + // Authority. As with Type, this will generally be supplied by the service + // client function, but a user-given value will be honored if provided. + Aliases []string + // Region [required] is the geographic region in which the endpoint resides, // generally specifying which datacenter should house your resources. // Required only for services that span multiple regions. 
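As a sketch of how the new alias handling behaves for callers (the values below are illustrative, not part of this diff): requesting a legacy type such as "volumev3" is normalized back to its official "block-storage" type by the ApplyDefaults hunk below, and catalog lookups then match any of its aliases.

	eo := gophercloud.EndpointOpts{Type: "volumev3", Region: "RegionOne"}
	eo.ApplyDefaults("volumev3")
	// eo.Type == "block-storage"
	// eo.Types() == []string{"block-storage", "block-store", "volume", "volumev2", "volumev3"}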
@@ -73,4 +108,26 @@ func (eo *EndpointOpts) ApplyDefaults(t string) { if eo.Availability == "" { eo.Availability = AvailabilityPublic } + if len(eo.Aliases) == 0 { + if aliases, ok := ServiceTypeAliases[eo.Type]; ok { + // happy path: user requested a service type by its official name + eo.Aliases = aliases + } else { + // unhappy path: user requested a service type by its alias or an + // invalid/unsupported service type + // TODO(stephenfin): This should probably be an error in v3 + for t, aliases := range ServiceTypeAliases { + if slices.Contains(aliases, eo.Type) { + // we intentionally override the service type, even if it + // was explicitly requested by the user + eo.Type = t + eo.Aliases = aliases + } + } + } + } +} + +func (eo *EndpointOpts) Types() []string { + return append([]string{eo.Type}, eo.Aliases...) } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go index 43b569d3b4..122a3ee699 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go @@ -344,6 +344,7 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp }, nil } +// TODO(stephenfin): Allow passing aliases to all New${SERVICE}V${VERSION} methods in v3 func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { sc := new(gophercloud.ServiceClient) eo.ApplyDefaults(clientType) @@ -393,6 +394,7 @@ func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpt return sc, err } +// TODO(stephenfin): Remove this in v3. We no longer support the V1 Block Storage service. // NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 // block storage service. func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { @@ -402,17 +404,17 @@ func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.Endpoi // NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 // block storage service. func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev2") + return initClientOpts(client, eo, "block-storage") } // NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev3") + return initClientOpts(client, eo, "block-storage") } // NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "sharev2") + return initClientOpts(client, eo, "shared-file-system") } // NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 @@ -457,14 +459,14 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi // NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging // service. 
func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "messaging") + sc, err := initClientOpts(client, eo, "message") sc.MoreHeaders = map[string]string{"Client-ID": clientID} return sc, err } // NewContainerV1 creates a ServiceClient that may be used with v1 container package func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container") + return initClientOpts(client, eo, "application-container") } // NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key @@ -478,12 +480,12 @@ func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.Endpoint // NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management // package. func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container-infra") + return initClientOpts(client, eo, "container-infrastructure-management") } // NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "workflowv2") + return initClientOpts(client, eo, "workflow") } // NewPlacementV1 creates a ServiceClient that may be used with the placement package. diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go index dd3b132d1d..44e8cccaeb 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go @@ -508,6 +508,9 @@ type CreateOpts struct { // DiskConfig [optional] controls how the created server's disk is partitioned. DiskConfig DiskConfig `json:"OS-DCF:diskConfig,omitempty"` + + // HypervisorHostname is the name of the hypervisor to which the server is scheduled. + HypervisorHostname string `json:"hypervisor_hostname,omitempty"` } // ToServerCreateMap assembles a request body based on the contents of a diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go index 2cdbd3e7f7..14cff0d755 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go @@ -1,6 +1,8 @@ package openstack import ( + "slices" + "github.com/gophercloud/gophercloud/v2" tokens2 "github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tokens" tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens" @@ -20,7 +22,7 @@ func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpt // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. 
var endpoints = make([]tokens2.Endpoint, 0, 1) for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { for _, endpoint := range entry.Endpoints { if opts.Region == "" || endpoint.Region == opts.Region { endpoints = append(endpoints, endpoint) @@ -74,7 +76,7 @@ func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpt // Name if provided, and Region if provided. var endpoints = make([]tokens3.Endpoint, 0, 1) for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) { for _, endpoint := range entry.Endpoints { if opts.Availability != gophercloud.AvailabilityAdmin && opts.Availability != gophercloud.AvailabilityPublic && diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go index a3afb0403c..be8949d693 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go @@ -2,6 +2,7 @@ package floatingips import ( "context" + "fmt" "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/pagination" @@ -37,6 +38,7 @@ type ListOpts struct { TagsAny string `q:"tags-any"` NotTags string `q:"not-tags"` NotTagsAny string `q:"not-tags-any"` + RevisionNumber *int `q:"revision_number"` } // ToNetworkListQuery formats a ListOpts into a query string. @@ -144,6 +146,11 @@ type UpdateOpts struct { Description *string `json:"description,omitempty"` PortID *string `json:"port_id,omitempty"` FixedIP string `json:"fixed_ip_address,omitempty"` + + // RevisionNumber implements extension:standard-attr-revisions. If != "" it + // will set revision_number=%s. If the revision number does not match, the + // update will fail. 
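+ // For example (a sketch; the values are illustrative):
+ //
+ //	rev := 42
+ //	desc := "updated"
+ //	opts := UpdateOpts{Description: &desc, RevisionNumber: &rev}
+ //
+ // sends an "If-Match: revision_number=42" header with the PUT request, so
+ // the update is rejected if the floating IP is no longer at revision 42.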
+ RevisionNumber *int `json:"-" h:"If-Match"` } // ToFloatingIPUpdateMap allows UpdateOpts to satisfy the UpdateOptsBuilder @@ -171,8 +178,19 @@ func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts U r.Err = err return } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + r.Err = err + return + } + for k := range h { + if k == "If-Match" { + h[k] = fmt.Sprintf("revision_number=%s", h[k]) + } + } resp, err := c.Put(ctx, resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, + MoreHeaders: h, + OkCodes: []int{200}, }) _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go index 50740ebf30..7ea6160032 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go @@ -56,6 +56,9 @@ type FloatingIP struct { // Tags optionally set via extensions/attributestags Tags []string `json:"tags"` + + // RevisionNumber optionally set via extensions/standard-attr-revisions + RevisionNumber int `json:"revision_number"` } func (r *FloatingIP) UnmarshalJSON(b []byte) error { diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go index 218c2897f7..bfff2dffb2 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go @@ -41,6 +41,7 @@ type ListOpts struct { TagsAny string `q:"tags-any"` NotTags string `q:"not-tags"` NotTagsAny string `q:"not-tags-any"` + RevisionNumber *int `q:"revision_number"` SecurityGroups []string `q:"security_groups"` FixedIPs []FixedIPOpts } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go index ad3edc92d6..52fcd38ab3 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go @@ -13,7 +13,7 @@ import ( // DefaultUserAgent is the default User-Agent string set in the request header. 
const ( - DefaultUserAgent = "gophercloud/v2.4.0" + DefaultUserAgent = "gophercloud/v2.7.0" DefaultMaxBackoffRetries = 60 ) diff --git a/vendor/github.com/gophercloud/gophercloud/v2/results.go b/vendor/github.com/gophercloud/gophercloud/v2/results.go index 9e6f630abb..b12c15a026 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/results.go @@ -184,10 +184,19 @@ func (r Result) ExtractIntoStructPtr(to any, label string) error { return r.Err } + if to == nil { + return fmt.Errorf("Expected pointer, got %T", to) + } + t := reflect.TypeOf(to) if k := t.Kind(); k != reflect.Ptr { return fmt.Errorf("Expected pointer, got %v", k) } + + if reflect.ValueOf(to).IsNil() { + return fmt.Errorf("Expected pointer, got %T", to) + } + switch t.Elem().Kind() { case reflect.Struct: return r.extractIntoPtr(to, label) @@ -210,10 +219,19 @@ func (r Result) ExtractIntoSlicePtr(to any, label string) error { return r.Err } + if to == nil { + return fmt.Errorf("Expected pointer, got %T", to) + } + t := reflect.TypeOf(to) if k := t.Kind(); k != reflect.Ptr { return fmt.Errorf("Expected pointer, got %v", k) } + + if reflect.ValueOf(to).IsNil() { + return fmt.Errorf("Expected pointer, got %T", to) + } + switch t.Elem().Kind() { case reflect.Slice: return r.extractIntoPtr(to, label) diff --git a/vendor/github.com/gophercloud/gophercloud/v2/service_client.go b/vendor/github.com/gophercloud/gophercloud/v2/service_client.go index 11b80108c3..c1f9f41d4d 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/service_client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/service_client.go @@ -115,13 +115,17 @@ func (client *ServiceClient) Head(ctx context.Context, url string, opts *Request } func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { + serviceType := client.Type + switch client.Type { case "compute": opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion - case "sharev2": + case "shared-file-system", "sharev2", "share": opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion - case "volume": + case "block-storage", "block-store", "volume", "volumev3": opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion + // cinder should accept block-storage but (as of Dalmatian) does not + serviceType = "volume" case "baremetal": opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion case "baremetal-introspection": @@ -129,7 +133,7 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { } if client.Type != "" { - opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion + opts.MoreHeaders["OpenStack-API-Version"] = serviceType + " " + client.Microversion } } diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 0000000000..6d48174bfb --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,64 @@ +# 1.7.0 (May 24, 2024) + +ENHANCEMENTS: + +- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91)) +- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133)) + +INTERNAL: + +- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115)) +- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105)) +- Bump 
actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116)) +- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111)) +- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112)) +- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103)) +- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107)) +- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124)) +- update readme ([#104](https://github.com/hashicorp/go-version/pull/104)) + +# 1.6.0 (June 28, 2022) + +FEATURES: + +- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) + +# 1.5.0 (May 18, 2022) + +FEATURES: + +- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) +- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) + +# 1.4.0 (January 5, 2022) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 0000000000..1409d6ab92 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. 
“Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. 
This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 0000000000..4b7806cd96 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,66 @@ +# Versioning Library for Go +![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 0000000000..29bdc4d2b5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,298 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package version + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + op operator + check *Version + original string +} + +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. 
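+//
+// For example, a comma-separated constraint string parses into a
+// Constraints value that can be checked against a version (illustrative):
+//
+//	cs, _ := NewConstraint(">= 1.0, < 1.4")
+//	v, _ := NewVersion("1.2")
+//	ok := cs.Check(v) // ok == true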
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. +func MustConstraints(c Constraints, err error) Constraints { + if err != nil { + panic(err) + } + + return c +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Equals compares Constraints with other Constraints +// for equality. This may not represent logical equivalence +// of compared constraints. +// e.g. even though '>0.1,>0.2' is logically equivalent +// to '>0.2' it is *NOT* treated as equal. +// +// Missing operator is treated as equal to '=', whitespaces +// are ignored and constraints are sorted before comaparison. +func (cs Constraints) Equals(c Constraints) bool { + if len(cs) != len(c) { + return false + } + + // make copies to retain order of the original slices + left := make(Constraints, len(cs)) + copy(left, cs) + sort.Stable(left) + right := make(Constraints, len(c)) + copy(right, c) + sort.Stable(right) + + // compare sorted slices + for i, con := range left { + if !con.Equals(right[i]) { + return false + } + } + + return true +} + +func (cs Constraints) Len() int { + return len(cs) +} + +func (cs Constraints) Less(i, j int) bool { + if cs[i].op < cs[j].op { + return true + } + if cs[i].op > cs[j].op { + return false + } + + return cs[i].check.LessThan(cs[j].check) +} + +func (cs Constraints) Swap(i, j int) { + cs[i], cs[j] = cs[j], cs[i] +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +// Prerelease returns true if the version underlying this constraint +// contains a prerelease field. 
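+// For example, the constraint ">= 1.0.0-rc1" reports true here, while
+// ">= 1.0.0" reports false.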
+func (c *Constraint) Prerelease() bool { + return len(c.check.Prerelease()) > 0 +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + cop := constraintOperators[matches[1]] + + return &Constraint{ + f: cop.f, + op: cop.op, + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return v.equalSegments(c) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. + return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +type operator rune + +const ( + equal operator = '=' + notEqual operator = '≠' + greaterThan operator = '>' + lessThan operator = '<' + greaterThanEqual operator = '≥' + lessThanEqual operator = '≤' + pessimistic operator = '~' +) + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. 
If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 0000000000..7c683c2813 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,441 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package version + +import ( + "bytes" + "database/sql/driver" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. +const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = val + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: len(segmentsStr), + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. 
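+// For example, comparing "1.2.3" against "1.3.0" returns -1, while a
+// release such as "2.0.0" compares greater than its prerelease "2.0.0-rc1".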
+// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. +func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + // If the segments are the same, we must compare on prerelease info + if v.equalSegments(other) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + // if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func (v *Version) equalSegments(other *Version) bool { + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + if len(segmentsSelf) != len(segmentsOther) { + return false + } + for i, v := range segmentsSelf { + if v != segmentsOther[i] { + return false + } + } + return true +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! 
+ if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Core returns a new version constructed from only the MAJOR.MINOR.PATCH +// segments of the version, without prerelease or metadata. +func (v *Version) Core() *Version { + segments := v.Segments64() + segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2]) + return Must(NewVersion(segmentsOnly)) +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + if v == nil || o == nil { + return v == o + } + + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanOrEqual tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanOrEqual tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. 
+// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. +func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the sql.Scanner interface. +func (v *Version) Scan(src interface{}) error { + switch src := src.(type) { + case string: + return v.UnmarshalText([]byte(src)) + case nil: + return nil + default: + return fmt.Errorf("cannot scan %T as Version", src) + } +} + +// Value implements the driver.Valuer interface. +func (v *Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 0000000000..83547fe13d --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/knadh/koanf/maps/LICENSE b/vendor/github.com/knadh/koanf/maps/LICENSE new file mode 100644 index 0000000000..c78ef52fb1 --- /dev/null +++ b/vendor/github.com/knadh/koanf/maps/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2019, Kailash Nadh. https://github.com/knadh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/knadh/koanf/maps/maps.go b/vendor/github.com/knadh/koanf/maps/maps.go new file mode 100644 index 0000000000..bdb9d62b80 --- /dev/null +++ b/vendor/github.com/knadh/koanf/maps/maps.go @@ -0,0 +1,303 @@ +// Package maps provides reusable functions for manipulating nested +// map[string]interface{} maps are common unmarshal products from +// various serializers such as json, yaml etc. +package maps + +import ( + "fmt" + "reflect" + "strings" + + "github.com/mitchellh/copystructure" +) + +// Flatten takes a map[string]interface{} and traverses it and flattens +// nested children into keys delimited by delim. +// +// It's important to note that all nested maps should be +// map[string]interface{} and not map[interface{}]interface{}. +// Use IntfaceKeysToStrings() to convert if necessary. +// +// eg: `{ "parent": { "child": 123 }}` becomes `{ "parent.child": 123 }` +// In addition, it keeps track of and returns a map of the delimited keypaths with +// a slice of key parts, for eg: { "parent.child": ["parent", "child"] }. This +// parts list is used to remember the key path's original structure to +// unflatten later. +func Flatten(m map[string]interface{}, keys []string, delim string) (map[string]interface{}, map[string][]string) { + var ( + out = make(map[string]interface{}) + keyMap = make(map[string][]string) + ) + + flatten(m, keys, delim, out, keyMap) + return out, keyMap +} + +func flatten(m map[string]interface{}, keys []string, delim string, out map[string]interface{}, keyMap map[string][]string) { + for key, val := range m { + // Copy the incoming key paths into a fresh list + // and append the current key in the iteration. + kp := make([]string, 0, len(keys)+1) + kp = append(kp, keys...) + kp = append(kp, key) + + switch cur := val.(type) { + case map[string]interface{}: + // Empty map. + if len(cur) == 0 { + newKey := strings.Join(kp, delim) + out[newKey] = val + keyMap[newKey] = kp + continue + } + + // It's a nested map. Flatten it recursively. + flatten(cur, kp, delim, out, keyMap) + default: + newKey := strings.Join(kp, delim) + out[newKey] = val + keyMap[newKey] = kp + } + } +} + +// Unflatten takes a flattened key:value map (non-nested with delimited keys) +// and returns a nested map where the keys are split into hierarchies by the given +// delimiter. For instance, `parent.child.key: 1` to `{parent: {child: {key: 1}}}` +// +// It's important to note that all nested maps should be +// map[string]interface{} and not map[interface{}]interface{}. +// Use IntfaceKeysToStrings() to convert if necessary. +func Unflatten(m map[string]interface{}, delim string) map[string]interface{} { + out := make(map[string]interface{}) + + // Iterate through the flat conf map. 
+ for k, v := range m { + var ( + keys []string + next = out + ) + + if delim != "" { + keys = strings.Split(k, delim) + } else { + keys = []string{k} + } + + // Iterate through key parts, for eg:, parent.child.key + // will be ["parent", "child", "key"] + for _, k := range keys[:len(keys)-1] { + sub, ok := next[k] + if !ok { + // If the key does not exist in the map, create it. + sub = make(map[string]interface{}) + next[k] = sub + } + if n, ok := sub.(map[string]interface{}); ok { + next = n + } + } + + // Assign the value. + next[keys[len(keys)-1]] = v + } + return out +} + +// Merge recursively merges map a into b (left to right), mutating +// and expanding map b. Note that there's no copying involved, so +// map b will retain references to map a. +// +// It's important to note that all nested maps should be +// map[string]interface{} and not map[interface{}]interface{}. +// Use IntfaceKeysToStrings() to convert if necessary. +func Merge(a, b map[string]interface{}) { + for key, val := range a { + // Does the key exist in the target map? + // If no, add it and move on. + bVal, ok := b[key] + if !ok { + b[key] = val + continue + } + + // If the incoming val is not a map, do a direct merge. + if _, ok := val.(map[string]interface{}); !ok { + b[key] = val + continue + } + + // The source key and target keys are both maps. Merge them. + switch v := bVal.(type) { + case map[string]interface{}: + Merge(val.(map[string]interface{}), v) + default: + b[key] = val + } + } +} + +// MergeStrict recursively merges map a into b (left to right), mutating +// and expanding map b. Note that there's no copying involved, so +// map b will retain references to map a. +// If an equal key in either of the maps has a different value type, it will return the first error. +// +// It's important to note that all nested maps should be +// map[string]interface{} and not map[interface{}]interface{}. +// Use IntfaceKeysToStrings() to convert if necessary. +func MergeStrict(a, b map[string]interface{}) error { + return mergeStrict(a, b, "") +} + +func mergeStrict(a, b map[string]interface{}, fullKey string) error { + for key, val := range a { + // Does the key exist in the target map? + // If no, add it and move on. + bVal, ok := b[key] + if !ok { + b[key] = val + continue + } + + newFullKey := key + if fullKey != "" { + newFullKey = fmt.Sprintf("%v.%v", fullKey, key) + } + + // If the incoming val is not a map, do a direct merge between the same types. + if _, ok := val.(map[string]interface{}); !ok { + if reflect.TypeOf(b[key]) == reflect.TypeOf(val) { + b[key] = val + } else { + return fmt.Errorf("incorrect types at key %v, type %T != %T", fullKey, b[key], val) + } + continue + } + + // The source key and target keys are both maps. Merge them. + switch v := bVal.(type) { + case map[string]interface{}: + if err := mergeStrict(val.(map[string]interface{}), v, newFullKey); err != nil { + return err + } + default: + b[key] = val + } + } + return nil +} + +// Delete removes the entry present at a given path, from the map. The path +// is the key map slice, for eg:, parent.child.key -> [parent child key]. +// Any empty, nested map on the path, is recursively deleted. +// +// It's important to note that all nested maps should be +// map[string]interface{} and not map[interface{}]interface{}. +// Use IntfaceKeysToStrings() to convert if necessary. 
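+//
+// A minimal illustration with a hypothetical nested map:
+//
+//	conf := map[string]interface{}{
+//		"parent": map[string]interface{}{"child": map[string]interface{}{"key": 1}},
+//	}
+//	Delete(conf, []string{"parent", "child", "key"})
+//	// "key" is removed; the now-empty "child" and "parent" maps are pruned as well.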
+func Delete(mp map[string]interface{}, path []string) { + next, ok := mp[path[0]] + if ok { + if len(path) == 1 { + delete(mp, path[0]) + return + } + switch nval := next.(type) { + case map[string]interface{}: + Delete(nval, path[1:]) + // Delete map if it has no keys. + if len(nval) == 0 { + delete(mp, path[0]) + } + } + } +} + +// Search recursively searches a map for a given path. The path is +// the key map slice, for eg:, parent.child.key -> [parent child key]. +// +// It's important to note that all nested maps should be +// map[string]interface{} and not map[interface{}]interface{}. +// Use IntfaceKeysToStrings() to convert if necessary. +func Search(mp map[string]interface{}, path []string) interface{} { + next, ok := mp[path[0]] + if ok { + if len(path) == 1 { + return next + } + switch m := next.(type) { + case map[string]interface{}: + return Search(m, path[1:]) + default: + return nil + } // + // It's important to note that all nested maps should be + // map[string]interface{} and not map[interface{}]interface{}. + // Use IntfaceKeysToStrings() to convert if necessary. + } + return nil +} + +// Copy returns a deep copy of a conf map. +// +// It's important to note that all nested maps should be +// map[string]interface{} and not map[interface{}]interface{}. +// Use IntfaceKeysToStrings() to convert if necessary. +func Copy(mp map[string]interface{}) map[string]interface{} { + out, _ := copystructure.Copy(&mp) + if res, ok := out.(*map[string]interface{}); ok { + return *res + } + return map[string]interface{}{} +} + +// IntfaceKeysToStrings recursively converts map[interface{}]interface{} to +// map[string]interface{}. Some parses such as YAML unmarshal return this. +func IntfaceKeysToStrings(mp map[string]interface{}) { + for key, val := range mp { + switch cur := val.(type) { + case map[interface{}]interface{}: + x := make(map[string]interface{}) + for k, v := range cur { + x[fmt.Sprintf("%v", k)] = v + } + mp[key] = x + IntfaceKeysToStrings(x) + case []interface{}: + for i, v := range cur { + switch sub := v.(type) { + case map[interface{}]interface{}: + x := make(map[string]interface{}) + for k, v := range sub { + x[fmt.Sprintf("%v", k)] = v + } + cur[i] = x + IntfaceKeysToStrings(x) + case map[string]interface{}: + IntfaceKeysToStrings(sub) + } + } + case map[string]interface{}: + IntfaceKeysToStrings(cur) + } + } +} + +// StringSliceToLookupMap takes a slice of strings and returns a lookup map +// with the slice values as keys with true values. +func StringSliceToLookupMap(s []string) map[string]bool { + mp := make(map[string]bool, len(s)) + for _, v := range s { + mp[v] = true + } + return mp +} + +// Int64SliceToLookupMap takes a slice of int64s and returns a lookup map +// with the slice values as keys with true values. +func Int64SliceToLookupMap(s []int64) map[int64]bool { + mp := make(map[int64]bool, len(s)) + for _, v := range s { + mp[v] = true + } + return mp +} diff --git a/vendor/github.com/knadh/koanf/providers/confmap/LICENSE b/vendor/github.com/knadh/koanf/providers/confmap/LICENSE new file mode 100644 index 0000000000..c78ef52fb1 --- /dev/null +++ b/vendor/github.com/knadh/koanf/providers/confmap/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2019, Kailash Nadh. 
https://github.com/knadh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/knadh/koanf/providers/confmap/confmap.go b/vendor/github.com/knadh/koanf/providers/confmap/confmap.go new file mode 100644 index 0000000000..b6415fc2c5 --- /dev/null +++ b/vendor/github.com/knadh/koanf/providers/confmap/confmap.go @@ -0,0 +1,37 @@ +// Package confmap implements a koanf.Provider that takes nested +// and flat map[string]interface{} config maps and provides them +// to koanf. +package confmap + +import ( + "errors" + + "github.com/knadh/koanf/maps" +) + +// Confmap implements a raw map[string]interface{} provider. +type Confmap struct { + mp map[string]interface{} +} + +// Provider returns a confmap Provider that takes a flat or nested +// map[string]interface{}. If a delim is provided, it indicates that the +// keys are flat and the map needs to be unflatted by delim. +func Provider(mp map[string]interface{}, delim string) *Confmap { + cp := maps.Copy(mp) + maps.IntfaceKeysToStrings(cp) + if delim != "" { + cp = maps.Unflatten(cp, delim) + } + return &Confmap{mp: cp} +} + +// ReadBytes is not supported by the confmap provider. +func (e *Confmap) ReadBytes() ([]byte, error) { + return nil, errors.New("confmap provider does not support this method") +} + +// Read returns the loaded map[string]interface{}. +func (e *Confmap) Read() (map[string]interface{}, error) { + return e.mp, nil +} diff --git a/vendor/github.com/knadh/koanf/v2/.gitignore b/vendor/github.com/knadh/koanf/v2/.gitignore new file mode 100644 index 0000000000..3777c0be01 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/.gitignore @@ -0,0 +1,4 @@ +.env + +# IDE +.idea diff --git a/vendor/github.com/knadh/koanf/v2/LICENSE b/vendor/github.com/knadh/koanf/v2/LICENSE new file mode 100644 index 0000000000..c78ef52fb1 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2019, Kailash Nadh. https://github.com/knadh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/knadh/koanf/v2/README.md b/vendor/github.com/knadh/koanf/v2/README.md new file mode 100644 index 0000000000..b4947e556f --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/README.md @@ -0,0 +1,714 @@ + + +![koanf](https://user-images.githubusercontent.com/547147/72681838-6981dd00-3aed-11ea-8f5d-310816c70c08.png) + +**koanf** is a library for reading configuration from different sources in different formats in Go applications. It is a cleaner, lighter [alternative to spf13/viper](#alternative-to-viper) with better abstractions and extensibility and far fewer dependencies. + +koanf v2 has modules (Providers) for reading configuration from a variety of sources such as files, command line flags, environment variables, Vault, and S3 and for parsing (Parsers) formats such as JSON, YAML, TOML, Hashicorp HCL. It is easy to plug in custom parsers and providers. + +All external dependencies in providers and parsers are detached from the core and can be installed separately as necessary. + +[![Run Tests](https://github.com/knadh/koanf/actions/workflows/test.yml/badge.svg)](https://github.com/knadh/koanf/actions/workflows/test.yml) [![GoDoc](https://pkg.go.dev/badge/github.com/knadh/koanf?utm_source=godoc)](https://pkg.go.dev/github.com/knadh/koanf/v2) + +### Installation + +```shell +# Install the core. +go get -u github.com/knadh/koanf/v2 + +# Install the necessary Provider(s). +# Available: file, env, posflag, basicflag, confmap, rawbytes, +# structs, fs, s3, appconfig/v2, consul/v2, etcd/v2, vault/v2, parameterstore/v2 +# eg: go get -u github.com/knadh/koanf/providers/s3 +# eg: go get -u github.com/knadh/koanf/providers/consul/v2 + +go get -u github.com/knadh/koanf/providers/file + + +# Install the necessary Parser(s). +# Available: toml, toml/v2, json, yaml, dotenv, hcl, hjson, nestedtext +# go get -u github.com/knadh/koanf/parsers/$parser + +go get -u github.com/knadh/koanf/parsers/toml +``` + +[See the list](#api) of all bundled Providers and Parsers. + +### Contents + +- [Concepts](#concepts) +- [Reading config from files](#reading-config-from-files) +- [Watching file for changes](#watching-file-for-changes) +- [Reading from command line](#reading-from-command-line) +- [Reading environment variables](#reading-environment-variables) +- [Reading raw bytes](#reading-raw-bytes) +- [Reading from maps and structs](#reading-from-nested-maps) +- [Unmarshalling and marshalling](#unmarshalling-and-marshalling) +- [Order of merge and key case sensitivity](#order-of-merge-and-key-case-sensitivity) +- [Custom Providers and Parsers](#custom-providers-and-parsers) +- [Custom merge strategies](#custom-merge-strategies) +- [List of installable Providers and Parsers](#api) + +### Concepts + +- `koanf.Provider` is a generic interface that provides configuration, for example, from files, environment variables, HTTP sources, or anywhere. The configuration can either be raw bytes that a parser can parse, or it can be a nested `map[string]interface{}` that can be directly loaded. 
+- `koanf.Parser` is a generic interface that takes raw bytes, parses, and returns a nested `map[string]interface{}`. For example, JSON and YAML parsers. +- Once loaded into koanf, configuration are values queried by a delimited key path syntax. eg: `app.server.port`. Any delimiter can be chosen. +- Configuration from multiple sources can be loaded and merged into a koanf instance, for example, load from a file first and override certain values with flags from the command line. + +With these two interface implementations, koanf can obtain configuration in any format from any source, parse it, and make it available to an application. + +### Reading config from files + +```go +package main + +import ( + "fmt" + "log" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/file" +) + +// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character. +var k = koanf.New(".") + +func main() { + // Load JSON config. + if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } + + // Load YAML config and merge into the previously loaded config (because we can). + k.Load(file.Provider("mock/mock.yml"), yaml.Parser()) + + fmt.Println("parent's name is = ", k.String("parent1.name")) + fmt.Println("parent's ID is = ", k.Int("parent1.id")) +} + +``` + +### Watching file for changes +Some providers expose a `Watch()` method that makes the provider watch for changes +in configuration and trigger a callback to reload the configuration. +This is not goroutine safe if there are concurrent `*Get()` calls happening on the +koanf object while it is doing a `Load()`. Such scenarios will need mutex locking. + +`file, appconfig, vault, consul` providers have a `Watch()` method. + + +```go +package main + +import ( + "fmt" + "log" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/file" +) + +// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character. +var k = koanf.New(".") + +func main() { + // Load JSON config. + f := file.Provider("mock/mock.json") + if err := k.Load(f, json.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } + + // Load YAML config and merge into the previously loaded config (because we can). + k.Load(file.Provider("mock/mock.yml"), yaml.Parser()) + + fmt.Println("parent's name is = ", k.String("parent1.name")) + fmt.Println("parent's ID is = ", k.Int("parent1.id")) + + // Watch the file and get a callback on change. The callback can do whatever, + // like re-load the configuration. + // File provider always returns a nil `event`. + f.Watch(func(event interface{}, err error) { + if err != nil { + log.Printf("watch error: %v", err) + return + } + + // Throw away the old config and load a fresh copy. + log.Println("config changed. Reloading ...") + k = koanf.New(".") + k.Load(f, json.Parser()) + k.Print() + }) + + // To stop a file watcher, call: + // f.Unwatch() + + // Block forever (and manually make a change to mock/mock.json) to + // reload the config. + log.Println("waiting forever. 
Try making a change to mock/mock.json to live reload") + <-make(chan bool) +} +``` + + +### Reading from command line + +The following example shows the use of `posflag.Provider`, a wrapper over the [spf13/pflag](https://github.com/spf13/pflag) library, an advanced commandline lib. For Go's built in `flag` package, use `basicflag.Provider`. + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/parsers/toml" + + // TOML version 2 is available at: + // "github.com/knadh/koanf/parsers/toml/v2" + + "github.com/knadh/koanf/providers/file" + "github.com/knadh/koanf/providers/posflag" + flag "github.com/spf13/pflag" +) + +// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character. +var k = koanf.New(".") + +func main() { + // Use the POSIX compliant pflag lib instead of Go's flag lib. + f := flag.NewFlagSet("config", flag.ContinueOnError) + f.Usage = func() { + fmt.Println(f.FlagUsages()) + os.Exit(0) + } + // Path to one or more config files to load into koanf along with some config params. + f.StringSlice("conf", []string{"mock/mock.toml"}, "path to one or more .toml config files") + f.String("time", "2020-01-01", "a time string") + f.String("type", "xxx", "type of the app") + f.Parse(os.Args[1:]) + + // Load the config files provided in the commandline. + cFiles, _ := f.GetStringSlice("conf") + for _, c := range cFiles { + if err := k.Load(file.Provider(c), toml.Parser()); err != nil { + log.Fatalf("error loading file: %v", err) + } + } + + // "time" and "type" may have been loaded from the config file, but + // they can still be overridden with the values from the command line. + // The bundled posflag.Provider takes a flagset from the spf13/pflag lib. + // Passing the Koanf instance to posflag helps it deal with default command + // line flag values that are not present in conf maps from previously loaded + // providers. + if err := k.Load(posflag.Provider(f, ".", k), nil); err != nil { + log.Fatalf("error loading config: %v", err) + } + + fmt.Println("time is = ", k.String("time")) +} +``` + +### Reading environment variables + +```go +package main + +import ( + "fmt" + "log" + "strings" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/providers/env" + "github.com/knadh/koanf/providers/file" +) + +// Global koanf instance. Use . as the key path delimiter. This can be / or anything. +var k = koanf.New(".") + +func main() { + // Load JSON config. + if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } + + // Load environment variables and merge into the loaded config. + // "MYVAR" is the prefix to filter the env vars by. + // "." is the delimiter used to represent the key hierarchy in env vars. + // The (optional, or can be nil) function can be used to transform + // the env var names, for instance, to lowercase them. + // + // For example, env vars: MYVAR_TYPE and MYVAR_PARENT1_CHILD1_NAME + // will be merged into the "type" and the nested "parent1.child1.name" + // keys in the config file here as we lowercase the key, + // replace `_` with `.` and strip the MYVAR_ prefix so that + // only "parent1.child1.name" remains. 
+ k.Load(env.Provider("MYVAR_", ".", func(s string) string { + return strings.Replace(strings.ToLower( + strings.TrimPrefix(s, "MYVAR_")), "_", ".", -1) + }), nil) + + fmt.Println("name is = ", k.String("parent1.child1.name")) +} +``` + +You can also use the `env.ProviderWithValue` with a callback that supports mutating both the key and value +to return types other than a string. For example, here, env values separated by spaces are +returned as string slices or arrays. eg: `MYVAR_slice=a b c` becomes `slice: [a, b, c]`. + +```go + k.Load(env.ProviderWithValue("MYVAR_", ".", func(s string, v string) (string, interface{}) { + // Strip out the MYVAR_ prefix and lowercase and get the key while also replacing + // the _ character with . in the key (koanf delimeter). + key := strings.Replace(strings.ToLower(strings.TrimPrefix(s, "MYVAR_")), "_", ".", -1) + + // If there is a space in the value, split the value into a slice by the space. + if strings.Contains(v, " ") { + return key, strings.Split(v, " ") + } + + // Otherwise, return the plain string. + return key, v + }), nil) +``` + +### Reading from an S3 bucket + +```go +// Load JSON config from s3. +if err := k.Load(s3.Provider(s3.Config{ + AccessKey: os.Getenv("AWS_S3_ACCESS_KEY"), + SecretKey: os.Getenv("AWS_S3_SECRET_KEY"), + Region: os.Getenv("AWS_S3_REGION"), + Bucket: os.Getenv("AWS_S3_BUCKET"), + ObjectKey: "dir/config.json", +}), json.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) +} +``` + +### Reading raw bytes + +The bundled `rawbytes` Provider can be used to read arbitrary bytes from a source, like a database or an HTTP call. + +```go +package main + +import ( + "fmt" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/providers/rawbytes" +) + +// Global koanf instance. Use . as the key path delimiter. This can be / or anything. +var k = koanf.New(".") + +func main() { + b := []byte(`{"type": "rawbytes", "parent1": {"child1": {"type": "rawbytes"}}}`) + k.Load(rawbytes.Provider(b), json.Parser()) + fmt.Println("type is = ", k.String("parent1.child1.type")) +} +``` + +### Unmarshalling and marshalling +`Parser`s can be used to unmarshal and scan the values in a Koanf instance into a struct based on the field tags, and to marshal a Koanf instance back into serialized bytes, for example to JSON or YAML files + +```go +package main + +import ( + "fmt" + "log" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/providers/file" +) + +// Global koanf instance. Use . as the key path delimiter. This can be / or anything. +var ( + k = koanf.New(".") + parser = json.Parser() +) + +func main() { + // Load JSON config. + if err := k.Load(file.Provider("mock/mock.json"), parser); err != nil { + log.Fatalf("error loading config: %v", err) + } + + // Structure to unmarshal nested conf to. + type childStruct struct { + Name string `koanf:"name"` + Type string `koanf:"type"` + Empty map[string]string `koanf:"empty"` + GrandChild struct { + Ids []int `koanf:"ids"` + On bool `koanf:"on"` + } `koanf:"grandchild1"` + } + + var out childStruct + + // Quick unmarshal. + k.Unmarshal("parent1.child1", &out) + fmt.Println(out) + + // Unmarshal with advanced config. + out = childStruct{} + k.UnmarshalWithConf("parent1.child1", &out, koanf.UnmarshalConf{Tag: "koanf"}) + fmt.Println(out) + + // Marshal the instance back to JSON. + // The parser instance can be anything, eg: json.Parser(), yaml.Parser() etc. 
+ b, _ := k.Marshal(parser) + fmt.Println(string(b)) +} +``` + +### Unmarshalling with flat paths + +Sometimes it is necessary to unmarshal an assortment of keys from various nested structures into a flat target structure. This is possible with the `UnmarshalConf.FlatPaths` flag. + +```go +package main + +import ( + "fmt" + "log" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/providers/file" +) + +// Global koanf instance. Use . as the key path delimiter. This can be / or anything. +var k = koanf.New(".") + +func main() { + // Load JSON config. + if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } + + type rootFlat struct { + Type string `koanf:"type"` + Empty map[string]string `koanf:"empty"` + Parent1Name string `koanf:"parent1.name"` + Parent1ID int `koanf:"parent1.id"` + Parent1Child1Name string `koanf:"parent1.child1.name"` + Parent1Child1Type string `koanf:"parent1.child1.type"` + Parent1Child1Empty map[string]string `koanf:"parent1.child1.empty"` + Parent1Child1Grandchild1IDs []int `koanf:"parent1.child1.grandchild1.ids"` + Parent1Child1Grandchild1On bool `koanf:"parent1.child1.grandchild1.on"` + } + + // Unmarshal the whole root with FlatPaths: True. + var o1 rootFlat + k.UnmarshalWithConf("", &o1, koanf.UnmarshalConf{Tag: "koanf", FlatPaths: true}) + fmt.Println(o1) + + // Unmarshal a child structure of "parent1". + type subFlat struct { + Name string `koanf:"name"` + ID int `koanf:"id"` + Child1Name string `koanf:"child1.name"` + Child1Type string `koanf:"child1.type"` + Child1Empty map[string]string `koanf:"child1.empty"` + Child1Grandchild1IDs []int `koanf:"child1.grandchild1.ids"` + Child1Grandchild1On bool `koanf:"child1.grandchild1.on"` + } + + var o2 subFlat + k.UnmarshalWithConf("parent1", &o2, koanf.UnmarshalConf{Tag: "koanf", FlatPaths: true}) + fmt.Println(o2) +} +``` + +#### Reading from nested maps + +The bundled `confmap` provider takes a `map[string]interface{}` that can be loaded into a koanf instance. + +```go +package main + +import ( + "fmt" + "log" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/providers/confmap" + "github.com/knadh/koanf/providers/file" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/parsers/yaml" +) + +// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character. +var k = koanf.New(".") + +func main() { + // Load default values using the confmap provider. + // We provide a flat map with the "." delimiter. + // A nested map can be loaded by setting the delimiter to an empty string "". + k.Load(confmap.Provider(map[string]interface{}{ + "parent1.name": "Default Name", + "parent3.name": "New name here", + }, "."), nil) + + // Load JSON config on top of the default values. + if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } + + // Load YAML config and merge into the previously loaded config (because we can). + k.Load(file.Provider("mock/mock.yml"), yaml.Parser()) + + fmt.Println("parent's name is = ", k.String("parent1.name")) + fmt.Println("parent's ID is = ", k.Int("parent1.id")) +} +``` + +#### Reading from struct + +The bundled `structs` provider can be used to read data from a struct to load into a koanf instance. + +```go +package main + +import ( + "fmt" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/providers/structs" +) + +// Global koanf instance. Use "." 
as the key path delimiter. This can be "/" or any character. +var k = koanf.New(".") + +type parentStruct struct { + Name string `koanf:"name"` + ID int `koanf:"id"` + Child1 childStruct `koanf:"child1"` +} +type childStruct struct { + Name string `koanf:"name"` + Type string `koanf:"type"` + Empty map[string]string `koanf:"empty"` + Grandchild1 grandchildStruct `koanf:"grandchild1"` +} +type grandchildStruct struct { + Ids []int `koanf:"ids"` + On bool `koanf:"on"` +} +type sampleStruct struct { + Type string `koanf:"type"` + Empty map[string]string `koanf:"empty"` + Parent1 parentStruct `koanf:"parent1"` +} + +func main() { + // Load default values using the structs provider. + // We provide a struct along with the struct tag `koanf` to the + // provider. + k.Load(structs.Provider(sampleStruct{ + Type: "json", + Empty: make(map[string]string), + Parent1: parentStruct{ + Name: "parent1", + ID: 1234, + Child1: childStruct{ + Name: "child1", + Type: "json", + Empty: make(map[string]string), + Grandchild1: grandchildStruct{ + Ids: []int{1, 2, 3}, + On: true, + }, + }, + }, + }, "koanf"), nil) + + fmt.Printf("name is = `%s`\n", k.String("parent1.child1.name")) +} +``` +### Merge behavior +#### Default behavior +The default behavior when you create Koanf this way is: `koanf.New(delim)` that the latest loaded configuration will +merge with the previous one. + +For example: +`first.yml` +```yaml +key: [1,2,3] +``` +`second.yml` +```yaml +key: 'string' +``` +When `second.yml` is loaded it will override the type of the `first.yml`. + +If this behavior is not desired, you can merge 'strictly'. In the same scenario, `Load` will return an error. + +```go +package main + +import ( + "errors" + "log" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/maps" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/file" +) + +var conf = koanf.Conf{ + Delim: ".", + StrictMerge: true, +} +var k = koanf.NewWithConf(conf) + +func main() { + yamlPath := "mock/mock.yml" + if err := k.Load(file.Provider(yamlPath), yaml.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } + + jsonPath := "mock/mock.json" + if err := k.Load(file.Provider(jsonPath), json.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } +} +``` +**Note:** When merging different extensions, each parser can treat his types differently, + meaning even though you the load same types there is a probability that it will fail with `StrictMerge: true`. + +For example: merging JSON and YAML will most likely fail because JSON treats integers as float64 and YAML treats them as integers. + +### Order of merge and key case sensitivity + +- Config keys are case-sensitive in koanf. For example, `app.server.port` and `APP.SERVER.port` are not the same. +- koanf does not impose any ordering on loading config from various providers. Every successive `Load()` or `Merge()` merges new config into the existing config. That is, it is possible to load environment variables first, then files on top of it, and then command line variables on top of it, or any such order. + +### Custom Providers and Parsers + +A Provider returns a nested `map[string]interface{}` config that can be loaded directly into koanf with `koanf.Load()` or it can return raw bytes that can be parsed with a Parser (again, loaded using `koanf.Load()`. Writing Providers and Parsers are easy. 
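+
+For illustration, here is a minimal sketch of a custom Provider that serves an in-memory nested map. The type name `staticProvider` is hypothetical and not part of the library; it only has to satisfy the `Provider` interface (`Read()` / `ReadBytes()`), and it can be loaded with a nil Parser because `Read()` already returns a parsed map.
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/knadh/koanf/v2"
+)
+
+// staticProvider is a hypothetical Provider over an in-memory nested map.
+type staticProvider struct {
+	conf map[string]interface{}
+}
+
+// Read returns the nested config map directly, so no Parser is needed.
+func (p staticProvider) Read() (map[string]interface{}, error) {
+	return p.conf, nil
+}
+
+// ReadBytes is not used by this provider; Read() supplies a parsed map.
+func (p staticProvider) ReadBytes() ([]byte, error) {
+	return nil, errors.New("staticProvider does not provide raw bytes")
+}
+
+func main() {
+	k := koanf.New(".")
+
+	// Load with a nil Parser since the provider returns a parsed map.
+	if err := k.Load(staticProvider{conf: map[string]interface{}{
+		"app": map[string]interface{}{"port": 8080},
+	}}, nil); err != nil {
+		fmt.Println("error loading config:", err)
+		return
+	}
+
+	fmt.Println("port is =", k.Int("app.port"))
+}
+```
+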
See the bundled implementations in the [providers](https://github.com/knadh/koanf/tree/master/providers) and [parsers](https://github.com/knadh/koanf/tree/master/parsers) directories. + +### Custom merge strategies + +By default, when merging two config sources using `Load()`, koanf recursively merges keys of nested maps (`map[string]interface{}`), +while static values are overwritten (slices, strings, etc). This behaviour can be changed by providing a custom merge function with the `WithMergeFunc` option. + +```go +package main + +import ( + "errors" + "log" + + "github.com/knadh/koanf/v2" + "github.com/knadh/koanf/maps" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/file" +) + +var conf = koanf.Conf{ + Delim: ".", + StrictMerge: true, +} +var k = koanf.NewWithConf(conf) + +func main() { + yamlPath := "mock/mock.yml" + if err := k.Load(file.Provider(yamlPath), yaml.Parser()); err != nil { + log.Fatalf("error loading config: %v", err) + } + + jsonPath := "mock/mock.json" + if err := k.Load(file.Provider(jsonPath), json.Parser(), koanf.WithMergeFunc(func(src, dest map[string]interface{}) error { + // Your custom logic, copying values from src into dst + return nil + })); err != nil { + log.Fatalf("error loading config: %v", err) + } +} +``` + +## API + +See the full API documentation of all available methods at https://pkg.go.dev/github.com/knadh/koanf/v2#section-documentation + +### Bundled Providers + +Install with `go get -u github.com/knadh/koanf/providers/$provider` + +| Package | Provider | Description | +| ------------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| file | `file.Provider(filepath string)` | Reads a file and returns the raw bytes to be parsed. | +| fs | `fs.Provider(f fs.FS, filepath string)` | (**Experimental**) Reads a file from fs.FS and returns the raw bytes to be parsed. The provider requires `go v1.16` or higher. | +| basicflag | `basicflag.Provider(f *flag.FlagSet, delim string)` | Takes an stdlib `flag.FlagSet` | +| posflag | `posflag.Provider(f *pflag.FlagSet, delim string)` | Takes an `spf13/pflag.FlagSet` (advanced POSIX compatible flags with multiple types) and provides a nested config map based on delim. | +| env | `env.Provider(prefix, delim string, f func(s string) string)` | Takes an optional prefix to filter env variables by, an optional function that takes and returns a string to transform env variables, and returns a nested config map based on delim. | +| confmap | `confmap.Provider(mp map[string]interface{}, delim string)` | Takes a premade `map[string]interface{}` conf map. If delim is provided, the keys are assumed to be flattened, thus unflattened using delim. | +| structs | `structs.Provider(s interface{}, tag string)` | Takes a struct and struct tag. | +| s3 | `s3.Provider(s3.S3Config{})` | Takes a s3 config struct. 
| +| rawbytes | `rawbytes.Provider(b []byte)` | Takes a raw `[]byte` slice to be parsed with a koanf.Parser | +| vault/v2 | `vault.Provider(vault.Config{})` | Hashicorp Vault provider | +| appconfig/v2 | `vault.AppConfig(appconfig.Config{})` | AWS AppConfig provider | +| etcd/v2 | `etcd.Provider(etcd.Config{})` | CNCF etcd provider | +| consul/v2 | `consul.Provider(consul.Config{})` | Hashicorp Consul provider | +| parameterstore/v2 | `parameterstore.Provider(parameterstore.Config{})` | AWS Systems Manager Parameter Store provider | + + +### Bundled Parsers + +Install with `go get -u github.com/knadh/koanf/parsers/$parser` + +| Package | Parser | Description | +| ------------ | -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| json | `json.Parser()` | Parses JSON bytes into a nested map | +| yaml | `yaml.Parser()` | Parses YAML bytes into a nested map | +| toml | `toml.Parser()` | Parses TOML bytes into a nested map | +| toml/v2 | `toml.Parser()` | Parses TOML bytes into a nested map (using go-toml v2) | +| dotenv | `dotenv.Parser()` | Parses DotEnv bytes into a flat map | +| hcl | `hcl.Parser(flattenSlices bool)` | Parses Hashicorp HCL bytes into a nested map. `flattenSlices` is recommended to be set to true. [Read more](https://github.com/hashicorp/hcl/issues/162). | +| nestedtext | `nestedtext.Parser()` | Parses NestedText bytes into a flat map | +| hjson | `hjson.Parser()` | Parses HJSON bytes into a nested map + | + + +### Third-party Providers +| Package | Provider | Description | +| ------------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| github.com/defensestation/koanf/providers/secretsmanager | `vault.SecretsMananger(secretsmanager.Config{}, f func(s string) string)` | AWS Secrets Manager provider, takes map or string as a value from store | +| github.com/defensestation/koanf/providers/parameterstore | `vault.ParameterStore(parameterstore.Config{}, f func(s string) string)` | AWS ParameterStore provider, an optional function that takes and returns a string to transform env variables | + + +### Alternative to viper + +koanf is a [lightweight](https://github.com/knadh/koanf/blob/master/go.mod) alternative to the popular [spf13/viper](https://github.com/spf13/viper). It was written as a result of multiple stumbling blocks encountered with some of viper's fundamental flaws. + +- viper breaks JSON, YAML, TOML, HCL language specs by [forcibly lowercasing keys](https://github.com/spf13/viper/pull/635). +- Significantly bloats [build sizes](https://github.com/knadh/koanf/wiki/Comparison-with-spf13-viper). +- Tightly couples config parsing with file extensions. +- Has poor semantics and abstractions. Commandline, env, file etc. and various parses are hardcoded in the core. There are no primitives that can be extended. +- Pulls a large number of [third party dependencies](https://github.com/spf13/viper/issues/707) into the core package. For instance, even if you do not use YAML or flags, the dependencies are still pulled as a result of the coupling. +- Imposes arbitrary ordering conventions (eg: flag -> env -> config etc.) +- `Get()` returns references to slices and maps. 
Mutations made outside change the underlying values inside the conf map. +- Does non-idiomatic things such as [throwing away O(1) on flat maps](https://github.com/spf13/viper/blob/3b4aca75714a37276c4b1883630bd98c02498b73/viper.go#L1524). +- Viper treats keys that contain an empty map (eg: `my_key: {}`) as if they were not set (ie: `IsSet("my_key") == false`). +- There are a large number of [open issues](https://github.com/spf13/viper/issues). diff --git a/vendor/github.com/knadh/koanf/v2/getters.go b/vendor/github.com/knadh/koanf/v2/getters.go new file mode 100644 index 0000000000..266230f747 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/getters.go @@ -0,0 +1,649 @@ +package koanf + +import ( + "fmt" + "time" +) + +// Int64 returns the int64 value of a given key path or 0 if the path +// does not exist or if the value is not a valid int64. +func (ko *Koanf) Int64(path string) int64 { + if v := ko.Get(path); v != nil { + i, _ := toInt64(v) + return i + } + return 0 +} + +// MustInt64 returns the int64 value of a given key path or panics +// if the value is not set or set to default value of 0. +func (ko *Koanf) MustInt64(path string) int64 { + val := ko.Int64(path) + if val == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Int64s returns the []int64 slice value of a given key path or an +// empty []int64 slice if the path does not exist or if the value +// is not a valid int slice. +func (ko *Koanf) Int64s(path string) []int64 { + o := ko.Get(path) + if o == nil { + return []int64{} + } + + var out []int64 + switch v := o.(type) { + case []int64: + return v + case []int: + out = make([]int64, 0, len(v)) + for _, vi := range v { + i, err := toInt64(vi) + + // On error, return as it's not a valid + // int slice. + if err != nil { + return []int64{} + } + out = append(out, i) + } + return out + case []interface{}: + out = make([]int64, 0, len(v)) + for _, vi := range v { + i, err := toInt64(vi) + + // On error, return as it's not a valid + // int slice. + if err != nil { + return []int64{} + } + out = append(out, i) + } + return out + } + + return []int64{} +} + +// MustInt64s returns the []int64 slice value of a given key path or panics +// if the value is not set or its default value. +func (ko *Koanf) MustInt64s(path string) []int64 { + val := ko.Int64s(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Int64Map returns the map[string]int64 value of a given key path +// or an empty map[string]int64 if the path does not exist or if the +// value is not a valid int64 map. +func (ko *Koanf) Int64Map(path string) map[string]int64 { + var ( + out = map[string]int64{} + o = ko.Get(path) + ) + if o == nil { + return out + } + + mp, ok := o.(map[string]interface{}) + if !ok { + return out + } + + out = make(map[string]int64, len(mp)) + for k, v := range mp { + switch i := v.(type) { + case int64: + out[k] = i + default: + // Attempt a conversion. + iv, err := toInt64(i) + if err != nil { + return map[string]int64{} + } + out[k] = iv + } + } + return out +} + +// MustInt64Map returns the map[string]int64 value of a given key path +// or panics if it isn't set or set to default value. +func (ko *Koanf) MustInt64Map(path string) map[string]int64 { + val := ko.Int64Map(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Int returns the int value of a given key path or 0 if the path +// does not exist or if the value is not a valid int. 
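+// Internally it converts the result of Int64() to int.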
+func (ko *Koanf) Int(path string) int { + return int(ko.Int64(path)) +} + +// MustInt returns the int value of a given key path or panics +// if it isn't set or set to default value of 0. +func (ko *Koanf) MustInt(path string) int { + val := ko.Int(path) + if val == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Ints returns the []int slice value of a given key path or an +// empty []int slice if the path does not exist or if the value +// is not a valid int slice. +func (ko *Koanf) Ints(path string) []int { + o := ko.Get(path) + if o == nil { + return []int{} + } + + var out []int + switch v := o.(type) { + case []int: + return v + case []int64: + out = make([]int, 0, len(v)) + for _, vi := range v { + out = append(out, int(vi)) + } + return out + case []interface{}: + out = make([]int, 0, len(v)) + for _, vi := range v { + i, err := toInt64(vi) + + // On error, return as it's not a valid + // int slice. + if err != nil { + return []int{} + } + out = append(out, int(i)) + } + return out + } + + return []int{} +} + +// MustInts returns the []int slice value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustInts(path string) []int { + val := ko.Ints(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// IntMap returns the map[string]int value of a given key path +// or an empty map[string]int if the path does not exist or if the +// value is not a valid int map. +func (ko *Koanf) IntMap(path string) map[string]int { + var ( + mp = ko.Int64Map(path) + out = make(map[string]int, len(mp)) + ) + for k, v := range mp { + out[k] = int(v) + } + return out +} + +// MustIntMap returns the map[string]int value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustIntMap(path string) map[string]int { + val := ko.IntMap(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Float64 returns the float64 value of a given key path or 0 if the path +// does not exist or if the value is not a valid float64. +func (ko *Koanf) Float64(path string) float64 { + if v := ko.Get(path); v != nil { + f, _ := toFloat64(v) + return f + } + return 0 +} + +// MustFloat64 returns the float64 value of a given key path or panics +// if it isn't set or set to default value 0. +func (ko *Koanf) MustFloat64(path string) float64 { + val := ko.Float64(path) + if val == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Float64s returns the []float64 slice value of a given key path or an +// empty []float64 slice if the path does not exist or if the value +// is not a valid float64 slice. +func (ko *Koanf) Float64s(path string) []float64 { + o := ko.Get(path) + if o == nil { + return []float64{} + } + + var out []float64 + switch v := o.(type) { + case []float64: + return v + case []interface{}: + out = make([]float64, 0, len(v)) + for _, vi := range v { + i, err := toFloat64(vi) + + // On error, return as it's not a valid + // int slice. + if err != nil { + return []float64{} + } + out = append(out, i) + } + return out + } + + return []float64{} +} + +// MustFloat64s returns the []Float64 slice value of a given key path or panics +// if the value is not set or set to default value. 
+func (ko *Koanf) MustFloat64s(path string) []float64 { + val := ko.Float64s(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Float64Map returns the map[string]float64 value of a given key path +// or an empty map[string]float64 if the path does not exist or if the +// value is not a valid float64 map. +func (ko *Koanf) Float64Map(path string) map[string]float64 { + var ( + out = map[string]float64{} + o = ko.Get(path) + ) + if o == nil { + return out + } + + mp, ok := o.(map[string]interface{}) + if !ok { + return out + } + + out = make(map[string]float64, len(mp)) + for k, v := range mp { + switch i := v.(type) { + case float64: + out[k] = i + default: + // Attempt a conversion. + iv, err := toFloat64(i) + if err != nil { + return map[string]float64{} + } + out[k] = iv + } + } + return out +} + +// MustFloat64Map returns the map[string]float64 value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustFloat64Map(path string) map[string]float64 { + val := ko.Float64Map(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Duration returns the time.Duration value of a given key path assuming +// that the key contains a valid numeric value. +func (ko *Koanf) Duration(path string) time.Duration { + // Look for a parsable string representation first. + if v := ko.Int64(path); v != 0 { + return time.Duration(v) + } + + v, _ := time.ParseDuration(ko.String(path)) + return v +} + +// MustDuration returns the time.Duration value of a given key path or panics +// if it isn't set or set to default value 0. +func (ko *Koanf) MustDuration(path string) time.Duration { + val := ko.Duration(path) + if val == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Time attempts to parse the value of a given key path and return time.Time +// representation. If the value is numeric, it is treated as a UNIX timestamp +// and if it's string, a parse is attempted with the given layout. +func (ko *Koanf) Time(path, layout string) time.Time { + // Unix timestamp? + v := ko.Int64(path) + if v != 0 { + return time.Unix(v, 0) + } + + // String representation. + s := ko.String(path) + if s != "" { + t, _ := time.Parse(layout, s) + return t + } + + return time.Time{} +} + +// MustTime attempts to parse the value of a given key path and return time.Time +// representation. If the value is numeric, it is treated as a UNIX timestamp +// and if it's string, a parse is attempted with the given layout. It panics if +// the parsed time is zero. +func (ko *Koanf) MustTime(path, layout string) time.Time { + val := ko.Time(path, layout) + if val.IsZero() { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// String returns the string value of a given key path or "" if the path +// does not exist or if the value is not a valid string. +func (ko *Koanf) String(path string) string { + if v := ko.Get(path); v != nil { + if i, ok := v.(string); ok { + return i + } + return fmt.Sprintf("%v", v) + } + return "" +} + +// MustString returns the string value of a given key path +// or panics if it isn't set or set to default value "". 
+func (ko *Koanf) MustString(path string) string { + val := ko.String(path) + if val == "" { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Strings returns the []string slice value of a given key path or an +// empty []string slice if the path does not exist or if the value +// is not a valid string slice. +func (ko *Koanf) Strings(path string) []string { + o := ko.Get(path) + if o == nil { + return []string{} + } + + var out []string + switch v := o.(type) { + case []interface{}: + out = make([]string, 0, len(v)) + for _, u := range v { + if s, ok := u.(string); ok { + out = append(out, s) + } else { + out = append(out, fmt.Sprintf("%v", u)) + } + } + return out + case []string: + out := make([]string, len(v)) + copy(out, v) + return out + } + + return []string{} +} + +// MustStrings returns the []string slice value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustStrings(path string) []string { + val := ko.Strings(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// StringMap returns the map[string]string value of a given key path +// or an empty map[string]string if the path does not exist or if the +// value is not a valid string map. +func (ko *Koanf) StringMap(path string) map[string]string { + var ( + out = map[string]string{} + o = ko.Get(path) + ) + if o == nil { + return out + } + + switch mp := o.(type) { + case map[string]string: + out = make(map[string]string, len(mp)) + for k, v := range mp { + out[k] = v + } + case map[string]interface{}: + out = make(map[string]string, len(mp)) + for k, v := range mp { + switch s := v.(type) { + case string: + out[k] = s + default: + // There's a non string type. Return. + return map[string]string{} + } + } + } + + return out +} + +// MustStringMap returns the map[string]string value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustStringMap(path string) map[string]string { + val := ko.StringMap(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// StringsMap returns the map[string][]string value of a given key path +// or an empty map[string][]string if the path does not exist or if the +// value is not a valid strings map. +func (ko *Koanf) StringsMap(path string) map[string][]string { + var ( + out = map[string][]string{} + o = ko.Get(path) + ) + if o == nil { + return out + } + + switch mp := o.(type) { + case map[string][]string: + out = make(map[string][]string, len(mp)) + for k, v := range mp { + out[k] = append(out[k], v...) + } + case map[string][]interface{}: + out = make(map[string][]string, len(mp)) + for k, v := range mp { + for _, v := range v { + switch sv := v.(type) { + case string: + out[k] = append(out[k], sv) + default: + return map[string][]string{} + } + } + } + case map[string]interface{}: + out = make(map[string][]string, len(mp)) + for k, v := range mp { + switch s := v.(type) { + case []string: + out[k] = append(out[k], s...) + case []interface{}: + for _, v := range s { + switch sv := v.(type) { + case string: + out[k] = append(out[k], sv) + default: + return map[string][]string{} + } + } + default: + // There's a non []interface type. Return. + return map[string][]string{} + } + } + } + + return out +} + +// MustStringsMap returns the map[string][]string value of a given key path or panics +// if the value is not set or set to default value. 
+func (ko *Koanf) MustStringsMap(path string) map[string][]string { + val := ko.StringsMap(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Bytes returns the []byte value of a given key path or an empty +// []byte slice if the path does not exist or if the value is not a valid string. +func (ko *Koanf) Bytes(path string) []byte { + return []byte(ko.String(path)) +} + +// MustBytes returns the []byte value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustBytes(path string) []byte { + val := ko.Bytes(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// Bool returns the bool value of a given key path or false if the path +// does not exist or if the value is not a valid bool representation. +// Accepted string representations of bool are the ones supported by strconv.ParseBool. +func (ko *Koanf) Bool(path string) bool { + if v := ko.Get(path); v != nil { + b, _ := toBool(v) + return b + } + return false +} + +// Bools returns the []bool slice value of a given key path or an +// empty []bool slice if the path does not exist or if the value +// is not a valid bool slice. +func (ko *Koanf) Bools(path string) []bool { + o := ko.Get(path) + if o == nil { + return []bool{} + } + + var out []bool + switch v := o.(type) { + case []interface{}: + out = make([]bool, 0, len(v)) + for _, u := range v { + b, err := toBool(u) + if err != nil { + return nil + } + out = append(out, b) + } + return out + case []bool: + return out + } + return nil +} + +// MustBools returns the []bool value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustBools(path string) []bool { + val := ko.Bools(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} + +// BoolMap returns the map[string]bool value of a given key path +// or an empty map[string]bool if the path does not exist or if the +// value is not a valid bool map. +func (ko *Koanf) BoolMap(path string) map[string]bool { + var ( + out = map[string]bool{} + o = ko.Get(path) + ) + if o == nil { + return out + } + + mp, ok := o.(map[string]interface{}) + if !ok { + return out + } + out = make(map[string]bool, len(mp)) + for k, v := range mp { + switch i := v.(type) { + case bool: + out[k] = i + default: + // Attempt a conversion. + b, err := toBool(i) + if err != nil { + return map[string]bool{} + } + out[k] = b + } + } + + return out +} + +// MustBoolMap returns the map[string]bool value of a given key path or panics +// if the value is not set or set to default value. +func (ko *Koanf) MustBoolMap(path string) map[string]bool { + val := ko.BoolMap(path) + if len(val) == 0 { + panic(fmt.Sprintf("invalid value: %s=%v", path, val)) + } + return val +} diff --git a/vendor/github.com/knadh/koanf/v2/go.work b/vendor/github.com/knadh/koanf/v2/go.work new file mode 100644 index 0000000000..b5337e02c9 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/go.work @@ -0,0 +1,31 @@ +go 1.18 + +use ( + . 
+ ./examples + ./maps + ./parsers/dotenv + ./parsers/hcl + ./parsers/hjson + ./parsers/json + ./parsers/kdl + ./parsers/nestedtext + ./parsers/toml + ./parsers/yaml + ./providers/appconfig + ./providers/basicflag + ./providers/confmap + ./providers/consul + ./providers/env + ./providers/etcd + ./providers/file + ./providers/fs + ./providers/nats + ./providers/parameterstore + ./providers/posflag + ./providers/rawbytes + ./providers/s3 + ./providers/structs + ./providers/vault + ./tests +) diff --git a/vendor/github.com/knadh/koanf/v2/go.work.sum b/vendor/github.com/knadh/koanf/v2/go.work.sum new file mode 100644 index 0000000000..eab7bcf845 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/go.work.sum @@ -0,0 +1,154 @@ +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.15.0/go.mod 
h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.4.1/go.mod 
h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell 
v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= 
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/knadh/koanf/v2/interfaces.go b/vendor/github.com/knadh/koanf/v2/interfaces.go new file mode 100644 index 0000000000..ba69a2443a --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/interfaces.go @@ -0,0 +1,20 @@ +package koanf + +// Provider represents a configuration provider. Providers can +// read configuration from a source (file, HTTP etc.) +type Provider interface { + // ReadBytes returns the entire configuration as raw []bytes to be parsed. + // with a Parser. + ReadBytes() ([]byte, error) + + // Read returns the parsed configuration as a nested map[string]interface{}. + // It is important to note that the string keys should not be flat delimited + // keys like `parent.child.key`, but nested like `{parent: {child: {key: 1}}}`. + Read() (map[string]interface{}, error) +} + +// Parser represents a configuration format parser. +type Parser interface { + Unmarshal([]byte) (map[string]interface{}, error) + Marshal(map[string]interface{}) ([]byte, error) +} diff --git a/vendor/github.com/knadh/koanf/v2/koanf.go b/vendor/github.com/knadh/koanf/v2/koanf.go new file mode 100644 index 0000000000..bd06a2d7e5 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/koanf.go @@ -0,0 +1,577 @@ +package koanf + +import ( + "bytes" + "encoding" + "fmt" + "reflect" + "sort" + "strconv" + + "github.com/knadh/koanf/maps" + "github.com/mitchellh/copystructure" + "github.com/go-viper/mapstructure/v2" +) + +// Koanf is the configuration apparatus. +type Koanf struct { + confMap map[string]interface{} + confMapFlat map[string]interface{} + keyMap KeyMap + conf Conf +} + +// Conf is the Koanf configuration. +type Conf struct { + // Delim is the delimiter to use + // when specifying config key paths, for instance a . for `parent.child.key` + // or a / for `parent/child/key`. + Delim string + + // StrictMerge makes the merging behavior strict. + // Meaning when loading two files that have the same key, + // the first loaded file will define the desired type, and if the second file loads + // a different type will cause an error. + StrictMerge bool +} + +// KeyMap represents a map of flattened delimited keys and the non-delimited +// parts as their slices. For nested keys, the map holds all levels of path combinations. 
+// For example, the nested structure `parent -> child -> key` will produce the map: +// parent.child.key => [parent, child, key] +// parent.child => [parent, child] +// parent => [parent] +type KeyMap map[string][]string + +// UnmarshalConf represents configuration options used by +// Unmarshal() to unmarshal conf maps into arbitrary structs. +type UnmarshalConf struct { + // Tag is the struct field tag to unmarshal. + // `koanf` is used if left empty. + Tag string + + // If this is set to true, instead of unmarshalling nested structures + // based on the key path, keys are taken literally to unmarshal into + // a flat struct. For example: + // ``` + // type MyStuff struct { + // Child1Name string `koanf:"parent1.child1.name"` + // Child2Name string `koanf:"parent2.child2.name"` + // Type string `koanf:"json"` + // } + // ``` + FlatPaths bool + DecoderConfig *mapstructure.DecoderConfig +} + +// New returns a new instance of Koanf. delim is the delimiter to use +// when specifying config key paths, for instance a . for `parent.child.key` +// or a / for `parent/child/key`. +func New(delim string) *Koanf { + return NewWithConf(Conf{ + Delim: delim, + StrictMerge: false, + }) +} + +// NewWithConf returns a new instance of Koanf based on the Conf. +func NewWithConf(conf Conf) *Koanf { + return &Koanf{ + confMap: make(map[string]interface{}), + confMapFlat: make(map[string]interface{}), + keyMap: make(KeyMap), + conf: conf, + } +} + +// Load takes a Provider that either provides a parsed config map[string]interface{} +// in which case pa (Parser) can be nil, or raw bytes to be parsed, where a Parser +// can be provided to parse. Additionally, options can be passed which modify the +// load behavior, such as passing a custom merge function. +func (ko *Koanf) Load(p Provider, pa Parser, opts ...Option) error { + var ( + mp map[string]interface{} + err error + ) + + if p == nil { + return fmt.Errorf("load received a nil provider") + } + + // No Parser is given. Call the Provider's Read() method to get + // the config map. + if pa == nil { + mp, err = p.Read() + if err != nil { + return err + } + } else { + // There's a Parser. Get raw bytes from the Provider to parse. + b, err := p.ReadBytes() + if err != nil { + return err + } + mp, err = pa.Unmarshal(b) + if err != nil { + return err + } + } + + return ko.merge(mp, newOptions(opts)) +} + +// Keys returns the slice of all flattened keys in the loaded configuration +// sorted alphabetically. +func (ko *Koanf) Keys() []string { + out := make([]string, 0, len(ko.confMapFlat)) + for k := range ko.confMapFlat { + out = append(out, k) + } + sort.Strings(out) + return out +} + +// KeyMap returns a map of flattened keys and the individual parts of the +// key as slices. eg: "parent.child.key" => ["parent", "child", "key"]. +func (ko *Koanf) KeyMap() KeyMap { + out := make(KeyMap, len(ko.keyMap)) + for key, parts := range ko.keyMap { + out[key] = make([]string, len(parts)) + copy(out[key], parts) + } + return out +} + +// All returns a map of all flattened key paths and their values. +// Note that it uses maps.Copy to create a copy that uses +// json.Marshal which changes the numeric types to float64. +func (ko *Koanf) All() map[string]interface{} { + return maps.Copy(ko.confMapFlat) +} + +// Raw returns a copy of the full raw conf map. +// Note that it uses maps.Copy to create a copy that uses +// json.Marshal which changes the numeric types to float64. 
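+// If c.DecoderConfig is nil, a default decoder is built with WeaklyTypedInput
+// enabled and hooks for time.Duration strings and encoding.TextUnmarshaler types.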
+func (ko *Koanf) Raw() map[string]interface{} { + return maps.Copy(ko.confMap) +} + +// Sprint returns a key -> value string representation +// of the config map with keys sorted alphabetically. +func (ko *Koanf) Sprint() string { + b := bytes.Buffer{} + for _, k := range ko.Keys() { + b.WriteString(fmt.Sprintf("%s -> %v\n", k, ko.confMapFlat[k])) + } + return b.String() +} + +// Print prints a key -> value string representation +// of the config map with keys sorted alphabetically. +func (ko *Koanf) Print() { + fmt.Print(ko.Sprint()) +} + +// Cut cuts the config map at a given key path into a sub map and +// returns a new Koanf instance with the cut config map loaded. +// For instance, if the loaded config has a path that looks like +// parent.child.sub.a.b, `Cut("parent.child")` returns a new Koanf +// instance with the config map `sub.a.b` where everything above +// `parent.child` are cut out. +func (ko *Koanf) Cut(path string) *Koanf { + out := make(map[string]interface{}) + + // Cut only makes sense if the requested key path is a map. + if v, ok := ko.Get(path).(map[string]interface{}); ok { + out = v + } + + n := New(ko.conf.Delim) + _ = n.merge(out, new(options)) + return n +} + +// Copy returns a copy of the Koanf instance. +func (ko *Koanf) Copy() *Koanf { + return ko.Cut("") +} + +// Merge merges the config map of a given Koanf instance into +// the current instance. +func (ko *Koanf) Merge(in *Koanf) error { + return ko.merge(in.Raw(), new(options)) +} + +// MergeAt merges the config map of a given Koanf instance into +// the current instance as a sub map, at the given key path. +// If all or part of the key path is missing, it will be created. +// If the key path is `""`, this is equivalent to Merge. +func (ko *Koanf) MergeAt(in *Koanf, path string) error { + // No path. Merge the two config maps. + if path == "" { + return ko.Merge(in) + } + + // Unflatten the config map with the given key path. + n := maps.Unflatten(map[string]interface{}{ + path: in.Raw(), + }, ko.conf.Delim) + + return ko.merge(n, new(options)) +} + +// Set sets the value at a specific key. +func (ko *Koanf) Set(key string, val interface{}) error { + // Unflatten the config map with the given key path. + n := maps.Unflatten(map[string]interface{}{ + key: val, + }, ko.conf.Delim) + + return ko.merge(n, new(options)) +} + +// Marshal takes a Parser implementation and marshals the config map into bytes, +// for example, to TOML or JSON bytes. +func (ko *Koanf) Marshal(p Parser) ([]byte, error) { + return p.Marshal(ko.Raw()) +} + +// Unmarshal unmarshals a given key path into the given struct using +// the mapstructure lib. If no path is specified, the whole map is unmarshalled. +// `koanf` is the struct field tag used to match field names. To customize, +// use UnmarshalWithConf(). It uses the mitchellh/mapstructure package. +func (ko *Koanf) Unmarshal(path string, o interface{}) error { + return ko.UnmarshalWithConf(path, o, UnmarshalConf{}) +} + +// UnmarshalWithConf is like Unmarshal but takes configuration params in UnmarshalConf. +// See mitchellh/mapstructure's DecoderConfig for advanced customization +// of the unmarshal behaviour. 
+func (ko *Koanf) UnmarshalWithConf(path string, o interface{}, c UnmarshalConf) error { + if c.DecoderConfig == nil { + c.DecoderConfig = &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + textUnmarshalerHookFunc()), + Metadata: nil, + Result: o, + WeaklyTypedInput: true, + } + } + + if c.Tag == "" { + c.DecoderConfig.TagName = "koanf" + } else { + c.DecoderConfig.TagName = c.Tag + } + + d, err := mapstructure.NewDecoder(c.DecoderConfig) + if err != nil { + return err + } + + // Unmarshal using flat key paths. + mp := ko.Get(path) + if c.FlatPaths { + if f, ok := mp.(map[string]interface{}); ok { + fmp, _ := maps.Flatten(f, nil, ko.conf.Delim) + mp = fmp + } + } + + return d.Decode(mp) +} + +// Delete removes all nested values from a given path. +// Clears all keys/values if no path is specified. +// Every empty, key on the path, is recursively deleted. +func (ko *Koanf) Delete(path string) { + // No path. Erase the entire map. + if path == "" { + ko.confMap = make(map[string]interface{}) + ko.confMapFlat = make(map[string]interface{}) + ko.keyMap = make(KeyMap) + return + } + + // Does the path exist? + p, ok := ko.keyMap[path] + if !ok { + return + } + maps.Delete(ko.confMap, p) + + // Update the flattened version as well. + ko.confMapFlat, ko.keyMap = maps.Flatten(ko.confMap, nil, ko.conf.Delim) + ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim) +} + +// Get returns the raw, uncast interface{} value of a given key path +// in the config map. If the key path does not exist, nil is returned. +func (ko *Koanf) Get(path string) interface{} { + // No path. Return the whole conf map. + if path == "" { + return ko.Raw() + } + + // Does the path exist? + p, ok := ko.keyMap[path] + if !ok { + return nil + } + res := maps.Search(ko.confMap, p) + + // Non-reference types are okay to return directly. + // Other types are "copied" with maps.Copy or json.Marshal + // that change the numeric types to float64. + + switch v := res.(type) { + case int, int8, int16, int32, int64, float32, float64, string, bool: + return v + case map[string]interface{}: + return maps.Copy(v) + } + + out, _ := copystructure.Copy(&res) + if ptrOut, ok := out.(*interface{}); ok { + return *ptrOut + } + return out +} + +// Slices returns a list of Koanf instances constructed out of a +// []map[string]interface{} interface at the given path. +func (ko *Koanf) Slices(path string) []*Koanf { + out := []*Koanf{} + if path == "" { + return out + } + + // Does the path exist? + sl, ok := ko.Get(path).([]interface{}) + if !ok { + return out + } + + for _, s := range sl { + mp, ok := s.(map[string]interface{}) + if !ok { + continue + } + + k := New(ko.conf.Delim) + _ = k.merge(mp, new(options)) + out = append(out, k) + } + + return out +} + +// Exists returns true if the given key path exists in the conf map. +func (ko *Koanf) Exists(path string) bool { + _, ok := ko.keyMap[path] + return ok +} + +// MapKeys returns a sorted string list of keys in a map addressed by the +// given path. If the path is not a map, an empty string slice is +// returned. +func (ko *Koanf) MapKeys(path string) []string { + var ( + out = []string{} + o = ko.Get(path) + ) + if o == nil { + return out + } + + mp, ok := o.(map[string]interface{}) + if !ok { + return out + } + out = make([]string, 0, len(mp)) + for k := range mp { + out = append(out, k) + } + sort.Strings(out) + return out +} + +// Delim returns delimiter in used by this instance of Koanf. 
+func (ko *Koanf) Delim() string { + return ko.conf.Delim +} + +func (ko *Koanf) merge(c map[string]interface{}, opts *options) error { + maps.IntfaceKeysToStrings(c) + if opts.merge != nil { + if err := opts.merge(c, ko.confMap); err != nil { + return err + } + } else if ko.conf.StrictMerge { + if err := maps.MergeStrict(c, ko.confMap); err != nil { + return err + } + } else { + maps.Merge(c, ko.confMap) + } + + // Maintain a flattened version as well. + ko.confMapFlat, ko.keyMap = maps.Flatten(ko.confMap, nil, ko.conf.Delim) + ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim) + + return nil +} + +// toInt64 takes an interface value and if it is an integer type, +// converts and returns int64. If it's any other type, +// forces it to a string and attempts to do a strconv.Atoi +// to get an integer out. +func toInt64(v interface{}) (int64, error) { + switch i := v.(type) { + case int: + return int64(i), nil + case int8: + return int64(i), nil + case int16: + return int64(i), nil + case int32: + return int64(i), nil + case int64: + return i, nil + } + + // Force it to a string and try to convert. + f, err := strconv.ParseFloat(fmt.Sprintf("%v", v), 64) + if err != nil { + return 0, err + } + + return int64(f), nil +} + +// toInt64 takes a `v interface{}` value and if it is a float type, +// converts and returns a `float64`. If it's any other type, forces it to a +// string and attempts to get a float out using `strconv.ParseFloat`. +func toFloat64(v interface{}) (float64, error) { + switch i := v.(type) { + case float32: + return float64(i), nil + case float64: + return i, nil + } + + // Force it to a string and try to convert. + f, err := strconv.ParseFloat(fmt.Sprintf("%v", v), 64) + if err != nil { + return f, err + } + + return f, nil +} + +// toBool takes an interface value and if it is a bool type, +// returns it. If it's any other type, forces it to a string and attempts +// to parse it as a bool using strconv.ParseBool. +func toBool(v interface{}) (bool, error) { + if b, ok := v.(bool); ok { + return b, nil + } + + // Force it to a string and try to convert. + b, err := strconv.ParseBool(fmt.Sprintf("%v", v)) + if err != nil { + return b, err + } + return b, nil +} + +// populateKeyParts iterates a key map and generates all possible +// traversal paths. For instance, `parent.child.key` generates +// `parent`, and `parent.child`. +func populateKeyParts(m KeyMap, delim string) KeyMap { + out := make(KeyMap, len(m)) // The size of the result is at very least same to KeyMap + for _, parts := range m { + // parts is a slice of [parent, child, key] + var nk string + + for i := range parts { + if i == 0 { + // On first iteration only use first part + nk = parts[i] + } else { + // If nk already contains a part (e.g. `parent`) append delim + `child` + nk += delim + parts[i] + } + if _, ok := out[nk]; ok { + continue + } + out[nk] = make([]string, i+1) + copy(out[nk], parts[0:i+1]) + } + } + return out +} + +// textUnmarshalerHookFunc is a fixed version of mapstructure.TextUnmarshallerHookFunc. +// This hook allows to additionally unmarshal text into custom string types that implement the encoding.Text(Un)Marshaler interface(s). 
+func textUnmarshalerHookFunc() mapstructure.DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + + // default text representation is the actual value of the `from` string + var ( + dataVal = reflect.ValueOf(data) + text = []byte(dataVal.String()) + ) + if f.Kind() == t.Kind() { + // source and target are of underlying type string + var ( + err error + ptrVal = reflect.New(dataVal.Type()) + ) + if !ptrVal.Elem().CanSet() { + // cannot set, skip, this should not happen + if err := unmarshaller.UnmarshalText(text); err != nil { + return nil, err + } + return result, nil + } + ptrVal.Elem().Set(dataVal) + + // We need to assert that both, the value type and the pointer type + // do (not) implement the TextMarshaller interface before proceeding and simply + // using the string value of the string type. + // it might be the case that the internal string representation differs from + // the (un)marshalled string. + + for _, v := range []reflect.Value{dataVal, ptrVal} { + if marshaller, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err = marshaller.MarshalText() + if err != nil { + return nil, err + } + break + } + } + } + + // text is either the source string's value or the source string type's marshaled value + // which may differ from its internal string value. + if err := unmarshaller.UnmarshalText(text); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/knadh/koanf/v2/options.go b/vendor/github.com/knadh/koanf/v2/options.go new file mode 100644 index 0000000000..63cea203e6 --- /dev/null +++ b/vendor/github.com/knadh/koanf/v2/options.go @@ -0,0 +1,33 @@ +package koanf + +// options contains options to modify the behavior of Koanf.Load. +type options struct { + merge func(a, b map[string]interface{}) error +} + +// newOptions creates a new options instance. +func newOptions(opts []Option) *options { + o := new(options) + o.apply(opts) + return o +} + +// Option is a generic type used to modify the behavior of Koanf.Load. +type Option func(*options) + +// apply the given options. +func (o *options) apply(opts []Option) { + for _, opt := range opts { + opt(o) + } +} + +// WithMergeFunc is an option to modify the merge behavior of Koanf.Load. +// If unset, the default merge function is used. +// +// The merge function is expected to merge map src into dest (left to right). +func WithMergeFunc(merge func(src, dest map[string]interface{}) error) Option { + return func(o *options) { + o.merge = merge + } +} diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 9831c37baf..0e42858aed 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -86,7 +86,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://linuxcontainers.org/incus/ * https://ifconfig.es * https://github.com/zmap/zdns - +* https://framagit.org/bortzmeyer/check-soa Send pull request if you want to be listed here. @@ -193,6 +193,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository. 
* 9460 - Service Binding and Parameter Specification via the DNS * 9461 - Service Binding Mapping for DNS Servers * 9462 - Discovery of Designated Resolvers +* 9460 - SVCB and HTTPS Records +* 9606 - DNS Resolver Information +* Draft - Compact Denial of Existence in DNSSEC ## Loosely Based Upon diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 0447fd826a..91793b9069 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -27,6 +27,7 @@ const ( EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) _DO = 1 << 15 // DNSSEC OK + _CO = 1 << 14 // Compact Answers OK ) // makeDataOpt is used to unpack the EDNS0 option(s) from a message. @@ -75,7 +76,11 @@ type OPT struct { func (rr *OPT) String() string { s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " if rr.Do() { - s += "flags: do; " + if rr.Co() { + s += "flags: do, co; " + } else { + s += "flags: do; " + } } else { s += "flags:; " } @@ -195,14 +200,34 @@ func (rr *OPT) SetDo(do ...bool) { } } -// Z returns the Z part of the OPT RR as a uint16 with only the 15 least significant bits used. +// Co returns the value of the CO (Compact Answers OK) bit. +func (rr *OPT) Co() bool { + return rr.Hdr.Ttl&_CO == _CO +} + +// SetCo sets the CO (Compact Answers OK) bit. +// If we pass an argument, set the CO bit to that value. +// It is possible to pass 2 or more arguments, but they will be ignored. +func (rr *OPT) SetCo(co ...bool) { + if len(co) == 1 { + if co[0] { + rr.Hdr.Ttl |= _CO + } else { + rr.Hdr.Ttl &^= _CO + } + } else { + rr.Hdr.Ttl |= _CO + } +} + +// Z returns the Z part of the OPT RR as a uint16 with only the 14 least significant bits used. func (rr *OPT) Z() uint16 { - return uint16(rr.Hdr.Ttl & 0x7FFF) + return uint16(rr.Hdr.Ttl & 0x3FFF) } -// SetZ sets the Z part of the OPT RR, note only the 15 least significant bits of z are used. +// SetZ sets the Z part of the OPT RR, note only the 14 least significant bits of z are used. func (rr *OPT) SetZ(z uint16) { - rr.Hdr.Ttl = rr.Hdr.Ttl&^0x7FFF | uint32(z&0x7FFF) + rr.Hdr.Ttl = rr.Hdr.Ttl&^0x3FFF | uint32(z&0x3FFF) } // EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index e26e8027a4..fa8a332eda 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -108,6 +108,8 @@ type ttlState struct { // origin for resolving relative domain names defaults to the DNS root (.). // Full zone file syntax is supported, including directives like $TTL and $ORIGIN. // All fields of the returned RR are set from the read data, except RR.Header().Rdlength which is set to 0. +// Is you need a partial resource record with no rdata - for instance - for dynamic updates, see the [ANY] +// documentation. 
func NewRR(s string) (RR, error) { if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline return ReadRR(strings.NewReader(s+"\n"), "") diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index c1a76995e7..ac885f66fe 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -1620,6 +1620,16 @@ func (rr *NINFO) parse(c *zlexer, o string) *ParseError { return nil } +// Uses the same format as TXT +func (rr *RESINFO) parse(c *zlexer, o string) *ParseError { + s, e := endingToTxtSlice(c, "bad RESINFO Resinfo") + if e != nil { + return e + } + rr.Txt = s + return nil +} + func (rr *URI) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index 310c7d11f5..d1baeea992 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -214,11 +214,7 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue { } } -// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-08). -// -// NOTE: The HTTPS/SVCB RFCs are in the draft stage. -// The API, including constants and types related to SVCBKeyValues, may -// change in future versions in accordance with the latest drafts. +// SVCB RR. See RFC 9460. type SVCB struct { Hdr RR_Header Priority uint16 // If zero, Value must be empty or discarded by the user of this library @@ -226,12 +222,8 @@ type SVCB struct { Value []SVCBKeyValue `dns:"pairs"` } -// HTTPS RR. Everything valid for SVCB applies to HTTPS as well. +// HTTPS RR. See RFC 9460. Everything valid for SVCB applies to HTTPS as well. // Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols. -// -// NOTE: The HTTPS/SVCB RFCs are in the draft stage. -// The API, including constants and types related to SVCBKeyValues, may -// change in future versions in accordance with the latest drafts. type HTTPS struct { SVCB } diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 7a34c14ca0..e39cf2fecf 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -101,6 +101,7 @@ const ( TypeCAA uint16 = 257 TypeAVC uint16 = 258 TypeAMTRELAY uint16 = 260 + TypeRESINFO uint16 = 261 TypeTKEY uint16 = 249 TypeTSIG uint16 = 250 @@ -267,11 +268,20 @@ func (q *Question) String() (s string) { return s } -// ANY is a wild card record. See RFC 1035, Section 3.2.3. ANY -// is named "*" there. +// ANY is a wild card record. See RFC 1035, Section 3.2.3. ANY is named "*" there. +// The ANY records can be (ab)used to create resource records without any rdata, that +// can be used in dynamic update requests. Basic use pattern: +// +// a := &ANY{RR_Header{ +// Name: "example.org.", +// Rrtype: TypeA, +// Class: ClassINET, +// }} +// +// Results in an A record without rdata. type ANY struct { Hdr RR_Header - // Does not have any rdata + // Does not have any rdata. } func (rr *ANY) String() string { return rr.Hdr.String() } @@ -1508,6 +1518,15 @@ func (rr *ZONEMD) String() string { " " + rr.Digest } +// RESINFO RR. See RFC 9606. + +type RESINFO struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *RESINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + // APL RR. See RFC 3123. 
type APL struct { Hdr RR_Header diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go index c018ad43da..d226718595 100644 --- a/vendor/github.com/miekg/dns/udp.go +++ b/vendor/github.com/miekg/dns/udp.go @@ -1,5 +1,5 @@ -//go:build !windows -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package dns diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_no_control.go similarity index 85% rename from vendor/github.com/miekg/dns/udp_windows.go rename to vendor/github.com/miekg/dns/udp_no_control.go index a259b67e4d..ca3d4a633b 100644 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ b/vendor/github.com/miekg/dns/udp_no_control.go @@ -1,9 +1,11 @@ -//go:build windows -// +build windows +//go:build windows || darwin +// +build windows darwin // TODO(tmthrgd): Remove this Windows-specific code if go.dev/issue/7175 and // go.dev/issue/7174 are ever fixed. +// NOTICE(stek29): darwin supports PKTINFO in sendmsg, but it unbinds sockets, see https://github.com/miekg/dns/issues/724 + package dns import "net" diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go index 16f9ee85a5..2fef1461f5 100644 --- a/vendor/github.com/miekg/dns/update.go +++ b/vendor/github.com/miekg/dns/update.go @@ -2,6 +2,7 @@ package dns // NameUsed sets the RRs in the prereq section to // "Name is in use" RRs. RFC 2136 section 2.4.4. +// See [ANY] on how to make RRs without rdata. func (u *Msg) NameUsed(rr []RR) { if u.Answer == nil { u.Answer = make([]RR, 0, len(rr)) @@ -41,6 +42,7 @@ func (u *Msg) Used(rr []RR) { // RRsetUsed sets the RRs in the prereq section to // "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. +// See [ANY] on how to make RRs without rdata. func (u *Msg) RRsetUsed(rr []RR) { if u.Answer == nil { u.Answer = make([]RR, 0, len(rr)) @@ -53,6 +55,7 @@ func (u *Msg) RRsetUsed(rr []RR) { // RRsetNotUsed sets the RRs in the prereq section to // "RRset does not exist" RRs. RFC 2136 section 2.4.3. +// See [ANY] on how to make RRs without rdata. func (u *Msg) RRsetNotUsed(rr []RR) { if u.Answer == nil { u.Answer = make([]RR, 0, len(rr)) @@ -64,6 +67,7 @@ func (u *Msg) RRsetNotUsed(rr []RR) { } // Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. +// See [ANY] on how to make RRs without rdata. func (u *Msg) Insert(rr []RR) { if len(u.Question) == 0 { panic("dns: empty question section") @@ -78,6 +82,7 @@ func (u *Msg) Insert(rr []RR) { } // RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. +// See [ANY] on how to make RRs without rdata. func (u *Msg) RemoveRRset(rr []RR) { if u.Ns == nil { u.Ns = make([]RR, 0, len(rr)) @@ -89,6 +94,7 @@ func (u *Msg) RemoveRRset(rr []RR) { } // RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 +// See [ANY] on how to make RRs without rdata. func (u *Msg) RemoveName(rr []RR) { if u.Ns == nil { u.Ns = make([]RR, 0, len(rr)) @@ -99,6 +105,7 @@ func (u *Msg) RemoveName(rr []RR) { } // Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 +// See [ANY] on how to make RRs without rdata. 
func (u *Msg) Remove(rr []RR) { if u.Ns == nil { u.Ns = make([]RR, 0, len(rr)) diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index e290e3dff7..73e34edc31 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 63} +var Version = v{1, 1, 65} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 330c05395f..ebd9e02970 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -957,6 +957,23 @@ func (r1 *PX) isDuplicate(_r2 RR) bool { return true } +func (r1 *RESINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RESINFO) + if !ok { + return false + } + _ = r2 + if len(r1.Txt) != len(r2.Txt) { + return false + } + for i := 0; i < len(r1.Txt); i++ { + if r1.Txt[i] != r2.Txt[i] { + return false + } + } + return true +} + func (r1 *RFC3597) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*RFC3597) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 5a6cf4c6ad..cc09810fb1 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -762,6 +762,14 @@ func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress boo return off, nil } +func (rr *RESINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringHex(rr.Rdata, msg, off) if err != nil { @@ -2353,6 +2361,17 @@ func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *RESINFO) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 11f13ecf9c..cea79ae772 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -66,6 +66,7 @@ var TypeToRR = map[uint16]func() RR{ TypeOPT: func() RR { return new(OPT) }, TypePTR: func() RR { return new(PTR) }, TypePX: func() RR { return new(PX) }, + TypeRESINFO: func() RR { return new(RESINFO) }, TypeRKEY: func() RR { return new(RKEY) }, TypeRP: func() RR { return new(RP) }, TypeRRSIG: func() RR { return new(RRSIG) }, @@ -154,6 +155,7 @@ var TypeToString = map[uint16]string{ TypeOPT: "OPT", TypePTR: "PTR", TypePX: "PX", + TypeRESINFO: "RESINFO", TypeRKEY: "RKEY", TypeRP: "RP", TypeRRSIG: "RRSIG", @@ -238,6 +240,7 @@ func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } func (rr *PTR) Header() *RR_Header { return &rr.Hdr } func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RESINFO) Header() *RR_Header { return &rr.Hdr } func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RP) Header() *RR_Header { return &rr.Hdr } @@ -622,6 +625,14 @@ func (rr *PX) len(off 
int, compression map[string]struct{}) int { return l } +func (rr *RESINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} + func (rr *RFC3597) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Rdata) / 2 @@ -1148,6 +1159,10 @@ func (rr *PX) copy() RR { } } +func (rr *RESINFO) copy() RR { + return &RESINFO{rr.Hdr, cloneSlice(rr.Txt)} +} + func (rr *RFC3597) copy() RR { return &RFC3597{rr.Hdr, rr.Rdata} } diff --git a/vendor/github.com/oklog/ulid/v2/.gitignore b/vendor/github.com/oklog/ulid/v2/.gitignore new file mode 100644 index 0000000000..c92c4d5608 --- /dev/null +++ b/vendor/github.com/oklog/ulid/v2/.gitignore @@ -0,0 +1,29 @@ +#### joe made this: http://goel.io/joe + +#####=== Go ===##### + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + diff --git a/vendor/github.com/oklog/ulid/v2/AUTHORS.md b/vendor/github.com/oklog/ulid/v2/AUTHORS.md new file mode 100644 index 0000000000..95581c78b0 --- /dev/null +++ b/vendor/github.com/oklog/ulid/v2/AUTHORS.md @@ -0,0 +1,2 @@ +- Peter Bourgon (@peterbourgon) +- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/v2/CHANGELOG.md b/vendor/github.com/oklog/ulid/v2/CHANGELOG.md new file mode 100644 index 0000000000..8da38c6b00 --- /dev/null +++ b/vendor/github.com/oklog/ulid/v2/CHANGELOG.md @@ -0,0 +1,33 @@ +## 1.3.1 / 2018-10-02 + +* Use underlying entropy source for random increments in Monotonic (#32) + +## 1.3.0 / 2018-09-29 + +* Monotonic entropy support (#31) + +## 1.2.0 / 2018-09-09 + +* Add a function to convert Unix time in milliseconds back to time.Time (#30) + +## 1.1.0 / 2018-08-15 + +* Ensure random part is always read from the entropy reader in full (#28) + +## 1.0.0 / 2018-07-29 + +* Add ParseStrict and MustParseStrict functions (#26) +* Enforce overflow checking when parsing (#20) + +## 0.3.0 / 2017-01-03 + +* Implement ULID.Compare method + +## 0.2.0 / 2016-12-13 + +* Remove year 2262 Timestamp bug. (#1) +* Gracefully handle invalid encodings when parsing. + +## 0.1.0 / 2016-12-06 + +* First ULID release diff --git a/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..68f03f26eb --- /dev/null +++ b/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +We use GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first propose your ideas + in a Github issue. This will avoid unnecessary work and surely give + you and us a good deal of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 
diff --git a/vendor/github.com/oklog/ulid/v2/LICENSE b/vendor/github.com/oklog/ulid/v2/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/oklog/ulid/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/ulid/v2/README.md b/vendor/github.com/oklog/ulid/v2/README.md new file mode 100644 index 0000000000..c0094ce881 --- /dev/null +++ b/vendor/github.com/oklog/ulid/v2/README.md @@ -0,0 +1,234 @@ +# Universally Unique Lexicographically Sortable Identifier + +[![Project status](https://img.shields.io/github/release/oklog/ulid.svg?style=flat-square)](https://github.com/oklog/ulid/releases/latest) +![Build Status](https://github.com/oklog/ulid/actions/workflows/test.yml/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid) +[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/oklog/ulid/v2) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE) + +A Go port of [ulid/javascript](https://github.com/ulid/javascript) with binary format implemented. + +## Background + +A GUID/UUID can be suboptimal for many use-cases because: + +- It isn't the most character efficient way of encoding 128 bits +- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address +- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures +- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures + +A ULID however: + +- Is compatible with UUID/GUID's +- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact) +- Lexicographically sortable +- Canonically encoded as a 26 character string, as opposed to the 36 character UUID +- Uses Crockford's base32 for better efficiency and readability (5 bits per character) +- Case insensitive +- No special characters (URL safe) +- Monotonic sort order (correctly detects and handles the same millisecond) + +## Install + +This package requires Go modules. + +```shell +go get github.com/oklog/ulid/v2 +``` + +## Usage + +ULIDs are constructed from two things: a timestamp with millisecond precision, +and some random data. + +Timestamps are modeled as uint64 values representing a Unix time in milliseconds. +They can be produced by passing a [time.Time](https://pkg.go.dev/time#Time) to +[ulid.Timestamp](https://pkg.go.dev/github.com/oklog/ulid/v2#Timestamp), +or by calling [time.Time.UnixMilli](https://pkg.go.dev/time#Time.UnixMilli) +and converting the returned value to `uint64`. + +Random data is taken from a provided [io.Reader](https://pkg.go.dev/io#Reader). +This design allows for greater flexibility when choosing trade-offs, but can be +a bit confusing to newcomers. 
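+
+As a sketch of those two inputs, here is one way to combine a timestamp with a
+cryptographically secure entropy source (any `io.Reader` works; the names and
+output shown are illustrative):
+
+```go
+ms := ulid.Timestamp(time.Now())     // uint64 Unix time in milliseconds
+id, err := ulid.New(ms, rand.Reader) // rand is "crypto/rand"
+if err != nil {
+	// reading entropy failed, or the timestamp exceeds ulid.MaxTime()
+}
+fmt.Println(id) // e.g. 01G65Z755AFWAKHE12NY0CQ9FH
+```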
+ +If you just want to generate a ULID and don't (yet) care about details like +performance, cryptographic security, monotonicity, etc., use the +[ulid.Make](https://pkg.go.dev/github.com/oklog/ulid/v2#Make) helper function. +This function calls [time.Now](https://pkg.go.dev/time#Now) to get a timestamp, +and uses a source of entropy which is process-global, +[pseudo-random](https://pkg.go.dev/math/rand)), and +[monotonic](https://pkg.go.dev/oklog/ulid/v2#LockedMonotonicReader)). + +```go +println(ulid.Make()) +// 01G65Z755AFWAKHE12NY0CQ9FH +``` + +More advanced use cases should utilize +[ulid.New](https://pkg.go.dev/github.com/oklog/ulid/v2#New). + +```go +entropy := rand.New(rand.NewSource(time.Now().UnixNano())) +ms := ulid.Timestamp(time.Now()) +println(ulid.New(ms, entropy)) +// 01G65Z755AFWAKHE12NY0CQ9FH +``` + +Care should be taken when providing a source of entropy. + +The above example utilizes [math/rand.Rand](https://pkg.go.dev/math/rand#Rand), +which is not safe for concurrent use by multiple goroutines. Consider +alternatives such as +[x/exp/rand](https://pkg.go.dev/golang.org/x/exp/rand#LockedSource). +Security-sensitive use cases should always use cryptographically secure entropy +provided by [crypto/rand](https://pkg.go.dev/crypto/rand). + +Performance-sensitive use cases should avoid synchronization when generating +IDs. One option is to use a unique source of entropy for each concurrent +goroutine, which results in no lock contention, but cannot provide strong +guarantees about the random data, and does not provide monotonicity within a +given millisecond. One common performance optimization is to pool sources of +entropy using a [sync.Pool](https://pkg.go.dev/sync#Pool). + +Monotonicity is a property that says each ULID is "bigger than" the previous +one. ULIDs are automatically monotonic, but only to millisecond precision. ULIDs +generated within the same millisecond are ordered by their random component, +which means they are by default un-ordered. You can use +[ulid.MonotonicEntropy](https://pkg.go.dev/oklog/ulid/v2#MonotonicEntropy) or +[ulid.LockedMonotonicEntropy](https://pkg.go.dev/oklog/ulid/v2#LockedMonotonicEntropy) +to create ULIDs that are monotonic within a given millisecond, with caveats. See +the documentation for details. + +If you don't care about time-based ordering of generated IDs, then there's no +reason to use ULIDs! There are many other kinds of IDs that are easier, faster, +smaller, etc. Consider UUIDs. + +## Commandline tool + +This repo also provides a tool to generate and parse ULIDs at the command line. +These commands should install the latest version of the tool at `bin/ulid`: + +```shell +cd $(mktemp -d) +env GOPATH=$(pwd) GO111MODULE=on go get -v github.com/oklog/ulid/v2/cmd/ulid +``` + +Usage: + +```shell +Usage: ulid [-hlqz] [-f ] [parameters ...] + -f, --format= when parsing, show times in this format: default, rfc3339, unix, ms + -h, --help print this help text + -l, --local when parsing, show local time instead of UTC + -q, --quick when generating, use non-crypto-grade entropy + -z, --zero when generating, fix entropy to all-zeroes +``` + +Examples: + +```shell +$ ulid +01D78XYFJ1PRM1WPBCBT3VHMNV +$ ulid -z +01D78XZ44G0000000000000000 +$ ulid 01D78XZ44G0000000000000000 +Sun Mar 31 03:51:23.536 UTC 2019 +$ ulid --format=rfc3339 --local 01D78XZ44G0000000000000000 +2019-03-31T05:51:23.536+02:00 +``` + +## Specification + +Below is the current specification of ULID as implemented in this repository. 
+ +### Components + +**Timestamp** +- 48 bits +- UNIX-time in milliseconds +- Won't run out of space till the year 10889 AD + +**Entropy** +- 80 bits +- User defined entropy source. +- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic) + +### Encoding + +[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown. +This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse. + +``` +0123456789ABCDEFGHJKMNPQRSTVWXYZ +``` + +### Binary Layout and Byte Order + +The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order). + +``` +0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_time_high | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 16_bit_uint_time_low | 16_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +| 32_bit_uint_random | ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +``` + +### String Representation + +``` + 01AN4Z07BY 79KA1307SR9X4MV3 +|----------| |----------------| + Timestamp Entropy + 10 chars 16 chars + 48bits 80bits + base32 base32 +``` + +## Test + +```shell +go test ./... +``` + +## Benchmarks + +On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 + +``` +BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op +BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op +BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op +BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op +BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op +BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op +BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op +BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op +BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op +BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op +BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op +BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op +BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op +BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op +BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op +BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op +``` + +## Prior Art + +- [ulid/javascript](https://github.com/ulid/javascript) +- [RobThree/NUlid](https://github.com/RobThree/NUlid) +- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/v2/ulid.go 
b/vendor/github.com/oklog/ulid/v2/ulid.go new file mode 100644 index 0000000000..0cb258d431 --- /dev/null +++ b/vendor/github.com/oklog/ulid/v2/ulid.go @@ -0,0 +1,696 @@ +// Copyright 2016 The Oklog Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ulid + +import ( + "bufio" + "bytes" + "database/sql/driver" + "encoding/binary" + "errors" + "io" + "math" + "math/bits" + "math/rand" + "sync" + "time" +) + +/* +An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier + + The components are encoded as 16 octets. + Each component is encoded with the MSB first (network byte order). + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 16_bit_uint_time_low | 16_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | 32_bit_uint_random | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ +type ULID [16]byte + +var ( + // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong + // data size. + ErrDataSize = errors.New("ulid: bad data size when unmarshaling") + + // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with + // invalid Base32 encodings. + ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") + + // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient + // size. + ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") + + // ErrBigTime is returned when constructing an ULID with a time that is larger + // than MaxTime. + ErrBigTime = errors.New("ulid: time too big") + + // ErrOverflow is returned when unmarshaling a ULID whose first character is + // larger than 7, thereby exceeding the valid bit depth of 128. + ErrOverflow = errors.New("ulid: overflow when unmarshaling") + + // ErrMonotonicOverflow is returned by a Monotonic entropy source when + // incrementing the previous ULID's entropy bytes would result in overflow. + ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") + + // ErrScanValue is returned when the value passed to scan cannot be unmarshaled + // into the ULID. + ErrScanValue = errors.New("ulid: source value must be a string or byte slice") +) + +// MonotonicReader is an interface that should yield monotonically increasing +// entropy into the provided slice for all calls with the same ms parameter. If +// a MonotonicReader is provided to the New constructor, its MonotonicRead +// method will be used instead of Read. +type MonotonicReader interface { + io.Reader + MonotonicRead(ms uint64, p []byte) error +} + +// New returns an ULID with the given Unix milliseconds timestamp and an +// optional entropy source. 
Use the Timestamp function to convert +// a time.Time to Unix milliseconds. +// +// ErrBigTime is returned when passing a timestamp bigger than MaxTime. +// Reading from the entropy source may also return an error. +// +// Safety for concurrent use is only dependent on the safety of the +// entropy source. +func New(ms uint64, entropy io.Reader) (id ULID, err error) { + if err = id.SetTime(ms); err != nil { + return id, err + } + + switch e := entropy.(type) { + case nil: + return id, err + case MonotonicReader: + err = e.MonotonicRead(ms, id[6:]) + default: + _, err = io.ReadFull(e, id[6:]) + } + + return id, err +} + +// MustNew is a convenience function equivalent to New that panics on failure +// instead of returning an error. +func MustNew(ms uint64, entropy io.Reader) ULID { + id, err := New(ms, entropy) + if err != nil { + panic(err) + } + return id +} + +var ( + entropy io.Reader + entropyOnce sync.Once +) + +// DefaultEntropy returns a thread-safe per process monotonically increasing +// entropy source. +func DefaultEntropy() io.Reader { + entropyOnce.Do(func() { + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + entropy = &LockedMonotonicReader{ + MonotonicReader: Monotonic(rng, 0), + } + }) + return entropy +} + +// Make returns an ULID with the current time in Unix milliseconds and +// monotonically increasing entropy for the same millisecond. +// It is safe for concurrent use, leveraging a sync.Pool underneath for minimal +// contention. +func Make() (id ULID) { + // NOTE: MustNew can't panic since DefaultEntropy never returns an error. + return MustNew(Now(), DefaultEntropy()) +} + +// Parse parses an encoded ULID, returning an error in case of failure. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. For a version that +// returns an error instead, see ParseStrict. +func Parse(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), false, &id) +} + +// ParseStrict parses an encoded ULID, returning an error in case of failure. +// +// It is like Parse, but additionally validates that the parsed ULID consists +// only of valid base32 characters. It is slightly slower than Parse. +// +// ErrDataSize is returned if the len(ulid) is different from an encoded +// ULID's length. Invalid encodings return ErrInvalidCharacters. +func ParseStrict(ulid string) (id ULID, err error) { + return id, parse([]byte(ulid), true, &id) +} + +func parse(v []byte, strict bool, id *ULID) error { + // Check if a base32 encoded ULID is the right length. + if len(v) != EncodedSize { + return ErrDataSize + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if strict && + (dec[v[0]] == 0xFF || + dec[v[1]] == 0xFF || + dec[v[2]] == 0xFF || + dec[v[3]] == 0xFF || + dec[v[4]] == 0xFF || + dec[v[5]] == 0xFF || + dec[v[6]] == 0xFF || + dec[v[7]] == 0xFF || + dec[v[8]] == 0xFF || + dec[v[9]] == 0xFF || + dec[v[10]] == 0xFF || + dec[v[11]] == 0xFF || + dec[v[12]] == 0xFF || + dec[v[13]] == 0xFF || + dec[v[14]] == 0xFF || + dec[v[15]] == 0xFF || + dec[v[16]] == 0xFF || + dec[v[17]] == 0xFF || + dec[v[18]] == 0xFF || + dec[v[19]] == 0xFF || + dec[v[20]] == 0xFF || + dec[v[21]] == 0xFF || + dec[v[22]] == 0xFF || + dec[v[23]] == 0xFF || + dec[v[24]] == 0xFF || + dec[v[25]] == 0xFF) { + return ErrInvalidCharacters + } + + // Check if the first character in a base32 encoded ULID will overflow. 
This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if v[0] > '7' { + return ErrOverflow + } + + // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) + // to decode a base32 ULID. + + // 6 bytes timestamp (48 bits) + (*id)[0] = (dec[v[0]] << 5) | dec[v[1]] + (*id)[1] = (dec[v[2]] << 3) | (dec[v[3]] >> 2) + (*id)[2] = (dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4) + (*id)[3] = (dec[v[5]] << 4) | (dec[v[6]] >> 1) + (*id)[4] = (dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3) + (*id)[5] = (dec[v[8]] << 5) | dec[v[9]] + + // 10 bytes of entropy (80 bits) + (*id)[6] = (dec[v[10]] << 3) | (dec[v[11]] >> 2) + (*id)[7] = (dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4) + (*id)[8] = (dec[v[13]] << 4) | (dec[v[14]] >> 1) + (*id)[9] = (dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3) + (*id)[10] = (dec[v[16]] << 5) | dec[v[17]] + (*id)[11] = (dec[v[18]] << 3) | dec[v[19]]>>2 + (*id)[12] = (dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4) + (*id)[13] = (dec[v[21]] << 4) | (dec[v[22]] >> 1) + (*id)[14] = (dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3) + (*id)[15] = (dec[v[24]] << 5) | dec[v[25]] + + return nil +} + +// MustParse is a convenience function equivalent to Parse that panics on failure +// instead of returning an error. +func MustParse(ulid string) ULID { + id, err := Parse(ulid) + if err != nil { + panic(err) + } + return id +} + +// MustParseStrict is a convenience function equivalent to ParseStrict that +// panics on failure instead of returning an error. +func MustParseStrict(ulid string) ULID { + id, err := ParseStrict(ulid) + if err != nil { + panic(err) + } + return id +} + +// Bytes returns bytes slice representation of ULID. +func (id ULID) Bytes() []byte { + return id[:] +} + +// String returns a lexicographically sortable string encoded ULID +// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3. +// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy. +func (id ULID) String() string { + ulid := make([]byte, EncodedSize) + _ = id.MarshalTextTo(ulid) + return string(ulid) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface by +// returning the ULID as a byte slice. +func (id ULID) MarshalBinary() ([]byte, error) { + ulid := make([]byte, len(id)) + return ulid, id.MarshalBinaryTo(ulid) +} + +// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. +// ErrBufferSize is returned when the len(dst) != 16. +func (id ULID) MarshalBinaryTo(dst []byte) error { + if len(dst) != len(id) { + return ErrBufferSize + } + + copy(dst, id[:]) + return nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by +// copying the passed data and converting it to an ULID. ErrDataSize is +// returned if the data length is different from ULID length. +func (id *ULID) UnmarshalBinary(data []byte) error { + if len(data) != len(*id) { + return ErrDataSize + } + + copy((*id)[:], data) + return nil +} + +// Encoding is the base 32 encoding alphabet used in ULID strings. +const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" + +// MarshalText implements the encoding.TextMarshaler interface by +// returning the string encoded ULID. +func (id ULID) MarshalText() ([]byte, error) { + ulid := make([]byte, EncodedSize) + return ulid, id.MarshalTextTo(ulid) +} + +// MarshalTextTo writes the ULID as a string to the given buffer. 
+// ErrBufferSize is returned when the len(dst) != 26. +func (id ULID) MarshalTextTo(dst []byte) error { + // Optimized unrolled loop ahead. + // From https://github.com/RobThree/NUlid + + if len(dst) != EncodedSize { + return ErrBufferSize + } + + // 10 byte timestamp + dst[0] = Encoding[(id[0]&224)>>5] + dst[1] = Encoding[id[0]&31] + dst[2] = Encoding[(id[1]&248)>>3] + dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] + dst[4] = Encoding[(id[2]&62)>>1] + dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] + dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] + dst[7] = Encoding[(id[4]&124)>>2] + dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] + dst[9] = Encoding[id[5]&31] + + // 16 bytes of entropy + dst[10] = Encoding[(id[6]&248)>>3] + dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] + dst[12] = Encoding[(id[7]&62)>>1] + dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] + dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] + dst[15] = Encoding[(id[9]&124)>>2] + dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] + dst[17] = Encoding[id[10]&31] + dst[18] = Encoding[(id[11]&248)>>3] + dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] + dst[20] = Encoding[(id[12]&62)>>1] + dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] + dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] + dst[23] = Encoding[(id[14]&124)>>2] + dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] + dst[25] = Encoding[id[15]&31] + + return nil +} + +// Byte to index table for O(1) lookups when unmarshaling. +// We use 0xFF as sentinel value for invalid indexes. +var dec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// EncodedSize is the length of a text encoded ULID. +const EncodedSize = 26 + +// UnmarshalText implements the encoding.TextUnmarshaler interface by +// parsing the data as string encoded ULID. +// +// ErrDataSize is returned if the len(v) is different from an encoded +// ULID's length. Invalid encodings produce undefined ULIDs. 
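Editor's aside (illustrative sketch, not part of the vendored patch): the exported API added above can be exercised as follows, assuming only the vendored module path github.com/oklog/ulid/v2 and the standard library.

package main

import (
	"fmt"
	"time"

	"github.com/oklog/ulid/v2"
)

func main() {
	// Make combines the current Unix-millisecond time with the package's
	// locked monotonic entropy source, so it is safe for concurrent use.
	id := ulid.Make()
	fmt.Println(id.String()) // 26-character Crockford base32 text form

	// ParseStrict round-trips the text form and additionally rejects
	// characters outside the base32 alphabet.
	parsed, err := ulid.ParseStrict(id.String())
	if err != nil {
		panic(err)
	}

	// The first 48 bits carry the timestamp; Time converts it back.
	fmt.Println(ulid.Time(parsed.Time()).Format(time.RFC3339Nano))
}

New and MustNew behave like Make but take a caller-supplied entropy reader, which is useful when tests need deterministic output.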
+func (id *ULID) UnmarshalText(v []byte) error { + return parse(v, false, id) +} + +// Time returns the Unix time in milliseconds encoded in the ULID. +// Use the top level Time function to convert the returned value to +// a time.Time. +func (id ULID) Time() uint64 { + return uint64(id[5]) | uint64(id[4])<<8 | + uint64(id[3])<<16 | uint64(id[2])<<24 | + uint64(id[1])<<32 | uint64(id[0])<<40 +} + +// maxTime is the maximum Unix time in milliseconds that can be +// represented in an ULID. +var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() + +// MaxTime returns the maximum Unix time in milliseconds that +// can be encoded in an ULID. +func MaxTime() uint64 { return maxTime } + +// Now is a convenience function that returns the current +// UTC time in Unix milliseconds. Equivalent to: +// Timestamp(time.Now().UTC()) +func Now() uint64 { return Timestamp(time.Now().UTC()) } + +// Timestamp converts a time.Time to Unix milliseconds. +// +// Because of the way ULID stores time, times from the year +// 10889 produces undefined results. +func Timestamp(t time.Time) uint64 { + return uint64(t.Unix())*1000 + + uint64(t.Nanosecond()/int(time.Millisecond)) +} + +// Time converts Unix milliseconds in the format +// returned by the Timestamp function to a time.Time. +func Time(ms uint64) time.Time { + s := int64(ms / 1e3) + ns := int64((ms % 1e3) * 1e6) + return time.Unix(s, ns) +} + +// SetTime sets the time component of the ULID to the given Unix time +// in milliseconds. +func (id *ULID) SetTime(ms uint64) error { + if ms > maxTime { + return ErrBigTime + } + + (*id)[0] = byte(ms >> 40) + (*id)[1] = byte(ms >> 32) + (*id)[2] = byte(ms >> 24) + (*id)[3] = byte(ms >> 16) + (*id)[4] = byte(ms >> 8) + (*id)[5] = byte(ms) + + return nil +} + +// Entropy returns the entropy from the ULID. +func (id ULID) Entropy() []byte { + e := make([]byte, 10) + copy(e, id[6:]) + return e +} + +// SetEntropy sets the ULID entropy to the passed byte slice. +// ErrDataSize is returned if len(e) != 10. +func (id *ULID) SetEntropy(e []byte) error { + if len(e) != 10 { + return ErrDataSize + } + + copy((*id)[6:], e) + return nil +} + +// Compare returns an integer comparing id and other lexicographically. +// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. +func (id ULID) Compare(other ULID) int { + return bytes.Compare(id[:], other[:]) +} + +// Scan implements the sql.Scanner interface. It supports scanning +// a string or byte slice. +func (id *ULID) Scan(src interface{}) error { + switch x := src.(type) { + case nil: + return nil + case string: + return id.UnmarshalText([]byte(x)) + case []byte: + return id.UnmarshalBinary(x) + } + + return ErrScanValue +} + +// Value implements the sql/driver.Valuer interface, returning the ULID as a +// slice of bytes, by invoking MarshalBinary. If your use case requires a string +// representation instead, you can create a wrapper type that calls String() +// instead. +// +// type stringValuer ulid.ULID +// +// func (v stringValuer) Value() (driver.Value, error) { +// return ulid.ULID(v).String(), nil +// } +// +// // Example usage. +// db.Exec("...", stringValuer(id)) +// +// All valid ULIDs, including zero-value ULIDs, return a valid Value with a nil +// error. If your use case requires zero-value ULIDs to return a non-nil error, +// you can create a wrapper type that special-cases this behavior. 
+// +// var zeroValueULID ulid.ULID +// +// type invalidZeroValuer ulid.ULID +// +// func (v invalidZeroValuer) Value() (driver.Value, error) { +// if ulid.ULID(v).Compare(zeroValueULID) == 0 { +// return nil, fmt.Errorf("zero value") +// } +// return ulid.ULID(v).Value() +// } +// +// // Example usage. +// db.Exec("...", invalidZeroValuer(id)) +// +func (id ULID) Value() (driver.Value, error) { + return id.MarshalBinary() +} + +// Monotonic returns an entropy source that is guaranteed to yield +// strictly increasing entropy bytes for the same ULID timestamp. +// On conflicts, the previous ULID entropy is incremented with a +// random number between 1 and `inc` (inclusive). +// +// The provided entropy source must actually yield random bytes or else +// monotonic reads are not guaranteed to terminate, since there isn't +// enough randomness to compute an increment number. +// +// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. +// The lower the value of `inc`, the easier the next ULID within the +// same millisecond is to guess. If your code depends on ULIDs having +// secure entropy bytes, then don't go under this default unless you know +// what you're doing. +// +// The returned type isn't safe for concurrent use. +func Monotonic(entropy io.Reader, inc uint64) *MonotonicEntropy { + m := MonotonicEntropy{ + Reader: bufio.NewReader(entropy), + inc: inc, + } + + if m.inc == 0 { + m.inc = math.MaxUint32 + } + + if rng, ok := entropy.(rng); ok { + m.rng = rng + } + + return &m +} + +type rng interface{ Int63n(n int64) int64 } + +// LockedMonotonicReader wraps a MonotonicReader with a sync.Mutex for +// safe concurrent use. +type LockedMonotonicReader struct { + mu sync.Mutex + MonotonicReader +} + +// MonotonicRead synchronizes calls to the wrapped MonotonicReader. +func (r *LockedMonotonicReader) MonotonicRead(ms uint64, p []byte) (err error) { + r.mu.Lock() + err = r.MonotonicReader.MonotonicRead(ms, p) + r.mu.Unlock() + return err +} + +// MonotonicEntropy is an opaque type that provides monotonic entropy. +type MonotonicEntropy struct { + io.Reader + ms uint64 + inc uint64 + entropy uint80 + rand [8]byte + rng rng +} + +// MonotonicRead implements the MonotonicReader interface. +func (m *MonotonicEntropy) MonotonicRead(ms uint64, entropy []byte) (err error) { + if !m.entropy.IsZero() && m.ms == ms { + err = m.increment() + m.entropy.AppendTo(entropy) + } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { + m.ms = ms + m.entropy.SetBytes(entropy) + } + return err +} + +// increment the previous entropy number with a random number +// of up to m.inc (inclusive). +func (m *MonotonicEntropy) increment() error { + if inc, err := m.random(); err != nil { + return err + } else if m.entropy.Add(inc) { + return ErrMonotonicOverflow + } + return nil +} + +// random returns a uniform random value in [1, m.inc), reading entropy +// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1. +// Adapted from: https://golang.org/pkg/crypto/rand/#Int +func (m *MonotonicEntropy) random() (inc uint64, err error) { + if m.inc <= 1 { + return 1, nil + } + + // Fast path for using a underlying rand.Rand directly. + if m.rng != nil { + // Range: [1, m.inc) + return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil + } + + // bitLen is the maximum bit length needed to encode a value < m.inc. + bitLen := bits.Len64(m.inc) + + // byteLen is the maximum byte length needed to encode a value < m.inc. 
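// Editor's illustration (not part of the vendored source): with m.inc = 1000,
// bits.Len64(1000) = 10, so byteLen below is (10+7)/8 = 2 bytes per read,
// msbitLen is 10 % 8 = 2, and the top byte is masked with 1<<2 - 1 = 0b11,
// keeping candidates below 1024 so the rejection loop rarely needs a retry.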
+ byteLen := uint(bitLen+7) / 8 + + // msbitLen is the number of bits in the most significant byte of m.inc-1. + msbitLen := uint(bitLen % 8) + if msbitLen == 0 { + msbitLen = 8 + } + + for inc == 0 || inc >= m.inc { + if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil { + return 0, err + } + + // Clear bits in the first byte to increase the probability + // that the candidate is < m.inc. + m.rand[0] &= uint8(int(1< 0 { - _, ts := stale.pq.Peek() - if now.Sub(ts) < max { - break - } - id, _ := stale.pq.Pop() - ids = append(ids, id) - } - - return ids -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go deleted file mode 100644 index 5f0d715b69..0000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package streams // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams" - -import ( - "go.opentelemetry.io/collector/pdata/pcommon" - - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" -) - -// Sequence of streams that can be iterated upon -type Seq[T any] func(yield func(identity.Stream, T) bool) bool - -// Map defines a collection of items tracked by a stream-id and the operations -// on it -type Map[T any] interface { - Load(identity.Stream) (T, bool) - Store(identity.Stream, T) error - Delete(identity.Stream) - Items() func(yield func(identity.Stream, T) bool) bool - Len() int - Clear() -} - -var _ Map[any] = HashMap[any](nil) - -type HashMap[T any] map[identity.Stream]T - -func (m HashMap[T]) Load(id identity.Stream) (T, bool) { - v, ok := (map[identity.Stream]T)(m)[id] - return v, ok -} - -func (m HashMap[T]) Store(id identity.Stream, v T) error { - (map[identity.Stream]T)(m)[id] = v - return nil -} - -func (m HashMap[T]) Delete(id identity.Stream) { - delete((map[identity.Stream]T)(m), id) -} - -func (m HashMap[T]) Items() func(yield func(identity.Stream, T) bool) bool { - return func(yield func(identity.Stream, T) bool) bool { - for id, v := range (map[identity.Stream]T)(m) { - if !yield(id, v) { - break - } - } - return false - } -} - -func (m HashMap[T]) Len() int { - return len((map[identity.Stream]T)(m)) -} - -func (m HashMap[T]) Clear() { - clear(m) -} - -// Evictors remove the "least important" stream based on some strategy such as -// the oldest, least active, etc. 
-// -// Returns whether a stream was evicted and if so the now gone stream id -type Evictor interface { - Evict() (gone identity.Stream, ok bool) -} - -type DataPointSlice[DP DataPoint[DP]] interface { - Len() int - At(i int) DP - AppendEmpty() DP -} - -type DataPoint[Self any] interface { - Timestamp() pcommon.Timestamp - Attributes() pcommon.Map - CopyTo(dest Self) -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go index 172789c607..9f08870936 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go @@ -130,10 +130,9 @@ func (hw *hashWriter) writeMapHash(m pcommon.Map) { // on the first call due to it being cleared of any added keys at then end of the function. nextIndex := len(hw.keysBuf) - m.Range(func(k string, _ pcommon.Value) bool { + for k := range m.All() { hw.keysBuf = append(hw.keysBuf, k) - return true - }) + } // Get only the newly added keys from the buffer by slicing the buffer from nextIndex to the end workingKeySet := hw.keysBuf[nextIndex:] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml index f726a58cdf..ac3c7541c0 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml @@ -1,3 +1,4 @@ status: + class: pkg codeowners: active: [dmitryax] \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go index f5b5c1c59d..589bdc744d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go @@ -10,11 +10,12 @@ import ( "time" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/xconfmap" telemetry "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry" ) -var _ component.ConfigValidator = (*Config)(nil) +var _ xconfmap.Validator = (*Config)(nil) type Config struct { MaxStale time.Duration `mapstructure:"max_stale"` diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md index f9c560a870..f11a329c32 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md @@ -6,15 +6,7 @@ The following telemetry is emitted by this component. 
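Editor's aside on the pdatautil hash.go change earlier in this patch (illustrative sketch, not part of the vendored sources): pcommon.Map.All returns a Go 1.23 range-over-func iterator that replaces the callback-style Map.Range. A minimal sketch, assuming go.opentelemetry.io/collector/pdata/pcommon:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	m := pcommon.NewMap()
	m.PutStr("service.name", "loki")
	m.PutInt("shard", 3)

	// Ranging over All() visits every key/value pair, like Range did,
	// but reads as an ordinary loop and supports break/continue directly.
	for k, v := range m.All() {
		fmt.Println(k, v.AsString())
	}
}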
-### otelcol_deltatocumulative.datapoints.dropped - -number of datapoints dropped due to given 'reason' - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {datapoint} | Sum | Int | true | - -### otelcol_deltatocumulative.datapoints.linear +### otelcol_deltatocumulative_datapoints total number of datapoints processed. may have 'error' attribute, if processing failed @@ -22,31 +14,7 @@ total number of datapoints processed. may have 'error' attribute, if processing | ---- | ----------- | ---------- | --------- | | {datapoint} | Sum | Int | true | -### otelcol_deltatocumulative.datapoints.processed - -number of datapoints processed - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {datapoint} | Sum | Int | true | - -### otelcol_deltatocumulative.gaps.length - -total duration where data was expected but not received - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| s | Sum | Int | true | - -### otelcol_deltatocumulative.streams.evicted - -number of streams evicted - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {stream} | Sum | Int | true | - -### otelcol_deltatocumulative.streams.limit +### otelcol_deltatocumulative_streams_limit upper limit of tracked streams @@ -54,7 +22,7 @@ upper limit of tracked streams | ---- | ----------- | ---------- | | {stream} | Gauge | Int | -### otelcol_deltatocumulative.streams.max_stale +### otelcol_deltatocumulative_streams_max_stale duration after which streams inactive streams are dropped @@ -62,15 +30,7 @@ duration after which streams inactive streams are dropped | ---- | ----------- | ---------- | | s | Gauge | Int | -### otelcol_deltatocumulative.streams.tracked - -number of streams tracked - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {dps} | Sum | Int | false | - -### otelcol_deltatocumulative.streams.tracked.linear +### otelcol_deltatocumulative_streams_tracked number of streams tracked diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go index 904ae1ee68..9e05d41965 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go @@ -5,7 +5,7 @@ package deltatocumulativeprocessor // import "github.com/open-telemetry/opentele import ( "context" - "fmt" + "errors" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" @@ -26,7 +26,7 @@ func NewFactory() processor.Factory { func createMetricsProcessor(_ context.Context, set processor.Settings, cfg component.Config, next consumer.Metrics) (processor.Metrics, error) { pcfg, ok := cfg.(*Config) if !ok { - return nil, fmt.Errorf("configuration parsing error") + return nil, errors.New("configuration parsing error") } tel, err := telemetry.New(set.TelemetrySettings) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go index 33c2f283c8..1da69b6c62 100644 
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go @@ -12,103 +12,130 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice" ) -func (dp Number) Add(in Number) Number { - switch in.ValueType() { +// Aggregator performs an operation on two datapoints. +// Given [pmetric] types are mutable by nature, this logically works as follows: +// +// *state = op(state, dp) +// +// See [Adder] for an implementation. +type Aggregator interface { + Numbers(state, dp pmetric.NumberDataPoint) error + Histograms(state, dp pmetric.HistogramDataPoint) error + Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error +} + +var _ Aggregator = (*Adder)(nil) + +// Adder adds (+) datapoints. +type Adder struct{} + +var maxBuckets = 160 + +func (add Adder) Numbers(state, dp pmetric.NumberDataPoint) error { + switch dp.ValueType() { case pmetric.NumberDataPointValueTypeDouble: - v := dp.DoubleValue() + in.DoubleValue() - dp.SetDoubleValue(v) + v := state.DoubleValue() + dp.DoubleValue() + state.SetDoubleValue(v) case pmetric.NumberDataPointValueTypeInt: - v := dp.IntValue() + in.IntValue() - dp.SetIntValue(v) + v := state.IntValue() + dp.IntValue() + state.SetIntValue(v) } - dp.SetTimestamp(in.Timestamp()) - return dp + return nil } -func (dp Histogram) Add(in Histogram) Histogram { +func (add Adder) Histograms(state, dp pmetric.HistogramDataPoint) error { // bounds different: no way to merge, so reset observation to new boundaries - if !pslice.Equal(dp.ExplicitBounds(), in.ExplicitBounds()) { - in.MoveTo(dp.HistogramDataPoint) - return dp + if !pslice.Equal(state.ExplicitBounds(), dp.ExplicitBounds()) { + dp.CopyTo(state) + return nil } // spec requires len(BucketCounts) == len(ExplicitBounds)+1. // given we have limited error handling at this stage (and already verified boundaries are correct), // doing a best-effort add of whatever we have appears reasonable. 
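// Editor's illustration (not part of the vendored source): with ExplicitBounds
// [0, 5, 10] both datapoints carry 4 bucket counts; for example state
// [1, 2, 3, 4] plus dp [0, 1, 1, 2] adds element-wise to [1, 3, 4, 6], and
// Count, Sum, Min and Max are merged just below.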
- n := min(dp.BucketCounts().Len(), in.BucketCounts().Len()) + n := min(state.BucketCounts().Len(), dp.BucketCounts().Len()) for i := 0; i < n; i++ { - sum := dp.BucketCounts().At(i) + in.BucketCounts().At(i) - dp.BucketCounts().SetAt(i, sum) + sum := state.BucketCounts().At(i) + dp.BucketCounts().At(i) + state.BucketCounts().SetAt(i, sum) } - dp.SetTimestamp(in.Timestamp()) - dp.SetCount(dp.Count() + in.Count()) + state.SetCount(state.Count() + dp.Count()) - if dp.HasSum() && in.HasSum() { - dp.SetSum(dp.Sum() + in.Sum()) + if state.HasSum() && dp.HasSum() { + state.SetSum(state.Sum() + dp.Sum()) } else { - dp.RemoveSum() + state.RemoveSum() } - if dp.HasMin() && in.HasMin() { - dp.SetMin(math.Min(dp.Min(), in.Min())) + if state.HasMin() && dp.HasMin() { + state.SetMin(math.Min(state.Min(), dp.Min())) } else { - dp.RemoveMin() + state.RemoveMin() } - if dp.HasMax() && in.HasMax() { - dp.SetMax(math.Max(dp.Max(), in.Max())) + if state.HasMax() && dp.HasMax() { + state.SetMax(math.Max(state.Max(), dp.Max())) } else { - dp.RemoveMax() + state.RemoveMax() } - return dp + return nil } -func (dp ExpHistogram) Add(in ExpHistogram) ExpHistogram { - type H = ExpHistogram +func (add Adder) Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error { + type H = pmetric.ExponentialHistogramDataPoint - if dp.Scale() != in.Scale() { - hi, lo := expo.HiLo(dp, in, H.Scale) + if state.Scale() != dp.Scale() { + hi, lo := expo.HiLo(state, dp, H.Scale) from, to := expo.Scale(hi.Scale()), expo.Scale(lo.Scale()) expo.Downscale(hi.Positive(), from, to) expo.Downscale(hi.Negative(), from, to) hi.SetScale(lo.Scale()) } - if dp.ZeroThreshold() != in.ZeroThreshold() { - hi, lo := expo.HiLo(dp, in, H.ZeroThreshold) - expo.WidenZero(lo.DataPoint, hi.ZeroThreshold()) + // Downscale if an expected number of buckets after the merge is too large. 
+ from := expo.Scale(state.Scale()) + to := min( + expo.Limit(maxBuckets, from, state.Positive(), dp.Positive()), + expo.Limit(maxBuckets, from, state.Negative(), dp.Negative()), + ) + if from != to { + expo.Downscale(state.Positive(), from, to) + expo.Downscale(state.Negative(), from, to) + expo.Downscale(dp.Positive(), from, to) + expo.Downscale(dp.Negative(), from, to) + state.SetScale(int32(to)) + dp.SetScale(int32(to)) } - expo.Merge(dp.Positive(), in.Positive()) - expo.Merge(dp.Negative(), in.Negative()) + if state.ZeroThreshold() != dp.ZeroThreshold() { + hi, lo := expo.HiLo(state, dp, H.ZeroThreshold) + expo.WidenZero(lo, hi.ZeroThreshold()) + } + + expo.Merge(state.Positive(), dp.Positive()) + expo.Merge(state.Negative(), dp.Negative()) - dp.SetTimestamp(in.Timestamp()) - dp.SetCount(dp.Count() + in.Count()) - dp.SetZeroCount(dp.ZeroCount() + in.ZeroCount()) + state.SetCount(state.Count() + dp.Count()) + state.SetZeroCount(state.ZeroCount() + dp.ZeroCount()) - if dp.HasSum() && in.HasSum() { - dp.SetSum(dp.Sum() + in.Sum()) + if state.HasSum() && dp.HasSum() { + state.SetSum(state.Sum() + dp.Sum()) } else { - dp.RemoveSum() + state.RemoveSum() } - if dp.HasMin() && in.HasMin() { - dp.SetMin(math.Min(dp.Min(), in.Min())) + if state.HasMin() && dp.HasMin() { + state.SetMin(math.Min(state.Min(), dp.Min())) } else { - dp.RemoveMin() + state.RemoveMin() } - if dp.HasMax() && in.HasMax() { - dp.SetMax(math.Max(dp.Max(), in.Max())) + if state.HasMax() && dp.HasMax() { + state.SetMax(math.Max(state.Max(), dp.Max())) } else { - dp.RemoveMax() + state.RemoveMax() } - return dp -} - -func (dp Summary) Add(Summary) Summary { - panic("todo") + return nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go deleted file mode 100644 index 3a36f6d552..0000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package data // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data" - -import ( - "go.opentelemetry.io/collector/pdata/pmetric" - - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo" -) - -type Number struct { - pmetric.NumberDataPoint -} - -type Histogram struct { - pmetric.HistogramDataPoint -} - -type ExpHistogram struct { - expo.DataPoint -} - -type Summary struct { - pmetric.SummaryDataPoint -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go index 150e29a658..82536ea1fa 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go @@ -23,6 +23,15 @@ func Merge(arel, brel Buckets) { lo := min(a.Lower(), b.Lower()) up := max(a.Upper(), b.Upper()) + // Skip leading and trailing zeros to reduce number of buckets. 
+ // As we cap number of buckets this allows us to have higher scale. + for lo < up && a.Abs(lo) == 0 && b.Abs(lo) == 0 { + lo++ + } + for lo < up-1 && a.Abs(up-1) == 0 && b.Abs(up-1) == 0 { + up-- + } + size := up - lo counts := pcommon.NewUInt64Slice() diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go index 5201806fb8..50fdef75c9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go @@ -6,6 +6,8 @@ package expo // import "github.com/open-telemetry/opentelemetry-collector-contri import ( "fmt" "math" + + "go.opentelemetry.io/collector/pdata/pmetric" ) type Scale int32 @@ -29,7 +31,7 @@ func (scale Scale) Idx(v float64) int { // This means a value min < v <= max belongs to this bucket. // // NOTE: this is different from Go slice intervals, which are [a,b) -func (scale Scale) Bounds(index int) (min, max float64) { +func (scale Scale) Bounds(index int) (minVal, maxVal float64) { // from: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#all-scales-use-the-logarithm-function lower := func(index int) float64 { inverseFactor := math.Ldexp(math.Ln2, int(-scale)) @@ -47,7 +49,7 @@ func Downscale(bs Buckets, from, to Scale) { case from < to: // because even distribution within the buckets cannot be assumed, it is // not possible to correctly upscale (split) buckets. - // any attempt to do so would yield erronous data. + // any attempt to do so would yield erroneous data. panic(fmt.Sprintf("cannot upscale without introducing error (%d -> %d)", from, to)) } @@ -107,9 +109,35 @@ func Collapse(bs Buckets) { // zero the excess area. its not needed to represent the observation // anymore, but kept for two reasons: // 1. future observations may need it, no need to re-alloc then if kept - // 2. [pcommon.Uint64Slice] can not, in fact, be sliced, so getting rid + // 2. [pcommon.Uint64Slice] cannot, in fact, be sliced, so getting rid // of it would alloc ¯\_(ツ)_/¯ for i := size; i < counts.Len(); i++ { counts.SetAt(i, 0) } } + +// Limit returns a target Scale that when be downscaled to, +// the total bucket count after [Merge] never exceeds maxBuckets. +func Limit(maxBuckets int, scale Scale, arel, brel pmetric.ExponentialHistogramDataPointBuckets) Scale { + a, b := Abs(arel), Abs(brel) + + lo := min(a.Lower(), b.Lower()) + up := max(a.Upper(), b.Upper()) + + // Skip leading and trailing zeros. + for lo < up && a.Abs(lo) == 0 && b.Abs(lo) == 0 { + lo++ + } + for lo < up-1 && a.Abs(up-1) == 0 && b.Abs(up-1) == 0 { + up-- + } + + // Keep downscaling until the number of buckets is within the limit. 
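// Editor's illustration (not part of the vendored source): with maxBuckets = 160
// and a merged index range of lo = -20, up = 400 (420 buckets), one halving
// gives [-10, 200) with 210 buckets and a second gives [-5, 100) with 105,
// so the returned scale is the input scale minus 2.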
+ for up-lo > maxBuckets { + lo /= 2 + up /= 2 + scale-- + } + + return scale +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go index 2d5401b39f..969c5f2734 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go @@ -37,8 +37,8 @@ func WidenZero(dp DataPoint, width float64) { widen(dp.Positive()) widen(dp.Negative()) - _, max := scale.Bounds(zero) - dp.SetZeroThreshold(max) + _, maxVal := scale.Bounds(zero) + dp.SetZeroThreshold(maxVal) } // Slice drops data outside the range from <= i < to from the bucket counts. It behaves the same as Go's [a:b] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go index f2a759e9bf..608932e722 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go @@ -30,18 +30,25 @@ func (e ErrOutOfOrder) Error() string { return fmt.Sprintf("out of order: dropped sample from time=%s, because series is already at time=%s", e.Sample, e.Last) } -type Type interface { +type Type[Self any] interface { pmetric.NumberDataPoint | pmetric.HistogramDataPoint | pmetric.ExponentialHistogramDataPoint StartTimestamp() pcommon.Timestamp Timestamp() pcommon.Timestamp + SetTimestamp(pcommon.Timestamp) + CopyTo(Self) } -// AccumulateInto adds state and dp, storing the result in state -// -// state = state + dp -func AccumulateInto[T Type](state, dp T) error { +type Aggregator struct { + data.Aggregator +} + +func Aggregate[T Type[T]](state, dp T, aggregate func(state, dp T) error) error { switch { + case state.Timestamp() == 0: + // first sample of series, no state to aggregate with + dp.CopyTo(state) + return nil case dp.StartTimestamp() < state.StartTimestamp(): // belongs to older series return ErrOlderStart{Start: state.StartTimestamp(), Sample: dp.StartTimestamp()} @@ -50,16 +57,22 @@ func AccumulateInto[T Type](state, dp T) error { return ErrOutOfOrder{Last: state.Timestamp(), Sample: dp.Timestamp()} } - switch dp := any(dp).(type) { - case pmetric.NumberDataPoint: - state := any(state).(pmetric.NumberDataPoint) - data.Number{NumberDataPoint: state}.Add(data.Number{NumberDataPoint: dp}) - case pmetric.HistogramDataPoint: - state := any(state).(pmetric.HistogramDataPoint) - data.Histogram{HistogramDataPoint: state}.Add(data.Histogram{HistogramDataPoint: dp}) - case pmetric.ExponentialHistogramDataPoint: - state := any(state).(pmetric.ExponentialHistogramDataPoint) - data.ExpHistogram{DataPoint: state}.Add(data.ExpHistogram{DataPoint: dp}) + if err := aggregate(state, dp); err != nil { + return err } + + state.SetTimestamp(dp.Timestamp()) return nil } + +func (aggr Aggregator) Numbers(state, dp pmetric.NumberDataPoint) error { + return Aggregate(state, dp, aggr.Aggregator.Numbers) +} + +func (aggr Aggregator) 
Histograms(state, dp pmetric.HistogramDataPoint) error { + return Aggregate(state, dp, aggr.Aggregator.Histograms) +} + +func (aggr Aggregator) Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error { + return Aggregate(state, dp, aggr.Aggregator.Exponential) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go new file mode 100644 index 0000000000..b93f9935d6 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package maps // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps" + +import ( + "fmt" + "sync/atomic" + + "github.com/puzpuzpuz/xsync/v3" +) + +func Limit(limit int64) Context { + return Context{limit: limit, guard: new(atomic.Int64), total: new(atomic.Int64)} +} + +func New[K comparable, V any](ctx Context) *Parallel[K, V] { + return &Parallel[K, V]{ctx: ctx, elems: *xsync.NewMapOf[K, V]()} +} + +// lowercase alias for unexported embedding +type ctx = Context + +// Parallel is a lock-free map-like structure. It can be safely used by multiple +// routines concurrently. +// +// Due to the lock-free nature, typical get, put, delete operations are not +// available. Instead, [Parallel.LoadOrStore] returns an existing value or +// inserts a new one if missing. As such, values themselves should be mutable by +// being reference types (pointers or pmetric.* types). +// +// Parallel enforces the [Context] size limit. +type Parallel[K comparable, V any] struct { + ctx + elems xsync.MapOf[K, V] +} + +// Context holds size information about one or more maps. +// Can be shared across maps for a common limit. +type Context struct { + limit int64 + guard *atomic.Int64 + total *atomic.Int64 +} + +func (ctx Context) String() string { + return fmt.Sprintf("(%d, %d)", ctx.guard.Load(), ctx.total.Load()) +} + +// LoadOrStore loads existing values from the map or creates missing ones initialized to . +// +// Return Value: +// - , true: m[k] already existed and was loaded +// - , false: m[k] was created and initialized to +// - , false: m[k] did not exist but was not created due to size limit +func (m *Parallel[K, V]) LoadOrStore(k K, def V) (_ V, loaded bool) { + // multiple routines may attempt to LoadOrStore the same value at once. as + // such, we cannot use data-dependent instructions such as if(not exist) + // {...}, because the may have changed right after we checked + // it. + + v, ok := m.elems.Load(k) + if ok { + return v, true + } + + // as long as there appears to be actual space, try to store + for m.total.Load() < m.limit { + // multiple routines may do this. to enforce the limit, try to claim a + // "slot" below the limit + slot := m.guard.Add(1) + if slot > m.limit { + // slot we got is above the limit. either the map is now full (loop + // will exit) or routines that won't actually store hold slots, in + // which case we will try again. + m.guard.Add(-1) + continue + } + + // we got a valid slot. others may too. as such, we try to store, but + // may end up loading instead if another routine stored just before us. 
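// Editor's illustration (not part of the vendored source): with limit = 1, two
// goroutines racing on keys "a" and "b" may claim guard slots 1 and 2; the
// slot-2 holder gives its slot back and, because total has already reached the
// limit, falls through to the final Load below, which returns the zero value
// with loaded == false, so Exceeded reports the rejected stream.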
+ v, loaded = m.elems.LoadOrStore(k, def) + if loaded { + // another routine stored, but we got a value. give up slot + m.guard.Add(-1) + } else { + // we stored. increase the total size + m.total.Add(1) + } + return v, loaded + } + + // we didn't store, because we hit the limit. attempt another load, just in + // case another routine stored by now. + return m.elems.Load(k) +} + +// LoadAndDelete deletes m[k], returning the value it had if it existed +func (m *Parallel[K, V]) LoadAndDelete(k K) (_ V, loaded bool) { + v, loaded := m.elems.LoadAndDelete(k) + if loaded { + // m[k] did exist. decrease size and open up a slot + m.total.Add(-1) + m.guard.Add(-1) + } + return v, loaded +} + +func (ctx Context) Size() int64 { + return ctx.total.Load() +} + +// Exceeded reports whether a [Limited.LoadOrStore] failed due to the limit being exceeded. +func Exceeded[T comparable](v T, loaded bool) bool { + return !loaded && v == *new(T) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go index 82a4476ba9..c7134638b6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go @@ -5,13 +5,13 @@ package metadata import ( "context" "errors" + "sync" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" ) func Meter(settings component.TelemetrySettings) metric.Meter { @@ -25,17 +25,13 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { - meter metric.Meter - DeltatocumulativeDatapointsDropped metric.Int64Counter - DeltatocumulativeDatapointsLinear metric.Int64Counter - DeltatocumulativeDatapointsProcessed metric.Int64Counter - DeltatocumulativeGapsLength metric.Int64Counter - DeltatocumulativeStreamsEvicted metric.Int64Counter - DeltatocumulativeStreamsLimit metric.Int64Gauge - DeltatocumulativeStreamsMaxStale metric.Int64Gauge - DeltatocumulativeStreamsTracked metric.Int64UpDownCounter - DeltatocumulativeStreamsTrackedLinear metric.Int64ObservableUpDownCounter - observeDeltatocumulativeStreamsTrackedLinear func(context.Context, metric.Observer) error + meter metric.Meter + mu sync.Mutex + registrations []metric.Registration + DeltatocumulativeDatapoints metric.Int64Counter + DeltatocumulativeStreamsLimit metric.Int64Gauge + DeltatocumulativeStreamsMaxStale metric.Int64Gauge + DeltatocumulativeStreamsTracked metric.Int64ObservableUpDownCounter } // TelemetryBuilderOption applies changes to default builder. @@ -49,14 +45,38 @@ func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { tbof(mb) } -// WithDeltatocumulativeStreamsTrackedLinearCallback sets callback for observable DeltatocumulativeStreamsTrackedLinear metric. 
-func WithDeltatocumulativeStreamsTrackedLinearCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { - return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { - builder.observeDeltatocumulativeStreamsTrackedLinear = func(_ context.Context, o metric.Observer) error { - o.ObserveInt64(builder.DeltatocumulativeStreamsTrackedLinear, cb(), opts...) - return nil - } - }) +// RegisterDeltatocumulativeStreamsTrackedCallback sets callback for observable DeltatocumulativeStreamsTracked metric. +func (builder *TelemetryBuilder) RegisterDeltatocumulativeStreamsTrackedCallback(cb metric.Int64Callback) error { + reg, err := builder.meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error { + cb(ctx, &observerInt64{inst: builder.DeltatocumulativeStreamsTracked, obs: o}) + return nil + }, builder.DeltatocumulativeStreamsTracked) + if err != nil { + return err + } + builder.mu.Lock() + defer builder.mu.Unlock() + builder.registrations = append(builder.registrations, reg) + return nil +} + +type observerInt64 struct { + embedded.Int64Observer + inst metric.Int64Observable + obs metric.Observer +} + +func (oi *observerInt64) Observe(value int64, opts ...metric.ObserveOption) { + oi.obs.ObserveInt64(oi.inst, value, opts...) +} + +// Shutdown unregister all registered callbacks for async instruments. +func (builder *TelemetryBuilder) Shutdown() { + builder.mu.Lock() + defer builder.mu.Unlock() + for _, reg := range builder.registrations { + reg.Unregister() + } } // NewTelemetryBuilder provides a struct with methods to update all internal telemetry @@ -68,68 +88,29 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme } builder.meter = Meter(settings) var err, errs error - builder.DeltatocumulativeDatapointsDropped, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( - "otelcol_deltatocumulative.datapoints.dropped", - metric.WithDescription("number of datapoints dropped due to given 'reason'"), - metric.WithUnit("{datapoint}"), - ) - errs = errors.Join(errs, err) - builder.DeltatocumulativeDatapointsLinear, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( - "otelcol_deltatocumulative.datapoints.linear", + builder.DeltatocumulativeDatapoints, err = builder.meter.Int64Counter( + "otelcol_deltatocumulative_datapoints", metric.WithDescription("total number of datapoints processed. 
may have 'error' attribute, if processing failed"), metric.WithUnit("{datapoint}"), ) errs = errors.Join(errs, err) - builder.DeltatocumulativeDatapointsProcessed, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( - "otelcol_deltatocumulative.datapoints.processed", - metric.WithDescription("number of datapoints processed"), - metric.WithUnit("{datapoint}"), - ) - errs = errors.Join(errs, err) - builder.DeltatocumulativeGapsLength, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( - "otelcol_deltatocumulative.gaps.length", - metric.WithDescription("total duration where data was expected but not received"), - metric.WithUnit("s"), - ) - errs = errors.Join(errs, err) - builder.DeltatocumulativeStreamsEvicted, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( - "otelcol_deltatocumulative.streams.evicted", - metric.WithDescription("number of streams evicted"), - metric.WithUnit("{stream}"), - ) - errs = errors.Join(errs, err) - builder.DeltatocumulativeStreamsLimit, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge( - "otelcol_deltatocumulative.streams.limit", + builder.DeltatocumulativeStreamsLimit, err = builder.meter.Int64Gauge( + "otelcol_deltatocumulative_streams_limit", metric.WithDescription("upper limit of tracked streams"), metric.WithUnit("{stream}"), ) errs = errors.Join(errs, err) - builder.DeltatocumulativeStreamsMaxStale, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge( - "otelcol_deltatocumulative.streams.max_stale", + builder.DeltatocumulativeStreamsMaxStale, err = builder.meter.Int64Gauge( + "otelcol_deltatocumulative_streams_max_stale", metric.WithDescription("duration after which streams inactive streams are dropped"), metric.WithUnit("s"), ) errs = errors.Join(errs, err) - builder.DeltatocumulativeStreamsTracked, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64UpDownCounter( - "otelcol_deltatocumulative.streams.tracked", - metric.WithDescription("number of streams tracked"), - metric.WithUnit("{dps}"), - ) - errs = errors.Join(errs, err) - builder.DeltatocumulativeStreamsTrackedLinear, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableUpDownCounter( - "otelcol_deltatocumulative.streams.tracked.linear", + builder.DeltatocumulativeStreamsTracked, err = builder.meter.Int64ObservableUpDownCounter( + "otelcol_deltatocumulative_streams_tracked", metric.WithDescription("number of streams tracked"), metric.WithUnit("{dps}"), ) errs = errors.Join(errs, err) - _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeDeltatocumulativeStreamsTrackedLinear, builder.DeltatocumulativeStreamsTrackedLinear) - errs = errors.Join(errs, err) return &builder, errs } - -func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { - if cfgLevel <= srvLevel { - return meter - } - return noop.Meter{} -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go index ab6fde6550..401478fb54 100644 --- 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go @@ -21,27 +21,30 @@ func New(set component.TelemetrySettings) (Metrics, error) { tracked: &zero, } - trackedCb := metadata.WithDeltatocumulativeStreamsTrackedLinearCallback(func() int64 { - return int64((*m.tracked)()) + telb, err := metadata.NewTelemetryBuilder(set) + if err != nil { + return Metrics{}, err + } + err = telb.RegisterDeltatocumulativeStreamsTrackedCallback(func(_ context.Context, observer metric.Int64Observer) error { + observer.Observe(int64((*m.tracked)())) + return nil }) - - telb, err := metadata.NewTelemetryBuilder(set, trackedCb) if err != nil { return Metrics{}, err } - m.TelemetryBuilder = *telb + m.TelemetryBuilder = telb return m, nil } type Metrics struct { - metadata.TelemetryBuilder + *metadata.TelemetryBuilder tracked *func() int } -func (m Metrics) Datapoints() Counter { - return Counter{Int64Counter: m.DeltatocumulativeDatapointsLinear} +func (m *Metrics) Datapoints() Counter { + return Counter{Int64Counter: m.DeltatocumulativeDatapoints} } func (m *Metrics) WithTracked(streams func() int) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml index be925197db..3269c0b011 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml @@ -12,14 +12,7 @@ status: telemetry: metrics: # streams - deltatocumulative.streams.tracked: - description: number of streams tracked - unit: "{dps}" - sum: - value_type: int - monotonic: false - enabled: true - deltatocumulative.streams.tracked.linear: + deltatocumulative_streams_tracked: description: number of streams tracked unit: "{dps}" sum: @@ -27,52 +20,23 @@ telemetry: monotonic: false async: true enabled: true - deltatocumulative.streams.limit: + deltatocumulative_streams_limit: description: upper limit of tracked streams unit: "{stream}" gauge: value_type: int enabled: true - deltatocumulative.streams.evicted: - description: number of streams evicted - unit: "{stream}" - sum: - value_type: int - monotonic: true - enabled: true - deltatocumulative.streams.max_stale: + deltatocumulative_streams_max_stale: description: duration after which streams inactive streams are dropped unit: "s" gauge: value_type: int enabled: true # datapoints - deltatocumulative.datapoints.processed: - description: number of datapoints processed - unit: "{datapoint}" - sum: - value_type: int - monotonic: true - enabled: true - deltatocumulative.datapoints.dropped: - description: number of datapoints dropped due to given 'reason' - unit: "{datapoint}" - sum: - value_type: int - monotonic: true - enabled: true - - deltatocumulative.datapoints.linear: + deltatocumulative_datapoints: description: total number of datapoints processed. 
may have 'error' attribute, if processing failed unit: "{datapoint}" sum: value_type: int monotonic: true enabled: true - deltatocumulative.gaps.length: - description: total duration where data was expected but not received - unit: "s" - sum: - value_type: int - monotonic: true - enabled: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go index 149431b897..5fd061355d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go @@ -8,14 +8,16 @@ import ( "sync" "time" + "github.com/puzpuzpuz/xsync/v3" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/processor" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry" ) @@ -27,43 +29,49 @@ type Processor struct { cfg Config last state - mtx sync.Mutex + aggr data.Aggregator ctx context.Context cancel context.CancelFunc - stale staleness.Tracker + stale *xsync.MapOf[identity.Stream, time.Time] tel telemetry.Metrics } func newProcessor(cfg *Config, tel telemetry.Metrics, next consumer.Metrics) *Processor { ctx, cancel := context.WithCancel(context.Background()) + limit := maps.Limit(int64(cfg.MaxStreams)) proc := Processor{ next: next, cfg: *cfg, last: state{ - nums: make(map[identity.Stream]pmetric.NumberDataPoint), - hist: make(map[identity.Stream]pmetric.HistogramDataPoint), - expo: make(map[identity.Stream]pmetric.ExponentialHistogramDataPoint), + ctx: limit, + nums: maps.New[identity.Stream, *mutex[pmetric.NumberDataPoint]](limit), + hist: maps.New[identity.Stream, *mutex[pmetric.HistogramDataPoint]](limit), + expo: maps.New[identity.Stream, *mutex[pmetric.ExponentialHistogramDataPoint]](limit), }, + aggr: delta.Aggregator{Aggregator: new(data.Adder)}, ctx: ctx, cancel: cancel, - stale: staleness.NewTracker(), + stale: xsync.NewMapOf[identity.Stream, time.Time](), tel: tel, } - tel.WithTracked(proc.last.Len) + tel.WithTracked(proc.last.Size) cfg.Metrics(tel) return &proc } -func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { - p.mtx.Lock() - defer p.mtx.Unlock() +type vals struct { + nums *mutex[pmetric.NumberDataPoint] + hist *mutex[pmetric.HistogramDataPoint] + expo *mutex[pmetric.ExponentialHistogramDataPoint] +} +func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { now := time.Now() const ( @@ -71,6 +79,12 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro drop = false ) + 
zero := vals{ + nums: guard(pmetric.NewNumberDataPoint()), + hist: guard(pmetric.NewHistogramDataPoint()), + expo: guard(pmetric.NewExponentialHistogramDataPoint()), + } + metrics.Filter(md, func(m metrics.Metric) bool { if m.AggregationTemporality() != pmetric.AggregationTemporalityDelta { return keep @@ -85,41 +99,70 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro var attrs telemetry.Attributes defer func() { p.tel.Datapoints().Inc(ctx, attrs...) }() - // if stream new and state capacity reached, reject - exist := p.last.Has(id) - if !exist && p.last.Len() >= p.cfg.MaxStreams { - attrs.Set(telemetry.Error("limit")) - return drop - } - - // stream is ok and active, update stale tracker - p.stale.Refresh(now, id) - - // this is the first sample of the stream. there is nothing to - // aggregate with, so clone this value into the state and done - if !exist { - p.last.BeginWith(id, dp) - return keep - } - - // aggregate with state from previous requests. - // delta.AccumulateInto(state, dp) stores result in `state`. - // this is then copied into `dp` (the value passed onto the pipeline) var err error switch dp := dp.(type) { case pmetric.NumberDataPoint: - state := p.last.nums[id] - err = delta.AccumulateInto(state, dp) - state.CopyTo(dp) + last, loaded := p.last.nums.LoadOrStore(id, zero.nums) + if maps.Exceeded(last, loaded) { + // state is full, reject stream + attrs.Set(telemetry.Error("limit")) + return drop + } + + // stream is ok and active, update stale tracker + p.stale.Store(id, now) + + if !loaded { + // cached zero was stored, alloc new one + zero.nums = guard(pmetric.NewNumberDataPoint()) + } + + last.use(func(last pmetric.NumberDataPoint) { + err = p.aggr.Numbers(last, dp) + last.CopyTo(dp) + }) case pmetric.HistogramDataPoint: - state := p.last.hist[id] - err = delta.AccumulateInto(state, dp) - state.CopyTo(dp) + last, loaded := p.last.hist.LoadOrStore(id, zero.hist) + if maps.Exceeded(last, loaded) { + // state is full, reject stream + attrs.Set(telemetry.Error("limit")) + return drop + } + + // stream is ok and active, update stale tracker + p.stale.Store(id, now) + + if !loaded { + // cached zero was stored, alloc new one + zero.hist = guard(pmetric.NewHistogramDataPoint()) + } + + last.use(func(last pmetric.HistogramDataPoint) { + err = p.aggr.Histograms(last, dp) + last.CopyTo(dp) + }) case pmetric.ExponentialHistogramDataPoint: - state := p.last.expo[id] - err = delta.AccumulateInto(state, dp) - state.CopyTo(dp) + last, loaded := p.last.expo.LoadOrStore(id, zero.expo) + if maps.Exceeded(last, loaded) { + // state is full, reject stream + attrs.Set(telemetry.Error("limit")) + return drop + } + + // stream is ok and active, update stale tracker + p.stale.Store(id, now) + + if !loaded { + // cached zero was stored, alloc new one + zero.expo = guard(pmetric.NewExponentialHistogramDataPoint()) + } + + last.use(func(last pmetric.ExponentialHistogramDataPoint) { + err = p.aggr.Exponential(last, dp) + last.CopyTo(dp) + }) } + if err != nil { attrs.Set(telemetry.Cause(err)) return drop @@ -152,12 +195,16 @@ func (p *Processor) Start(_ context.Context, _ component.Host) error { case <-p.ctx.Done(): return case <-tick.C: - p.mtx.Lock() - stale := p.stale.Collect(p.cfg.MaxStale) - for _, id := range stale { - p.last.Delete(id) - } - p.mtx.Unlock() + now := time.Now() + p.stale.Range(func(id identity.Stream, last time.Time) bool { + if now.Sub(last) > p.cfg.MaxStale { + p.last.nums.LoadAndDelete(id) + p.last.hist.LoadAndDelete(id) + 
p.last.expo.LoadAndDelete(id) + p.stale.Delete(id) + } + return true + }) } } }() @@ -177,38 +224,27 @@ func (p *Processor) Capabilities() consumer.Capabilities { // state keeps a cumulative value, aggregated over time, per stream type state struct { - nums map[identity.Stream]pmetric.NumberDataPoint - hist map[identity.Stream]pmetric.HistogramDataPoint - expo map[identity.Stream]pmetric.ExponentialHistogramDataPoint + ctx maps.Context + nums *maps.Parallel[identity.Stream, *mutex[pmetric.NumberDataPoint]] + hist *maps.Parallel[identity.Stream, *mutex[pmetric.HistogramDataPoint]] + expo *maps.Parallel[identity.Stream, *mutex[pmetric.ExponentialHistogramDataPoint]] } -func (m state) Len() int { - return len(m.nums) + len(m.hist) + len(m.expo) +func (s state) Size() int { + return int(s.ctx.Size()) } -func (m state) Has(id identity.Stream) bool { - _, nok := m.nums[id] - _, hok := m.hist[id] - _, eok := m.expo[id] - return nok || hok || eok +type mutex[T any] struct { + mtx sync.Mutex + v T } -func (m state) Delete(id identity.Stream) { - delete(m.nums, id) - delete(m.hist, id) - delete(m.expo, id) +func (mtx *mutex[T]) use(do func(T)) { + mtx.mtx.Lock() + do(mtx.v) + mtx.mtx.Unlock() } -func (m state) BeginWith(id identity.Stream, dp any) { - switch dp := dp.(type) { - case pmetric.NumberDataPoint: - m.nums[id] = pmetric.NewNumberDataPoint() - dp.CopyTo(m.nums[id]) - case pmetric.HistogramDataPoint: - m.hist[id] = pmetric.NewHistogramDataPoint() - dp.CopyTo(m.hist[id]) - case pmetric.ExponentialHistogramDataPoint: - m.expo[id] = pmetric.NewExponentialHistogramDataPoint() - dp.CopyTo(m.expo[id]) - } +func guard[T any](v T) *mutex[T] { + return &mutex[T]{v: v} } diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go index 7276742ec9..9beaae26c2 100644 --- a/vendor/github.com/prometheus/common/config/headers.go +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -24,9 +24,9 @@ import ( "strings" ) -// reservedHeaders that change the connection, are set by Prometheus, or can +// ReservedHeaders that change the connection, are set by Prometheus, or can // be changed otherwise. -var reservedHeaders = map[string]struct{}{ +var ReservedHeaders = map[string]struct{}{ "Authorization": {}, "Host": {}, "Content-Encoding": {}, @@ -72,7 +72,7 @@ func (h *Headers) SetDirectory(dir string) { // Validate validates the Headers config. func (h *Headers) Validate() error { for n := range h.Headers { - if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { + if _, ok := ReservedHeaders[http.CanonicalHeaderKey(n)]; ok { return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) } } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 63809083ac..5d3f1941bb 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -225,7 +225,7 @@ func (u *URL) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaler interface for URL. 
func (u URL) MarshalJSON() ([]byte, error) { if u.URL != nil { - return json.Marshal(u.URL.String()) + return json.Marshal(u.String()) } return []byte("null"), nil } @@ -251,7 +251,7 @@ func (o *OAuth2) UnmarshalYAML(unmarshal func(interface{}) error) error { if err := unmarshal((*plain)(o)); err != nil { return err } - return o.ProxyConfig.Validate() + return o.Validate() } // UnmarshalJSON implements the json.Marshaler interface for URL. @@ -260,7 +260,7 @@ func (o *OAuth2) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, (*plain)(o)); err != nil { return err } - return o.ProxyConfig.Validate() + return o.Validate() } // SetDirectory joins any relative file paths with dir. @@ -604,8 +604,8 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon // The only timeout we care about is the configured scrape timeout. // It is applied on request. So we leave out any timings here. var rt http.RoundTripper = &http.Transport{ - Proxy: cfg.ProxyConfig.Proxy(), - ProxyConnectHeader: cfg.ProxyConfig.GetProxyConnectHeader(), + Proxy: cfg.Proxy(), + ProxyConnectHeader: cfg.GetProxyConnectHeader(), MaxIdleConns: 20000, MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 DisableKeepAlives: !opts.keepAlivesEnabled, @@ -914,8 +914,8 @@ func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret str tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) { return &http.Transport{ TLSClientConfig: tlsConfig, - Proxy: rt.config.ProxyConfig.Proxy(), - ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(), + Proxy: rt.config.Proxy(), + ProxyConnectHeader: rt.config.GetProxyConnectHeader(), DisableKeepAlives: !rt.opts.keepAlivesEnabled, MaxIdleConns: 20, MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801 @@ -1508,7 +1508,7 @@ func (c *ProxyConfig) Proxy() (fn func(*http.Request) (*url.URL, error)) { } return } - if c.ProxyURL.URL != nil && c.ProxyURL.URL.String() != "" { + if c.ProxyURL.URL != nil && c.ProxyURL.String() != "" { if c.NoProxy == "" { c.proxyFunc = http.ProxyURL(c.ProxyURL.URL) return diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d2..4067978a17 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn { } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. 
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..460f554f29 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..de83afe93e 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool { return false } for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck return false } } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 5766107cf9..a6b01755bd 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -27,13 +27,25 @@ import ( ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. 
Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,7 +62,7 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. LegacyValidation ValidationScheme = iota diff --git a/vendor/github.com/prometheus/common/promslog/slog.go b/vendor/github.com/prometheus/common/promslog/slog.go index 6e8fbabce5..3bd817328a 100644 --- a/vendor/github.com/prometheus/common/promslog/slog.go +++ b/vendor/github.com/prometheus/common/promslog/slog.go @@ -25,73 +25,43 @@ import ( "path/filepath" "strconv" "strings" + "time" ) +// LogStyle represents the common logging formats in the Prometheus ecosystem. type LogStyle string const ( SlogStyle LogStyle = "slog" GoKitStyle LogStyle = "go-kit" + + reservedKeyPrefix = "logged_" ) var ( - LevelFlagOptions = []string{"debug", "info", "warn", "error"} + // LevelFlagOptions represents allowed logging levels. + LevelFlagOptions = []string{"debug", "info", "warn", "error"} + // FormatFlagOptions represents allowed formats. FormatFlagOptions = []string{"logfmt", "json"} - callerAddFunc = false - defaultWriter = os.Stderr - goKitStyleReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr { - key := a.Key - switch key { - case slog.TimeKey: - a.Key = "ts" - - // This timestamp format differs from RFC3339Nano by using .000 instead - // of .999999999 which changes the timestamp from 9 variable to 3 fixed - // decimals (.130 instead of .130987456). - t := a.Value.Time() - a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00")) - case slog.SourceKey: - a.Key = "caller" - src, _ := a.Value.Any().(*slog.Source) - - switch callerAddFunc { - case true: - a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line)) - default: - a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) - } - case slog.LevelKey: - a.Value = slog.StringValue(strings.ToLower(a.Value.String())) - default: - } - - return a - } - defaultReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr { - key := a.Key - switch key { - case slog.TimeKey: - t := a.Value.Time() - a.Value = slog.TimeValue(t.UTC()) - case slog.SourceKey: - src, _ := a.Value.Any().(*slog.Source) - a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) - default: - } - - return a - } + defaultWriter = os.Stderr ) -// AllowedLevel is a settable identifier for the minimum level a log entry -// must be have. -type AllowedLevel struct { - s string +// Level controls a logging level, with an info default. +// It wraps slog.LevelVar with string-based level control. +// Level is safe to be used concurrently. +type Level struct { lvl *slog.LevelVar } -func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error { +// NewLevel returns a new Level. 
+func NewLevel() *Level { + return &Level{ + lvl: &slog.LevelVar{}, + } +} + +func (l *Level) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string type plain string if err := unmarshal((*plain)(&s)); err != nil { @@ -100,55 +70,60 @@ func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error { if s == "" { return nil } - lo := &AllowedLevel{} - if err := lo.Set(s); err != nil { + if err := l.Set(s); err != nil { return err } - *l = *lo return nil } -func (l *AllowedLevel) String() string { - return l.s -} - -// Set updates the value of the allowed level. -func (l *AllowedLevel) Set(s string) error { - if l.lvl == nil { - l.lvl = &slog.LevelVar{} +// String returns the current level. +func (l *Level) String() string { + switch l.lvl.Level() { + case slog.LevelDebug: + return "debug" + case slog.LevelInfo: + return "info" + case slog.LevelWarn: + return "warn" + case slog.LevelError: + return "error" + default: + return "" } +} +// Set updates the logging level with the validation. +func (l *Level) Set(s string) error { switch strings.ToLower(s) { case "debug": l.lvl.Set(slog.LevelDebug) - callerAddFunc = true case "info": l.lvl.Set(slog.LevelInfo) - callerAddFunc = false case "warn": l.lvl.Set(slog.LevelWarn) - callerAddFunc = false case "error": l.lvl.Set(slog.LevelError) - callerAddFunc = false default: return fmt.Errorf("unrecognized log level %s", s) } - l.s = s return nil } -// AllowedFormat is a settable identifier for the output format that the logger can have. -type AllowedFormat struct { +// Format controls a logging output format. +// Not concurrency-safe. +type Format struct { s string } -func (f *AllowedFormat) String() string { +// NewFormat creates a new Format. +func NewFormat() *Format { return &Format{} } + +func (f *Format) String() string { return f.s } // Set updates the value of the allowed format. -func (f *AllowedFormat) Set(s string) error { +func (f *Format) Set(s string) error { switch s { case "logfmt", "json": f.s = s @@ -160,18 +135,112 @@ func (f *AllowedFormat) Set(s string) error { // Config is a struct containing configurable settings for the logger type Config struct { - Level *AllowedLevel - Format *AllowedFormat + Level *Level + Format *Format Style LogStyle Writer io.Writer } +func newGoKitStyleReplaceAttrFunc(lvl *Level) func(groups []string, a slog.Attr) slog.Attr { + return func(groups []string, a slog.Attr) slog.Attr { + key := a.Key + switch key { + case slog.TimeKey, "ts": + if t, ok := a.Value.Any().(time.Time); ok { + a.Key = "ts" + + // This timestamp format differs from RFC3339Nano by using .000 instead + // of .999999999 which changes the timestamp from 9 variable to 3 fixed + // decimals (.130 instead of .130987456). + a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00")) + } else { + // If we can't cast the any from the value to a + // time.Time, it means the caller logged + // another attribute with a key of `ts`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to `logged_ts`. 
+ a.Key = reservedKeyPrefix + key + } + case slog.SourceKey, "caller": + if src, ok := a.Value.Any().(*slog.Source); ok { + a.Key = "caller" + switch lvl.String() { + case "debug": + a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line)) + default: + a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) + } + } else { + // If we can't cast the any from the value to + // an *slog.Source, it means the caller logged + // another attribute with a key of `caller`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to + // `logged_caller`. + a.Key = reservedKeyPrefix + key + } + case slog.LevelKey: + if lvl, ok := a.Value.Any().(slog.Level); ok { + a.Value = slog.StringValue(strings.ToLower(lvl.String())) + } else { + // If we can't cast the any from the value to + // an slog.Level, it means the caller logged + // another attribute with a key of `level`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to `logged_level`. + a.Key = reservedKeyPrefix + key + } + default: + } + return a + } +} + +func defaultReplaceAttr(_ []string, a slog.Attr) slog.Attr { + key := a.Key + switch key { + case slog.TimeKey: + // Note that we do not change the timezone to UTC anymore. + if _, ok := a.Value.Any().(time.Time); !ok { + // If we can't cast the any from the value to a + // time.Time, it means the caller logged + // another attribute with a key of `time`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to `logged_time`. + a.Key = reservedKeyPrefix + key + } + case slog.SourceKey: + if src, ok := a.Value.Any().(*slog.Source); ok { + a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) + } else { + // If we can't cast the any from the value to + // an *slog.Source, it means the caller logged + // another attribute with a key of `source`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to + // `logged_source`. + a.Key = reservedKeyPrefix + key + } + case slog.LevelKey: + if _, ok := a.Value.Any().(slog.Level); !ok { + // If we can't cast the any from the value to + // an slog.Level, it means the caller logged + // another attribute with a key of `level`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to + // `logged_level`. + a.Key = reservedKeyPrefix + key + } + default: + } + return a +} + // New returns a new slog.Logger. Each logged line will be annotated // with a timestamp. The output always goes to stderr. func New(config *Config) *slog.Logger { if config.Level == nil { - config.Level = &AllowedLevel{} - _ = config.Level.Set("info") + config.Level = NewLevel() } if config.Writer == nil { @@ -181,11 +250,11 @@ func New(config *Config) *slog.Logger { logHandlerOpts := &slog.HandlerOptions{ Level: config.Level.lvl, AddSource: true, - ReplaceAttr: defaultReplaceAttrFunc, + ReplaceAttr: defaultReplaceAttr, } if config.Style == GoKitStyle { - logHandlerOpts.ReplaceAttr = goKitStyleReplaceAttrFunc + logHandlerOpts.ReplaceAttr = newGoKitStyleReplaceAttrFunc(config.Level) } if config.Format != nil && config.Format.s == "json" { @@ -197,5 +266,5 @@ func New(config *Config) *slog.Logger { // NewNopLogger is a convenience function to return an slog.Logger that writes // to io.Discard. 
func NewNopLogger() *slog.Logger { - return slog.New(slog.NewTextHandler(io.Discard, nil)) + return New(&Config{Writer: io.Discard}) } diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go index d417c15e0d..86ee6c3b73 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go @@ -22,11 +22,13 @@ import ( "bytes" _ "embed" "net/http" + "strings" "text/template" ) // Config represents the configuration of the web listener. type LandingConfig struct { + RoutePrefix string // The route prefix for the exporter. HeaderColor string // Used for the landing page header. CSS string // CSS style tag for the landing page. Name string // The name of the exporter, generally suffixed by _exporter. @@ -62,6 +64,7 @@ type LandingLinks struct { type LandingPageHandler struct { landingPage []byte + routePrefix string } var ( @@ -93,6 +96,15 @@ func NewLandingPage(c LandingConfig) (*LandingPageHandler, error) { } c.CSS = buf.String() } + if c.RoutePrefix == "" { + c.RoutePrefix = "/" + } else if !strings.HasSuffix(c.RoutePrefix, "/") { + c.RoutePrefix += "/" + } + // Strip leading '/' from Links if present + for i, link := range c.Links { + c.Links[i].Address = strings.TrimPrefix(link.Address, "/") + } t := template.Must(template.New("landing page").Parse(landingPagehtmlContent)) buf.Reset() @@ -102,11 +114,12 @@ func NewLandingPage(c LandingConfig) (*LandingPageHandler, error) { return &LandingPageHandler{ landingPage: buf.Bytes(), + routePrefix: c.RoutePrefix, }, nil } func (h *LandingPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { + if r.URL.Path != h.routePrefix { http.NotFound(w, r) return } diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html index e1ac0aecdd..829f4a9c73 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html +++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html @@ -15,13 +15,13 @@
    {{ range .Links }} -
  • {{.Text}}{{if .Description}}: {{.Description}}{{end}}
  • +
  • {{.Text}}{{if .Description}}: {{.Description}}{{end}}
  • {{ end }}
{{ if .Form.Action }}
-
+ {{ range .Form.Inputs }}  
{{ end }} @@ -33,10 +33,10 @@
Download a detailed report of resource usage (pprof format, from the Go runtime): - To visualize and share profiles you can upload to pprof.me + To visualize and share profiles you can upload to pprof.me
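Reviewer note on the exporter-toolkit landing-page change above: LandingConfig now carries a RoutePrefix that NewLandingPage normalizes to end in "/", the handler only serves that exact path, and leading "/" is trimmed from link addresses so they stay relative to the prefix. Below is a minimal, hypothetical sketch of how an exporter might use the new field; the web package path, LandingConfig, LandingLinks and NewLandingPage come from the vendored code above, while the exporter name, prefix, port and server wiring are illustrative assumptions only and are not part of this patch.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/exporter-toolkit/web"
)

func main() {
	// RoutePrefix "/exporter" is normalized to "/exporter/" by NewLandingPage,
	// so the handler must be mounted on that exact path to avoid a 404.
	landing, err := web.NewLandingPage(web.LandingConfig{
		Name:        "Example Exporter", // illustrative value
		RoutePrefix: "/exporter",
		Links: []web.LandingLinks{
			// The leading "/" is stripped, keeping the href relative so it
			// resolves under the prefix (i.e. /exporter/metrics).
			{Address: "/metrics", Text: "Metrics"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/exporter/", landing)
	log.Fatal(http.ListenAndServe(":9100", nil))
}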
diff --git a/vendor/github.com/prometheus/otlptranslator/.golangci.yml b/vendor/github.com/prometheus/otlptranslator/.golangci.yml index ed5f43f1a6..372a9a97a1 100644 --- a/vendor/github.com/prometheus/otlptranslator/.golangci.yml +++ b/vendor/github.com/prometheus/otlptranslator/.golangci.yml @@ -1,26 +1,19 @@ -formatters: - enable: - - gci - - gofumpt - settings: - gci: - sections: - - standard - - default - - prefix(github.com/prometheus/otlptranslator) - gofumpt: - extra-rules: true -issues: - max-issues-per-linter: 0 - max-same-issues: 0 +run: + timeout: 15m + +output: + sort-results: true + linters: # Keep this list sorted alphabetically enable: - depguard - errorlint - exptostd + - gci - gocritic - godot + - gofumpt - loggercheck - misspell - nilnesserr @@ -35,72 +28,99 @@ linters: - unused - usestdlibvars - whitespace - settings: - depguard: - rules: - main: - deny: - - pkg: sync/atomic - desc: Use go.uber.org/atomic instead of sync/atomic - - pkg: github.com/stretchr/testify/assert - desc: Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert - - pkg: io/ioutil - desc: Use corresponding 'os' or 'io' functions instead. - - pkg: regexp - desc: Use github.com/grafana/regexp instead of regexp - - pkg: github.com/pkg/errors - desc: Use 'errors' or 'fmt' instead of github.com/pkg/errors - - pkg: golang.org/x/exp/slices - desc: Use 'slices' instead. - perfsprint: - # Optimizes `fmt.Errorf`. - errorf: true - revive: - # By default, revive will enable only the linting rules that are named in the configuration file. - # So, it's needed to explicitly enable all required rules here. - rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - - name: blank-imports - - name: comment-spacings - - name: context-as-argument - arguments: - # Allow functions with test or bench signatures. - - allowTypesBefore: '*testing.T,testing.TB' - - name: context-keys-type - - name: dot-imports - - name: early-return - arguments: - - preserveScope - # A lot of false positives: incorrectly identifies channel draining as "empty code block". - # See https://github.com/mgechev/revive/issues/386 - - name: empty-block - disabled: true - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - - name: increment-decrement - - name: indent-error-flow - arguments: - - preserveScope - - name: range - - name: receiver-naming - - name: redefines-builtin-id - - name: superfluous-else - arguments: - - preserveScope - - name: time-naming - - name: unexported-return - - name: unreachable-code - - name: unused-parameter - - name: var-declaration - - name: var-naming - testifylint: - disable: - - float-compare - - go-require - enable-all: true -run: - timeout: 15m -version: "2" + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + # The default exclusions are too aggressive. For one, they + # essentially disable any linting on doc comments. We disable + # default exclusions here and add exclusions fitting our codebase + # further down. + exclude-use-default: false + exclude-rules: + - linters: + - errcheck + # Taken from the default exclusions (that are otherwise disabled above). + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). 
is not checked + - path: _test.go + linters: + - errcheck + - linters: + - godot + source: "^// ===" + - linters: + - perfsprint + text: "fmt.Sprintf can be replaced with string concatenation" +linters-settings: + depguard: + rules: + main: + deny: + - pkg: "sync/atomic" + desc: "Use go.uber.org/atomic instead of sync/atomic" + - pkg: "github.com/stretchr/testify/assert" + desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" + - pkg: "io/ioutil" + desc: "Use corresponding 'os' or 'io' functions instead." + - pkg: "regexp" + desc: "Use github.com/grafana/regexp instead of regexp" + - pkg: "github.com/pkg/errors" + desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors" + - pkg: "golang.org/x/exp/slices" + desc: "Use 'slices' instead." + gci: + sections: + - standard + - default + - prefix(github.com/prometheus/otlptranslator) + gofumpt: + extra-rules: true + perfsprint: + # Optimizes `fmt.Errorf`. + errorf: true + revive: + # By default, revive will enable only the linting rules that are named in the configuration file. + # So, it's needed to explicitly enable all required rules here. + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + - name: blank-imports + - name: comment-spacings + - name: context-as-argument + arguments: + # Allow functions with test or bench signatures. + - allowTypesBefore: "*testing.T,testing.TB" + - name: context-keys-type + - name: dot-imports + - name: early-return + arguments: + - "preserveScope" + # A lot of false positives: incorrectly identifies channel draining as "empty code block". + # See https://github.com/mgechev/revive/issues/386 + - name: empty-block + disabled: true + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: increment-decrement + - name: indent-error-flow + arguments: + - "preserveScope" + - name: range + - name: receiver-naming + - name: redefines-builtin-id + - name: superfluous-else + arguments: + - "preserveScope" + - name: time-naming + - name: unexported-return + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + testifylint: + disable: + - float-compare + - go-require + enable-all: true diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go b/vendor/github.com/prometheus/otlptranslator/metric_name_builder.go similarity index 89% rename from vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go rename to vendor/github.com/prometheus/otlptranslator/metric_name_builder.go index 8b5ea2a046..804b8b9d86 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go +++ b/vendor/github.com/prometheus/otlptranslator/metric_name_builder.go @@ -1,4 +1,4 @@ -// Copyright 2024 The Prometheus Authors +// Copyright 2025 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -10,18 +10,21 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/metric_name_builder.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The Prometheus Authors // Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go // Provenance-includes-license: Apache-2.0 // Provenance-includes-copyright: Copyright The OpenTelemetry Authors. -package prometheus +package otlptranslator import ( - "regexp" "slices" "strings" "unicode" + "github.com/grafana/regexp" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -66,8 +69,8 @@ var unitMap = map[string]string{ "%": "percent", } -// The map that translates the "per" unit -// Example: s => per second (singular) +// The map that translates the "per" unit. +// Example: s => per second (singular). var perUnitMap = map[string]string{ "s": "second", "m": "minute", @@ -112,12 +115,27 @@ func BuildCompliantMetricName(metric pmetric.Metric, namespace string, addMetric } var ( - nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) // Regexp for metric name characters that should be replaced with _. invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`) multipleUnderscoresRE = regexp.MustCompile(`__+`) ) +// isValidCompliantMetricChar checks if a rune is a valid metric name character (a-z, A-Z, 0-9, :). +func isValidCompliantMetricChar(r rune) bool { + return (r >= 'a' && r <= 'z') || + (r >= 'A' && r <= 'Z') || + (r >= '0' && r <= '9') || + r == ':' +} + +// replaceInvalidMetricChar replaces invalid metric name characters with underscore. +func replaceInvalidMetricChar(r rune) rune { + if isValidCompliantMetricChar(r) { + return r + } + return '_' +} + // Build a normalized name for the specified metric. func normalizeName(metric pmetric.Metric, namespace string) string { // Split metric name into "tokens" (of supported metric name runes). @@ -125,7 +143,7 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. nameTokens := strings.FieldsFunc( metric.Name(), - func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, + func(r rune) bool { return !isValidCompliantMetricChar(r) }, ) mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) @@ -199,13 +217,13 @@ func cleanUpUnit(unit string) string { // Multiple consecutive underscores are replaced with a single underscore. // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( - nonMetricNameCharRE.ReplaceAllString(unit, "_"), + strings.Map(replaceInvalidMetricChar, unit), "_", ), "_") } -// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit -// Returns the specified unit if not found in unitMap +// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit. +// Returns the specified unit if not found in unitMap. 
func unitMapGetOrDefault(unit string) string { if promUnit, ok := unitMap[unit]; ok { return promUnit @@ -213,8 +231,8 @@ func unitMapGetOrDefault(unit string) string { return unit } -// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit -// Returns the specified unit if not found in perUnitMap +// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit. +// Returns the specified unit if not found in perUnitMap. func perUnitMapGetOrDefault(perUnit string) string { if promPerUnit, ok := perUnitMap[perUnit]; ok { return promPerUnit @@ -222,7 +240,7 @@ func perUnitMapGetOrDefault(perUnit string) string { return perUnit } -// Remove the specified value from the slice +// Remove the specified value from the slice. func removeItem(slice []string, value string) []string { newSlice := make([]string, 0, len(slice)) for _, sliceEntry := range slice { @@ -259,7 +277,7 @@ func BuildMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes // Append _total for Counters if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { - metricName = metricName + "_total" + metricName += "_total" } // Append _ratio for metrics with unit "1" @@ -268,7 +286,7 @@ func BuildMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { - metricName = metricName + "_ratio" + metricName += "_ratio" } } return metricName diff --git a/vendor/github.com/prometheus/otlptranslator/metric_namer.go b/vendor/github.com/prometheus/otlptranslator/metric_namer.go deleted file mode 100644 index 808dd77832..0000000000 --- a/vendor/github.com/prometheus/otlptranslator/metric_namer.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/metric_name_builder.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The Prometheus Authors -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
- -package otlptranslator - -import ( - "slices" - "strings" - "unicode" - - "github.com/grafana/regexp" -) - -// The map to translate OTLP units to Prometheus units -// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html -// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) -// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units -// OpenMetrics specification for units: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#units-and-base-units -var unitMap = map[string]string{ - // Time - "d": "days", - "h": "hours", - "min": "minutes", - "s": "seconds", - "ms": "milliseconds", - "us": "microseconds", - "ns": "nanoseconds", - - // Bytes - "By": "bytes", - "KiBy": "kibibytes", - "MiBy": "mebibytes", - "GiBy": "gibibytes", - "TiBy": "tibibytes", - "KBy": "kilobytes", - "MBy": "megabytes", - "GBy": "gigabytes", - "TBy": "terabytes", - - // SI - "m": "meters", - "V": "volts", - "A": "amperes", - "J": "joules", - "W": "watts", - "g": "grams", - - // Misc - "Cel": "celsius", - "Hz": "hertz", - "1": "", - "%": "percent", -} - -// The map that translates the "per" unit. -// Example: s => per second (singular). -var perUnitMap = map[string]string{ - "s": "second", - "m": "minute", - "h": "hour", - "d": "day", - "w": "week", - "mo": "month", - "y": "year", -} - -// MetricNamer is a helper struct to build metric names. -type MetricNamer struct { - Namespace string - WithMetricSuffixes bool - UTF8Allowed bool -} - -// Metric is a helper struct that holds information about a metric. -type Metric struct { - Name string - Unit string - Type MetricType -} - -// Build builds a metric name for the specified metric. -// -// If UTF8Allowed is true, the metric name is returned as is, only with the addition of type/unit suffixes and namespace preffix if required. -// Otherwise the metric name is normalized to be Prometheus-compliant. -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, -// https://prometheus.io/docs/practices/naming/#metric-and-label-naming -func (mn *MetricNamer) Build(metric Metric) string { - if mn.UTF8Allowed { - return mn.buildMetricName(metric.Name, metric.Unit, metric.Type) - } - return mn.buildCompliantMetricName(metric.Name, metric.Unit, metric.Type) -} - -func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) string { - // Full normalization following standard Prometheus naming conventions - if mn.WithMetricSuffixes { - return normalizeName(name, unit, metricType, mn.Namespace) - } - - // Simple case (no full normalization, no units, etc.). - metricName := strings.Join(strings.FieldsFunc(name, func(r rune) bool { - return invalidMetricCharRE.MatchString(string(r)) - }), "_") - - // Namespace? - if mn.Namespace != "" { - return mn.Namespace + "_" + metricName - } - - // Metric name starts with a digit? Prefix it with an underscore. - if metricName != "" && unicode.IsDigit(rune(metricName[0])) { - metricName = "_" + metricName - } - - return metricName -} - -var ( - // Regexp for metric name characters that should be replaced with _. - invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`) - multipleUnderscoresRE = regexp.MustCompile(`__+`) -) - -// isValidCompliantMetricChar checks if a rune is a valid metric name character (a-z, A-Z, 0-9, :). 
-func isValidCompliantMetricChar(r rune) bool { - return (r >= 'a' && r <= 'z') || - (r >= 'A' && r <= 'Z') || - (r >= '0' && r <= '9') || - r == ':' -} - -// replaceInvalidMetricChar replaces invalid metric name characters with underscore. -func replaceInvalidMetricChar(r rune) rune { - if isValidCompliantMetricChar(r) { - return r - } - return '_' -} - -// Build a normalized name for the specified metric. -func normalizeName(name, unit string, metricType MetricType, namespace string) string { - // Split metric name into "tokens" (of supported metric name runes). - // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. - // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - nameTokens := strings.FieldsFunc( - name, - func(r rune) bool { return !isValidCompliantMetricChar(r) }, - ) - - mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit) - nameTokens = addUnitTokens(nameTokens, cleanUpUnit(mainUnitSuffix), cleanUpUnit(perUnitSuffix)) - - // Append _total for Counters - if metricType == MetricTypeMonotonicCounter { - nameTokens = append(removeItem(nameTokens, "total"), "total") - } - - // Append _ratio for metrics with unit "1" - // Some OTel receivers improperly use unit "1" for counters of objects - // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions - // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY - // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) - if unit == "1" && metricType == MetricTypeGauge { - nameTokens = append(removeItem(nameTokens, "ratio"), "ratio") - } - - // Namespace? - if namespace != "" { - nameTokens = append([]string{namespace}, nameTokens...) - } - - // Build the string from the tokens, separated with underscores - normalizedName := strings.Join(nameTokens, "_") - - // Metric name cannot start with a digit, so prefix it with "_" in this case - if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { - normalizedName = "_" + normalizedName - } - - return normalizedName -} - -// addUnitTokens will add the suffixes to the nameTokens if they are not already present. -// It will also remove trailing underscores from the main suffix to avoid double underscores -// when joining the tokens. -// -// If the 'per' unit ends with underscore, the underscore will be removed. If the per unit is just -// 'per_', it will be entirely removed. -func addUnitTokens(nameTokens []string, mainUnitSuffix, perUnitSuffix string) []string { - if slices.Contains(nameTokens, mainUnitSuffix) { - mainUnitSuffix = "" - } - - if perUnitSuffix == "per_" { - perUnitSuffix = "" - } else { - perUnitSuffix = strings.TrimSuffix(perUnitSuffix, "_") - if slices.Contains(nameTokens, perUnitSuffix) { - perUnitSuffix = "" - } - } - - if perUnitSuffix != "" { - mainUnitSuffix = strings.TrimSuffix(mainUnitSuffix, "_") - } - - if mainUnitSuffix != "" { - nameTokens = append(nameTokens, mainUnitSuffix) - } - if perUnitSuffix != "" { - nameTokens = append(nameTokens, perUnitSuffix) - } - return nameTokens -} - -// cleanUpUnit cleans up unit so it matches model.LabelNameRE. -func cleanUpUnit(unit string) string { - // Multiple consecutive underscores are replaced with a single underscore. 
- // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( - strings.Map(replaceInvalidMetricChar, unit), - "_", - ), "_") -} - -// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit. -// Returns the specified unit if not found in unitMap. -func unitMapGetOrDefault(unit string) string { - if promUnit, ok := unitMap[unit]; ok { - return promUnit - } - return unit -} - -// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit. -// Returns the specified unit if not found in perUnitMap. -func perUnitMapGetOrDefault(perUnit string) string { - if promPerUnit, ok := perUnitMap[perUnit]; ok { - return promPerUnit - } - return perUnit -} - -// Remove the specified value from the slice. -func removeItem(slice []string, value string) []string { - newSlice := make([]string, 0, len(slice)) - for _, sliceEntry := range slice { - if sliceEntry != value { - newSlice = append(newSlice, sliceEntry) - } - } - return newSlice -} - -func (mn *MetricNamer) buildMetricName(name, unit string, metricType MetricType) string { - if mn.Namespace != "" { - name = mn.Namespace + "_" + name - } - - if mn.WithMetricSuffixes { - mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit) - if mainUnitSuffix != "" { - name = name + "_" + mainUnitSuffix - } - if perUnitSuffix != "" { - name = name + "_" + perUnitSuffix - } - - // Append _total for Counters - if metricType == MetricTypeMonotonicCounter { - name += "_total" - } - - // Append _ratio for metrics with unit "1" - // Some OTel receivers improperly use unit "1" for counters of objects - // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions - // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY - // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) - if unit == "1" && metricType == MetricTypeGauge { - name += "_ratio" - } - } - return name -} - -// buildUnitSuffixes builds the main and per unit suffixes for the specified unit -// but doesn't do any special character transformation to accommodate Prometheus naming conventions. -// Removing trailing underscores or appending suffixes is done in the caller. 
-func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) { - // Split unit at the '/' if any - unitTokens := strings.SplitN(unit, "/", 2) - - if len(unitTokens) > 0 { - // Main unit - // Update if not blank and doesn't contain '{}' - mainUnitOTel := strings.TrimSpace(unitTokens[0]) - if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitSuffix = unitMapGetOrDefault(mainUnitOTel) - } - - // Per unit - // Update if not blank and doesn't contain '{}' - if len(unitTokens) > 1 && unitTokens[1] != "" { - perUnitOTel := strings.TrimSpace(unitTokens[1]) - if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitSuffix = perUnitMapGetOrDefault(perUnitOTel) - } - if perUnitSuffix != "" { - perUnitSuffix = "per_" + perUnitSuffix - } - } - } - - return mainUnitSuffix, perUnitSuffix -} diff --git a/vendor/github.com/prometheus/otlptranslator/metric_type.go b/vendor/github.com/prometheus/otlptranslator/metric_type.go deleted file mode 100644 index 30464cfea8..0000000000 --- a/vendor/github.com/prometheus/otlptranslator/metric_type.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and - -package otlptranslator - -// MetricType is a representation of metric types from OpenTelemetry. -// Different types of Sums were introduced based on their metric temporalities. -// For more details, see: -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums -type MetricType int - -const ( - // MetricTypeUnknown represents an unknown metric type. - MetricTypeUnknown = iota - // MetricTypeNonMonotonicCounter represents a counter that is not monotonically increasing, also known as delta counter. - MetricTypeNonMonotonicCounter - // MetricTypeMonotonicCounter represents a counter that is monotonically increasing, also known as cumulative counter. - MetricTypeMonotonicCounter - // MetricTypeGauge represents a gauge metric. - MetricTypeGauge - // MetricTypeHistogram represents a histogram metric. - MetricTypeHistogram - // MetricTypeExponentialHistogram represents an exponential histogram metric. - MetricTypeExponentialHistogram - // MetricTypeSummary represents a summary metric. - MetricTypeSummary -) diff --git a/vendor/github.com/prometheus/otlptranslator/normalize_label.go b/vendor/github.com/prometheus/otlptranslator/normalize_label.go index aa771f7840..252221afee 100644 --- a/vendor/github.com/prometheus/otlptranslator/normalize_label.go +++ b/vendor/github.com/prometheus/otlptranslator/normalize_label.go @@ -24,23 +24,15 @@ import ( "unicode" ) -// LabelNamer is a helper struct to build label names. -type LabelNamer struct { - UTF8Allowed bool -} - -// Build normalizes the specified label to follow Prometheus label names standard. +// NormalizeLabel normalizes the specified label to follow Prometheus label names standard. // // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. 
// // Labels that start with non-letter rune will be prefixed with "key_". // An exception is made for double-underscores which are allowed. -// -// If UTF8Allowed is true, the label is returned as is. This option is provided just to -// keep a consistent interface with the MetricNamer. -func (ln *LabelNamer) Build(label string) string { +func NormalizeLabel(label string) string { // Trivial case. - if len(label) == 0 || ln.UTF8Allowed { + if len(label) == 0 { return label } diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 465affe082..f140044baa 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -110,9 +110,9 @@ func Load(s string, logger *slog.Logger) (*Config, error) { switch cfg.OTLPConfig.TranslationStrategy { case UnderscoreEscapingWithSuffixes: case "": - case NoUTF8EscapingWithSuffixes: + case NoTranslation, NoUTF8EscapingWithSuffixes: if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig { - return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled") + return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy) } default: return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy) @@ -156,6 +156,7 @@ var ( // DefaultConfig is the default top-level configuration. DefaultConfig = Config{ GlobalConfig: DefaultGlobalConfig, + Runtime: DefaultRuntimeConfig, } // DefaultGlobalConfig is the default global configuration. @@ -166,7 +167,8 @@ var ( RuleQueryOffset: model.Duration(0 * time.Minute), // When native histogram feature flag is enabled, ScrapeProtocols default // changes to DefaultNativeHistogramScrapeProtocols. - ScrapeProtocols: DefaultScrapeProtocols, + ScrapeProtocols: DefaultScrapeProtocols, + ConvertClassicHistogramsToNHCB: false, } DefaultRuntimeConfig = RuntimeConfig{ @@ -478,8 +480,15 @@ type GlobalConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` - // Allow UTF8 Metric and Label Names. + // Allow UTF8 Metric and Label Names. Can be blank in config files but must + // have a value if a ScrepeConfig is created programmatically. MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` + // Metric name escaping mode to request through content negotiation. Can be + // blank in config files but must have a value if a ScrepeConfig is created + // programmatically. + MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"` + // Whether to convert all scraped classic histograms into native histograms with custom buckets. + ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` } // ScrapeProtocol represents supported protocol for scraping metrics. @@ -635,7 +644,8 @@ func (c *GlobalConfig) isZero() bool { c.RuleQueryOffset == 0 && c.QueryLogFile == "" && c.ScrapeFailureLogFile == "" && - c.ScrapeProtocols == nil + c.ScrapeProtocols == nil && + !c.ConvertClassicHistogramsToNHCB } // RuntimeConfig configures the values for the process behavior. @@ -682,7 +692,7 @@ type ScrapeConfig struct { // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. 
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` // Whether to convert all scraped classic histograms into a native histogram with custom buckets. - ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` + ConvertClassicHistogramsToNHCB *bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` // File to which scrape failures are logged. ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. @@ -718,8 +728,13 @@ type ScrapeConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` - // Allow UTF8 Metric and Label Names. + // Allow UTF8 Metric and Label Names. Can be blank in config files but must + // have a value if a ScrepeConfig is created programmatically. MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` + // Metric name escaping mode to request through content negotiation. Can be + // blank in config files but must have a value if a ScrepeConfig is created + // programmatically. + MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -836,17 +851,57 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { } } + //nolint:staticcheck + if model.NameValidationScheme != model.UTF8Validation { + return errors.New("model.NameValidationScheme must be set to UTF8") + } + switch globalConfig.MetricNameValidationScheme { - case LegacyValidationConfig: - case "", UTF8ValidationConfig: - if model.NameValidationScheme != model.UTF8Validation { - panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8") - } + case "": + globalConfig.MetricNameValidationScheme = UTF8ValidationConfig + case LegacyValidationConfig, UTF8ValidationConfig: default: - return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) + return fmt.Errorf("unknown global name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) } - if c.MetricNameValidationScheme == "" { + // Scrapeconfig validation scheme matches global if left blank. + switch c.MetricNameValidationScheme { + case "": c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme + case LegacyValidationConfig, UTF8ValidationConfig: + default: + return fmt.Errorf("unknown scrape config name validation method specified, must be either 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme) + } + + // Escaping scheme is based on the validation scheme if left blank. 
+ switch globalConfig.MetricNameEscapingScheme { + case "": + if globalConfig.MetricNameValidationScheme == LegacyValidationConfig { + globalConfig.MetricNameEscapingScheme = model.EscapeUnderscores + } else { + globalConfig.MetricNameEscapingScheme = model.AllowUTF8 + } + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + default: + return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameValidationScheme) + } + + if c.MetricNameEscapingScheme == "" { + c.MetricNameEscapingScheme = globalConfig.MetricNameEscapingScheme + } + + switch c.MetricNameEscapingScheme { + case model.AllowUTF8: + if c.MetricNameValidationScheme != UTF8ValidationConfig { + return errors.New("utf8 metric names requested but validation scheme is not set to UTF8") + } + case model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + default: + return fmt.Errorf("unknown scrape config name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, c.MetricNameValidationScheme) + } + + if c.ConvertClassicHistogramsToNHCB == nil { + global := globalConfig.ConvertClassicHistogramsToNHCB + c.ConvertClassicHistogramsToNHCB = &global } return nil @@ -857,6 +912,25 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { return discovery.MarshalYAMLWithInlineConfigs(c) } +// ToValidationScheme returns the validation scheme for the given string config value. +func ToValidationScheme(s string) (validationScheme model.ValidationScheme, err error) { + switch s { + case UTF8ValidationConfig: + validationScheme = model.UTF8Validation + case LegacyValidationConfig: + validationScheme = model.LegacyValidation + default: + return model.UTF8Validation, fmt.Errorf("invalid metric name validation scheme, %s", s) + } + + return validationScheme, nil +} + +// ConvertClassicHistogramsToNHCBEnabled returns whether to convert classic histograms to NHCB. +func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool { + return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB +} + // StorageConfig configures runtime reloadable configuration options. type StorageConfig struct { TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"` @@ -1435,6 +1509,21 @@ var ( // and label name characters that are not alphanumerics/underscores to underscores. // Unit and type suffixes may be appended to metric names, according to certain rules. UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" + // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric + // and label names. This offers a way for the OTLP users to use native metric names, reducing confusion. + // + // WARNING: This setting has significant known risks and limitations (see + // https://prometheus.io/docs/practices/naming/ for details): + // * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration). + // * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed + // time series. For instance, you may end up in situation of ingesting `foo.bar` series with unit + // `seconds` and a separate series `foo.bar` with unit `milliseconds`. 
+ // + // As a result, this setting is experimental and currently, should not be used in + // production systems. + // + // TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks. + NoTranslation translationStrategyOption = "NoTranslation" ) // OTLPConfig is the configuration for writing to the OTLP endpoint. @@ -1442,6 +1531,7 @@ type OTLPConfig struct { PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"` + ConvertHistogramsToNHCB bool `yaml:"convert_histograms_to_nhcb,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/prometheus/config/reload.go b/vendor/github.com/prometheus/prometheus/config/reload.go index 8be1b28d8a..cc0cc97158 100644 --- a/vendor/github.com/prometheus/prometheus/config/reload.go +++ b/vendor/github.com/prometheus/prometheus/config/reload.go @@ -20,6 +20,7 @@ import ( "os" "path/filepath" + promconfig "github.com/prometheus/common/config" "gopkg.in/yaml.v2" ) @@ -49,10 +50,10 @@ func GenerateChecksum(yamlFilePath string) (string, error) { dir := filepath.Dir(yamlFilePath) for i, file := range config.RuleFiles { - config.RuleFiles[i] = filepath.Join(dir, file) + config.RuleFiles[i] = promconfig.JoinDir(dir, file) } for i, file := range config.ScrapeConfigFiles { - config.ScrapeConfigFiles[i] = filepath.Join(dir, file) + config.ScrapeConfigFiles[i] = promconfig.JoinDir(dir, file) } files := map[string][]string{ diff --git a/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go b/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go index 0f35c401e6..7e35a1807f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go +++ b/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go @@ -101,7 +101,7 @@ type EC2SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*EC2SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*EC2SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ec2Metrics{ refreshMetrics: rmi, } @@ -262,7 +262,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error } input := &ec2.DescribeInstancesInput{Filters: filters} - if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool { + if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, _ bool) bool { for _, r := range p.Reservations { for _, inst := range r.Instances { if inst.PrivateIpAddress == nil { diff --git a/vendor/github.com/prometheus/prometheus/discovery/aws/lightsail.go b/vendor/github.com/prometheus/prometheus/discovery/aws/lightsail.go index b892867f1b..fb249b8256 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/aws/lightsail.go +++ b/vendor/github.com/prometheus/prometheus/discovery/aws/lightsail.go @@ -83,7 +83,7 @@ type LightsailSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*LightsailSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*LightsailSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &lightsailMetrics{ refreshMetrics: rmi, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go b/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go index 862d86859b..670afb5a4e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go +++ b/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go @@ -38,9 +38,8 @@ import ( "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/promslog" - "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" @@ -458,11 +457,10 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID) } if err != nil { - if errors.Is(err, errorNotFound) { - d.logger.Warn("Network interface does not exist", "name", nicID, "err", err) - } else { + if !errors.Is(err, errorNotFound) { return nil, err } + d.logger.Warn("Network interface does not exist", "name", nicID, "err", err) // Get out of this routine because we cannot continue without a network interface. return nil, nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/consul/metrics.go b/vendor/github.com/prometheus/prometheus/discovery/consul/metrics.go index 8266e7cc60..b49509bd8f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/consul/metrics.go +++ b/vendor/github.com/prometheus/prometheus/discovery/consul/metrics.go @@ -31,7 +31,7 @@ type consulMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &consulMetrics{ rpcFailuresCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go index eeaedd8869..d0ececd9e9 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go +++ b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go @@ -65,7 +65,7 @@ func init() { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &digitaloceanMetrics{ refreshMetrics: rmi, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/metrics.go b/vendor/github.com/prometheus/prometheus/discovery/file/metrics.go index c01501e4ef..3e3df7bbf6 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/metrics.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/metrics.go @@ -30,7 +30,7 @@ type fileMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { fm := &fileMetrics{ fileSDReadErrorsCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go b/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go index 9a5b0e856e..32f1bb6722 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go +++ b/vendor/github.com/prometheus/prometheus/discovery/gce/gce.go @@ -83,7 +83,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &gceMetrics{ refreshMetrics: rmi, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go index 45bc43eff9..1002025128 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go @@ -16,7 +16,6 @@ package kubernetes import ( "context" "errors" - "fmt" "log/slog" "net" "strconv" @@ -106,13 +105,14 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n // LabelServiceName so this operation doesn't have to iterate over all // endpoint objects. for _, obj := range e.endpointSliceStore.List() { - esa, err := e.getEndpointSliceAdaptor(obj) - if err != nil { + es, ok := obj.(*v1.EndpointSlice) + if !ok { e.logger.Error("converting to EndpointSlice object failed", "err", err) continue } - if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name { - e.enqueue(esa.get()) + // Only consider the underlying EndpointSlices in the same namespace. 
+ if svcName, exists := es.Labels[v1.LabelServiceName]; exists && svcName == svc.Name && es.Namespace == svc.Namespace { + e.enqueue(es) } } } @@ -229,27 +229,17 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr return true } - esa, err := e.getEndpointSliceAdaptor(o) - if err != nil { - e.logger.Error("converting to EndpointSlice object failed", "err", err) - return true + if es, ok := o.(*v1.EndpointSlice); ok { + send(ctx, ch, e.buildEndpointSlice(*es)) + } else { + e.logger.Error("received unexpected object", "object", o) + return false } - - send(ctx, ch, e.buildEndpointSlice(esa)) return true } -func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAdaptor, error) { - switch endpointSlice := o.(type) { - case *v1.EndpointSlice: - return newEndpointSliceAdaptorFromV1(endpointSlice), nil - default: - return nil, fmt.Errorf("received unexpected object: %v", o) - } -} - -func endpointSliceSource(ep endpointSliceAdaptor) string { - return endpointSliceSourceFromNamespaceAndName(ep.namespace(), ep.name()) +func endpointSliceSource(ep v1.EndpointSlice) string { + return endpointSliceSourceFromNamespaceAndName(ep.Namespace, ep.Name) } func endpointSliceSourceFromNamespaceAndName(namespace, name string) string { @@ -274,95 +264,95 @@ const ( endpointSliceEndpointTopologyLabelPresentPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_present_" ) -func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgroup.Group { +func (e *EndpointSlice) buildEndpointSlice(eps v1.EndpointSlice) *targetgroup.Group { tg := &targetgroup.Group{ Source: endpointSliceSource(eps), } tg.Labels = model.LabelSet{ - namespaceLabel: lv(eps.namespace()), - endpointSliceAddressTypeLabel: lv(eps.addressType()), + namespaceLabel: lv(eps.Namespace), + endpointSliceAddressTypeLabel: lv(string(eps.AddressType)), } - addObjectMetaLabels(tg.Labels, eps.getObjectMeta(), RoleEndpointSlice) + addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpointSlice) e.addServiceLabels(eps, tg) type podEntry struct { pod *apiv1.Pod - servicePorts []endpointSlicePortAdaptor + servicePorts []v1.EndpointPort } seenPods := map[string]*podEntry{} - add := func(addr string, ep endpointSliceEndpointAdaptor, port endpointSlicePortAdaptor) { + add := func(addr string, ep v1.Endpoint, port v1.EndpointPort) { a := addr - if port.port() != nil { - a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.port()), 10)) + if port.Port != nil { + a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.Port), 10)) } target := model.LabelSet{ model.AddressLabel: lv(a), } - if port.name() != nil { - target[endpointSlicePortNameLabel] = lv(*port.name()) + if port.Name != nil { + target[endpointSlicePortNameLabel] = lv(*port.Name) } - if port.protocol() != nil { - target[endpointSlicePortProtocolLabel] = lv(*port.protocol()) + if port.Protocol != nil { + target[endpointSlicePortProtocolLabel] = lv(string(*port.Protocol)) } - if port.port() != nil { - target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.port()), 10)) + if port.Port != nil { + target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.Port), 10)) } - if port.appProtocol() != nil { - target[endpointSlicePortAppProtocol] = lv(*port.appProtocol()) + if port.AppProtocol != nil { + target[endpointSlicePortAppProtocol] = lv(*port.AppProtocol) } - if ep.conditions().ready() != nil { - target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.conditions().ready())) + if 
ep.Conditions.Ready != nil { + target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.Conditions.Ready)) } - if ep.conditions().serving() != nil { - target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.conditions().serving())) + if ep.Conditions.Serving != nil { + target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.Conditions.Serving)) } - if ep.conditions().terminating() != nil { - target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.conditions().terminating())) + if ep.Conditions.Terminating != nil { + target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.Conditions.Terminating)) } - if ep.hostname() != nil { - target[endpointSliceEndpointHostnameLabel] = lv(*ep.hostname()) + if ep.Hostname != nil { + target[endpointSliceEndpointHostnameLabel] = lv(*ep.Hostname) } - if ep.targetRef() != nil { - target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.targetRef().Kind) - target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.targetRef().Name) + if ep.TargetRef != nil { + target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.TargetRef.Kind) + target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.TargetRef.Name) } - if ep.nodename() != nil { - target[endpointSliceEndpointNodenameLabel] = lv(*ep.nodename()) + if ep.NodeName != nil { + target[endpointSliceEndpointNodenameLabel] = lv(*ep.NodeName) } - if ep.zone() != nil { - target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.zone()) + if ep.Zone != nil { + target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.Zone) } - for k, v := range ep.topology() { + for k, v := range ep.DeprecatedTopology { ln := strutil.SanitizeLabelName(k) target[model.LabelName(endpointSliceEndpointTopologyLabelPrefix+ln)] = lv(v) target[model.LabelName(endpointSliceEndpointTopologyLabelPresentPrefix+ln)] = presentValue } if e.withNodeMetadata { - if ep.targetRef() != nil && ep.targetRef().Kind == "Node" { - target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name) + if ep.TargetRef != nil && ep.TargetRef.Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &ep.TargetRef.Name) } else { - target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + target = addNodeLabels(target, e.nodeInf, e.logger, ep.NodeName) } } - pod := e.resolvePodRef(ep.targetRef()) + pod := e.resolvePodRef(ep.TargetRef) if pod == nil { // This target is not a Pod, so don't continue with Pod specific logic. tg.Targets = append(tg.Targets, target) @@ -383,12 +373,12 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
for i, c := range containers { for _, cport := range c.Ports { - if port.port() == nil { + if port.Port == nil { continue } - if *port.port() == cport.ContainerPort { - ports := strconv.FormatUint(uint64(*port.port()), 10) + if *port.Port == cport.ContainerPort { + ports := strconv.FormatUint(uint64(*port.Port), 10) isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) @@ -408,9 +398,9 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou tg.Targets = append(tg.Targets, target) } - for _, ep := range eps.endpoints() { - for _, port := range eps.ports() { - for _, addr := range ep.addresses() { + for _, ep := range eps.Endpoints { + for _, port := range eps.Ports { + for _, addr := range ep.Addresses { add(addr, ep, port) } } @@ -429,10 +419,10 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { - if eport.port() == nil { + if eport.Port == nil { continue } - if cport.ContainerPort == *eport.port() { + if cport.ContainerPort == *eport.Port { return true } } @@ -479,16 +469,16 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { return obj.(*apiv1.Pod) } -func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) { +func (e *EndpointSlice) addServiceLabels(esa v1.EndpointSlice, tg *targetgroup.Group) { var ( found bool name string ) - ns := esa.namespace() + ns := esa.Namespace // Every EndpointSlice object has the Service they belong to in the // kubernetes.io/service-name label. - name, found = esa.labels()[esa.labelServiceName()] + name, found = esa.Labels[v1.LabelServiceName] if !found { return } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go deleted file mode 100644 index 81243e2ce0..0000000000 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kubernetes - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions. 
-type endpointSliceAdaptor interface { - get() interface{} - getObjectMeta() metav1.ObjectMeta - name() string - namespace() string - addressType() string - endpoints() []endpointSliceEndpointAdaptor - ports() []endpointSlicePortAdaptor - labels() map[string]string - labelServiceName() string -} - -type endpointSlicePortAdaptor interface { - name() *string - port() *int32 - protocol() *string - appProtocol() *string -} - -type endpointSliceEndpointAdaptor interface { - addresses() []string - hostname() *string - nodename() *string - zone() *string - conditions() endpointSliceEndpointConditionsAdaptor - targetRef() *corev1.ObjectReference - topology() map[string]string -} - -type endpointSliceEndpointConditionsAdaptor interface { - ready() *bool - serving() *bool - terminating() *bool -} - -// Adaptor for k8s.io/api/discovery/v1. -type endpointSliceAdaptorV1 struct { - endpointSlice *v1.EndpointSlice -} - -func newEndpointSliceAdaptorFromV1(endpointSlice *v1.EndpointSlice) endpointSliceAdaptor { - return &endpointSliceAdaptorV1{endpointSlice: endpointSlice} -} - -func (e *endpointSliceAdaptorV1) get() interface{} { - return e.endpointSlice -} - -func (e *endpointSliceAdaptorV1) getObjectMeta() metav1.ObjectMeta { - return e.endpointSlice.ObjectMeta -} - -func (e *endpointSliceAdaptorV1) name() string { - return e.endpointSlice.ObjectMeta.Name -} - -func (e *endpointSliceAdaptorV1) namespace() string { - return e.endpointSlice.ObjectMeta.Namespace -} - -func (e *endpointSliceAdaptorV1) addressType() string { - return string(e.endpointSlice.AddressType) -} - -func (e *endpointSliceAdaptorV1) endpoints() []endpointSliceEndpointAdaptor { - eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints)) - for i := 0; i < len(e.endpointSlice.Endpoints); i++ { - eps = append(eps, newEndpointSliceEndpointAdaptorFromV1(e.endpointSlice.Endpoints[i])) - } - return eps -} - -func (e *endpointSliceAdaptorV1) ports() []endpointSlicePortAdaptor { - ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports)) - for i := 0; i < len(e.endpointSlice.Ports); i++ { - ports = append(ports, newEndpointSlicePortAdaptorFromV1(e.endpointSlice.Ports[i])) - } - return ports -} - -func (e *endpointSliceAdaptorV1) labels() map[string]string { - return e.endpointSlice.Labels -} - -func (e *endpointSliceAdaptorV1) labelServiceName() string { - return v1.LabelServiceName -} - -type endpointSliceEndpointAdaptorV1 struct { - endpoint v1.Endpoint -} - -func newEndpointSliceEndpointAdaptorFromV1(endpoint v1.Endpoint) endpointSliceEndpointAdaptor { - return &endpointSliceEndpointAdaptorV1{endpoint: endpoint} -} - -func (e *endpointSliceEndpointAdaptorV1) addresses() []string { - return e.endpoint.Addresses -} - -func (e *endpointSliceEndpointAdaptorV1) hostname() *string { - return e.endpoint.Hostname -} - -func (e *endpointSliceEndpointAdaptorV1) nodename() *string { - return e.endpoint.NodeName -} - -func (e *endpointSliceEndpointAdaptorV1) zone() *string { - return e.endpoint.Zone -} - -func (e *endpointSliceEndpointAdaptorV1) conditions() endpointSliceEndpointConditionsAdaptor { - return newEndpointSliceEndpointConditionsAdaptorFromV1(e.endpoint.Conditions) -} - -func (e *endpointSliceEndpointAdaptorV1) targetRef() *corev1.ObjectReference { - return e.endpoint.TargetRef -} - -func (e *endpointSliceEndpointAdaptorV1) topology() map[string]string { - return e.endpoint.DeprecatedTopology -} - -type endpointSliceEndpointConditionsAdaptorV1 struct { - endpointConditions v1.EndpointConditions -} - -func 
newEndpointSliceEndpointConditionsAdaptorFromV1(endpointConditions v1.EndpointConditions) endpointSliceEndpointConditionsAdaptor { - return &endpointSliceEndpointConditionsAdaptorV1{endpointConditions: endpointConditions} -} - -func (e *endpointSliceEndpointConditionsAdaptorV1) ready() *bool { - return e.endpointConditions.Ready -} - -func (e *endpointSliceEndpointConditionsAdaptorV1) serving() *bool { - return e.endpointConditions.Serving -} - -func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { - return e.endpointConditions.Terminating -} - -type endpointSlicePortAdaptorV1 struct { - endpointPort v1.EndpointPort -} - -func newEndpointSlicePortAdaptorFromV1(port v1.EndpointPort) endpointSlicePortAdaptor { - return &endpointSlicePortAdaptorV1{endpointPort: port} -} - -func (e *endpointSlicePortAdaptorV1) name() *string { - return e.endpointPort.Name -} - -func (e *endpointSlicePortAdaptorV1) port() *int32 { - return e.endpointPort.Port -} - -func (e *endpointSlicePortAdaptorV1) protocol() *string { - val := string(*e.endpointPort.Protocol) - return &val -} - -func (e *endpointSlicePortAdaptorV1) appProtocol() *string { - return e.endpointPort.AppProtocol -} diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go index 1b7847c5c4..0de574471f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go @@ -121,21 +121,18 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b return true } - var ia ingressAdaptor - switch ingress := o.(type) { - case *v1.Ingress: - ia = newIngressAdaptorFromV1(ingress) - default: + if ingress, ok := o.(*v1.Ingress); ok { + send(ctx, ch, i.buildIngress(*ingress)) + } else { i.logger.Error("converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o)) return true } - send(ctx, ch, i.buildIngress(ia)) return true } -func ingressSource(s ingressAdaptor) string { - return ingressSourceFromNamespaceAndName(s.namespace(), s.name()) +func ingressSource(s v1.Ingress) string { + return ingressSourceFromNamespaceAndName(s.Namespace, s.Name) } func ingressSourceFromNamespaceAndName(namespace, name string) string { @@ -149,15 +146,15 @@ const ( ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" ) -func ingressLabels(ingress ingressAdaptor) model.LabelSet { +func ingressLabels(ingress v1.Ingress) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. 
ls := make(model.LabelSet) - ls[namespaceLabel] = lv(ingress.namespace()) - if cls := ingress.ingressClassName(); cls != nil { + ls[namespaceLabel] = lv(ingress.Namespace) + if cls := ingress.Spec.IngressClassName; cls != nil { ls[ingressClassNameLabel] = lv(*cls) } - addObjectMetaLabels(ls, ingress.getObjectMeta(), RoleIngress) + addObjectMetaLabels(ls, ingress.ObjectMeta, RoleIngress) return ls } @@ -177,19 +174,39 @@ func pathsFromIngressPaths(ingressPaths []string) []string { return paths } -func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group { +func rulePaths(rule v1.IngressRule) []string { + rv := rule.IngressRuleValue + if rv.HTTP == nil { + return nil + } + paths := make([]string, len(rv.HTTP.Paths)) + for n, p := range rv.HTTP.Paths { + paths[n] = p.Path + } + return paths +} + +func tlsHosts(ingressTLS []v1.IngressTLS) []string { + var hosts []string + for _, tls := range ingressTLS { + hosts = append(hosts, tls.Hosts...) + } + return hosts +} + +func (i *Ingress) buildIngress(ingress v1.Ingress) *targetgroup.Group { tg := &targetgroup.Group{ Source: ingressSource(ingress), } tg.Labels = ingressLabels(ingress) - for _, rule := range ingress.rules() { + for _, rule := range ingress.Spec.Rules { scheme := "http" - paths := pathsFromIngressPaths(rule.paths()) + paths := pathsFromIngressPaths(rulePaths(rule)) out: - for _, pattern := range ingress.tlsHosts() { - if matchesHostnamePattern(pattern, rule.host()) { + for _, pattern := range tlsHosts(ingress.Spec.TLS) { + if matchesHostnamePattern(pattern, rule.Host) { scheme = "https" break out } @@ -197,9 +214,9 @@ func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group { for _, path := range paths { tg.Targets = append(tg.Targets, model.LabelSet{ - model.AddressLabel: lv(rule.host()), + model.AddressLabel: lv(rule.Host), ingressSchemeLabel: lv(scheme), - ingressHostLabel: lv(rule.host()), + ingressHostLabel: lv(rule.Host), ingressPathLabel: lv(path), }) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go deleted file mode 100644 index 84281196b4..0000000000 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kubernetes - -import ( - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ingressAdaptor is an adaptor for the different Ingress versions. -type ingressAdaptor interface { - getObjectMeta() metav1.ObjectMeta - name() string - namespace() string - labels() map[string]string - annotations() map[string]string - tlsHosts() []string - ingressClassName() *string - rules() []ingressRuleAdaptor -} - -type ingressRuleAdaptor interface { - paths() []string - host() string -} - -// Adaptor for networking.k8s.io/v1. 
-type ingressAdaptorV1 struct { - ingress *v1.Ingress -} - -func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor { - return &ingressAdaptorV1{ingress: ingress} -} - -func (i *ingressAdaptorV1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } -func (i *ingressAdaptorV1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } - -func (i *ingressAdaptorV1) tlsHosts() []string { - var hosts []string - for _, tls := range i.ingress.Spec.TLS { - hosts = append(hosts, tls.Hosts...) - } - return hosts -} - -func (i *ingressAdaptorV1) rules() []ingressRuleAdaptor { - var rules []ingressRuleAdaptor - for _, rule := range i.ingress.Spec.Rules { - rules = append(rules, newIngressRuleAdaptorFromV1(rule)) - } - return rules -} - -type ingressRuleAdaptorV1 struct { - rule v1.IngressRule -} - -func newIngressRuleAdaptorFromV1(rule v1.IngressRule) ingressRuleAdaptor { - return &ingressRuleAdaptorV1{rule: rule} -} - -func (i *ingressRuleAdaptorV1) paths() []string { - rv := i.rule.IngressRuleValue - if rv.HTTP == nil { - return nil - } - paths := make([]string, len(rv.HTTP.Paths)) - for n, p := range rv.HTTP.Paths { - paths[n] = p.Path - } - return paths -} - -func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go index c6f0e445da..03d9f2f449 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go @@ -24,8 +24,6 @@ import ( "sync" "time" - "github.com/prometheus/prometheus/util/strutil" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -40,15 +38,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" + // Required to get the GCP auth provider working. + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" - // Required to get the GCP auth provider working. 
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" ) const ( diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go index fe419bc782..ba3cb1d32a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go @@ -28,7 +28,7 @@ type kubernetesMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &kubernetesMetrics{ eventCount: prometheus.NewCounterVec( prometheus.CounterOpts{ diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 87e0ecc44b..3219117d2a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -101,12 +101,12 @@ func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus. // Register the metrics. // We have to do this after setting all options, so that the name of the Manager is set. - if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { - mgr.metrics = metrics - } else { + metrics, err := NewManagerMetrics(registerer, mgr.name) + if err != nil { logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err) return nil } + mgr.metrics = metrics return mgr } diff --git a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go index 9c93e43f51..0c2c2e9702 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go +++ b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go @@ -80,7 +80,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &marathonMetrics{ refreshMetrics: rmi, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go b/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go index de277a58db..53a8b2e135 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go @@ -25,7 +25,6 @@ import ( "strconv" "time" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" @@ -211,7 +210,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er return nil, fmt.Errorf("error while computing network labels: %w", err) } - allContainers := make(map[string]types.Container) + allContainers := make(map[string]container.Summary) for _, c := range containers { allContainers[c.ID] = c } @@ -237,17 +236,16 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er if len(networks) == 0 { // Try to lookup shared networks for { - if containerNetworkMode.IsContainer() { - tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()] - if !exists { - break - } - networks = tmpContainer.NetworkSettings.Networks - containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode) - if len(networks) > 0 { - break - } - } else { + if !containerNetworkMode.IsContainer() { + break + } + tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()] + if !exists { + break + } + networks = tmpContainer.NetworkSettings.Networks + containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode) + if len(networks) > 0 { break } } diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go b/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go index ae12116301..57c0af7171 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go @@ -70,7 +70,7 @@ type Filter struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*DockerSwarmSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*DockerSwarmSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &dockerswarmMetrics{ refreshMetrics: rmi, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go index 5cea68c4a5..e7a6362052 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/hypervisor.go @@ -77,7 +77,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group // OpenStack API reference // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details pagerHypervisors := hypervisors.List(client, nil) - err = pagerHypervisors.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { + err = pagerHypervisors.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { hypervisorList, err := hypervisors.ExtractHypervisors(page) if err != nil { return false, fmt.Errorf("could not extract hypervisors: %w", err) diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go index dea327afe3..6c2f79b3a4 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go @@ -119,7 +119,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{}) floatingIPList := make(map[floatingIPKey]string) floatingIPPresent := make(map[string]struct{}) - err = pagerFIP.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { + err = pagerFIP.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { result, err := floatingips.ExtractFloatingIPs(page) if err != nil { return false, fmt.Errorf("could not extract floatingips: %w", err) diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/loadbalancer.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/loadbalancer.go index 32e0f24f8d..254b713cdd 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/loadbalancer.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/loadbalancer.go @@ -118,9 +118,6 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro // Fetch all floating IPs fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list all fips: %w", err) - } if err != nil { return nil, fmt.Errorf("failed to list floating IPs: %w", err) } @@ -192,9 +189,5 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro tg.Targets = append(tg.Targets, labels) } - if err != nil { - return nil, err - } - return []*targetgroup.Group{tg}, nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go index eb1d4d5a4d..d7b58787a1 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go +++ 
b/vendor/github.com/prometheus/prometheus/discovery/openstack/openstack.go @@ -67,7 +67,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &openstackMetrics{ refreshMetrics: rmi, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/registry.go b/vendor/github.com/prometheus/prometheus/discovery/registry.go index 2401d78fba..93b88ccfab 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/registry.go +++ b/vendor/github.com/prometheus/prometheus/discovery/registry.go @@ -22,9 +22,8 @@ import ( "strings" "sync" - "gopkg.in/yaml.v2" - "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery/targetgroup" ) diff --git a/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go b/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go index 5ec7b65215..5efe49e23d 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go +++ b/vendor/github.com/prometheus/prometheus/discovery/triton/triton.go @@ -71,7 +71,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &tritonMetrics{ refreshMetrics: rmi, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go index a1cfe3d055..af26cc5a0e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go +++ b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go @@ -59,7 +59,7 @@ type ServersetSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*ServersetSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*ServersetSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } @@ -101,7 +101,7 @@ type NerveSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*NerveSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*NerveSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 0747ab90d9..ed66d73cbf 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -250,15 +250,7 @@ func (ls Labels) WithoutEmpty() Labels { // Equal returns whether the two label sets are equal. 
func Equal(ls, o Labels) bool { - if len(ls) != len(o) { - return false - } - for i, l := range ls { - if l != o[i] { - return false - } - } - return true + return slices.Equal(ls, o) } // EmptyLabels returns n empty Labels value, for convenience. diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go index a232eeea5d..005eaa509e 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go @@ -104,14 +104,14 @@ func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool { if l.Name == model.MetricNameLabel { // If the default validation scheme has been overridden with legacy mode, // we need to call the special legacy validation checker. - if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) { + if validationScheme == model.LegacyValidation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) { return strconv.ErrSyntax } if !model.IsValidMetricName(model.LabelValue(l.Value)) { return strconv.ErrSyntax } } - if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation { + if validationScheme == model.LegacyValidation { if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() { return strconv.ErrSyntax } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index bfd9034059..cf6c9158e9 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -991,7 +991,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str return true } - analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool { // Ensure we don't have mixed case sensitivity. if caseSensitiveSet && caseSensitive != prefixCaseSensitive { return false @@ -1026,7 +1026,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { multiMatcher.add(matcher.s) return true - }, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + }, func(prefix string, _ bool, matcher StringMatcher) bool { multiMatcher.addPrefix(prefix, caseSensitive, matcher) return true }) diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 1373484426..70daef426f 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -135,11 +135,6 @@ func (c *Config) Validate() error { // Design escaping mechanism to allow that, once valid use case appears. 
return model.LabelName(value).IsValid() } - if model.NameValidationScheme == model.LegacyValidation { - isValidLabelNameWithRegexVarFn = func(value string) bool { - return relabelTargetLegacy.MatchString(value) - } - } if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index 3fc3fa0437..9b1c897a98 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -92,7 +92,7 @@ type RuleGroups struct { } type ruleGroups struct { - Groups []yaml.Node `yaml:"groups"` + Groups []RuleGroupNode `yaml:"groups"` } // Validate validates all rules in the rule groups. @@ -128,9 +128,9 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { set[g.Name] = struct{}{} for i, r := range g.Rules { - for _, node := range g.Rules[i].Validate() { - var ruleName yaml.Node - if r.Alert.Value != "" { + for _, node := range r.Validate(node.Groups[j].Rules[i]) { + var ruleName string + if r.Alert != "" { ruleName = r.Alert } else { ruleName = r.Record @@ -138,7 +138,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { errs = append(errs, &Error{ Group: g.Name, Rule: i + 1, - RuleName: ruleName.Value, + RuleName: ruleName, Err: node, }) } @@ -150,6 +150,17 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { // RuleGroup is a list of sequentially evaluated recording and alerting rules. type RuleGroup struct { + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + QueryOffset *model.Duration `yaml:"query_offset,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []Rule `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` +} + +// RuleGroupNode adds yaml.v3 layer to support line and columns outputs for invalid rule groups. +type RuleGroupNode struct { + yaml.Node Name string `yaml:"name"` Interval model.Duration `yaml:"interval,omitempty"` QueryOffset *model.Duration `yaml:"query_offset,omitempty"` @@ -181,64 +192,64 @@ type RuleNode struct { } // Validate the rule and return a list of encountered errors. 
-func (r *RuleNode) Validate() (nodes []WrappedError) { - if r.Record.Value != "" && r.Alert.Value != "" { +func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) { + if r.Record != "" && r.Alert != "" { nodes = append(nodes, WrappedError{ err: errors.New("only one of 'record' and 'alert' must be set"), - node: &r.Record, - nodeAlt: &r.Alert, + node: &node.Record, + nodeAlt: &node.Alert, }) } - if r.Record.Value == "" && r.Alert.Value == "" { + if r.Record == "" && r.Alert == "" { nodes = append(nodes, WrappedError{ err: errors.New("one of 'record' or 'alert' must be set"), - node: &r.Record, - nodeAlt: &r.Alert, + node: &node.Record, + nodeAlt: &node.Alert, }) } - if r.Expr.Value == "" { + if r.Expr == "" { nodes = append(nodes, WrappedError{ err: errors.New("field 'expr' must be set in rule"), - node: &r.Expr, + node: &node.Expr, }) - } else if _, err := parser.ParseExpr(r.Expr.Value); err != nil { + } else if _, err := parser.ParseExpr(r.Expr); err != nil { nodes = append(nodes, WrappedError{ err: fmt.Errorf("could not parse expression: %w", err), - node: &r.Expr, + node: &node.Expr, }) } - if r.Record.Value != "" { + if r.Record != "" { if len(r.Annotations) > 0 { nodes = append(nodes, WrappedError{ err: errors.New("invalid field 'annotations' in recording rule"), - node: &r.Record, + node: &node.Record, }) } if r.For != 0 { nodes = append(nodes, WrappedError{ err: errors.New("invalid field 'for' in recording rule"), - node: &r.Record, + node: &node.Record, }) } if r.KeepFiringFor != 0 { nodes = append(nodes, WrappedError{ err: errors.New("invalid field 'keep_firing_for' in recording rule"), - node: &r.Record, + node: &node.Record, }) } - if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { + if !model.IsValidMetricName(model.LabelValue(r.Record)) { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value), - node: &r.Record, + err: fmt.Errorf("invalid recording rule name: %s", r.Record), + node: &node.Record, }) } // While record is a valid UTF-8 it's common mistake to put PromQL expression in the record name. // Disallow "{}" chars. - if strings.Contains(r.Record.Value, "{") || strings.Contains(r.Record.Value, "}") { + if strings.Contains(r.Record, "{") || strings.Contains(r.Record, "}") { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record.Value), - node: &r.Record, + err: fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record), + node: &node.Record, }) } } @@ -274,8 +285,8 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { // testTemplateParsing checks if the templates used in labels and annotations // of the alerting rules are parsed correctly. -func testTemplateParsing(rl *RuleNode) (errs []error) { - if rl.Alert.Value == "" { +func testTemplateParsing(rl *Rule) (errs []error) { + if rl.Alert == "" { // Not an alerting rule. 
return errs } @@ -292,7 +303,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { tmpl := template.NewTemplateExpander( context.TODO(), strings.Join(append(defs, text), ""), - "__alert_"+rl.Alert.Value, + "__alert_"+rl.Alert, tmplData, model.Time(timestamp.FromTime(time.Now())), nil, diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 2682855281..6409e37232 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -29,12 +29,18 @@ import ( type Parser interface { // Series returns the bytes of a series with a simple float64 as a // value, the timestamp if set, and the value of the current sample. + // TODO(bwplotka): Similar to CreatedTimestamp, have ts == 0 meaning no timestamp provided. + // We already accepted in many places (PRW, proto parsing histograms) that 0 timestamp is not a + // a valid timestamp. If needed it can be represented as 0+1ms. Series() ([]byte, *int64, float64) // Histogram returns the bytes of a series with a sparse histogram as a // value, the timestamp if set, and the histogram in the current sample. // Depending on the parsed input, the function returns an (integer) Histogram // or a FloatHistogram, with the respective other return value being nil. + // TODO(bwplotka): Similar to CreatedTimestamp, have ts == 0 meaning no timestamp provided. + // We already accepted in many places (PRW, proto parsing histograms) that 0 timestamp is not a + // a valid timestamp. If needed it can be represented as 0+1ms. Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) // Help returns the metric name and help text in the current entry. @@ -57,11 +63,10 @@ type Parser interface { // The returned byte slice becomes invalid after the next call to Next. Comment() []byte - // Metric writes the labels of the current sample into the passed labels. - // It returns the string from which the metric was parsed. + // Labels writes the labels of the current sample into the passed labels. // The values of the "le" labels of classic histograms and "quantile" labels // of summaries should follow the OpenMetrics formatting rules. - Metric(l *labels.Labels) string + Labels(l *labels.Labels) // Exemplar writes the exemplar of the current sample into the passed // exemplar. It can be called repeatedly to retrieve multiple exemplars @@ -70,11 +75,9 @@ type Parser interface { Exemplar(l *exemplar.Exemplar) bool // CreatedTimestamp returns the created timestamp (in milliseconds) for the - // current sample. It returns nil if it is unknown e.g. if it wasn't set, + // current sample. It returns 0 if it is unknown e.g. if it wasn't set or // if the scrape protocol or metric type does not support created timestamps. - // Assume the CreatedTimestamp returned pointer is only valid until - // the Next iteration. - CreatedTimestamp() *int64 + CreatedTimestamp() int64 // Next advances the parser to the next sample. // It returns (EntryInvalid, io.EOF) if no samples were read. 
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go index 83e381539f..ea4941f2e2 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go @@ -67,8 +67,7 @@ type NHCBParser struct { h *histogram.Histogram fh *histogram.FloatHistogram // For Metric. - lset labels.Labels - metricString string + lset labels.Labels // For Type. bName []byte typ model.MetricType @@ -84,7 +83,7 @@ type NHCBParser struct { fhNHCB *histogram.FloatHistogram lsetNHCB labels.Labels exemplars []exemplar.Exemplar - ctNHCB *int64 + ctNHCB int64 metricStringNHCB string // Collates values from the classic histogram series to build @@ -93,7 +92,7 @@ type NHCBParser struct { tempNHCB convertnhcb.TempHistogram tempExemplars []exemplar.Exemplar tempExemplarCount int - tempCT *int64 + tempCT int64 // Remembers the last base histogram metric name (assuming it's // a classic histogram) so we can tell if the next float series @@ -141,13 +140,12 @@ func (p *NHCBParser) Comment() []byte { return p.parser.Comment() } -func (p *NHCBParser) Metric(l *labels.Labels) string { +func (p *NHCBParser) Labels(l *labels.Labels) { if p.state == stateEmitting { *l = p.lsetNHCB - return p.metricStringNHCB + return } *l = p.lset - return p.metricString } func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { @@ -162,7 +160,7 @@ func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { return p.parser.Exemplar(ex) } -func (p *NHCBParser) CreatedTimestamp() *int64 { +func (p *NHCBParser) CreatedTimestamp() int64 { switch p.state { case stateStart: if p.entry == EntrySeries || p.entry == EntryHistogram { @@ -173,7 +171,7 @@ func (p *NHCBParser) CreatedTimestamp() *int64 { case stateEmitting: return p.ctNHCB } - return nil + return 0 } func (p *NHCBParser) Next() (Entry, error) { @@ -200,7 +198,7 @@ func (p *NHCBParser) Next() (Entry, error) { switch p.entry { case EntrySeries: p.bytes, p.ts, p.value = p.parser.Series() - p.metricString = p.parser.Metric(&p.lset) + p.parser.Labels(&p.lset) // Check the label set to see if we can continue or need to emit the NHCB. var isNHCB bool if p.compareLabels() { @@ -224,7 +222,7 @@ func (p *NHCBParser) Next() (Entry, error) { return p.entry, p.err case EntryHistogram: p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() - p.metricString = p.parser.Metric(&p.lset) + p.parser.Labels(&p.lset) p.storeExponentialLabels() case EntryType: p.bName, p.typ = p.parser.Type() @@ -377,6 +375,6 @@ func (p *NHCBParser) processNHCB() bool { } p.tempNHCB.Reset() p.tempExemplarCount = 0 - p.tempCT = nil + p.tempCT = 0 return err == nil } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index f0dd51afee..be3ec67cac 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -73,7 +73,7 @@ func (l *openMetricsLexer) Error(es string) { // OpenMetricsParser parses samples from a byte slice of samples in the official // OpenMetrics text exposition format. 
-// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit +// Specification can be found at https://prometheus.io/docs/specs/om/open_metrics_spec/ type OpenMetricsParser struct { l *openMetricsLexer builder labels.ScratchBuilder @@ -197,10 +197,10 @@ func (p *OpenMetricsParser) Comment() []byte { return p.text } -// Metric writes the labels of the current sample into the passed labels. -// It returns the string from which the metric was parsed. -func (p *OpenMetricsParser) Metric(l *labels.Labels) string { - // Copy the buffer to a string: this is only necessary for the return value. +// Labels writes the labels of the current sample into the passed labels. +func (p *OpenMetricsParser) Labels(l *labels.Labels) { + // Defensive copy in case the following keeps a reference. + // See https://github.com/prometheus/prometheus/issues/16490 s := string(p.series) p.builder.Reset() @@ -220,8 +220,6 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { p.builder.Sort() *l = p.builder.Labels() - - return s } // Exemplar writes the exemplar of the current sample into the passed exemplar. @@ -263,11 +261,11 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { // CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. // NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. -func (p *OpenMetricsParser) CreatedTimestamp() *int64 { +func (p *OpenMetricsParser) CreatedTimestamp() int64 { if !typeRequiresCT(p.mtype) { // Not a CT supported metric type, fast path. p.ctHashSet = 0 // Use ctHashSet as a single way of telling "empty cache" - return nil + return 0 } var ( @@ -284,7 +282,7 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { currHash := p.seriesHash(&buf, currName) // Check cache, perhaps we fetched something already. if currHash == p.ctHashSet && p.ct > 0 { - return &p.ct + return p.ct } // Create a new lexer to reset the parser once this function is done executing. @@ -314,12 +312,12 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { // spec improvement would help. // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. p.resetCTParseValues() - return nil + return 0 } if eType != EntrySeries { // Assume we hit different family, no CT line found. p.resetCTParseValues() - return nil + return 0 } peekedName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] @@ -333,14 +331,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { if peekedHash != currHash { // Found CT line for a different series, for our series no CT. p.resetCTParseValues() - return nil + return 0 } // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. 
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#timestamps ct := int64(p.val * 1000.0) p.setCTParseValues(ct, currHash, currName, true) - return &ct + return ct } } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 17b0c3db8b..c4dcb4aee3 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -223,12 +223,11 @@ func (p *PromParser) Comment() []byte { return p.text } -// Metric writes the labels of the current sample into the passed labels. -// It returns the string from which the metric was parsed. -func (p *PromParser) Metric(l *labels.Labels) string { - // Copy the buffer to a string: this is only necessary for the return value. +// Labels writes the labels of the current sample into the passed labels. +func (p *PromParser) Labels(l *labels.Labels) { + // Defensive copy in case the following keeps a reference. + // See https://github.com/prometheus/prometheus/issues/16490 s := string(p.series) - p.builder.Reset() metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) p.builder.Add(labels.MetricName, metricName) @@ -246,8 +245,6 @@ func (p *PromParser) Metric(l *labels.Labels) string { p.builder.Sort() *l = p.builder.Labels() - - return s } // Exemplar implements the Parser interface. However, since the classic @@ -257,10 +254,10 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } -// CreatedTimestamp returns nil as it's not implemented yet. +// CreatedTimestamp returns 0 as it's not implemented yet. // TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 -func (p *PromParser) CreatedTimestamp() *int64 { - return nil +func (p *PromParser) CreatedTimestamp() int64 { + return 0 } // nextToken returns the next token from the promlexer. It skips over tabs @@ -506,6 +503,10 @@ func yoloString(b []byte) string { return unsafe.String(unsafe.SliceData(b), len(b)) } +func yoloBytes(b string) []byte { + return unsafe.Slice(unsafe.StringData(b), len(b)) +} + func parseFloat(s string) (float64, error) { // Keep to pre-Go 1.13 float formats. if strings.ContainsAny(s, "pP_") { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index a77e1d728f..834e34133a 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -15,7 +15,6 @@ package textparse import ( "bytes" - "encoding/binary" "errors" "fmt" "io" @@ -25,14 +24,12 @@ import ( "sync" "unicode/utf8" - "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) @@ -45,24 +42,24 @@ var floatFormatBufPool = sync.Pool{ }, } -// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus -// protobuf format and then present it as it if were parsed by a -// Prometheus-2-style text parser. This is only done so that we can easily plug -// in the protobuf format into Prometheus 2. 
For future use (with the final -// format that will be used for native histograms), we have to revisit the -// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing -// could be used in a similar fashion (byte-slice pointers into the raw -// payload), which requires some hand-coded protobuf handling. But the current -// parsers all expect the full series name (metric name plus label pairs) as one -// string, which is not how things are represented in the protobuf format. If -// the re-arrangement work is actually causing problems (which has to be seen), -// that expectation needs to be changed. +// ProtobufParser parses the old Prometheus protobuf format and present it +// as the text-style textparse.Parser interface. +// +// It uses a tailored streaming protobuf dto.MetricStreamingDecoder that +// reuses internal protobuf structs and allows direct unmarshalling to Prometheus +// types like labels. type ProtobufParser struct { - in []byte // The input to parse. - inPos int // Position within the input. - metricPos int // Position within Metric slice. + dec *dto.MetricStreamingDecoder + + // Used for both the string returned by Series and Histogram, as well as, + // metric family for Type, Unit and Help. + entryBytes *bytes.Buffer + + lset labels.Labels + builder labels.ScratchBuilder // Held here to reduce allocations when building Labels. + // fieldPos is the position within a Summary or (legacy) Histogram. -2 - // is the count. -1 is the sum. Otherwise it is the index within + // is the count. -1 is the sum. Otherwise, it is the index within // quantiles/buckets. fieldPos int fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. @@ -78,27 +75,20 @@ type ProtobufParser struct { // that we have to decode the next MetricFamily. state Entry - builder labels.ScratchBuilder // held here to reduce allocations when building Labels - - mf *dto.MetricFamily - // Whether to also parse a classic histogram that is also present as a // native histogram. parseClassicHistograms bool - - // The following are just shenanigans to satisfy the Parser interface. - metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. } // NewProtobufParser returns a parser for the payload in the byte slice. func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) Parser { return &ProtobufParser{ - in: b, + dec: dto.NewMetricStreamingDecoder(b), + entryBytes: &bytes.Buffer{}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), // TODO(bwplotka): Try base builder. + state: EntryInvalid, - mf: &dto.MetricFamily{}, - metricBytes: &bytes.Buffer{}, parseClassicHistograms: parseClassicHistograms, - builder: labels.NewScratchBuilderWithSymbolTable(st, 16), } } @@ -106,19 +96,18 @@ func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolT // value, the timestamp if set, and the value of the current sample. func (p *ProtobufParser) Series() ([]byte, *int64, float64) { var ( - m = p.mf.GetMetric()[p.metricPos] - ts = m.GetTimestampMs() + ts = &p.dec.TimestampMs // To save memory allocations, never nil. 
v float64 ) - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER: - v = m.GetCounter().GetValue() + v = p.dec.GetCounter().GetValue() case dto.MetricType_GAUGE: - v = m.GetGauge().GetValue() + v = p.dec.GetGauge().GetValue() case dto.MetricType_UNTYPED: - v = m.GetUntyped().GetValue() + v = p.dec.GetUntyped().GetValue() case dto.MetricType_SUMMARY: - s := m.GetSummary() + s := p.dec.GetSummary() switch p.fieldPos { case -2: v = float64(s.GetSampleCount()) @@ -133,7 +122,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { } case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: // This should only happen for a classic histogram. - h := m.GetHistogram() + h := p.dec.GetHistogram() switch p.fieldPos { case -2: v = h.GetSampleCountFloat() @@ -159,8 +148,8 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { default: panic("encountered unexpected metric type, this is a bug") } - if ts != 0 { - return p.metricBytes.Bytes(), &ts, v + if *ts != 0 { + return p.entryBytes.Bytes(), ts, v } // TODO(beorn7): We assume here that ts==0 means no timestamp. That's // not true in general, but proto3 originally has no distinction between @@ -171,7 +160,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { // away from gogo-protobuf to an actively maintained protobuf // implementation. Once that's done, we can simply use the `optional` // keyword and check for the unset state explicitly. - return p.metricBytes.Bytes(), nil, v + return p.entryBytes.Bytes(), nil, v } // Histogram returns the bytes of a series with a native histogram as a value, @@ -186,47 +175,56 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { // value. func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { var ( - m = p.mf.GetMetric()[p.metricPos] - ts = m.GetTimestampMs() - h = m.GetHistogram() + ts = &p.dec.TimestampMs // To save memory allocations, never nil. + h = p.dec.GetHistogram() ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { p.redoClassic = true } if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { // It is a float histogram. fh := histogram.FloatHistogram{ - Count: h.GetSampleCountFloat(), - Sum: h.GetSampleSum(), - ZeroThreshold: h.GetZeroThreshold(), - ZeroCount: h.GetZeroCountFloat(), - Schema: h.GetSchema(), + Count: h.GetSampleCountFloat(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCountFloat(), + Schema: h.GetSchema(), + + // Decoder reuses slices, so we need to copy. 
PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), - PositiveBuckets: h.GetPositiveCount(), + PositiveBuckets: make([]float64, len(h.GetPositiveCount())), NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), - NegativeBuckets: h.GetNegativeCount(), + NegativeBuckets: make([]float64, len(h.GetNegativeCount())), } for i, span := range h.GetPositiveSpan() { fh.PositiveSpans[i].Offset = span.GetOffset() fh.PositiveSpans[i].Length = span.GetLength() } + for i, cnt := range h.GetPositiveCount() { + fh.PositiveBuckets[i] = cnt + } for i, span := range h.GetNegativeSpan() { fh.NegativeSpans[i].Offset = span.GetOffset() fh.NegativeSpans[i].Length = span.GetLength() } - if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + for i, cnt := range h.GetNegativeCount() { + fh.NegativeBuckets[i] = cnt + } + if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM { fh.CounterResetHint = histogram.GaugeType } fh.Compact(0) - if ts != 0 { - return p.metricBytes.Bytes(), &ts, nil, &fh + if *ts != 0 { + return p.entryBytes.Bytes(), ts, nil, &fh } // Nasty hack: Assume that ts==0 means no timestamp. That's not true in // general, but proto3 has no distinction between unset and // default. Need to avoid in the final format. - return p.metricBytes.Bytes(), nil, nil, &fh + return p.entryBytes.Bytes(), nil, nil, &fh } + // TODO(bwplotka): Create sync.Pool for those structs. sh := histogram.Histogram{ Count: h.GetSampleCount(), Sum: h.GetSampleSum(), @@ -234,41 +232,47 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his ZeroCount: h.GetZeroCount(), Schema: h.GetSchema(), PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), - PositiveBuckets: h.GetPositiveDelta(), + PositiveBuckets: make([]int64, len(h.GetPositiveDelta())), NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), - NegativeBuckets: h.GetNegativeDelta(), + NegativeBuckets: make([]int64, len(h.GetNegativeDelta())), } for i, span := range h.GetPositiveSpan() { sh.PositiveSpans[i].Offset = span.GetOffset() sh.PositiveSpans[i].Length = span.GetLength() } + for i, cnt := range h.GetPositiveDelta() { + sh.PositiveBuckets[i] = cnt + } for i, span := range h.GetNegativeSpan() { sh.NegativeSpans[i].Offset = span.GetOffset() sh.NegativeSpans[i].Length = span.GetLength() } - if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + for i, cnt := range h.GetNegativeDelta() { + sh.NegativeBuckets[i] = cnt + } + if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM { sh.CounterResetHint = histogram.GaugeType } sh.Compact(0) - if ts != 0 { - return p.metricBytes.Bytes(), &ts, &sh, nil + if *ts != 0 { + return p.entryBytes.Bytes(), ts, &sh, nil } - return p.metricBytes.Bytes(), nil, &sh, nil + return p.entryBytes.Bytes(), nil, &sh, nil } // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. func (p *ProtobufParser) Help() ([]byte, []byte) { - return p.metricBytes.Bytes(), []byte(p.mf.GetHelp()) + return p.entryBytes.Bytes(), yoloBytes(p.dec.GetHelp()) } // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. 
func (p *ProtobufParser) Type() ([]byte, model.MetricType) { - n := p.metricBytes.Bytes() - switch p.mf.GetType() { + n := p.entryBytes.Bytes() + switch p.dec.GetType() { case dto.MetricType_COUNTER: return n, model.MetricTypeCounter case dto.MetricType_GAUGE: @@ -287,7 +291,7 @@ func (p *ProtobufParser) Type() ([]byte, model.MetricType) { // Must only be called after Next returned a unit entry. // The returned byte slices become invalid after the next call to Next. func (p *ProtobufParser) Unit() ([]byte, []byte) { - return p.metricBytes.Bytes(), []byte(p.mf.GetUnit()) + return p.entryBytes.Bytes(), []byte(p.dec.GetUnit()) } // Comment always returns nil because comments aren't supported by the protobuf @@ -296,24 +300,9 @@ func (p *ProtobufParser) Comment() []byte { return nil } -// Metric writes the labels of the current sample into the passed labels. -// It returns the string from which the metric was parsed. -func (p *ProtobufParser) Metric(l *labels.Labels) string { - p.builder.Reset() - p.builder.Add(labels.MetricName, p.getMagicName()) - - for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { - p.builder.Add(lp.GetName(), lp.GetValue()) - } - if needed, name, value := p.getMagicLabel(); needed { - p.builder.Add(name, value) - } - - // Sort labels to maintain the sorted labels invariant. - p.builder.Sort() - *l = p.builder.Labels() - - return p.metricBytes.String() +// Labels writes the labels of the current sample into the passed labels. +func (p *ProtobufParser) Labels(l *labels.Labels) { + *l = p.lset.Copy() } // Exemplar writes the exemplar of the current sample into the passed @@ -326,15 +315,14 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { // We only ever return one exemplar per (non-native-histogram) series. return false } - m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER: - exProto = m.GetCounter().GetExemplar() + exProto = p.dec.GetCounter().GetExemplar() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: isClassic := p.state == EntrySeries - if !isClassic && len(m.GetHistogram().GetExemplars()) > 0 { - exs := m.GetHistogram().GetExemplars() + if !isClassic && len(p.dec.GetHistogram().GetExemplars()) > 0 { + exs := p.dec.GetHistogram().GetExemplars() for p.exemplarPos < len(exs) { exProto = exs[p.exemplarPos] p.exemplarPos++ @@ -346,7 +334,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { return false } } else { - bb := m.GetHistogram().GetBucket() + bb := p.dec.GetHistogram().GetBucket() if p.fieldPos < 0 { if isClassic { return false // At _count or _sum. @@ -390,26 +378,24 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { return true } -// CreatedTimestamp returns CT or nil if CT is not present or -// invalid (as timestamp e.g. negative value) on counters, summaries or histograms. -func (p *ProtobufParser) CreatedTimestamp() *int64 { +// CreatedTimestamp returns CT or 0 if CT is not present on counters, summaries or histograms. 
+func (p *ProtobufParser) CreatedTimestamp() int64 { var ct *types.Timestamp - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER: - ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + ct = p.dec.GetCounter().GetCreatedTimestamp() case dto.MetricType_SUMMARY: - ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + ct = p.dec.GetSummary().GetCreatedTimestamp() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + ct = p.dec.GetHistogram().GetCreatedTimestamp() default: } - ctAsTime, err := types.TimestampFromProto(ct) - if err != nil { - // Errors means ct == nil or invalid timestamp, which we silently ignore. - return nil + if ct == nil { + return 0 } - ctMilis := ctAsTime.UnixMilli() - return &ctMilis + // Same as the gogo proto types.TimestampFromProto but straight to integer. + // and without validation. + return ct.GetSeconds()*1e3 + int64(ct.GetNanos())/1e6 } // Next advances the parser to the next "sample" (emulating the behavior of a @@ -418,31 +404,34 @@ func (p *ProtobufParser) CreatedTimestamp() *int64 { func (p *ProtobufParser) Next() (Entry, error) { p.exemplarReturned = false switch p.state { + // Invalid state occurs on: + // * First Next() call. + // * Recursive call that tells Next to move to the next metric family. case EntryInvalid: - p.metricPos = 0 p.exemplarPos = 0 p.fieldPos = -2 - n, err := readDelimited(p.in[p.inPos:], p.mf) - p.inPos += n - if err != nil { + + if err := p.dec.NextMetricFamily(); err != nil { return p.state, err } - - // Skip empty metric families. - if len(p.mf.GetMetric()) == 0 { - return p.Next() + if err := p.dec.NextMetric(); err != nil { + // Skip empty metric families. + if errors.Is(err, io.EOF) { + return p.Next() + } + return EntryInvalid, err } // We are at the beginning of a metric family. Put only the name - // into metricBytes and validate only name, help, and type for now. - name := p.mf.GetName() + // into entryBytes and validate only name, help, and type for now. + name := p.dec.GetName() if !model.IsValidMetricName(model.LabelValue(name)) { return EntryInvalid, fmt.Errorf("invalid metric name: %s", name) } - if help := p.mf.GetHelp(); !utf8.ValidString(help) { + if help := p.dec.GetHelp(); !utf8.ValidString(help) { return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help) } - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER, dto.MetricType_GAUGE, dto.MetricType_HISTOGRAM, @@ -451,11 +440,11 @@ func (p *ProtobufParser) Next() (Entry, error) { dto.MetricType_UNTYPED: // All good. 
default: - return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.dec.GetType()) } - unit := p.mf.GetUnit() + unit := p.dec.GetUnit() if len(unit) > 0 { - if p.mf.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + if p.dec.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { if !strings.HasSuffix(name[:len(name)-6], unit) || len(name)-6 < len(unit)+1 || name[len(name)-6-len(unit)-1] != '_' { return EntryInvalid, fmt.Errorf("unit %q not a suffix of counter %q", unit, name) } @@ -463,12 +452,11 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", unit, name) } } - p.metricBytes.Reset() - p.metricBytes.WriteString(name) - + p.entryBytes.Reset() + p.entryBytes.WriteString(name) p.state = EntryHelp case EntryHelp: - if p.mf.Unit != "" { + if p.dec.Unit != "" { p.state = EntryUnit } else { p.state = EntryType @@ -476,48 +464,78 @@ func (p *ProtobufParser) Next() (Entry, error) { case EntryUnit: p.state = EntryType case EntryType: - t := p.mf.GetType() + t := p.dec.GetType() if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && - isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + isNativeHistogram(p.dec.GetHistogram()) { p.state = EntryHistogram } else { p.state = EntrySeries } - if err := p.updateMetricBytes(); err != nil { + if err := p.onSeriesOrHistogramUpdate(); err != nil { return EntryInvalid, err } - case EntryHistogram, EntrySeries: - if p.redoClassic { - p.redoClassic = false - p.state = EntrySeries - p.fieldPos = -3 - p.fieldsDone = false - } - t := p.mf.GetType() - if p.state == EntrySeries && !p.fieldsDone && - (t == dto.MetricType_SUMMARY || - t == dto.MetricType_HISTOGRAM || - t == dto.MetricType_GAUGE_HISTOGRAM) { - p.fieldPos++ - } else { - p.metricPos++ + case EntrySeries: + // Potentially a second series in the metric family. + t := p.dec.GetType() + if t == dto.MetricType_SUMMARY || + t == dto.MetricType_HISTOGRAM || + t == dto.MetricType_GAUGE_HISTOGRAM { + // Non-trivial series (complex metrics, with magic suffixes). + + // Did we iterate over all the classic representations fields? + // NOTE: p.fieldsDone is updated on p.onSeriesOrHistogramUpdate. + if !p.fieldsDone { + // Still some fields to iterate over. + p.fieldPos++ + if err := p.onSeriesOrHistogramUpdate(); err != nil { + return EntryInvalid, err + } + return p.state, nil + } + + // Reset histogram fields. p.fieldPos = -2 p.fieldsDone = false p.exemplarPos = 0 + // If this is a metric family containing native - // histograms, we have to switch back to native - // histograms after parsing a classic histogram. - if p.state == EntrySeries && - (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && - isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + // histograms, it means we are here thanks to redoClassic state. + // Return to native histograms for the consistent flow. + if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.dec.GetHistogram()) { p.state = EntryHistogram } } - if p.metricPos >= len(p.mf.GetMetric()) { - p.state = EntryInvalid - return p.Next() + // Is there another series? 
+ if err := p.dec.NextMetric(); err != nil { + if errors.Is(err, io.EOF) { + p.state = EntryInvalid + return p.Next() + } + return EntryInvalid, err + } + if err := p.onSeriesOrHistogramUpdate(); err != nil { + return EntryInvalid, err + } + case EntryHistogram: + // Was Histogram() called and parseClassicHistograms is true? + if p.redoClassic { + p.redoClassic = false + p.fieldPos = -3 + p.fieldsDone = false + p.state = EntrySeries + return p.Next() // Switch to classic histogram. + } + + // Is there another series? + if err := p.dec.NextMetric(); err != nil { + if errors.Is(err, io.EOF) { + p.state = EntryInvalid + return p.Next() + } + return EntryInvalid, err } - if err := p.updateMetricBytes(); err != nil { + if err := p.onSeriesOrHistogramUpdate(); err != nil { return EntryInvalid, err } default: @@ -526,30 +544,39 @@ func (p *ProtobufParser) Next() (Entry, error) { return p.state, nil } -func (p *ProtobufParser) updateMetricBytes() error { - b := p.metricBytes - b.Reset() - b.WriteString(p.getMagicName()) - for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { - b.WriteByte(model.SeparatorByte) - n := lp.GetName() - if !model.LabelName(n).IsValid() { - return fmt.Errorf("invalid label name: %s", n) - } - b.WriteString(n) - b.WriteByte(model.SeparatorByte) - v := lp.GetValue() - if !utf8.ValidString(v) { - return fmt.Errorf("invalid label value: %s", v) - } - b.WriteString(v) +// onSeriesOrHistogramUpdate updates internal state before returning +// a series or histogram. It updates: +// * p.lset. +// * p.entryBytes. +// * p.fieldsDone depending on p.fieldPos. +func (p *ProtobufParser) onSeriesOrHistogramUpdate() error { + p.builder.Reset() + p.builder.Add(labels.MetricName, p.getMagicName()) + + if err := p.dec.Label(&p.builder); err != nil { + return err } - if needed, n, v := p.getMagicLabel(); needed { - b.WriteByte(model.SeparatorByte) - b.WriteString(n) - b.WriteByte(model.SeparatorByte) - b.WriteString(v) + + if needed, name, value := p.getMagicLabel(); needed { + p.builder.Add(name, value) } + + // Sort labels to maintain the sorted labels invariant. + p.builder.Sort() + p.builder.Overwrite(&p.lset) + + // entryBytes has to be unique for each series. + p.entryBytes.Reset() + p.lset.Range(func(l labels.Label) { + if l.Name == labels.MetricName { + p.entryBytes.WriteString(l.Value) + return + } + p.entryBytes.WriteByte(model.SeparatorByte) + p.entryBytes.WriteString(l.Name) + p.entryBytes.WriteByte(model.SeparatorByte) + p.entryBytes.WriteString(l.Value) + }) return nil } @@ -557,36 +584,37 @@ func (p *ProtobufParser) updateMetricBytes() error { // ("_count", "_sum", "_bucket") if needed according to the current parser // state. func (p *ProtobufParser) getMagicName() string { - t := p.mf.GetType() + t := p.dec.GetType() if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) { - return p.mf.GetName() + return p.dec.GetName() } if p.fieldPos == -2 { - return p.mf.GetName() + "_count" + return p.dec.GetName() + "_count" } if p.fieldPos == -1 { - return p.mf.GetName() + "_sum" + return p.dec.GetName() + "_sum" } if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { - return p.mf.GetName() + "_bucket" + return p.dec.GetName() + "_bucket" } - return p.mf.GetName() + return p.dec.GetName() } // getMagicLabel returns if a magic label ("quantile" or "le") is needed and, if // so, its name and value. It also sets p.fieldsDone if applicable. 
func (p *ProtobufParser) getMagicLabel() (bool, string, string) { + // Native histogram or _count and _sum series. if p.state == EntryHistogram || p.fieldPos < 0 { return false, "", "" } - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_SUMMARY: - qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile() + qq := p.dec.GetSummary().GetQuantile() q := qq[p.fieldPos] p.fieldsDone = p.fieldPos == len(qq)-1 return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile()) case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket() + bb := p.dec.GetHistogram().GetBucket() if p.fieldPos >= len(bb) { p.fieldsDone = true return true, model.BucketLabel, "+Inf" @@ -598,29 +626,6 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) { return false, "", "" } -var errInvalidVarint = errors.New("protobufparse: invalid varint encountered") - -// readDelimited is essentially doing what the function of the same name in -// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is -// specific to a MetricFamily, utilizes the more efficient gogo-protobuf -// unmarshaling, and acts on a byte slice directly without any additional -// staging buffers. -func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { - if len(b) == 0 { - return 0, io.EOF - } - messageLength, varIntLength := proto.DecodeVarint(b) - if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 { - return 0, errInvalidVarint - } - totalLength := varIntLength + int(messageLength) - if totalLength > len(b) { - return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) - } - mf.Reset() - return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) -} - // formatOpenMetricsFloat works like the usual Go string formatting of a float // but appends ".0" if the resulting number would otherwise contain neither a // "." nor an "e". diff --git a/vendor/github.com/prometheus/prometheus/notifier/alert.go b/vendor/github.com/prometheus/prometheus/notifier/alert.go new file mode 100644 index 0000000000..88245c9a7f --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/notifier/alert.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "fmt" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels labels.Labels `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations labels.Labels `json:"annotations"` + + // The known time range for this alert. Both ends are optional. 
+ StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL,omitempty"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return a.Labels.Get(labels.AlertName) +} + +// Hash returns a hash over the alert. It is equivalent to the alert labels hash. +func (a *Alert) Hash() uint64 { + return a.Labels.Hash() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), fmt.Sprintf("%016x", a.Hash())[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true iff the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +func relabelAlerts(relabelConfigs []*relabel.Config, externalLabels labels.Labels, alerts []*Alert) []*Alert { + lb := labels.NewBuilder(labels.EmptyLabels()) + var relabeledAlerts []*Alert + + for _, a := range alerts { + lb.Reset(a.Labels) + externalLabels.Range(func(l labels.Label) { + if a.Labels.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + }) + + keep := relabel.ProcessBuilder(lb, relabelConfigs...) + if !keep { + continue + } + a.Labels = lb.Labels() + relabeledAlerts = append(relabeledAlerts, a) + } + return relabeledAlerts +} diff --git a/vendor/github.com/prometheus/prometheus/notifier/alertmanager.go b/vendor/github.com/prometheus/prometheus/notifier/alertmanager.go new file mode 100644 index 0000000000..8bcf7954ec --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/notifier/alertmanager.go @@ -0,0 +1,90 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "fmt" + "net/url" + "path" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" +) + +// Alertmanager holds Alertmanager endpoint information. +type alertmanager interface { + url() *url.URL +} + +type alertmanagerLabels struct{ labels.Labels } + +const pathLabel = "__alerts_path__" + +func (a alertmanagerLabels) url() *url.URL { + return &url.URL{ + Scheme: a.Get(model.SchemeLabel), + Host: a.Get(model.AddressLabel), + Path: a.Get(pathLabel), + } +} + +// AlertmanagerFromGroup extracts a list of alertmanagers from a target group +// and an associated AlertmanagerConfig. 
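Each discovered Alertmanager ends up addressed by the __scheme__, __address__ and __alerts_path__ labels set during relabelling, with postPath (defined further down) supplying the API-versioned push path. A stand-alone sketch of the equivalent URL construction; it is not part of the notifier package, and the host, prefix and version values are made up:

package example

import (
	"fmt"
	"net/url"
	"path"
)

// alertsURL mirrors what postPath plus alertmanagerLabels.url compute for a
// discovered target.
func alertsURL(scheme, host, pathPrefix, apiVersion string) string {
	u := url.URL{
		Scheme: scheme,
		Host:   host,
		Path:   path.Join("/", pathPrefix, fmt.Sprintf("/api/%s/alerts", apiVersion)),
	}
	return u.String()
}

// alertsURL("http", "am.example.org:9093", "/am", "v2")
// == "http://am.example.org:9093/am/api/v2/alerts"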
+func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { + var res []alertmanager + var droppedAlertManagers []alertmanager + lb := labels.NewBuilder(labels.EmptyLabels()) + + for _, tlset := range tg.Targets { + lb.Reset(labels.EmptyLabels()) + + for ln, lv := range tlset { + lb.Set(string(ln), string(lv)) + } + // Set configured scheme as the initial scheme label for overwrite. + lb.Set(model.SchemeLabel, cfg.Scheme) + lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) + + // Combine target labels with target group labels. + for ln, lv := range tg.Labels { + if _, ok := tlset[ln]; !ok { + lb.Set(string(ln), string(lv)) + } + } + + preRelabel := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) + if !keep { + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) + continue + } + + addr := lb.Get(model.AddressLabel) + if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { + return nil, nil, err + } + + res = append(res, alertmanagerLabels{lb.Labels()}) + } + return res, droppedAlertManagers, nil +} + +func postPath(pre string, v config.AlertmanagerAPIVersion) string { + alertPushEndpoint := fmt.Sprintf("/api/%v/alerts", string(v)) + return path.Join("/", pre, alertPushEndpoint) +} diff --git a/vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go b/vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go new file mode 100644 index 0000000000..50471098ad --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go @@ -0,0 +1,128 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "crypto/md5" + "encoding/hex" + "log/slog" + "net/http" + "sync" + + config_util "github.com/prometheus/common/config" + "github.com/prometheus/sigv4" + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +// alertmanagerSet contains a set of Alertmanagers discovered via a group of service +// discovery definitions that have a common configuration on how alerts should be sent. 
+type alertmanagerSet struct { + cfg *config.AlertmanagerConfig + client *http.Client + + metrics *alertMetrics + + mtx sync.RWMutex + ams []alertmanager + droppedAms []alertmanager + logger *slog.Logger +} + +func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") + if err != nil { + return nil, err + } + t := client.Transport + + if cfg.SigV4Config != nil { + t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport) + if err != nil { + return nil, err + } + } + + client.Transport = t + + s := &alertmanagerSet{ + client: client, + cfg: cfg, + logger: logger, + metrics: metrics, + } + return s, nil +} + +// sync extracts a deduplicated set of Alertmanager endpoints from a list +// of target groups definitions. +func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { + allAms := []alertmanager{} + allDroppedAms := []alertmanager{} + + for _, tg := range tgs { + ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) + if err != nil { + s.logger.Error("Creating discovered Alertmanagers failed", "err", err) + continue + } + allAms = append(allAms, ams...) + allDroppedAms = append(allDroppedAms, droppedAms...) + } + + s.mtx.Lock() + defer s.mtx.Unlock() + previousAms := s.ams + // Set new Alertmanagers and deduplicate them along their unique URL. + s.ams = []alertmanager{} + s.droppedAms = []alertmanager{} + s.droppedAms = append(s.droppedAms, allDroppedAms...) + seen := map[string]struct{}{} + + for _, am := range allAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + + // This will initialize the Counters for the AM to 0. + s.metrics.sent.WithLabelValues(us) + s.metrics.errors.WithLabelValues(us) + + seen[us] = struct{}{} + s.ams = append(s.ams, am) + } + // Now remove counters for any removed Alertmanagers. 
+ for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } +} + +func (s *alertmanagerSet) configHash() (string, error) { + b, err := yaml.Marshal(s.cfg) + if err != nil { + return "", err + } + hash := md5.Sum(b) + return hex.EncodeToString(hash[:]), nil +} diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/manager.go similarity index 54% rename from vendor/github.com/prometheus/prometheus/notifier/notifier.go rename to vendor/github.com/prometheus/prometheus/notifier/manager.go index fbc37c29ef..69ce9b221b 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/manager.go @@ -16,28 +16,18 @@ package notifier import ( "bytes" "context" - "crypto/md5" - "encoding/hex" "encoding/json" "fmt" "io" "log/slog" "net/http" "net/url" - "path" "sync" "time" - "github.com/go-openapi/strfmt" - "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" - config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" - "github.com/prometheus/sigv4" - "go.uber.org/atomic" - "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -46,6 +36,9 @@ import ( ) const ( + // DefaultMaxBatchSize is the default maximum number of alerts to send in a single request to the alertmanager. + DefaultMaxBatchSize = 256 + contentTypeJSON = "application/json" ) @@ -58,53 +51,6 @@ const ( var userAgent = version.PrometheusUserAgent() -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels labels.Labels `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations labels.Labels `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL,omitempty"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return a.Labels.Get(labels.AlertName) -} - -// Hash returns a hash over the alert. It is equivalent to the alert labels hash. -func (a *Alert) Hash() uint64 { - return a.Labels.Hash() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), fmt.Sprintf("%016x", a.Hash())[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true iff the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - // Manager is responsible for dispatching alert notifications to an // alert manager service. 
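The batching behaviour becomes configurable as part of this rename: the hard-coded maxBatchSize constant is dropped, DefaultMaxBatchSize is 256, and NewManager falls back to it whenever Options.MaxBatchSize is zero or negative (see the Options and nextBatch hunks below). A construction sketch with an illustrative value:

package example

import "github.com/prometheus/prometheus/notifier"

// newNotifier builds a Manager with an explicit batch size. Leaving
// MaxBatchSize at zero (or negative) makes NewManager fall back to
// notifier.DefaultMaxBatchSize. Passing a nil logger is fine; the manager
// substitutes a no-op slog logger.
func newNotifier() *notifier.Manager {
	return notifier.NewManager(&notifier.Options{
		MaxBatchSize: 128, // Illustrative; anything <= 0 means "use the default".
	}, nil)
}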
type Manager struct { @@ -133,84 +79,9 @@ type Options struct { Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) Registerer prometheus.Registerer -} -type alertMetrics struct { - latency *prometheus.SummaryVec - errors *prometheus.CounterVec - sent *prometheus.CounterVec - dropped prometheus.Counter - queueLength prometheus.GaugeFunc - queueCapacity prometheus.Gauge - alertmanagersDiscovered prometheus.GaugeFunc -} - -func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanagersDiscovered func() float64) *alertMetrics { - m := &alertMetrics{ - latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "latency_seconds", - Help: "Latency quantiles for sending alert notifications.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{alertmanagerLabel}, - ), - errors: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "errors_total", - Help: "Total number of sent alerts affected by errors.", - }, - []string{alertmanagerLabel}, - ), - sent: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "sent_total", - Help: "Total number of alerts sent.", - }, - []string{alertmanagerLabel}, - ), - dropped: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "dropped_total", - Help: "Total number of alerts dropped due to errors when sending to Alertmanager.", - }), - queueLength: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queue_length", - Help: "The number of alert notifications in the queue.", - }, queueLen), - queueCapacity: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queue_capacity", - Help: "The capacity of the alert notifications queue.", - }), - alertmanagersDiscovered: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Name: "prometheus_notifications_alertmanagers_discovered", - Help: "The number of alertmanagers discovered and active.", - }, alertmanagersDiscovered), - } - - m.queueCapacity.Set(float64(queueCap)) - - if r != nil { - r.MustRegister( - m.latency, - m.errors, - m.sent, - m.dropped, - m.queueLength, - m.queueCapacity, - m.alertmanagersDiscovered, - ) - } - - return m + // MaxBatchSize determines the maximum number of alerts to send in a single request to the alertmanager. + MaxBatchSize int } func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { @@ -225,6 +96,10 @@ func NewManager(o *Options, logger *slog.Logger) *Manager { if o.Do == nil { o.Do = do } + // Set default MaxBatchSize if not provided. + if o.MaxBatchSize <= 0 { + o.MaxBatchSize = DefaultMaxBatchSize + } if logger == nil { logger = promslog.NewNopLogger() } @@ -295,8 +170,6 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { return nil } -const maxBatchSize = 64 - func (n *Manager) queueLen() int { n.mtx.RLock() defer n.mtx.RUnlock() @@ -310,7 +183,7 @@ func (n *Manager) nextBatch() []*Alert { var alerts []*Alert - if len(n.queue) > maxBatchSize { + if maxBatchSize := n.opts.MaxBatchSize; len(n.queue) > maxBatchSize { alerts = append(make([]*Alert, 0, maxBatchSize), n.queue[:maxBatchSize]...) 
n.queue = n.queue[maxBatchSize:] } else { @@ -463,28 +336,6 @@ func (n *Manager) Send(alerts ...*Alert) { n.setMore() } -func relabelAlerts(relabelConfigs []*relabel.Config, externalLabels labels.Labels, alerts []*Alert) []*Alert { - lb := labels.NewBuilder(labels.EmptyLabels()) - var relabeledAlerts []*Alert - - for _, a := range alerts { - lb.Reset(a.Labels) - externalLabels.Range(func(l labels.Label) { - if a.Labels.Get(l.Name) == "" { - lb.Set(l.Name, l.Value) - } - }) - - keep := relabel.ProcessBuilder(lb, relabelConfigs...) - if !keep { - continue - } - a.Labels = lb.Labels() - relabeledAlerts = append(relabeledAlerts, a) - } - return relabeledAlerts -} - // setMore signals that the alert queue has items. func (n *Manager) setMore() { // If we cannot send on the channel, it means the signal already exists @@ -552,10 +403,10 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { n.mtx.RUnlock() var ( - wg sync.WaitGroup - numSuccess atomic.Uint64 + wg sync.WaitGroup + amSetCovered sync.Map ) - for _, ams := range amSets { + for k, ams := range amSets { var ( payload []byte err error @@ -611,24 +462,28 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { cachedPayload = nil } + // Being here means len(ams.ams) > 0 + amSetCovered.Store(k, false) for _, am := range ams.ams { wg.Add(1) ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout)) defer cancel() - go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { - if err := n.sendOne(ctx, client, url, payload); err != nil { + go func(ctx context.Context, k string, client *http.Client, url string, payload []byte, count int) { + err := n.sendOne(ctx, client, url, payload) + if err != nil { n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err) n.metrics.errors.WithLabelValues(url).Add(float64(count)) } else { - numSuccess.Inc() + amSetCovered.CompareAndSwap(k, false, true) } + n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds()) n.metrics.sent.WithLabelValues(url).Add(float64(count)) wg.Done() - }(ctx, ams.client, am.url().String(), payload, len(amAlerts)) + }(ctx, k, ams.client, am.url().String(), payload, len(amAlerts)) } ams.mtx.RUnlock() @@ -636,35 +491,18 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { wg.Wait() - return numSuccess.Load() > 0 -} - -func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { - openAPIAlerts := models.PostableAlerts{} - for _, a := range alerts { - start := strfmt.DateTime(a.StartsAt) - end := strfmt.DateTime(a.EndsAt) - openAPIAlerts = append(openAPIAlerts, &models.PostableAlert{ - Annotations: labelsToOpenAPILabelSet(a.Annotations), - EndsAt: end, - StartsAt: start, - Alert: models.Alert{ - GeneratorURL: strfmt.URI(a.GeneratorURL), - Labels: labelsToOpenAPILabelSet(a.Labels), - }, - }) - } - - return openAPIAlerts -} - -func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { - apiLabelSet := models.LabelSet{} - modelLabelSet.Range(func(label labels.Label) { - apiLabelSet[label.Name] = label.Value + // Return false if there are any sets which were attempted (e.g. not filtered + // out) but have no successes. 
+ allAmSetsCovered := true + amSetCovered.Range(func(_, value any) bool { + if !value.(bool) { + allAmSetsCovered = false + return false + } + return true }) - return apiLabelSet + return allAmSetsCovered } func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error { @@ -705,165 +543,3 @@ func (n *Manager) Stop() { close(n.stopRequested) }) } - -// Alertmanager holds Alertmanager endpoint information. -type alertmanager interface { - url() *url.URL -} - -type alertmanagerLabels struct{ labels.Labels } - -const pathLabel = "__alerts_path__" - -func (a alertmanagerLabels) url() *url.URL { - return &url.URL{ - Scheme: a.Get(model.SchemeLabel), - Host: a.Get(model.AddressLabel), - Path: a.Get(pathLabel), - } -} - -// alertmanagerSet contains a set of Alertmanagers discovered via a group of service -// discovery definitions that have a common configuration on how alerts should be sent. -type alertmanagerSet struct { - cfg *config.AlertmanagerConfig - client *http.Client - - metrics *alertMetrics - - mtx sync.RWMutex - ams []alertmanager - droppedAms []alertmanager - logger *slog.Logger -} - -func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { - client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") - if err != nil { - return nil, err - } - t := client.Transport - - if cfg.SigV4Config != nil { - t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport) - if err != nil { - return nil, err - } - } - - client.Transport = t - - s := &alertmanagerSet{ - client: client, - cfg: cfg, - logger: logger, - metrics: metrics, - } - return s, nil -} - -// sync extracts a deduplicated set of Alertmanager endpoints from a list -// of target groups definitions. -func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { - allAms := []alertmanager{} - allDroppedAms := []alertmanager{} - - for _, tg := range tgs { - ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) - if err != nil { - s.logger.Error("Creating discovered Alertmanagers failed", "err", err) - continue - } - allAms = append(allAms, ams...) - allDroppedAms = append(allDroppedAms, droppedAms...) - } - - s.mtx.Lock() - defer s.mtx.Unlock() - previousAms := s.ams - // Set new Alertmanagers and deduplicate them along their unique URL. - s.ams = []alertmanager{} - s.droppedAms = []alertmanager{} - s.droppedAms = append(s.droppedAms, allDroppedAms...) - seen := map[string]struct{}{} - - for _, am := range allAms { - us := am.url().String() - if _, ok := seen[us]; ok { - continue - } - - // This will initialize the Counters for the AM to 0. - s.metrics.sent.WithLabelValues(us) - s.metrics.errors.WithLabelValues(us) - - seen[us] = struct{}{} - s.ams = append(s.ams, am) - } - // Now remove counters for any removed Alertmanagers. 
- for _, am := range previousAms { - us := am.url().String() - if _, ok := seen[us]; ok { - continue - } - s.metrics.latency.DeleteLabelValues(us) - s.metrics.sent.DeleteLabelValues(us) - s.metrics.errors.DeleteLabelValues(us) - seen[us] = struct{}{} - } -} - -func (s *alertmanagerSet) configHash() (string, error) { - b, err := yaml.Marshal(s.cfg) - if err != nil { - return "", err - } - hash := md5.Sum(b) - return hex.EncodeToString(hash[:]), nil -} - -func postPath(pre string, v config.AlertmanagerAPIVersion) string { - alertPushEndpoint := fmt.Sprintf("/api/%v/alerts", string(v)) - return path.Join("/", pre, alertPushEndpoint) -} - -// AlertmanagerFromGroup extracts a list of alertmanagers from a target group -// and an associated AlertmanagerConfig. -func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { - var res []alertmanager - var droppedAlertManagers []alertmanager - lb := labels.NewBuilder(labels.EmptyLabels()) - - for _, tlset := range tg.Targets { - lb.Reset(labels.EmptyLabels()) - - for ln, lv := range tlset { - lb.Set(string(ln), string(lv)) - } - // Set configured scheme as the initial scheme label for overwrite. - lb.Set(model.SchemeLabel, cfg.Scheme) - lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) - - // Combine target labels with target group labels. - for ln, lv := range tg.Labels { - if _, ok := tlset[ln]; !ok { - lb.Set(string(ln), string(lv)) - } - } - - preRelabel := lb.Labels() - keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) - if !keep { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) - continue - } - - addr := lb.Get(model.AddressLabel) - if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { - return nil, nil, err - } - - res = append(res, alertmanagerLabels{lb.Labels()}) - } - return res, droppedAlertManagers, nil -} diff --git a/vendor/github.com/prometheus/prometheus/notifier/metric.go b/vendor/github.com/prometheus/prometheus/notifier/metric.go new file mode 100644 index 0000000000..b9a55b3ec7 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/notifier/metric.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package notifier + +import "github.com/prometheus/client_golang/prometheus" + +type alertMetrics struct { + latency *prometheus.SummaryVec + errors *prometheus.CounterVec + sent *prometheus.CounterVec + dropped prometheus.Counter + queueLength prometheus.GaugeFunc + queueCapacity prometheus.Gauge + alertmanagersDiscovered prometheus.GaugeFunc +} + +func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanagersDiscovered func() float64) *alertMetrics { + m := &alertMetrics{ + latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "latency_seconds", + Help: "Latency quantiles for sending alert notifications.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{alertmanagerLabel}, + ), + errors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "errors_total", + Help: "Total number of sent alerts affected by errors.", + }, + []string{alertmanagerLabel}, + ), + sent: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "sent_total", + Help: "Total number of alerts sent.", + }, + []string{alertmanagerLabel}, + ), + dropped: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dropped_total", + Help: "Total number of alerts dropped due to errors when sending to Alertmanager.", + }), + queueLength: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queue_length", + Help: "The number of alert notifications in the queue.", + }, queueLen), + queueCapacity: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queue_capacity", + Help: "The capacity of the alert notifications queue.", + }), + alertmanagersDiscovered: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_notifications_alertmanagers_discovered", + Help: "The number of alertmanagers discovered and active.", + }, alertmanagersDiscovered), + } + + m.queueCapacity.Set(float64(queueCap)) + + if r != nil { + r.MustRegister( + m.latency, + m.errors, + m.sent, + m.dropped, + m.queueLength, + m.queueCapacity, + m.alertmanagersDiscovered, + ) + } + + return m +} diff --git a/vendor/github.com/prometheus/prometheus/notifier/util.go b/vendor/github.com/prometheus/prometheus/notifier/util.go new file mode 100644 index 0000000000..c21c33a57b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/notifier/util.go @@ -0,0 +1,49 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package notifier + +import ( + "github.com/go-openapi/strfmt" + "github.com/prometheus/alertmanager/api/v2/models" + + "github.com/prometheus/prometheus/model/labels" +) + +func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { + openAPIAlerts := models.PostableAlerts{} + for _, a := range alerts { + start := strfmt.DateTime(a.StartsAt) + end := strfmt.DateTime(a.EndsAt) + openAPIAlerts = append(openAPIAlerts, &models.PostableAlert{ + Annotations: labelsToOpenAPILabelSet(a.Annotations), + EndsAt: end, + StartsAt: start, + Alert: models.Alert{ + GeneratorURL: strfmt.URI(a.GeneratorURL), + Labels: labelsToOpenAPILabelSet(a.Labels), + }, + }) + } + + return openAPIAlerts +} + +func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { + apiLabelSet := models.LabelSet{} + modelLabelSet.Range(func(label labels.Label) { + apiLabelSet[label.Name] = label.Value + }) + + return apiLabelSet +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/buf.gen.yaml b/vendor/github.com/prometheus/prometheus/prompb/buf.gen.yaml new file mode 100644 index 0000000000..1fda309ea7 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/prompb/buf.gen.yaml @@ -0,0 +1,5 @@ +version: v2 +plugins: + - local: protoc-gen-gogofast + out: . + opt: [plugins=grpc, paths=source_relative, Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types] diff --git a/vendor/github.com/prometheus/prometheus/prompb/buf.lock b/vendor/github.com/prometheus/prometheus/prompb/buf.lock index 30b0f08479..f9907b4592 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/buf.lock +++ b/vendor/github.com/prometheus/prometheus/prompb/buf.lock @@ -4,7 +4,5 @@ deps: - remote: buf.build owner: gogo repository: protobuf - branch: main - commit: 4df00b267f944190a229ce3695781e99 - digest: b1-sjLgsg7CzrkOrIjBDh3s-l0aMjE6oqTj85-OsoopKAw= - create_time: 2021-08-10T00:14:28.345069Z + commit: e1dbca2775a74a89955a99990de45a53 + digest: shake256:2523041b61927813260d369e632adb1938da2e9a0e10c42c6fca1b38acdb04661046bf20a2d99a7c9fb69676a63f9655147667dca8d49cea1644114fa97c0add diff --git a/vendor/github.com/prometheus/prometheus/prompb/codec.go b/vendor/github.com/prometheus/prometheus/prompb/codec.go index ad30cd5e7b..b2574fd9e1 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/codec.go +++ b/vendor/github.com/prometheus/prometheus/prompb/codec.go @@ -90,6 +90,7 @@ func (h Histogram) ToIntHistogram() *histogram.Histogram { PositiveBuckets: h.GetPositiveDeltas(), NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), NegativeBuckets: h.GetNegativeDeltas(), + CustomValues: h.CustomValues, } } @@ -109,6 +110,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { PositiveBuckets: h.GetPositiveCounts(), NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), NegativeBuckets: h.GetNegativeCounts(), + CustomValues: h.CustomValues, } } // Conversion from integer histogram. diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go new file mode 100644 index 0000000000..0d62f1f7cf --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go @@ -0,0 +1,780 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package io_prometheus_client //nolint:revive + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "unicode/utf8" + "unsafe" + + proto "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/labels" +) + +type MetricStreamingDecoder struct { + in []byte + inPos int + + // TODO(bwplotka): Switch to generator/plugin that won't have those fields accessible e.g. OpaqueAPI + // We leverage the fact those two don't collide. + *MetricFamily // Without Metric, guarded by overridden GetMetric method. + *Metric // Without Label, guarded by overridden GetLabel method. + + mfData []byte + metrics []pos + metricIndex int + + mData []byte + labels []pos +} + +// NewMetricStreamingDecoder returns a Go iterator that unmarshals given protobuf bytes one +// metric family and metric at the time, allowing efficient streaming. +// +// Do not modify MetricStreamingDecoder between iterations as it's reused to save allocations. +// GetGauge, GetCounter, etc are also cached, which means GetGauge will work for counter +// if previously gauge was parsed. It's up to the caller to use Type to decide what +// method to use when checking the value. +// +// TODO(bwplotka): io.Reader approach is possible too, but textparse has access to whole scrape for now. +func NewMetricStreamingDecoder(data []byte) *MetricStreamingDecoder { + return &MetricStreamingDecoder{ + in: data, + MetricFamily: &MetricFamily{}, + Metric: &Metric{}, + metrics: make([]pos, 0, 100), + } +} + +var errInvalidVarint = errors.New("clientpb: invalid varint encountered") + +func (m *MetricStreamingDecoder) NextMetricFamily() error { + b := m.in[m.inPos:] + if len(b) == 0 { + return io.EOF + } + messageLength, varIntLength := proto.DecodeVarint(b) // TODO(bwplotka): Get rid of gogo. + if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 { + return errInvalidVarint + } + totalLength := varIntLength + int(messageLength) + if totalLength > len(b) { + return fmt.Errorf("clientpb: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) + } + m.resetMetricFamily() + m.mfData = b[varIntLength:totalLength] + + m.inPos += totalLength + return m.MetricFamily.unmarshalWithoutMetrics(m, m.mfData) +} + +// resetMetricFamily resets all the fields in m to equal the zero value, but re-using slice memory. +func (m *MetricStreamingDecoder) resetMetricFamily() { + m.metrics = m.metrics[:0] + m.metricIndex = 0 + m.MetricFamily.Reset() +} + +func (m *MetricStreamingDecoder) NextMetric() error { + if m.metricIndex >= len(m.metrics) { + return io.EOF + } + + m.resetMetric() + m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end] + if err := m.Metric.unmarshalWithoutLabels(m, m.mData); err != nil { + return err + } + m.metricIndex++ + return nil +} + +// resetMetric resets all the fields in m to equal the zero value, but re-using slices memory. +func (m *MetricStreamingDecoder) resetMetric() { + m.labels = m.labels[:0] + m.TimestampMs = 0 + + // TODO(bwplotka): Autogenerate reset functions. 
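+	// Each per-type payload below is zeroed field by field and its slices are
+	// truncated with [:0], so the backing arrays are reused by the next
+	// NextMetric call instead of being reallocated.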
+ if m.Metric.Counter != nil { + m.Metric.Counter.Value = 0 + m.Metric.Counter.CreatedTimestamp = nil + m.Metric.Counter.Exemplar = nil + } + if m.Metric.Gauge != nil { + m.Metric.Gauge.Value = 0 + } + if m.Metric.Histogram != nil { + m.Metric.Histogram.SampleCount = 0 + m.Metric.Histogram.SampleCountFloat = 0 + m.Metric.Histogram.SampleSum = 0 + m.Metric.Histogram.Bucket = m.Metric.Histogram.Bucket[:0] + m.Metric.Histogram.CreatedTimestamp = nil + m.Metric.Histogram.Schema = 0 + m.Metric.Histogram.ZeroThreshold = 0 + m.Metric.Histogram.ZeroCount = 0 + m.Metric.Histogram.ZeroCountFloat = 0 + m.Metric.Histogram.NegativeSpan = m.Metric.Histogram.NegativeSpan[:0] + m.Metric.Histogram.NegativeDelta = m.Metric.Histogram.NegativeDelta[:0] + m.Metric.Histogram.NegativeCount = m.Metric.Histogram.NegativeCount[:0] + m.Metric.Histogram.PositiveSpan = m.Metric.Histogram.PositiveSpan[:0] + m.Metric.Histogram.PositiveDelta = m.Metric.Histogram.PositiveDelta[:0] + m.Metric.Histogram.PositiveCount = m.Metric.Histogram.PositiveCount[:0] + m.Metric.Histogram.Exemplars = m.Metric.Histogram.Exemplars[:0] + } + if m.Metric.Summary != nil { + m.Metric.Summary.SampleCount = 0 + m.Metric.Summary.SampleSum = 0 + m.Metric.Summary.Quantile = m.Metric.Summary.Quantile[:0] + m.Metric.Summary.CreatedTimestamp = nil + } +} + +func (m *MetricStreamingDecoder) GetMetric() { + panic("don't use GetMetric, use Metric directly") +} + +func (m *MetricStreamingDecoder) GetLabel() { + panic("don't use GetLabel, use Label instead") +} + +// Label parses labels into labels scratch builder. Metric name is missing +// given the protobuf metric model and has to be deduced from the metric family name. +// TODO: The method name intentionally hide MetricStreamingDecoder.Metric.Label +// field to avoid direct use (it's not parsed). In future generator will generate +// structs tailored for streaming decoding. +func (m *MetricStreamingDecoder) Label(b *labels.ScratchBuilder) error { + for _, l := range m.labels { + if err := parseLabel(m.mData[l.start:l.end], b); err != nil { + return err + } + } + return nil +} + +// parseLabel is essentially LabelPair.Unmarshal but directly adding into scratch builder +// and reusing strings. 
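+// It walks the LabelPair wire format by hand: one tag varint per field, then a
+// length-delimited name (field 1) and value (field 2). The name is validated
+// with model.LabelName and the value as UTF-8 before the pair is added.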
+func parseLabel(dAtA []byte, b *labels.ScratchBuilder) error { + var name, value string + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + name = yoloString(dAtA[iNdEx:postIndex]) + if !model.LabelName(name).IsValid() { + return fmt.Errorf("invalid label name: %s", name) + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + value = yoloString(dAtA[iNdEx:postIndex]) + if !utf8.ValidString(value) { + return fmt.Errorf("invalid label value: %s", value) + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { + return io.ErrUnexpectedEOF + } + b.Add(name, value) + return nil +} + +func yoloString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +type pos struct { + start, end int +} + +func (m *Metric) unmarshalWithoutLabels(p *MetricStreamingDecoder, dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + p.labels = append(p.labels, pos{start: iNdEx, end: postIndex}) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Summary == nil { + m.Summary = &Summary{} + } + if err := m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Untyped == nil { + m.Untyped = &Untyped{} + } + if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } 
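+			// Field 6 is a plain varint: accumulate 7 bits per byte until a byte
+			// without the continuation bit (0x80) ends the value.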
+ m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *MetricFamily) unmarshalWithoutMetrics(buf *MetricStreamingDecoder, dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: MetricFamily: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + buf.metrics = append(buf.metrics, pos{start: iNdEx, end: postIndex}) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go index 25fa0d4035..4434c525fc 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go @@ -196,6 +196,9 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram } func spansToSpansProto(s []histogram.Span) []BucketSpan { + if len(s) == 0 { + return nil + } spans := make([]BucketSpan, len(s)) for i := 0; i < len(s); i++ { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go index 3420d20e25..1419de217e 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go @@ -6,11 +6,12 @@ package writev2 import ( encoding_binary "encoding/binary" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index 93883daa13..2f5dc77350 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -402,10 +402,13 @@ type Histogram struct { ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` // timestamp is in ms format, see model/timestamp/timestamp.go for // conversion from time.Time to Prometheus timestamp. - Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // custom_values are not part of the specification, DO NOT use in remote write clients. + // Used only for converting from OpenTelemetry to Prometheus internally. + CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } @@ -588,6 +591,13 @@ func (m *Histogram) GetTimestamp() int64 { return 0 } +func (m *Histogram) GetCustomValues() []float64 { + if m != nil { + return m.CustomValues + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*Histogram) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1146,76 +1156,77 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 1092 bytes of a gzipped FileDescriptorProto + // 1114 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, - 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0xd9, 0xa0, 0x71, 0x54, 0x16, 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, - 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, - 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, - 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, - 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, - 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, - 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, - 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, - 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, - 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, - 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, - 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, - 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, - 
0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, - 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, - 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, - 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, - 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, - 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, - 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, - 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, - 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, - 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, - 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, - 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, - 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, - 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, - 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, - 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, - 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0, - 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, - 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, - 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, - 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, - 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, - 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, - 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, - 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, - 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, - 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 0x27, 0xa5, - 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, - 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, - 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, - 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, - 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, - 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, - 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, - 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, - 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, - 0x38, 0xf6, 0x4d, 0xd0, 
0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, - 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, - 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, - 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, - 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, - 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, - 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, - 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, - 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, - 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, - 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, - 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, - 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, - 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, - 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, - 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, - 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, - 0x13, 0x09, 0x00, 0x00, + 0xb2, 0x92, 0xdb, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0xc8, 0x65, 0xfb, 0x02, 0x45, 0xe1, 0xab, 0x5e, 0xf6, 0x11, + 0x8a, 0x1d, 0x92, 0x22, 0x15, 0xa7, 0x40, 0xd3, 0xbb, 0x9d, 0x6f, 0xbe, 0x99, 0xf9, 0xb8, 0x3b, + 0x3b, 0x4b, 0xa8, 0xc9, 0x55, 0xc4, 0x45, 0x27, 0x8a, 0x43, 0x19, 0x12, 0x88, 0xe2, 0xd0, 0xe7, + 0x72, 0xce, 0x97, 0xe2, 0xfe, 0xce, 0x2c, 0x9c, 0x85, 0x08, 0xef, 0xa9, 0x55, 0xc2, 0x70, 0x7f, + 0xd6, 0xa1, 0xd9, 0xe3, 0x32, 0xf6, 0x26, 0x3d, 0x2e, 0xd9, 0x94, 0x49, 0x46, 0x9e, 0x40, 0x49, + 0xe5, 0x70, 0xb4, 0x96, 0xd6, 0x6e, 0xee, 0x3f, 0xea, 0xe4, 0x39, 0x3a, 0x9b, 0xcc, 0xd4, 0x1c, + 0xad, 0x22, 0x4e, 0x31, 0x84, 0x7c, 0x0a, 0xc4, 0x47, 0x6c, 0x7c, 0xc5, 0x7c, 0x6f, 0xb1, 0x1a, + 0x07, 0xcc, 0xe7, 0x8e, 0xde, 0xd2, 0xda, 0x16, 0xb5, 0x13, 0xcf, 0x31, 0x3a, 0xfa, 0xcc, 0xe7, + 0x84, 0x40, 0x69, 0xce, 0x17, 0x91, 0x53, 0x42, 0x3f, 0xae, 0x15, 0xb6, 0x0c, 0x3c, 0xe9, 0x94, + 0x13, 0x4c, 0xad, 0xdd, 0x15, 0x40, 0x5e, 0x89, 0xd4, 0xa0, 0x72, 0xd1, 0xff, 0xba, 0x3f, 0xf8, + 0xb6, 0x6f, 0x6f, 0x29, 0xe3, 0x68, 0x70, 0xd1, 0x1f, 0x75, 0xa9, 0xad, 0x11, 0x0b, 0xca, 0x27, + 0x07, 0x17, 0x27, 0x5d, 0x5b, 0x27, 0x0d, 0xb0, 0x4e, 0xcf, 0x86, 0xa3, 0xc1, 0x09, 0x3d, 0xe8, + 0xd9, 0x06, 0x21, 0xd0, 0x44, 0x4f, 0x8e, 0x95, 0x54, 0xe8, 0xf0, 0xa2, 0xd7, 0x3b, 0xa0, 0x2f, + 0xec, 0x32, 0xa9, 0x42, 0xe9, 0xac, 0x7f, 0x3c, 0xb0, 0x4d, 0x52, 0x87, 0xea, 0x70, 0x74, 0x30, + 0xea, 0x0e, 0xbb, 0x23, 0xbb, 0xe2, 0x3e, 0x05, 0x73, 0xc8, 0xfc, 0x68, 0xc1, 0xc9, 0x0e, 0x94, + 0x5f, 0xb1, 0xc5, 0x32, 0xd9, 0x16, 0x8d, 0x26, 0x06, 0x79, 0x1f, 0x2c, 0xe9, 0xf9, 0x5c, 0x48, + 0xe6, 0x47, 0xf8, 0x9d, 0x06, 0xcd, 0x01, 0x37, 0x84, 0x6a, 0xf7, 0x9a, 0xfb, 0xd1, 0x82, 0xc5, + 0x64, 0x0f, 0xcc, 0x05, 0xbb, 0xe4, 0x0b, 0xe1, 0x68, 0x2d, 0xa3, 0x5d, 0xdb, 0xdf, 0x2e, 0xee, + 0xeb, 0xb9, 0xf2, 0x1c, 
0x96, 0x5e, 0xff, 0xfe, 0x70, 0x8b, 0xa6, 0xb4, 0xbc, 0xa0, 0xfe, 0x8f, + 0x05, 0x8d, 0x37, 0x0b, 0xfe, 0x55, 0x06, 0xeb, 0xd4, 0x13, 0x32, 0x9c, 0xc5, 0xcc, 0x27, 0x0f, + 0xc0, 0x9a, 0x84, 0xcb, 0x40, 0x8e, 0xbd, 0x40, 0xa2, 0xec, 0xd2, 0xe9, 0x16, 0xad, 0x22, 0x74, + 0x16, 0x48, 0xf2, 0x01, 0xd4, 0x12, 0xf7, 0xd5, 0x22, 0x64, 0x32, 0x29, 0x73, 0xba, 0x45, 0x01, + 0xc1, 0x63, 0x85, 0x11, 0x1b, 0x0c, 0xb1, 0xf4, 0xb1, 0x8e, 0x46, 0xd5, 0x92, 0xdc, 0x03, 0x53, + 0x4c, 0xe6, 0xdc, 0x67, 0x78, 0x6a, 0xdb, 0x34, 0xb5, 0xc8, 0x23, 0x68, 0xfe, 0xc8, 0xe3, 0x70, + 0x2c, 0xe7, 0x31, 0x17, 0xf3, 0x70, 0x31, 0xc5, 0x13, 0xd4, 0x68, 0x43, 0xa1, 0xa3, 0x0c, 0x24, + 0x1f, 0xa5, 0xb4, 0x5c, 0x97, 0x89, 0xba, 0x34, 0x5a, 0x57, 0xf8, 0x51, 0xa6, 0xed, 0x13, 0xb0, + 0x0b, 0xbc, 0x44, 0x60, 0x05, 0x05, 0x6a, 0xb4, 0xb9, 0x66, 0x26, 0x22, 0x8f, 0xa0, 0x19, 0xf0, + 0x19, 0x93, 0xde, 0x2b, 0x3e, 0x16, 0x11, 0x0b, 0x84, 0x53, 0xc5, 0x1d, 0xbe, 0x57, 0xdc, 0xe1, + 0xc3, 0xe5, 0xe4, 0x25, 0x97, 0xc3, 0x88, 0x05, 0xe9, 0x36, 0x37, 0xb2, 0x18, 0x85, 0x09, 0xf2, + 0x31, 0xdc, 0x59, 0x27, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0x75, 0xee, + 0x67, 0x88, 0x6e, 0x10, 0x51, 0x9d, 0x70, 0xa0, 0x65, 0xb4, 0xb5, 0x9c, 0x88, 0xd2, 0x84, 0x92, + 0x15, 0x85, 0xc2, 0x2b, 0xc8, 0xaa, 0xfd, 0x1b, 0x59, 0x59, 0xcc, 0x5a, 0xd6, 0x3a, 0x49, 0x2a, + 0xab, 0x9e, 0xc8, 0xca, 0xe0, 0x5c, 0xd6, 0x9a, 0x98, 0xca, 0x6a, 0x24, 0xb2, 0x32, 0x38, 0x95, + 0xf5, 0x15, 0x40, 0xcc, 0x05, 0x97, 0xe3, 0xb9, 0xda, 0xfd, 0x26, 0xde, 0xf1, 0x87, 0x45, 0x49, + 0xeb, 0xfe, 0xe9, 0x50, 0xc5, 0x3b, 0xf5, 0x02, 0x49, 0xad, 0x38, 0x5b, 0x6e, 0x36, 0xe0, 0x9d, + 0x37, 0x1a, 0x90, 0x7c, 0x08, 0x8d, 0xc9, 0x52, 0xc8, 0xd0, 0x1f, 0x63, 0xbb, 0x0a, 0xc7, 0x46, + 0x11, 0xf5, 0x04, 0xfc, 0x06, 0x31, 0xf7, 0x73, 0xb0, 0xd6, 0xa9, 0x37, 0xaf, 0x73, 0x05, 0x8c, + 0x17, 0xdd, 0xa1, 0xad, 0x11, 0x13, 0xf4, 0xfe, 0xc0, 0xd6, 0xf3, 0x2b, 0x6d, 0x1c, 0x56, 0xa0, + 0x8c, 0x1f, 0x76, 0x58, 0x07, 0xc8, 0x7b, 0xc3, 0x7d, 0x0a, 0x90, 0x6f, 0xa2, 0x6a, 0xcf, 0xf0, + 0xea, 0x4a, 0xf0, 0xa4, 0xdf, 0xb7, 0x69, 0x6a, 0x29, 0x7c, 0xc1, 0x83, 0x99, 0x9c, 0x63, 0x9b, + 0x37, 0x68, 0x6a, 0xb9, 0x7f, 0x6a, 0x00, 0x23, 0xcf, 0xe7, 0x43, 0x1e, 0x7b, 0x5c, 0xbc, 0xfb, + 0x25, 0xdd, 0x87, 0x8a, 0xc0, 0xf9, 0x20, 0x1c, 0x1d, 0x23, 0x48, 0x31, 0x22, 0x19, 0x1d, 0x69, + 0x48, 0x46, 0x24, 0x5f, 0x80, 0xc5, 0xd3, 0xa9, 0x20, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, + 0xc8, 0x48, 0xe3, 0x72, 0x32, 0xf9, 0x12, 0x60, 0x9e, 0x9d, 0x8e, 0x70, 0x4a, 0x18, 0x7a, 0xf7, + 0xad, 0x67, 0x97, 0xc6, 0x16, 0xe8, 0xee, 0x63, 0x28, 0xe3, 0x17, 0xa8, 0x11, 0x8b, 0x63, 0x59, + 0x4b, 0x46, 0xac, 0x5a, 0x6f, 0x0e, 0x1b, 0x2b, 0x1d, 0x36, 0xee, 0x13, 0x30, 0xcf, 0x93, 0xef, + 0x7c, 0xd7, 0x8d, 0x71, 0x7f, 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xf1, + 0xc6, 0xab, 0xf2, 0xe0, 0x56, 0x7c, 0xca, 0xeb, 0x14, 0x5e, 0x93, 0x4c, 0xa8, 0xfe, 0x36, 0xa1, + 0x46, 0x51, 0x68, 0x1b, 0x4a, 0xf8, 0x36, 0x98, 0xa0, 0x77, 0x9f, 0x27, 0x7d, 0xd4, 0xef, 0x3e, + 0x4f, 0xfa, 0x88, 0xaa, 0xf7, 0x40, 0x01, 0xb4, 0x6b, 0x1b, 0xee, 0x2f, 0x9a, 0x6a, 0x3e, 0x36, + 0x55, 0xbd, 0x27, 0xc8, 0xff, 0xa1, 0x22, 0x24, 0x8f, 0xc6, 0xbe, 0x40, 0x5d, 0x06, 0x35, 0x95, + 0xd9, 0x13, 0xaa, 0xf4, 0xd5, 0x32, 0x98, 0x64, 0xa5, 0xd5, 0x9a, 0xbc, 0x07, 0x55, 0x21, 0x59, + 0x2c, 0x15, 0x3b, 0x99, 0xbc, 0x15, 0xb4, 0x7b, 0x82, 0xdc, 0x05, 0x93, 0x07, 0xd3, 0x31, 0x1e, + 0x8a, 0x72, 0x94, 0x79, 0x30, 0xed, 0x09, 0x72, 0x1f, 0xaa, 0xb3, 0x38, 0x5c, 0x46, 0x5e, 0x30, + 0x73, 0xca, 0x2d, 0xa3, 0x6d, 0xd1, 0xb5, 0x4d, 
0x9a, 0xa0, 0x5f, 0xae, 0x70, 0xfa, 0x55, 0xa9, + 0x7e, 0xb9, 0x52, 0xd9, 0x63, 0x16, 0xcc, 0xb8, 0x4a, 0x52, 0x49, 0xb2, 0xa3, 0xdd, 0x13, 0xee, + 0x6f, 0x1a, 0x94, 0x8f, 0xe6, 0xcb, 0xe0, 0x25, 0xd9, 0x85, 0x9a, 0xef, 0x05, 0x63, 0x75, 0xdf, + 0x72, 0xcd, 0x96, 0xef, 0x05, 0xaa, 0x87, 0x7b, 0x02, 0xfd, 0xec, 0x7a, 0xed, 0x4f, 0x1f, 0x24, + 0x9f, 0x5d, 0xa7, 0xfe, 0x4e, 0x7a, 0x08, 0x06, 0x1e, 0xc2, 0xfd, 0xe2, 0x21, 0x60, 0x81, 0x4e, + 0x37, 0x98, 0x84, 0x53, 0x2f, 0x98, 0xe5, 0x27, 0xa0, 0x1e, 0x7a, 0xfc, 0xaa, 0x3a, 0xc5, 0xb5, + 0xfb, 0x0c, 0xaa, 0x19, 0xeb, 0xd6, 0xe5, 0xfd, 0x6e, 0xa0, 0xde, 0xe1, 0x8d, 0xc7, 0x57, 0x27, + 0xff, 0x83, 0x3b, 0xc7, 0xe7, 0x83, 0x83, 0xd1, 0xb8, 0xf0, 0x22, 0xbb, 0x3f, 0x40, 0x03, 0x2b, + 0xf2, 0xe9, 0x7f, 0xbd, 0x7a, 0x7b, 0x60, 0x4e, 0x54, 0x86, 0xec, 0xe6, 0x6d, 0xdf, 0xfa, 0x9a, + 0x2c, 0x20, 0xa1, 0x1d, 0xee, 0xbc, 0xbe, 0xd9, 0xd5, 0x7e, 0xbd, 0xd9, 0xd5, 0xfe, 0xb8, 0xd9, + 0xd5, 0xbe, 0x37, 0x15, 0x3b, 0xba, 0xbc, 0x34, 0xf1, 0x3f, 0xe8, 0xb3, 0xbf, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x8b, 0x63, 0xd6, 0x2e, 0x38, 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -1385,6 +1396,18 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } if m.Timestamp != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) i-- @@ -1397,30 +1420,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.PositiveCounts) > 0 { for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { - f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + f2 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) } i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) i-- dAtA[i] = 0x6a } if len(m.PositiveDeltas) > 0 { - var j2 int - dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + var j3 int + dAtA5 := make([]byte, len(m.PositiveDeltas)*10) for _, num := range m.PositiveDeltas { - x3 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x3 >= 1<<7 { - dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) - j2++ - x3 >>= 7 - } - dAtA4[j2] = uint8(x3) - j2++ + x4 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x4 >= 1<<7 { + dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) + j3++ + x4 >>= 7 + } + dAtA5[j3] = uint8(x4) + j3++ } - i -= j2 - copy(dAtA[i:], dAtA4[:j2]) - i = encodeVarintTypes(dAtA, i, uint64(j2)) + i -= j3 + copy(dAtA[i:], dAtA5[:j3]) + i = encodeVarintTypes(dAtA, i, uint64(j3)) i-- dAtA[i] = 0x62 } @@ -1440,30 +1463,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.NegativeCounts) > 0 { for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { - f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + f6 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) } i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) i-- dAtA[i] = 0x52 } if len(m.NegativeDeltas) > 0 { - var j6 int - dAtA8 := 
make([]byte, len(m.NegativeDeltas)*10) + var j7 int + dAtA9 := make([]byte, len(m.NegativeDeltas)*10) for _, num := range m.NegativeDeltas { - x7 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x7 >= 1<<7 { - dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) - j6++ - x7 >>= 7 - } - dAtA8[j6] = uint8(x7) - j6++ + x8 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x8 >= 1<<7 { + dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) + j7++ + x8 >>= 7 + } + dAtA9[j7] = uint8(x8) + j7++ } - i -= j6 - copy(dAtA[i:], dAtA8[:j6]) - i = encodeVarintTypes(dAtA, i, uint64(j6)) + i -= j7 + copy(dAtA[i:], dAtA9[:j7]) + i = encodeVarintTypes(dAtA, i, uint64(j7)) i-- dAtA[i] = 0x4a } @@ -2133,6 +2156,9 @@ func (m *Histogram) Size() (n int) { if m.Timestamp != 0 { n += 1 + sovTypes(uint64(m.Timestamp)) } + if len(m.CustomValues) > 0 { + n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3248,6 +3274,60 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { break } } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index 61fc1e0143..8bc69d5b10 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -107,6 +107,10 @@ message Histogram { // timestamp is in ms format, see model/timestamp/timestamp.go for // conversion from time.Time to Prometheus timestamp. int64 timestamp = 15; + + // custom_values are not part of the specification, DO NOT use in remote write clients. + // Used only for converting from OpenTelemetry to Prometheus internally. 
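+  // On the wire this is field 16 using the packed fixed64 encoding emitted by
+  // the generated marshalling code in types.pb.go above.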
+ repeated double custom_values = 16; } // A BucketSpan defines a number of consecutive buckets with their diff --git a/vendor/github.com/prometheus/prometheus/promql/durations.go b/vendor/github.com/prometheus/prometheus/promql/durations.go new file mode 100644 index 0000000000..8431fa5bd4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/durations.go @@ -0,0 +1,136 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "fmt" + "math" + "time" + + "github.com/prometheus/prometheus/promql/parser" +) + +// durationVisitor is a visitor that visits a duration expression and calculates the duration. +type durationVisitor struct{} + +func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visitor, error) { + switch n := node.(type) { + case *parser.VectorSelector: + if n.OriginalOffsetExpr != nil { + duration, err := calculateDuration(n.OriginalOffsetExpr, true) + if err != nil { + return nil, err + } + n.OriginalOffset = duration + } + case *parser.MatrixSelector: + if n.RangeExpr != nil { + duration, err := calculateDuration(n.RangeExpr, false) + if err != nil { + return nil, err + } + n.Range = duration + } + case *parser.SubqueryExpr: + if n.OriginalOffsetExpr != nil { + duration, err := calculateDuration(n.OriginalOffsetExpr, true) + if err != nil { + return nil, err + } + n.OriginalOffset = duration + } + if n.StepExpr != nil { + duration, err := calculateDuration(n.StepExpr, false) + if err != nil { + return nil, err + } + n.Step = duration + } + if n.RangeExpr != nil { + duration, err := calculateDuration(n.RangeExpr, false) + if err != nil { + return nil, err + } + n.Range = duration + } + } + return v, nil +} + +// calculateDuration computes the duration from a duration expression. +func calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) { + duration, err := evaluateDurationExpr(expr) + if err != nil { + return 0, err + } + if duration <= 0 && !allowedNegative { + return 0, fmt.Errorf("%d:%d: duration must be greater than 0", expr.PositionRange().Start, expr.PositionRange().End) + } + if duration > 1<<63-1 || duration < -1<<63 { + return 0, fmt.Errorf("%d:%d: duration is out of range", expr.PositionRange().Start, expr.PositionRange().End) + } + return time.Duration(duration*1000) * time.Millisecond, nil +} + +// evaluateDurationExpr recursively evaluates a duration expression to a float64 value. +func evaluateDurationExpr(expr parser.Expr) (float64, error) { + switch n := expr.(type) { + case *parser.NumberLiteral: + return n.Val, nil + case *parser.DurationExpr: + var lhs, rhs float64 + var err error + + if n.LHS != nil { + lhs, err = evaluateDurationExpr(n.LHS) + if err != nil { + return 0, err + } + } + + rhs, err = evaluateDurationExpr(n.RHS) + if err != nil { + return 0, err + } + + switch n.Op { + case parser.ADD: + return lhs + rhs, nil + case parser.SUB: + if n.LHS == nil { + // Unary negative duration expression. 
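+				// A nil LHS only occurs for unary minus; e.g. an RHS of 300 (five
+				// minutes expressed in seconds) yields -300, which calculateDuration
+				// then converts into a negative offset.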
+ return -rhs, nil + } + return lhs - rhs, nil + case parser.MUL: + return lhs * rhs, nil + case parser.DIV: + if rhs == 0 { + return 0, fmt.Errorf("%d:%d: division by zero", expr.PositionRange().Start, expr.PositionRange().End) + } + return lhs / rhs, nil + case parser.MOD: + if rhs == 0 { + return 0, fmt.Errorf("%d:%d: modulo by zero", expr.PositionRange().Start, expr.PositionRange().End) + } + return math.Mod(lhs, rhs), nil + case parser.POW: + return math.Pow(lhs, rhs), nil + default: + return 0, fmt.Errorf("unexpected duration expression operator %q", n.Op) + } + default: + return 0, fmt.Errorf("unexpected duration expression type %T", n) + } +} diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index cf66928201..f1829efdd8 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -489,9 +489,9 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts if err := ng.validateOpts(expr); err != nil { return nil, err } - *pExpr = PreprocessExpr(expr, ts, ts) + *pExpr, err = PreprocessExpr(expr, ts, ts) - return qry, nil + return qry, err } // NewRangeQuery returns an evaluation query for the given time range and with @@ -513,9 +513,9 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - *pExpr = PreprocessExpr(expr, start, end) + *pExpr, err = PreprocessExpr(expr, start, end) - return qry, nil + return qry, err } func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { @@ -558,7 +558,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { var atModifierUsed, negativeOffsetUsed bool var validationErr error - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END { @@ -1137,8 +1137,9 @@ type EvalNodeHelper struct { Out Vector // Caches. - // funcHistogramQuantile for classic histograms. + // funcHistogramQuantile and funcHistogramFraction for classic histograms. signatureToMetricWithBuckets map[string]*metricWithBuckets + nativeHistogramSamples []Sample lb *labels.Builder lblBuf []byte @@ -1161,6 +1162,62 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { } } +// resetHistograms prepares the histogram caches by splitting the given vector into native and classic histograms. +func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annotations.Annotations { + var annos annotations.Annotations + + if enh.signatureToMetricWithBuckets == nil { + enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{} + } else { + for _, v := range enh.signatureToMetricWithBuckets { + v.buckets = v.buckets[:0] + } + } + enh.nativeHistogramSamples = enh.nativeHistogramSamples[:0] + + for _, sample := range inVec { + // We are only looking for classic buckets here. Remember + // the histograms for later treatment. 
+ if sample.H != nil { + enh.nativeHistogramSamples = append(enh.nativeHistogramSamples, sample) + continue + } + + upperBound, err := strconv.ParseFloat( + sample.Metric.Get(model.BucketLabel), 64, + ) + if err != nil { + annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), arg.PositionRange())) + continue + } + enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) + mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)] + if !ok { + sample.Metric = labels.NewBuilder(sample.Metric). + Del(excludedLabels...). + Labels() + mb = &metricWithBuckets{sample.Metric, nil} + enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb + } + mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F}) + } + + for _, sample := range enh.nativeHistogramSamples { + // We have to reconstruct the exact same signature as above for + // a classic histogram, just ignoring any le label. + enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) + if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 { + // At this data point, we have classic histogram + // buckets and a native histogram with the same name and + // labels. Do not evaluate anything. + annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange())) + delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) + continue + } + } + return annos +} + // rangeEval evaluates the given expressions, and then for each step calls // the given funcCall with the values computed for each expression at that // step. The return value is the combination into time series of all the @@ -1548,6 +1605,28 @@ func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr VectorSelector: vs, } for _, s := range mat { + // Set any "NotCounterReset" and "CounterReset" hints in native + // histograms to "UnknownCounterReset" because we might + // otherwise miss a counter reset happening in samples not + // returned by the subquery, or we might over-detect counter + // resets if the sample with a counter reset is returned + // multiple times by a high-res subquery. This intentionally + // does not attempt to be clever (like detecting if we are + // really missing underlying samples or returning underlying + // samples multiple times) because subqueries on counters are + // inherently problematic WRT counter reset handling, so we + // cannot really solve the problem for good. We only want to + // avoid problems that happen due to the explicitly set counter + // reset hints and go back to the behavior we already know from + // float samples. + for i, hp := range s.Histograms { + switch hp.H.CounterResetHint { + case histogram.NotCounterReset, histogram.CounterReset: + h := *hp.H // Shallow copy is sufficient, we only change CounterResetHint. + h.CounterResetHint = histogram.UnknownCounterReset + s.Histograms[i].H = &h + } + } vs.Series = append(vs.Series, NewStorageSeries(s)) } return ms, mat.TotalSamples(), ws @@ -1560,6 +1639,11 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } + + if ev.endTimestamp < ev.startTimestamp { + return Matrix{}, nil + } + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 // Create a new span to help investigate inner evaluation performances. 
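Since PreprocessExpr now returns an error in this diff (duration expressions are resolved during preprocessing and can fail), callers have to adopt the two-value form. A minimal sketch; the preprocess wrapper is illustrative and not part of the patch.

package example

import (
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

// preprocess shows the new two-value form of PreprocessExpr.
func preprocess(q string, start, end time.Time) (parser.Expr, error) {
	expr, err := parser.ParseExpr(q)
	if err != nil {
		return nil, err
	}
	// Duration expressions are evaluated here and may fail (e.g. division by
	// zero or an out-of-range duration), hence the error return.
	return promql.PreprocessExpr(expr, start, end)
}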
@@ -1582,7 +1666,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if e.Op == parser.COUNT_VALUES { valueLabel := param.(*parser.StringLiteral) if !model.LabelName(valueLabel.Val).IsValid() { - ev.errorf("invalid label name %q", valueLabel) + ev.errorf("invalid label name %s", valueLabel) } if !e.Without { sortedGrouping = append(sortedGrouping, valueLabel.Val) @@ -1947,7 +2031,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, case *parser.NumberLiteral: span.SetAttributes(attribute.Float64("value", e.Val)) - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) @@ -2116,7 +2200,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -2357,6 +2441,11 @@ func (ev *evaluator) matrixIterSlice( } } + if mint == maxt { + // Empty range: return the empty slices. + return floats, histograms + } + soughtValueType := it.Seek(maxt) if soughtValueType == chunkenc.ValNone { if it.Err() != nil { @@ -3475,15 +3564,14 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat if err == nil { return nil } - metricName := "" + op := parser.ItemTypeStr[e.Op] pos := e.PositionRange() if errors.Is(err, annotations.PromQLInfo) || errors.Is(err, annotations.PromQLWarning) { return annotations.New().Add(err) } - if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { - return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) - } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { - return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + // TODO(NeerajGartia21): Test the exact annotation output once the testing framework can do so. + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) || errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return annotations.New().Add(annotations.NewIncompatibleBucketLayoutInBinOpWarning(op, pos)) } return nil } @@ -3549,11 +3637,11 @@ func formatDate(t time.Time) string { // unwrapParenExpr does the AST equivalent of removing parentheses around a expression. func unwrapParenExpr(e *parser.Expr) { for { - if p, ok := (*e).(*parser.ParenExpr); ok { - *e = p.Expr - } else { + p, ok := (*e).(*parser.ParenExpr) + if !ok { break } + *e = p.Expr } } @@ -3565,15 +3653,20 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr { } // PreprocessExpr wraps all possible step invariant parts of the given expression with -// StepInvariantExpr. It also resolves the preprocessors. -func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { +// StepInvariantExpr. 
It also resolves the preprocessors and evaluates duration expressions +// into their numeric values. +func PreprocessExpr(expr parser.Expr, start, end time.Time) (parser.Expr, error) { detectHistogramStatsDecoding(expr) + if err := parser.Walk(&durationVisitor{}, expr, nil); err != nil { + return nil, err + } + isStepInvariant := preprocessExprHelper(expr, start, end) if isStepInvariant { - return newStepInvariantExpr(expr) + return newStepInvariantExpr(expr), nil } - return expr + return expr, nil } // preprocessExprHelper wraps the child nodes of the expression @@ -3763,7 +3856,7 @@ func NewHashRatioSampler() *HashRatioSampler { return &HashRatioSampler{} } -func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 { +func (s *HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 { const ( float64MaxUint64 = float64(math.MaxUint64) ) diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 605661e5a0..0662c8d451 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -20,7 +20,6 @@ import ( "math" "slices" "sort" - "strconv" "strings" "time" @@ -59,7 +58,7 @@ import ( type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, }}, nil @@ -187,35 +186,48 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod // not a histogram, and a warning wrapped in an annotation in that case. // Otherwise, it returns the calculated histogram and an empty annotation. func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { - prev := points[0].H - usingCustomBuckets := prev.UsesCustomBuckets() - last := points[len(points)-1].H + var ( + prev = points[0].H + usingCustomBuckets = prev.UsesCustomBuckets() + last = points[len(points)-1].H + annos annotations.Annotations + ) + if last == nil { - return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) + return nil, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } - minSchema := prev.Schema - if last.Schema < minSchema { - minSchema = last.Schema + // We check for gauge type histograms in the loop below, but the loop + // below does not run on the first and last point, so check the first + // and last point now. + if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) } - if last.UsesCustomBuckets() != usingCustomBuckets { - return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + // Null out the 1st sample if there is a counter reset between the 1st + // and 2nd. In this case, we want to ignore any incompatibility in the + // bucket layout of the 1st sample because we do not need to look at it. 
+ if isCounter && len(points) > 1 { + second := points[1].H + if second != nil && second.DetectReset(prev) { + prev = &histogram.FloatHistogram{} + prev.Schema = second.Schema + prev.CustomValues = second.CustomValues + usingCustomBuckets = second.UsesCustomBuckets() + } } - var annos annotations.Annotations - - // We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point, - // so check the first and last point now. - if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { - annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + if last.UsesCustomBuckets() != usingCustomBuckets { + return nil, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) } // First iteration to find out two things: // - What's the smallest relevant schema? // - Are all data points histograms? - // TODO(beorn7): Find a way to check that earlier, e.g. by handing in a - // []FloatPoint and a []HistogramPoint separately. + minSchema := prev.Schema + if last.Schema < minSchema { + minSchema = last.Schema + } for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { @@ -286,46 +298,116 @@ func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, enh.Out, true) + return instantValue(vals, args, enh.Out, true) } // === idelta(node model.ValMatrix) (Vector, Annotations) === func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, enh.Out, false) + return instantValue(vals, args, enh.Out, false) } -func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func instantValue(vals []parser.Value, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { + var ( + samples = vals[0].(Matrix)[0] + metricName = samples.Metric.Get(labels.MetricName) + ss = make([]Sample, 0, 2) + annos annotations.Annotations + ) + // No sense in trying to compute a rate without at least two points. Drop // this Vector element. // TODO: add RangeTooShortWarning - if len(samples.Floats) < 2 { + if len(samples.Floats)+len(samples.Histograms) < 2 { return out, nil } - lastSample := samples.Floats[len(samples.Floats)-1] - previousSample := samples.Floats[len(samples.Floats)-2] + // Add the last 2 float samples if they exist. + for i := max(0, len(samples.Floats)-2); i < len(samples.Floats); i++ { + ss = append(ss, Sample{ + F: samples.Floats[i].F, + T: samples.Floats[i].T, + }) + } - var resultValue float64 - if isRate && lastSample.F < previousSample.F { - // Counter reset. - resultValue = lastSample.F - } else { - resultValue = lastSample.F - previousSample.F + // Add the last 2 histogram samples into their correct position if they exist. + for i := max(0, len(samples.Histograms)-2); i < len(samples.Histograms); i++ { + s := Sample{ + H: samples.Histograms[i].H, + T: samples.Histograms[i].T, + } + switch { + case len(ss) == 0: + ss = append(ss, s) + case len(ss) == 1: + if s.T < ss[0].T { + ss = append([]Sample{s}, ss...) + } else { + ss = append(ss, s) + } + case s.T < ss[0].T: + // s is older than 1st, so discard it. 
+ case s.T > ss[1].T: + // s is newest, so add it as 2nd and make the old 2nd the new 1st. + ss[0] = ss[1] + ss[1] = s + default: + // In all other cases, we just make s the new 1st. + // This establishes a correct order, even in the (irregular) + // case of equal timestamps. + ss[0] = s + } } - sampledInterval := lastSample.T - previousSample.T + resultSample := ss[1] + sampledInterval := ss[1].T - ss[0].T if sampledInterval == 0 { // Avoid dividing by 0. return out, nil } + switch { + case ss[1].H == nil && ss[0].H == nil: + if !isRate || !(ss[1].F < ss[0].F) { + // Gauge, or counter without reset, or counter with NaN value. + resultSample.F = ss[1].F - ss[0].F + } + + // In case of a counter reset, we leave resultSample at + // its current value, which is already ss[1]. + case ss[1].H != nil && ss[0].H != nil: + resultSample.H = ss[1].H.Copy() + // irate should only be applied to counters. + if isRate && (ss[1].H.CounterResetHint == histogram.GaugeType || ss[0].H.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, args.PositionRange())) + } + // idelta should only be applied to gauges. + if !isRate && (ss[1].H.CounterResetHint != histogram.GaugeType || ss[0].H.CounterResetHint != histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, args.PositionRange())) + } + if !isRate || !ss[1].H.DetectReset(ss[0].H) { + _, err := resultSample.H.Sub(ss[0].H) + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return out, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args.PositionRange())) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return out, annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args.PositionRange())) + } + } + resultSample.H.CounterResetHint = histogram.GaugeType + resultSample.H.Compact(0) + default: + // Mix of a float and a histogram. + return out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args.PositionRange())) + } if isRate { // Convert to per-second. - resultValue /= float64(sampledInterval) / 1000 + if resultSample.H == nil { + resultSample.F /= float64(sampledInterval) / 1000 + } else { + resultSample.H.Div(float64(sampledInterval) / 1000) + } } - return append(out, Sample{F: resultValue}), nil + return append(out, resultSample), annos } // Calculate the trend value at the given index i in raw data d. @@ -404,26 +486,37 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions return append(enh.Out, Sample{F: s1}), nil } +// filterFloats filters out histogram samples from the vector in-place. +func filterFloats(v Vector) Vector { + floats := v[:0] + for _, s := range v { + if s.H == nil { + floats = append(floats, s) + } + } + return floats +} + // === sort(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSort(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. 
- byValueSorter := vectorByReverseValueHeap(vals[0].(Vector)) + byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector))) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortDesc(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. - byValueSorter := vectorByValueHeap(vals[0].(Vector)) + byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector))) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === -func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { for _, label := range lbls { @@ -449,7 +542,7 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === -func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { for _, label := range lbls { @@ -496,7 +589,7 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann } // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) minVal := vals[1].(Vector)[0].F maxVal := vals[2].(Vector)[0].F @@ -504,14 +597,14 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === -func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClampMax(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) maxVal := vals[1].(Vector)[0].F return clamp(vec, math.Inf(-1), maxVal, enh) } // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === -func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) minVal := vals[1].(Vector)[0].F return clamp(vec, minVal, math.Inf(+1), enh) @@ -548,12 +641,28 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals 
[]parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - v := vals[0].(Vector) - if len(v) != 1 { +func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + var ( + v = vals[0].(Vector) + value float64 + found bool + ) + + for _, s := range v { + if s.H == nil { + if found { + // More than one float found, return NaN. + return append(enh.Out, Sample{F: math.NaN()}), nil + } + found = true + value = s.F + } + } + // Return the single float if found, otherwise return NaN. + if !found { return append(enh.Out, Sample{F: math.NaN()}), nil } - return append(enh.Out, Sample{F: v[0].F}), nil + return append(enh.Out, Sample{F: value}), nil } func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { @@ -657,14 +766,14 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCountOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return aggrOverTime(vals, enh, func(s Series) float64 { return float64(len(s.Floats) + len(s.Histograms)) }), nil } // === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { el := vals[0].(Matrix)[0] var f FPoint @@ -813,7 +922,7 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva } if len(el.Histograms) > 0 { metricName := el.Metric.Get(labels.MetricName) - annos.Add(annotations.NewHistogramIgnoredInAggregationInfo(metricName, args[0].PositionRange())) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { @@ -822,8 +931,7 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva return append(enh.Out, Sample{F: quantile(q, values)}), annos } -// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func varianceOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] var annos annotations.Annotations if len(samples.Floats) == 0 { @@ -843,33 +951,22 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN mean, cMean = kahanSumInc(delta/count, mean, cMean) aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } - return math.Sqrt((aux + cAux) / count) + variance := (aux + cAux) / count + if varianceToResult == nil { + return variance + } + return varianceToResult(variance) }), annos } +// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return varianceOverTime(vals, args, enh, math.Sqrt) +} + // === stdvar_over_time(Matrix 
parser.ValueTypeMatrix) (Vector, Annotations) === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] - var annos annotations.Annotations - if len(samples.Floats) == 0 { - return enh.Out, nil - } - if len(samples.Histograms) > 0 { - metricName := samples.Metric.Get(labels.MetricName) - annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) - } - return aggrOverTime(vals, enh, func(s Series) float64 { - var count float64 - var mean, cMean float64 - var aux, cAux float64 - for _, f := range s.Floats { - count++ - delta := f.F - (mean + cMean) - mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) - } - return (aux + cAux) / count - }), annos + return varianceOverTime(vals, args, enh, nil) } // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === @@ -889,13 +986,13 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // This function will return 1 if the matrix has at least one element. // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. -func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAbsentOverTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: 1}), nil } // === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === -func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(vals, enh, func(s Series) float64 { +func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(vals, enh, func(_ Series) float64 { return 1 }), nil } @@ -917,126 +1014,126 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6 } // === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Abs), nil } // === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Ceil), nil } // === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Floor), nil } // === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return 
simpleFunc(vals, enh, math.Exp), nil } // === sqrt(Vector VectorNode) (Vector, Annotations) === -func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Sqrt), nil } // === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Log), nil } // === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Log2), nil } // === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Log10), nil } // === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Sin), nil } // === cos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Cos), nil } // === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Tan), nil } // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Asin), nil } // === acos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Acos), nil } // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Atan), nil } // === sinh(Vector parser.ValueTypeVector) (Vector, 
Annotations) === -func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Sinh), nil } // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Cosh), nil } // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Tanh), nil } // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Asinh), nil } // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Acosh), nil } // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Atanh), nil } // === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * math.Pi / 180 }), nil } // === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * 180 / math.Pi }), nil } // === pi() Scalar === -func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{F: math.Pi}}, nil } // === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { switch { case v < 0: @@ -1050,7 +1147,7 @@ func funcSgn(vals []parser.Value, 
args parser.Expressions, enh *EvalNodeHelper) } // === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) for _, el := range vec { if !enh.enableDelayedNameRemoval { @@ -1175,7 +1272,7 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo } // === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1196,7 +1293,7 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN } // === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1217,7 +1314,7 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod } // === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1237,11 +1334,9 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out, nil } -// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - inVec := vals[0].(Vector) - - for _, sample := range inVec { +func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) + for _, sample := range vec { // Skip non-histogram samples. if sample.H == nil { continue @@ -1255,9 +1350,15 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval continue } var val float64 - if bucket.Lower <= 0 && 0 <= bucket.Upper { + switch { + case sample.H.UsesCustomBuckets(): + // Use arithmetic mean in case of custom buckets. + val = (bucket.Upper + bucket.Lower) / 2.0 + case bucket.Lower <= 0 && bucket.Upper >= 0: + // Use zero (effectively the arithmetic mean) in the zero bucket of a standard exponential histogram. val = 0 - } else { + default: + // Use geometric mean in case of standard exponential buckets. 
val = math.Sqrt(bucket.Upper * bucket.Lower) if bucket.Upper < 0 { val = -val @@ -1271,79 +1372,65 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval if !enh.enableDelayedNameRemoval { sample.Metric = sample.Metric.DropMetricName() } + if varianceToResult != nil { + variance = varianceToResult(variance) + } enh.Out = append(enh.Out, Sample{ Metric: sample.Metric, - F: math.Sqrt(variance), + F: variance, DropName: true, }) } return enh.Out, nil } +// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vals, enh, math.Sqrt) +} + // === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - inVec := vals[0].(Vector) +func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return histogramVariance(vals, enh, nil) +} - for _, sample := range inVec { - // Skip non-histogram samples. - if sample.H == nil { - continue - } - mean := sample.H.Sum / sample.H.Count - var variance, cVariance float64 - it := sample.H.AllBucketIterator() - for it.Next() { - bucket := it.At() - if bucket.Count == 0 { - continue - } - var val float64 - if bucket.Lower <= 0 && 0 <= bucket.Upper { - val = 0 - } else { - val = math.Sqrt(bucket.Upper * bucket.Lower) - if bucket.Upper < 0 { - val = -val - } - } - delta := val - mean - variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) - } - variance += cVariance - variance /= sample.H.Count +// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + lower := vals[0].(Vector)[0].F + upper := vals[1].(Vector)[0].F + inVec := vals[2].(Vector) + + annos := enh.resetHistograms(inVec, args[2]) + + // Deal with the native histograms. + for _, sample := range enh.nativeHistogramSamples { if !enh.enableDelayedNameRemoval { sample.Metric = sample.Metric.DropMetricName() } enh.Out = append(enh.Out, Sample{ Metric: sample.Metric, - F: variance, + F: HistogramFraction(lower, upper, sample.H), DropName: true, }) } - return enh.Out, nil -} -// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - lower := vals[0].(Vector)[0].F - upper := vals[1].(Vector)[0].F - inVec := vals[2].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. - if sample.H == nil { + // Deal with classic histograms that have already been filtered for conflicting native histograms. 
+ for _, mb := range enh.signatureToMetricWithBuckets { + if len(mb.buckets) == 0 { continue } if !enh.enableDelayedNameRemoval { - sample.Metric = sample.Metric.DropMetricName() + mb.metric = mb.metric.DropMetricName() } + enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric, - F: HistogramFraction(lower, upper, sample.H), + Metric: mb.metric, + F: BucketFraction(lower, upper, mb.buckets), DropName: true, }) } - return enh.Out, nil + + return enh.Out, annos } // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === @@ -1355,58 +1442,10 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if math.IsNaN(q) || q < 0 || q > 1 { annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } + annos.Merge(enh.resetHistograms(inVec, args[1])) - if enh.signatureToMetricWithBuckets == nil { - enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{} - } else { - for _, v := range enh.signatureToMetricWithBuckets { - v.buckets = v.buckets[:0] - } - } - - var histogramSamples []Sample - - for _, sample := range inVec { - // We are only looking for classic buckets here. Remember - // the histograms for later treatment. - if sample.H != nil { - histogramSamples = append(histogramSamples, sample) - continue - } - - upperBound, err := strconv.ParseFloat( - sample.Metric.Get(model.BucketLabel), 64, - ) - if err != nil { - annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange())) - continue - } - enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) - mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)] - if !ok { - sample.Metric = labels.NewBuilder(sample.Metric). - Del(excludedLabels...). - Labels() - mb = &metricWithBuckets{sample.Metric, nil} - enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb - } - mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F}) - } - - // Now deal with the native histograms. - for _, sample := range histogramSamples { - // We have to reconstruct the exact same signature as above for - // a classic histogram, just ignoring any le label. - enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) - if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 { - // At this data point, we have classic histogram - // buckets and a native histogram with the same name and - // labels. Do not evaluate anything. - annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange())) - delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) - continue - } - + // Deal with the native histograms. + for _, sample := range enh.nativeHistogramSamples { if !enh.enableDelayedNameRemoval { sample.Metric = sample.Metric.DropMetricName() } @@ -1417,7 +1456,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev }) } - // Now do classic histograms that have already been filtered for conflicting native histograms. + // Deal with classic histograms that have already been filtered for conflicting native histograms. 
for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets) @@ -1441,7 +1480,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev } // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms resets := 0 @@ -1486,7 +1525,7 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcChanges(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms changes := 0 @@ -1543,7 +1582,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } - if !model.LabelNameRE.MatchString(dst) { + if !model.LabelName(dst).IsValid() { panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } @@ -1574,7 +1613,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio } // === Vector(s Scalar) (Vector, Annotations) === -func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcVector(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, @@ -1620,6 +1659,9 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) matrix[i].DropName = el.DropName } } + if matrix.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } return matrix, ws } @@ -1653,56 +1695,56 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDaysInMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDayOfMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Day()) }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDayOfWeek(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Weekday()) }), nil } // === 
day_of_year(v Vector) Scalar === -func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDayOfYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.YearDay()) }), nil } // === hour(v Vector) Scalar === -func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHour(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Hour()) }), nil } // === minute(v Vector) Scalar === -func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcMinute(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Minute()) }), nil } // === month(v Vector) Scalar === -func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Month()) }), nil } // === year(v Vector) Scalar === -func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Year()) }), nil @@ -1811,16 +1853,7 @@ func (s vectorByValueHeap) Len() int { } func (s vectorByValueHeap) Less(i, j int) bool { - // We compare histograms based on their sum of observations. - // TODO(beorn7): Is that what we want? vi, vj := s[i].F, s[j].F - if s[i].H != nil { - vi = s[i].H.Sum - } - if s[j].H != nil { - vj = s[j].H.Sum - } - if math.IsNaN(vi) { return true } @@ -1850,16 +1883,7 @@ func (s vectorByReverseValueHeap) Len() int { } func (s vectorByReverseValueHeap) Less(i, j int) bool { - // We compare histograms based on their sum of observations. - // TODO(beorn7): Is that what we want? 
vi, vj := s[i].F, s[j].F - if s[i].H != nil { - vi = s[i].H.Sum - } - if s[j].H != nil { - vj = s[j].H.Sum - } - if math.IsNaN(vi) { return true } diff --git a/vendor/github.com/prometheus/prometheus/promql/info.go b/vendor/github.com/prometheus/prometheus/promql/info.go index 3fe9a2ce99..0197330822 100644 --- a/vendor/github.com/prometheus/prometheus/promql/info.go +++ b/vendor/github.com/prometheus/prometheus/promql/info.go @@ -83,7 +83,7 @@ loop: func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints { var nodeTimestamp *int64 var offset int64 - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: if n.Timestamp != nil { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go index 132ef3f0d2..9eebaed9ab 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -19,9 +19,8 @@ import ( "time" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/storage" ) // Node is a generic interface for all nodes in an AST. @@ -111,6 +110,15 @@ type BinaryExpr struct { ReturnBool bool } +// DurationExpr represents a binary expression between two duration expressions. +type DurationExpr struct { + Op ItemType // The operation of the expression. + LHS, RHS Expr // The operands on the respective sides of the operator. + Wrapped bool // Set when the duration is wrapped in parentheses. + + StartPos posrange.Pos // For unary operations, the position of the operator. +} + // Call represents a function call. type Call struct { Func *Function // The function that was called. @@ -125,24 +133,27 @@ type MatrixSelector struct { // if the parser hasn't returned an error. VectorSelector Expr Range time.Duration - - EndPos posrange.Pos + RangeExpr *DurationExpr + EndPos posrange.Pos } // SubqueryExpr represents a subquery. type SubqueryExpr struct { - Expr Expr - Range time.Duration + Expr Expr + Range time.Duration + RangeExpr *DurationExpr // OriginalOffset is the actual offset that was set in the query. - // This never changes. OriginalOffset time.Duration + // OriginalOffsetExpr is the actual offset expression that was set in the query. + OriginalOffsetExpr *DurationExpr // Offset is the offset used during the query execution - // which is calculated using the original offset, at modifier time, + // which is calculated using the original offset, offset expression, at modifier time, // eval time, and subquery offsets in the AST tree. Offset time.Duration Timestamp *int64 StartOrEnd ItemType // Set when @ is used with start() or end() Step time.Duration + StepExpr *DurationExpr EndPos posrange.Pos } @@ -151,6 +162,7 @@ type SubqueryExpr struct { type NumberLiteral struct { Val float64 + Duration bool // Used to format the number as a duration. PosRange posrange.PositionRange } @@ -192,9 +204,10 @@ func (e *StepInvariantExpr) PositionRange() posrange.PositionRange { // VectorSelector represents a Vector selection. type VectorSelector struct { Name string - // OriginalOffset is the actual offset that was set in the query. - // This never changes. + // OriginalOffset is the actual offset calculated from OriginalOffsetExpr. 
OriginalOffset time.Duration + // OriginalOffsetExpr is the actual offset that was set in the query. + OriginalOffsetExpr *DurationExpr // Offset is the offset used during the query execution // which is calculated using the original offset, at modifier time, // eval time, and subquery offsets in the AST tree. @@ -245,6 +258,7 @@ func (e *BinaryExpr) Type() ValueType { return ValueTypeVector } func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() } +func (e *DurationExpr) Type() ValueType { return ValueTypeScalar } func (*AggregateExpr) PromQLExpr() {} func (*BinaryExpr) PromQLExpr() {} @@ -257,6 +271,7 @@ func (*StringLiteral) PromQLExpr() {} func (*UnaryExpr) PromQLExpr() {} func (*VectorSelector) PromQLExpr() {} func (*StepInvariantExpr) PromQLExpr() {} +func (*DurationExpr) PromQLExpr() {} // VectorMatchCardinality describes the cardinality relationship // of two Vectors in a binary operation. @@ -439,6 +454,16 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange { return mergeRanges(e.LHS, e.RHS) } +func (e *DurationExpr) PositionRange() posrange.PositionRange { + if e.LHS == nil { + return posrange.PositionRange{ + Start: e.StartPos, + End: e.RHS.PositionRange().End, + } + } + return mergeRanges(e.LHS, e.RHS) +} + func (e *Call) PositionRange() posrange.PositionRange { return e.PosRange } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index cdb4532d3b..de9234589c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -186,7 +186,7 @@ START_METRIC_SELECTOR %type int %type uint %type number series_value signed_number signed_or_unsigned_number -%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector +%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr %start start @@ -235,6 +235,7 @@ expr : | unary_expr | vector_selector | step_invariant_expr + | duration_expr ; /* @@ -433,23 +434,35 @@ paren_expr : LEFT_PAREN expr RIGHT_PAREN * Offset modifiers. */ -offset_expr: expr OFFSET number_duration_literal +positive_duration_expr : duration_expr { - numLit, _ := $3.(*NumberLiteral) - dur := time.Duration(numLit.Val * 1000) * time.Millisecond - yylex.(*parser).addOffset($1, dur) + if numLit, ok := $1.(*NumberLiteral); ok { + if numLit.Val <= 0 { + yylex.(*parser).addParseErrf(numLit.PositionRange(), "duration must be greater than 0") + $$ = &NumberLiteral{Val: 0} // Return 0 on error. 
+ break + } + $$ = $1 + break + } $$ = $1 } - | expr OFFSET SUB number_duration_literal + ; + +offset_expr: expr OFFSET duration_expr { - numLit, _ := $4.(*NumberLiteral) - dur := time.Duration(numLit.Val * 1000) * time.Millisecond - yylex.(*parser).addOffset($1, -dur) + if numLit, ok := $3.(*NumberLiteral); ok { + yylex.(*parser).addOffset($1, time.Duration(numLit.Val*1000)*time.Millisecond) $$ = $1 + break + } + yylex.(*parser).addOffsetExpr($1, $3.(*DurationExpr)) + $$ = $1 } | expr OFFSET error { yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 } ; + /* * @ modifiers. */ @@ -474,7 +487,7 @@ at_modifier_preprocessors: START | END; * Subquery and range selectors. */ -matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET +matrix_selector : expr LEFT_BRACKET positive_duration_expr RIGHT_BRACKET { var errMsg string vs, ok := $1.(*VectorSelector) @@ -491,41 +504,60 @@ matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET yylex.(*parser).addParseErrf(errRange, "%s", errMsg) } - numLit, _ := $3.(*NumberLiteral) + var rangeNl time.Duration + if numLit, ok := $3.(*NumberLiteral); ok { + rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond + } + rangeExpr, _ := $3.(*DurationExpr) $$ = &MatrixSelector{ VectorSelector: $1.(Expr), - Range: time.Duration(numLit.Val * 1000) * time.Millisecond, + Range: rangeNl, + RangeExpr: rangeExpr, EndPos: yylex.(*parser).lastClosing, } } ; -subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET +subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_duration_expr RIGHT_BRACKET { - numLitRange, _ := $3.(*NumberLiteral) - numLitStep, _ := $5.(*NumberLiteral) + var rangeNl time.Duration + var stepNl time.Duration + if numLit, ok := $3.(*NumberLiteral); ok { + rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond + } + rangeExpr, _ := $3.(*DurationExpr) + if numLit, ok := $5.(*NumberLiteral); ok { + stepNl = time.Duration(numLit.Val*1000)*time.Millisecond + } + stepExpr, _ := $5.(*DurationExpr) $$ = &SubqueryExpr{ Expr: $1.(Expr), - Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond, - Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond, + Range: rangeNl, + RangeExpr: rangeExpr, + Step: stepNl, + StepExpr: stepExpr, EndPos: $6.Pos + 1, } } - | expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET - { - numLitRange, _ := $3.(*NumberLiteral) - $$ = &SubqueryExpr{ - Expr: $1.(Expr), - Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond, - Step: 0, - EndPos: $5.Pos + 1, - } - } - | expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error + | expr LEFT_BRACKET positive_duration_expr COLON RIGHT_BRACKET + { + var rangeNl time.Duration + if numLit, ok := $3.(*NumberLiteral); ok { + rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond + } + rangeExpr, _ := $3.(*DurationExpr) + $$ = &SubqueryExpr{ + Expr: $1.(Expr), + Range: rangeNl, + RangeExpr: rangeExpr, + EndPos: $5.Pos + 1, + } + } + | expr LEFT_BRACKET positive_duration_expr COLON positive_duration_expr error { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 } - | expr LEFT_BRACKET number_duration_literal COLON error + | expr LEFT_BRACKET positive_duration_expr COLON error { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 } - | expr LEFT_BRACKET number_duration_literal error + | expr LEFT_BRACKET positive_duration_expr error { yylex.(*parser).unexpected("subquery or 
range", "\":\" or \"]\""); $$ = $1 } | expr LEFT_BRACKET error { yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 } @@ -930,6 +962,7 @@ number_duration_literal : NUMBER $$ = &NumberLiteral{ Val: dur.Seconds(), PosRange: $1.PositionRange(), + Duration: true, } } ; @@ -997,4 +1030,105 @@ maybe_grouping_labels: /* empty */ { $$ = nil } | grouping_labels ; +/* + * Duration expressions. + */ + +duration_expr : number_duration_literal + { + nl := $1.(*NumberLiteral) + if nl.Val > 1<<63/1e9 || nl.Val < -(1<<63)/1e9 { + yylex.(*parser).addParseErrf(nl.PosRange, "duration out of range") + $$ = &NumberLiteral{Val: 0} + break + } + $$ = nl + } + | unary_op duration_expr %prec MUL + { + switch expr := $2.(type) { + case *NumberLiteral: + if $1.Typ == SUB { + expr.Val *= -1 + } + if expr.Val > 1<<63/1e9 || expr.Val < -(1<<63)/1e9 { + yylex.(*parser).addParseErrf($1.PositionRange(), "duration out of range") + $$ = &NumberLiteral{Val: 0} + break + } + expr.PosRange.Start = $1.Pos + $$ = expr + break + case *DurationExpr: + if $1.Typ == SUB { + $$ = &DurationExpr{ + Op: SUB, + RHS: expr, + StartPos: $1.Pos, + } + break + } + $$ = expr + break + default: + yylex.(*parser).addParseErrf($1.PositionRange(), "expected number literal or duration expression") + $$ = &NumberLiteral{Val: 0} + break + } + } + | duration_expr ADD duration_expr + { + yylex.(*parser).experimentalDurationExpr($1.(Expr)) + $$ = &DurationExpr{Op: ADD, LHS: $1.(Expr), RHS: $3.(Expr)} + } + | duration_expr SUB duration_expr + { + yylex.(*parser).experimentalDurationExpr($1.(Expr)) + $$ = &DurationExpr{Op: SUB, LHS: $1.(Expr), RHS: $3.(Expr)} + } + | duration_expr MUL duration_expr + { + yylex.(*parser).experimentalDurationExpr($1.(Expr)) + $$ = &DurationExpr{Op: MUL, LHS: $1.(Expr), RHS: $3.(Expr)} + } + | duration_expr DIV duration_expr + { + yylex.(*parser).experimentalDurationExpr($1.(Expr)) + if nl, ok := $3.(*NumberLiteral); ok && nl.Val == 0 { + yylex.(*parser).addParseErrf($2.PositionRange(), "division by zero") + $$ = &NumberLiteral{Val: 0} + break + } + $$ = &DurationExpr{Op: DIV, LHS: $1.(Expr), RHS: $3.(Expr)} + } + | duration_expr MOD duration_expr + { + yylex.(*parser).experimentalDurationExpr($1.(Expr)) + if nl, ok := $3.(*NumberLiteral); ok && nl.Val == 0 { + yylex.(*parser).addParseErrf($2.PositionRange(), "modulo by zero") + $$ = &NumberLiteral{Val: 0} + break + } + $$ = &DurationExpr{Op: MOD, LHS: $1.(Expr), RHS: $3.(Expr)} + } + | duration_expr POW duration_expr + { + yylex.(*parser).experimentalDurationExpr($1.(Expr)) + $$ = &DurationExpr{Op: POW, LHS: $1.(Expr), RHS: $3.(Expr)} + } + | paren_duration_expr + ; + +paren_duration_expr : LEFT_PAREN duration_expr RIGHT_PAREN + { + yylex.(*parser).experimentalDurationExpr($2.(Expr)) + if durationExpr, ok := $2.(*DurationExpr); ok { + durationExpr.Wrapped = true + $$ = durationExpr + break + } + $$ = $2 + } + ; + %% diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index 78d5e15245..8c84b42f14 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -251,431 +251,455 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 37, - 1, 141, - 10, 141, - 24, 141, + 1, 142, + 10, 142, + 24, 142, -2, 0, - -1, 61, - 2, 184, - 15, 184, - 79, 184, - 85, 184, - -2, 102, - -1, 62, + -1, 63, 2, 185, 15, 185, 79, 185, 85, 185, -2, 103, - -1, 
63, + -1, 64, 2, 186, 15, 186, 79, 186, 85, 186, - -2, 105, - -1, 64, + -2, 104, + -1, 65, 2, 187, 15, 187, 79, 187, 85, 187, -2, 106, - -1, 65, + -1, 66, 2, 188, 15, 188, 79, 188, 85, 188, -2, 107, - -1, 66, + -1, 67, 2, 189, 15, 189, 79, 189, 85, 189, - -2, 112, - -1, 67, + -2, 108, + -1, 68, 2, 190, 15, 190, 79, 190, 85, 190, - -2, 114, - -1, 68, + -2, 113, + -1, 69, 2, 191, 15, 191, 79, 191, 85, 191, - -2, 116, - -1, 69, + -2, 115, + -1, 70, 2, 192, 15, 192, 79, 192, 85, 192, -2, 117, - -1, 70, + -1, 71, 2, 193, 15, 193, 79, 193, 85, 193, -2, 118, - -1, 71, + -1, 72, 2, 194, 15, 194, 79, 194, 85, 194, -2, 119, - -1, 72, + -1, 73, 2, 195, 15, 195, 79, 195, 85, 195, -2, 120, - -1, 73, + -1, 74, 2, 196, 15, 196, 79, 196, 85, 196, - -2, 124, - -1, 74, + -2, 121, + -1, 75, 2, 197, 15, 197, 79, 197, 85, 197, -2, 125, - -1, 204, - 9, 246, - 12, 246, - 13, 246, - 18, 246, - 19, 246, - 25, 246, - 41, 246, - 47, 246, - 48, 246, - 51, 246, - 57, 246, - 62, 246, - 63, 246, - 64, 246, - 65, 246, - 66, 246, - 67, 246, - 68, 246, - 69, 246, - 70, 246, - 71, 246, - 72, 246, - 73, 246, - 74, 246, - 75, 246, - 79, 246, - 83, 246, - 85, 246, - 88, 246, - 89, 246, + -1, 76, + 2, 198, + 15, 198, + 79, 198, + 85, 198, + -2, 126, + -1, 126, + 41, 250, + 42, 250, + 52, 250, + 53, 250, + 57, 250, + -2, 20, + -1, 224, + 9, 247, + 12, 247, + 13, 247, + 18, 247, + 19, 247, + 25, 247, + 41, 247, + 47, 247, + 48, 247, + 51, 247, + 57, 247, + 62, 247, + 63, 247, + 64, 247, + 65, 247, + 66, 247, + 67, 247, + 68, 247, + 69, 247, + 70, 247, + 71, 247, + 72, 247, + 73, 247, + 74, 247, + 75, 247, + 79, 247, + 83, 247, + 85, 247, + 88, 247, + 89, 247, -2, 0, - -1, 205, - 9, 246, - 12, 246, - 13, 246, - 18, 246, - 19, 246, - 25, 246, - 41, 246, - 47, 246, - 48, 246, - 51, 246, - 57, 246, - 62, 246, - 63, 246, - 64, 246, - 65, 246, - 66, 246, - 67, 246, - 68, 246, - 69, 246, - 70, 246, - 71, 246, - 72, 246, - 73, 246, - 74, 246, - 75, 246, - 79, 246, - 83, 246, - 85, 246, - 88, 246, - 89, 246, + -1, 225, + 9, 247, + 12, 247, + 13, 247, + 18, 247, + 19, 247, + 25, 247, + 41, 247, + 47, 247, + 48, 247, + 51, 247, + 57, 247, + 62, 247, + 63, 247, + 64, 247, + 65, 247, + 66, 247, + 67, 247, + 68, 247, + 69, 247, + 70, 247, + 71, 247, + 72, 247, + 73, 247, + 74, 247, + 75, 247, + 79, 247, + 83, 247, + 85, 247, + 88, 247, + 89, 247, -2, 0, } const yyPrivate = 57344 -const yyLast = 803 +const yyLast = 892 var yyAct = [...]int16{ - 154, 338, 336, 157, 343, 230, 39, 196, 280, 44, - 295, 294, 84, 120, 82, 233, 180, 109, 108, 350, - 351, 352, 353, 110, 111, 243, 202, 158, 203, 135, - 112, 249, 361, 6, 333, 329, 113, 332, 232, 204, - 205, 308, 271, 60, 130, 270, 297, 268, 162, 315, - 156, 360, 153, 306, 359, 344, 200, 162, 161, 55, - 245, 246, 222, 115, 247, 116, 107, 161, 269, 54, - 267, 114, 260, 306, 182, 234, 236, 238, 239, 240, - 248, 250, 253, 254, 255, 256, 257, 261, 262, 163, - 122, 235, 237, 241, 242, 244, 251, 252, 192, 328, - 111, 258, 259, 117, 190, 164, 112, 152, 103, 55, - 106, 337, 77, 113, 184, 151, 35, 165, 327, 54, - 175, 191, 169, 172, 183, 185, 167, 189, 168, 2, - 3, 4, 5, 107, 198, 105, 159, 160, 201, 186, - 188, 7, 326, 206, 207, 208, 209, 210, 211, 212, - 213, 214, 215, 216, 217, 218, 219, 220, 199, 194, - 89, 91, 221, 162, 264, 325, 197, 223, 224, 171, - 200, 100, 101, 161, 162, 103, 104, 106, 90, 263, - 233, 324, 170, 162, 161, 323, 362, 322, 321, 274, - 243, 122, 266, 161, 131, 163, 249, 272, 123, 320, - 229, 319, 105, 232, 275, 318, 163, 317, 121, 85, - 316, 164, 163, 292, 293, 163, 265, 296, 129, 83, - 276, 
86, 164, 273, 10, 245, 246, 187, 164, 247, - 88, 164, 86, 50, 79, 36, 298, 260, 1, 78, - 234, 236, 238, 239, 240, 248, 250, 253, 254, 255, - 256, 257, 261, 262, 123, 49, 235, 237, 241, 242, - 244, 251, 252, 181, 121, 182, 258, 259, 128, 48, - 127, 304, 119, 305, 307, 59, 309, 86, 9, 9, - 47, 46, 134, 310, 311, 136, 137, 138, 139, 140, - 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, - 45, 43, 132, 173, 179, 184, 166, 85, 330, 178, - 331, 42, 133, 55, 41, 183, 185, 83, 339, 340, - 341, 335, 177, 54, 342, 81, 346, 345, 348, 347, - 86, 303, 40, 314, 354, 355, 302, 55, 51, 356, - 53, 77, 300, 56, 195, 358, 22, 54, 313, 55, - 174, 301, 227, 57, 8, 312, 226, 357, 37, 54, - 363, 299, 126, 277, 87, 193, 228, 125, 80, 75, - 349, 225, 155, 58, 231, 18, 19, 52, 118, 20, - 124, 0, 0, 0, 0, 76, 0, 0, 0, 0, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 0, 0, 0, 13, 0, 0, - 0, 24, 0, 30, 0, 0, 31, 32, 55, 38, - 107, 53, 77, 0, 56, 279, 0, 22, 54, 0, - 0, 0, 278, 0, 57, 0, 282, 283, 281, 288, - 290, 287, 289, 284, 285, 286, 291, 0, 91, 0, - 75, 0, 0, 0, 0, 0, 18, 19, 100, 101, - 20, 0, 103, 0, 106, 90, 76, 0, 0, 0, - 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 0, 0, 0, 13, 105, - 0, 0, 24, 0, 30, 0, 55, 31, 32, 53, - 77, 0, 56, 334, 0, 22, 54, 0, 0, 0, - 0, 0, 57, 0, 282, 283, 281, 288, 290, 287, - 289, 284, 285, 286, 291, 0, 0, 0, 75, 0, - 0, 0, 0, 0, 18, 19, 0, 0, 20, 0, - 0, 0, 17, 77, 76, 0, 0, 0, 22, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 0, 0, 0, 13, 0, 0, 0, - 24, 0, 30, 0, 0, 31, 32, 18, 19, 0, - 0, 20, 0, 0, 0, 17, 35, 0, 0, 0, - 0, 22, 11, 12, 14, 15, 16, 21, 23, 25, - 26, 27, 28, 29, 33, 34, 0, 0, 0, 13, + 166, 359, 357, 169, 364, 251, 39, 216, 301, 52, + 177, 315, 86, 130, 84, 6, 316, 109, 200, 44, + 145, 117, 116, 61, 118, 195, 109, 295, 119, 170, + 371, 372, 373, 374, 120, 109, 222, 121, 223, 224, + 225, 115, 296, 327, 91, 93, 94, 140, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 297, 105, + 106, 108, 92, 93, 123, 202, 125, 124, 105, 126, + 108, 354, 293, 102, 103, 353, 122, 105, 59, 108, + 92, 358, 110, 113, 127, 329, 107, 175, 292, 349, + 318, 285, 132, 114, 112, 107, 327, 115, 111, 174, + 336, 119, 141, 176, 107, 204, 284, 120, 348, 173, + 220, 2, 3, 4, 5, 203, 205, 161, 291, 179, + 180, 181, 182, 183, 184, 185, 190, 163, 194, 163, + 163, 163, 163, 163, 163, 163, 188, 191, 186, 189, + 187, 290, 243, 121, 218, 365, 289, 79, 221, 382, + 208, 206, 35, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 381, 288, + 219, 380, 7, 10, 241, 242, 347, 346, 345, 244, + 245, 344, 343, 81, 163, 163, 164, 109, 164, 164, + 164, 164, 164, 164, 164, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 342, 132, 287, 212, 91, 93, 94, 139, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 210, 105, + 106, 108, 92, 109, 341, 168, 211, 317, 340, 313, + 314, 339, 174, 164, 164, 294, 178, 338, 337, 50, + 8, 209, 173, 56, 37, 80, 107, 179, 319, 165, + 91, 93, 94, 55, 95, 96, 97, 163, 99, 100, + 101, 102, 103, 104, 175, 105, 106, 108, 92, 248, + 138, 335, 137, 247, 60, 77, 90, 9, 9, 196, + 176, 201, 325, 202, 326, 328, 334, 330, 246, 321, + 113, 78, 107, 333, 331, 332, 56, 36, 110, 113, + 114, 112, 165, 1, 115, 62, 55, 49, 320, 114, + 112, 171, 172, 115, 111, 174, 164, 48, 47, 351, + 46, 352, 144, 204, 45, 173, 43, 383, 77, 360, + 361, 362, 356, 203, 205, 363, 162, 367, 366, 369, + 368, 254, 142, 56, 78, 375, 376, 175, 214, 165, + 377, 264, 174, 55, 192, 217, 379, 270, 133, 220, + 87, 350, 173, 176, 253, 42, 
143, 199, 131, 324, + 85, 384, 198, 41, 323, 77, 286, 136, 207, 40, + 51, 88, 135, 88, 175, 197, 266, 267, 215, 322, + 268, 78, 378, 298, 89, 134, 213, 249, 281, 82, + 176, 255, 257, 259, 260, 261, 269, 271, 274, 275, + 276, 277, 278, 282, 283, 254, 370, 256, 258, 262, + 263, 265, 272, 273, 133, 264, 56, 279, 280, 167, + 87, 270, 165, 252, 131, 250, 55, 53, 253, 128, + 85, 174, 129, 0, 0, 0, 0, 88, 83, 0, + 0, 173, 0, 88, 0, 0, 0, 0, 77, 0, + 266, 267, 0, 0, 268, 0, 0, 0, 0, 0, + 0, 0, 281, 175, 78, 255, 257, 259, 260, 261, + 269, 271, 274, 275, 276, 277, 278, 282, 283, 176, + 0, 256, 258, 262, 263, 265, 272, 273, 0, 0, + 56, 279, 280, 54, 79, 0, 57, 300, 0, 22, + 55, 0, 0, 193, 299, 0, 58, 0, 303, 304, + 302, 309, 311, 308, 310, 305, 306, 307, 312, 0, + 0, 0, 77, 0, 0, 0, 0, 0, 18, 19, + 0, 0, 20, 0, 0, 0, 0, 0, 78, 0, + 0, 0, 0, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 0, 0, 0, + 13, 0, 0, 0, 24, 0, 30, 0, 0, 31, + 32, 56, 38, 109, 54, 79, 0, 57, 355, 0, + 22, 55, 0, 0, 0, 0, 0, 58, 0, 303, + 304, 302, 309, 311, 308, 310, 305, 306, 307, 312, + 91, 93, 0, 77, 0, 0, 0, 0, 0, 18, + 19, 102, 103, 20, 0, 105, 106, 108, 92, 78, + 0, 0, 0, 0, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 0, 0, + 0, 13, 107, 0, 0, 24, 0, 30, 0, 56, + 31, 32, 54, 79, 0, 57, 0, 0, 22, 55, + 0, 0, 0, 0, 0, 58, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 77, 0, 0, 0, 0, 0, 18, 19, 0, + 0, 20, 0, 0, 0, 17, 79, 78, 0, 0, + 0, 22, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 0, 0, 0, 13, 0, 0, 0, 24, 0, 30, 0, 0, 31, 32, - 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 11, 12, 14, 15, 16, - 21, 23, 25, 26, 27, 28, 29, 33, 34, 107, - 0, 0, 13, 0, 0, 0, 24, 176, 30, 0, - 0, 31, 32, 0, 0, 0, 0, 0, 107, 0, - 0, 0, 0, 0, 0, 0, 89, 91, 92, 0, - 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, - 0, 103, 104, 106, 90, 89, 91, 92, 0, 93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 0, - 103, 104, 106, 90, 107, 0, 0, 0, 105, 0, + 18, 19, 0, 0, 20, 0, 0, 0, 17, 35, + 0, 0, 0, 0, 22, 11, 12, 14, 15, 16, + 21, 23, 25, 26, 27, 28, 29, 33, 34, 0, + 0, 0, 13, 0, 0, 0, 24, 0, 30, 0, + 0, 31, 32, 18, 19, 0, 0, 20, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, + 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, + 33, 34, 109, 0, 0, 13, 0, 0, 0, 24, + 0, 30, 0, 0, 31, 32, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 91, + 93, 94, 0, 95, 96, 0, 0, 99, 100, 0, + 102, 103, 104, 0, 105, 106, 108, 92, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 107, 0, 0, 0, 105, 0, 0, - 0, 89, 91, 92, 0, 93, 94, 95, 0, 97, - 98, 99, 100, 101, 102, 0, 103, 104, 106, 90, - 89, 91, 92, 0, 93, 94, 0, 0, 97, 98, - 0, 100, 101, 102, 0, 103, 104, 106, 90, 0, - 0, 0, 0, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 105, + 0, 107, } var yyPact = [...]int16{ - 31, 131, 573, 573, 409, 530, -1000, -1000, -1000, 103, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 13, 162, 746, 746, 582, 703, -1000, -1000, -1000, 139, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 305, -1000, 228, -1000, 654, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 21, 98, -1000, -1000, 487, -1000, 487, 99, + -1000, -1000, -1000, -1000, -1000, 438, -1000, 284, -1000, 173, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 252, -1000, -1000, - 360, -1000, -1000, 266, 214, -1000, -1000, 20, -1000, -49, - -49, -49, -49, -49, -49, -49, -49, -49, -49, -49, - -49, -49, -49, 
-49, -49, 50, 48, 304, 98, -55, - -1000, 167, 167, 328, -1000, 635, 52, -1000, 302, -1000, - -1000, 261, 70, -1000, -1000, 207, -1000, 102, -1000, 96, - 154, 487, -1000, -56, -41, -1000, 487, 487, 487, 487, - 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, - 487, -1000, 100, -1000, -1000, 47, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 39, 39, 350, -1000, -1000, -1000, -1000, - 178, -1000, -1000, 157, -1000, 654, -1000, -1000, 196, -1000, - 45, -1000, -1000, -1000, -1000, -1000, 43, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 16, 171, 163, -1000, -1000, -1000, - 408, 406, 167, 167, 167, 167, 52, 52, 119, 119, - 119, 719, 700, 119, 119, 719, 52, 52, 119, 52, - 406, -1000, 24, -1000, -1000, -1000, 340, -1000, 329, -1000, + -1000, -1000, 41, 22, 128, -1000, -1000, 660, -1000, 660, + 134, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 432, + -1000, -1000, 385, -1000, -1000, 278, 213, -1000, -1000, 23, + -1000, -58, -58, -58, -58, -58, -58, -58, -58, -58, + -58, -58, -58, -58, -58, -58, -58, 344, 233, 244, + 427, 427, 427, 427, 427, 427, 128, -51, -1000, 124, + 124, 501, -1000, 3, 267, 12, -15, -1000, 375, -1000, + -1000, 289, 61, -1000, -1000, 368, -1000, 226, -1000, 211, + 353, 660, -1000, -46, -41, -1000, 660, 660, 660, 660, + 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, + 660, -1000, -1000, -1000, 427, 427, -1000, 127, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 90, 90, 277, -1000, 41, + 258, 258, -15, -15, -15, -15, -1000, -1000, -1000, 423, + -1000, -1000, 84, -1000, 173, -1000, -1000, -1000, 366, -1000, + 144, -1000, -1000, -1000, -1000, -1000, 116, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 62, 46, 1, -1000, -1000, -1000, + 500, 21, 124, 124, 124, 124, 12, 12, 579, 579, + 579, 808, 219, 579, 579, 808, 12, 12, 579, 12, + 21, -15, 267, 68, -1000, -1000, -1000, 297, -1000, 377, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 487, -1000, -1000, -1000, -1000, -1000, - -1000, 34, 34, 15, 34, 40, 40, 331, 32, -1000, - -1000, 204, 201, 199, 195, 193, 182, 181, 179, 175, - 159, 136, -1000, -1000, -1000, -1000, -1000, -1000, 97, -1000, - -1000, -1000, 13, -1000, 654, -1000, -1000, -1000, 34, -1000, - 11, 8, 486, -1000, -1000, -1000, 54, 174, 174, 174, - 39, 41, 41, 54, 41, 54, -73, -1000, -1000, -1000, - -1000, -1000, 34, 34, -1000, -1000, -1000, 34, -1000, -1000, - -1000, -1000, -1000, -1000, 174, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, -1000, 165, - -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 660, -1000, -1000, -1000, -1000, + -1000, -1000, 77, 77, 59, 77, 94, 94, 279, 83, + -1000, -1000, 242, 241, 235, 232, 228, 204, 176, 175, + 172, 171, 170, -1000, -1000, -1000, -1000, -1000, -1000, 87, + -1000, -1000, -1000, 349, -1000, 173, -1000, -1000, -1000, 77, + -1000, 49, 45, 581, -1000, -1000, -1000, 24, 442, 442, + 442, 90, 131, 131, 24, 131, 24, -62, -1000, -1000, + -1000, -1000, -1000, 77, 77, -1000, -1000, -1000, 77, -1000, + -1000, -1000, -1000, -1000, -1000, 442, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 147, -1000, + 316, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 378, 13, 377, 5, 16, 374, 275, 373, 372, - 12, 370, 224, 354, 368, 14, 366, 10, 11, 365, - 364, 7, 
363, 8, 4, 357, 2, 1, 3, 344, - 27, 0, 338, 332, 18, 194, 314, 312, 6, 311, - 303, 17, 302, 43, 301, 9, 300, 282, 281, 280, - 269, 255, 233, 238, 235, + 0, 449, 13, 447, 5, 18, 443, 284, 78, 439, + 12, 426, 173, 250, 409, 14, 407, 16, 11, 406, + 404, 7, 403, 8, 4, 402, 2, 1, 3, 398, + 29, 0, 390, 389, 22, 102, 383, 376, 6, 375, + 364, 21, 352, 23, 336, 19, 334, 332, 330, 328, + 327, 317, 249, 9, 315, 10, 313, 307, } var yyR1 = [...]int8{ - 0, 53, 53, 53, 53, 53, 53, 53, 38, 38, + 0, 56, 56, 56, 56, 56, 56, 56, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, - 33, 33, 33, 33, 34, 34, 36, 36, 36, 36, + 38, 33, 33, 33, 33, 34, 34, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, - 36, 36, 35, 37, 37, 47, 47, 42, 42, 42, - 42, 17, 17, 17, 17, 16, 16, 16, 4, 4, - 4, 39, 41, 41, 40, 40, 40, 48, 46, 46, - 46, 32, 32, 32, 9, 9, 44, 50, 50, 50, - 50, 50, 50, 51, 52, 52, 52, 43, 43, 43, - 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, - 13, 13, 7, 7, 7, 7, 7, 7, 7, 7, + 36, 36, 36, 35, 37, 37, 47, 47, 42, 42, + 42, 42, 17, 17, 17, 17, 16, 16, 16, 4, + 4, 4, 39, 41, 41, 40, 40, 40, 48, 55, + 46, 46, 32, 32, 32, 9, 9, 44, 50, 50, + 50, 50, 50, 50, 51, 52, 52, 52, 43, 43, + 43, 1, 1, 1, 2, 2, 2, 2, 2, 2, + 2, 13, 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, - 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, - 54, 20, 20, 20, 20, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 29, 29, 29, 21, 21, 21, - 21, 22, 22, 22, 23, 23, 23, 23, 23, 23, - 23, 23, 23, 23, 23, 24, 24, 25, 25, 25, - 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 12, 12, 12, + 12, 14, 14, 14, 15, 15, 15, 15, 15, 15, + 15, 57, 20, 20, 20, 20, 19, 19, 19, 19, + 19, 19, 19, 19, 19, 29, 29, 29, 21, 21, + 21, 21, 22, 22, 22, 23, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 24, 24, 25, 25, + 25, 11, 11, 11, 11, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 8, 8, 5, - 5, 5, 5, 45, 45, 28, 28, 30, 30, 31, - 31, 27, 26, 26, 49, 10, 18, 18, + 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, + 5, 5, 5, 5, 45, 45, 28, 28, 30, 30, + 31, 31, 27, 26, 26, 49, 10, 18, 18, 53, + 53, 53, 53, 53, 53, 53, 53, 53, 54, } var yyR2 = [...]int8{ 0, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 3, 3, 2, 2, 2, 2, 4, 4, 4, 4, + 1, 3, 3, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 1, 0, 1, 3, 3, 1, 1, 3, - 3, 3, 4, 2, 1, 3, 1, 2, 1, 1, - 1, 2, 3, 2, 3, 1, 2, 3, 3, 4, - 3, 3, 5, 3, 1, 1, 4, 6, 5, 6, - 5, 4, 3, 2, 2, 1, 1, 3, 4, 2, - 3, 1, 2, 3, 3, 1, 3, 3, 2, 1, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, - 3, 1, 2, 3, 3, 1, 3, 3, 2, 1, - 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, - 1, 3, 5, 5, 1, 1, 1, 4, 3, 3, - 2, 3, 1, 2, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 4, 3, 3, 1, 2, + 4, 4, 4, 1, 0, 1, 3, 3, 1, 1, + 3, 3, 3, 4, 2, 1, 3, 1, 2, 1, + 1, 1, 2, 3, 2, 3, 1, 2, 3, 1, + 3, 3, 3, 5, 3, 1, 1, 4, 6, 5, + 6, 5, 4, 3, 2, 2, 1, 1, 3, 4, + 2, 3, 1, 2, 3, 3, 1, 3, 3, 2, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 3, 4, 2, + 0, 3, 1, 2, 3, 3, 1, 3, 3, 2, + 1, 2, 0, 3, 2, 1, 1, 3, 1, 3, + 4, 1, 3, 5, 5, 1, 1, 1, 4, 3, + 3, 2, 3, 1, 2, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 4, 3, 3, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, - 1, 1, 2, 1, 1, 1, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 1, 1, 1, 0, 1, 1, + 2, 
3, 3, 3, 3, 3, 3, 1, 3, } var yyChk = [...]int16{ - -1000, -53, 98, 99, 100, 101, 2, 10, -13, -7, + -1000, -56, 98, 99, 100, 101, 2, 10, -13, -7, -12, 62, 63, 79, 64, 65, 66, 12, 47, 48, 51, 67, 18, 68, 83, 69, 70, 71, 72, 73, - 85, 88, 89, 74, 75, 13, -54, -13, 10, -38, + 85, 88, 89, 74, 75, 13, -57, -13, 10, -38, -33, -36, -39, -44, -45, -46, -48, -49, -50, -51, - -52, -32, -3, 12, 19, 9, 15, 25, -8, -7, - -43, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 75, 41, 57, 13, -52, -12, - -14, 20, -15, 12, -10, 2, 25, -20, 2, 41, - 59, 42, 43, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 56, 57, 83, 58, 14, -34, -41, - 2, 79, 85, 15, -41, -38, -38, -43, -1, 20, + -52, -32, -53, -3, 12, 19, 9, 15, 25, -8, + -7, -43, -54, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 41, 57, 13, + -52, -12, -14, 20, -15, 12, -10, 2, 25, -20, + 2, 41, 59, 42, 43, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 56, 57, 83, 58, 14, + 41, 57, 53, 42, 52, 56, -34, -41, 2, 79, + 85, 15, -41, -38, -53, -38, -53, -43, -1, 20, -2, 12, -10, 2, 20, 7, 2, 4, 2, 4, 24, -35, -42, -37, -47, 78, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, - -35, -45, 57, 2, -31, -9, 2, -28, -30, 88, - 89, 19, 9, 41, 57, -45, 2, -41, -34, -17, - 15, 2, -17, -40, 22, -38, 22, 20, 7, 2, + -35, -53, 2, -45, -8, 15, -31, -9, 2, -28, + -30, 88, 89, 19, 9, 41, 57, -55, 2, -53, + -53, -53, -53, -53, -53, -53, -41, -34, -17, 15, + 2, -17, -40, 22, -38, 22, 22, 20, 7, 2, -5, 2, 4, 54, 44, 55, -5, 20, -15, 25, 2, 25, 2, -19, 5, -29, -21, 12, -28, -30, 16, -38, 82, 84, 80, 81, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, - -38, -45, 15, -28, -28, 21, 6, 2, -16, 22, - -4, -6, 25, 2, 62, 78, 63, 79, 64, 65, - 66, 80, 81, 12, 82, 47, 48, 51, 67, 18, - 68, 83, 84, 69, 70, 71, 72, 73, 88, 89, - 59, 74, 75, 22, 7, 20, -2, 25, 2, 25, - 2, 26, 26, -30, 26, 41, 57, -22, 24, 17, - -23, 30, 28, 29, 35, 36, 37, 33, 31, 34, - 32, 38, -17, -17, -18, -17, -18, 22, -45, 21, - 2, 22, 7, 2, -38, -27, 19, -27, 26, -27, - -21, -21, 24, 17, 2, 17, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 21, 2, 22, - -4, -27, 26, 26, 17, -23, -26, 57, -27, -31, - -31, -31, -28, -24, 14, -24, -26, -24, -26, -11, - 92, 93, 94, 95, -27, -27, -27, -25, -31, 24, - 21, 2, 21, -31, + -38, -53, -53, 15, -28, -28, 21, 6, 2, -16, + 22, -4, -6, 25, 2, 62, 78, 63, 79, 64, + 65, 66, 80, 81, 12, 82, 47, 48, 51, 67, + 18, 68, 83, 84, 69, 70, 71, 72, 73, 88, + 89, 59, 74, 75, 22, 7, 20, -2, 25, 2, + 25, 2, 26, 26, -30, 26, 41, 57, -22, 24, + 17, -23, 30, 28, 29, 35, 36, 37, 33, 31, + 34, 32, 38, -17, -17, -18, -17, -18, 22, -55, + 21, 2, 22, 7, 2, -38, -27, 19, -27, 26, + -27, -21, -21, 24, 17, 2, 17, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 21, 2, + 22, -4, -27, 26, 26, 17, -23, -26, 57, -27, + -31, -31, -31, -28, -24, 14, -24, -26, -24, -26, + -11, 92, 93, 94, 95, -27, -27, -27, -25, -31, + 24, 21, 2, 21, -31, } var yyDef = [...]int16{ - 0, -2, 129, 129, 0, 0, 7, 6, 1, 129, - 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, - 121, 122, 123, 124, 125, 0, 2, -2, 3, 4, + 0, -2, 130, 130, 0, 0, 7, 6, 1, 130, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 108, 233, 234, 0, 244, 0, 85, - 86, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, 227, 228, 0, 5, 100, - 0, 128, 131, 0, 135, 139, 245, 140, 144, 43, - 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 
43, 43, 43, 43, 0, 0, 0, 0, 22, - 23, 0, 0, 0, 61, 0, 83, 84, 0, 89, - 91, 0, 95, 99, 126, 0, 132, 0, 138, 0, - 143, 0, 42, 47, 48, 44, 0, 0, 0, 0, + 18, 19, 20, 0, 109, 234, 235, 0, 245, 0, + 86, 87, 257, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, 228, 229, 0, + 5, 101, 0, 129, 132, 0, 136, 140, 246, 141, + 145, 44, 44, 44, 44, 44, 44, 44, 44, 44, + 44, 44, 44, 44, 44, 44, 44, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 23, 24, 0, + 0, 0, 62, 0, 20, 84, -2, 85, 0, 90, + 92, 0, 96, 100, 127, 0, 133, 0, 139, 0, + 144, 0, 43, 48, 49, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 68, 0, 70, 71, 0, 73, 239, 240, 74, - 75, 235, 236, 0, 0, 0, 82, 20, 21, 24, - 0, 54, 25, 0, 63, 65, 67, 87, 0, 92, - 0, 98, 229, 230, 231, 232, 0, 127, 130, 133, - 136, 134, 137, 142, 145, 147, 150, 154, 155, 156, - 0, 26, 0, 0, -2, -2, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, - 41, 69, 0, 237, 238, 76, 0, 81, 0, 53, - 56, 58, 59, 60, 198, 199, 200, 201, 202, 203, + 0, 70, 71, 249, 0, 0, 72, 0, 74, 240, + 241, 75, 76, 236, 237, 0, 0, 0, 83, 69, + 251, 252, 253, 254, 255, 256, 21, 22, 25, 0, + 55, 26, 0, 64, 66, 68, 258, 88, 0, 93, + 0, 99, 230, 231, 232, 233, 0, 128, 131, 134, + 137, 135, 138, 143, 146, 148, 151, 155, 156, 157, + 0, 27, 0, 0, -2, -2, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 250, 0, 0, 238, 239, 77, 0, 82, 0, + 54, 57, 59, 60, 61, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 62, 66, 88, 90, 93, 97, 94, - 96, 0, 0, 0, 0, 0, 0, 0, 0, 160, - 162, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 45, 46, 49, 247, 50, 72, 0, 78, - 80, 51, 0, 57, 64, 146, 241, 148, 0, 151, - 0, 0, 0, 158, 163, 159, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 77, 79, 52, - 55, 149, 0, 0, 157, 161, 164, 0, 243, 165, - 166, 167, 168, 169, 0, 170, 171, 172, 173, 174, - 180, 181, 182, 183, 152, 153, 242, 0, 178, 0, - 176, 179, 175, 177, + 224, 225, 226, 227, 63, 67, 89, 91, 94, 98, + 95, 97, 0, 0, 0, 0, 0, 0, 0, 0, + 161, 163, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 46, 47, 50, 248, 51, 73, 0, + 79, 81, 52, 0, 58, 65, 147, 242, 149, 0, + 152, 0, 0, 0, 159, 164, 160, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 78, 80, + 53, 56, 150, 0, 0, 158, 162, 165, 0, 244, + 166, 167, 168, 169, 170, 0, 171, 172, 173, 174, + 175, 181, 182, 183, 184, 153, 154, 243, 0, 179, + 0, 177, 180, 176, 178, } var yyTok1 = [...]int8{ @@ -1060,35 +1084,35 @@ yydefault: { yylex.(*parser).unexpected("", "") } - case 20: + case 21: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node) } - case 21: + case 22: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node) } - case 22: + case 23: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, yyDollar[2].node) } - case 23: + case 24: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("aggregation", "") yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, Expressions{}) } - case 24: + case 25: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &AggregateExpr{ Grouping: yyDollar[2].strings, } } - case 25: + case 26: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &AggregateExpr{ @@ -1096,11 +1120,6 @@ yydefault: Without: true, } } - case 26: - yyDollar = yyS[yypt-4 : yypt+1] - { - yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, 
yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) - } case 27: yyDollar = yyS[yypt-4 : yypt+1] { @@ -1176,14 +1195,19 @@ yydefault: { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } - case 43: + case 42: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) + } + case 44: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, } } - case 44: + case 45: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &BinaryExpr{ @@ -1191,71 +1215,71 @@ yydefault: ReturnBool: true, } } - case 45: + case 46: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings } - case 46: + case 47: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings yyVAL.node.(*BinaryExpr).VectorMatching.On = true } - case 49: + case 50: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 50: + case 51: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 51: + case 52: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.strings = yyDollar[2].strings } - case 52: + case 53: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.strings = yyDollar[2].strings } - case 53: + case 54: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.strings = []string{} } - case 54: + case 55: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "\"(\"") yyVAL.strings = nil } - case 55: + case 56: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val) } - case 56: + case 57: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.strings = []string{yyDollar[1].item.Val} } - case 57: + case 58: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"") yyVAL.strings = yyDollar[1].strings } - case 58: + case 59: yyDollar = yyS[yypt-1 : yypt+1] { if !model.LabelName(yyDollar[1].item.Val).IsValid() { @@ -1263,7 +1287,7 @@ yydefault: } yyVAL.item = yyDollar[1].item } - case 59: + case 60: yyDollar = yyS[yypt-1 : yypt+1] { unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val) @@ -1274,13 +1298,13 @@ yydefault: yyVAL.item.Pos++ yyVAL.item.Val = unquoted } - case 60: + case 61: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } - case 61: + case 62: yyDollar = yyS[yypt-2 : yypt+1] { fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) @@ -1299,78 +1323,87 @@ yydefault: }, } } - case 62: + case 63: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[2].node } - case 63: + case 64: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = Expressions{} } - case 64: + case 65: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } - case 65: + case 66: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } - case 66: + case 67: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas 
not allowed in function call args") yyVAL.node = yyDollar[1].node } - case 67: + case 68: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } - case 68: - yyDollar = yyS[yypt-3 : yypt+1] + case 69: + yyDollar = yyS[yypt-1 : yypt+1] { - numLit, _ := yyDollar[3].node.(*NumberLiteral) - dur := time.Duration(numLit.Val*1000) * time.Millisecond - yylex.(*parser).addOffset(yyDollar[1].node, dur) + if numLit, ok := yyDollar[1].node.(*NumberLiteral); ok { + if numLit.Val <= 0 { + yylex.(*parser).addParseErrf(numLit.PositionRange(), "duration must be greater than 0") + yyVAL.node = &NumberLiteral{Val: 0} // Return 0 on error. + break + } + yyVAL.node = yyDollar[1].node + break + } yyVAL.node = yyDollar[1].node } - case 69: - yyDollar = yyS[yypt-4 : yypt+1] + case 70: + yyDollar = yyS[yypt-3 : yypt+1] { - numLit, _ := yyDollar[4].node.(*NumberLiteral) - dur := time.Duration(numLit.Val*1000) * time.Millisecond - yylex.(*parser).addOffset(yyDollar[1].node, -dur) + if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { + yylex.(*parser).addOffset(yyDollar[1].node, time.Duration(numLit.Val*1000)*time.Millisecond) + yyVAL.node = yyDollar[1].node + break + } + yylex.(*parser).addOffsetExpr(yyDollar[1].node, yyDollar[3].node.(*DurationExpr)) yyVAL.node = yyDollar[1].node } - case 70: + case 71: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("offset", "number or duration") yyVAL.node = yyDollar[1].node } - case 71: + case 72: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) yyVAL.node = yyDollar[1].node } - case 72: + case 73: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item) yyVAL.node = yyDollar[1].node } - case 73: + case 74: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("@", "timestamp") yyVAL.node = yyDollar[1].node } - case 76: + case 77: yyDollar = yyS[yypt-4 : yypt+1] { var errMsg string @@ -1388,61 +1421,80 @@ yydefault: yylex.(*parser).addParseErrf(errRange, "%s", errMsg) } - numLit, _ := yyDollar[3].node.(*NumberLiteral) + var rangeNl time.Duration + if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { + rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond + } + rangeExpr, _ := yyDollar[3].node.(*DurationExpr) yyVAL.node = &MatrixSelector{ VectorSelector: yyDollar[1].node.(Expr), - Range: time.Duration(numLit.Val*1000) * time.Millisecond, + Range: rangeNl, + RangeExpr: rangeExpr, EndPos: yylex.(*parser).lastClosing, } } - case 77: + case 78: yyDollar = yyS[yypt-6 : yypt+1] { - numLitRange, _ := yyDollar[3].node.(*NumberLiteral) - numLitStep, _ := yyDollar[5].node.(*NumberLiteral) + var rangeNl time.Duration + var stepNl time.Duration + if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { + rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond + } + rangeExpr, _ := yyDollar[3].node.(*DurationExpr) + if numLit, ok := yyDollar[5].node.(*NumberLiteral); ok { + stepNl = time.Duration(numLit.Val*1000) * time.Millisecond + } + stepExpr, _ := yyDollar[5].node.(*DurationExpr) yyVAL.node = &SubqueryExpr{ - Expr: yyDollar[1].node.(Expr), - Range: time.Duration(numLitRange.Val*1000) * time.Millisecond, - Step: time.Duration(numLitStep.Val*1000) * time.Millisecond, - EndPos: yyDollar[6].item.Pos + 1, + Expr: yyDollar[1].node.(Expr), + Range: rangeNl, + RangeExpr: rangeExpr, + Step: stepNl, + StepExpr: stepExpr, + EndPos: yyDollar[6].item.Pos + 1, } 
} - case 78: + case 79: yyDollar = yyS[yypt-5 : yypt+1] { - numLitRange, _ := yyDollar[3].node.(*NumberLiteral) + var rangeNl time.Duration + if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { + rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond + } + rangeExpr, _ := yyDollar[3].node.(*DurationExpr) yyVAL.node = &SubqueryExpr{ - Expr: yyDollar[1].node.(Expr), - Range: time.Duration(numLitRange.Val*1000) * time.Millisecond, - Step: 0, - EndPos: yyDollar[5].item.Pos + 1, + Expr: yyDollar[1].node.(Expr), + Range: rangeNl, + RangeExpr: rangeExpr, + EndPos: yyDollar[5].item.Pos + 1, } } - case 79: + case 80: yyDollar = yyS[yypt-6 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } - case 80: + case 81: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\"") yyVAL.node = yyDollar[1].node } - case 81: + case 82: yyDollar = yyS[yypt-4 : yypt+1] { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } - case 82: + case 83: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number or duration") yyVAL.node = yyDollar[1].node } - case 83: + case 84: yyDollar = yyS[yypt-2 : yypt+1] { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { @@ -1455,7 +1507,7 @@ yydefault: yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos} } } - case 84: + case 85: yyDollar = yyS[yypt-2 : yypt+1] { vs := yyDollar[2].node.(*VectorSelector) @@ -1464,7 +1516,7 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 85: + case 86: yyDollar = yyS[yypt-1 : yypt+1] { vs := &VectorSelector{ @@ -1475,14 +1527,14 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 86: + case 87: yyDollar = yyS[yypt-1 : yypt+1] { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 87: + case 88: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1490,7 +1542,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item), } } - case 88: + case 89: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1498,7 +1550,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item), } } - case 89: + case 90: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1506,7 +1558,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item), } } - case 90: + case 91: yyDollar = yyS[yypt-3 : yypt+1] { if yyDollar[1].matchers != nil { @@ -1515,144 +1567,144 @@ yydefault: yyVAL.matchers = yyDollar[1].matchers } } - case 91: + case 92: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } - case 92: + case 93: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } - case 93: + case 94: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } - case 94: + case 95: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } - case 95: + case 96: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item) } - case 96: + case 97: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label matching", 
"string") yyVAL.matcher = nil } - case 97: + case 98: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } - case 98: + case 99: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } - case 99: + case 100: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } - case 100: + case 101: yyDollar = yyS[yypt-2 : yypt+1] { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) yyVAL.labels = b.Labels() } - case 101: + case 102: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.labels = yyDollar[1].labels } - case 126: + case 127: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 127: + case 128: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 128: + case 129: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 129: + case 130: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 130: + case 131: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 131: + case 132: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 132: + case 133: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 133: + case 134: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 134: + case 135: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 135: + case 136: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val} } - case 136: + case 137: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 137: + case 138: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 138: + case 139: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 139: + case 140: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 140: + case 141: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1660,33 +1712,33 @@ yydefault: values: yyDollar[2].series, } } - case 141: + case 142: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 142: + case 143: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 143: + case 144: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 144: + case 145: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 145: + case 146: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 146: + case 147: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1694,12 +1746,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 147: + case 148: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 148: + case 149: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1708,7 +1760,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 149: + case 150: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1718,12 +1770,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 150: + case 151: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 151: + case 152: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1733,7 +1785,7 @@ yydefault: //$1 += $2 } } - case 152: + case 153: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1742,7 +1794,7 @@ yydefault: } yyVAL.series = val } - case 153: + case 154: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1751,7 +1803,7 @@ yydefault: } yyVAL.series = val } - case 154: + case 155: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1759,130 +1811,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 157: + case 158: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 158: + case 159: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 159: + case 160: yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 160: + case 161: yyDollar = yyS[yypt-2 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 161: + case 162: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 162: + case 163: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 163: + case 164: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") } - case 164: + case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["schema"] = yyDollar[3].int } - case 165: + case 166: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["sum"] = yyDollar[3].float } - case 166: + case 167: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["count"] = yyDollar[3].float } - case 167: + case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket"] = yyDollar[3].float } - case 168: + case 169: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } - case 169: + case 170: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } - case 170: + case 171: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } - case 171: + case 172: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["offset"] = yyDollar[3].int } - case 172: + case 173: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } - case 173: + case 174: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_offset"] = yyDollar[3].int } - case 174: + case 175: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item } - case 175: + case 176: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 176: + case 177: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 177: + case 178: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) } - case 178: + case 179: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 233: + case 234: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1890,7 +1942,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 234: + case 235: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1902,14 +1954,15 @@ yydefault: yyVAL.node = &NumberLiteral{ Val: dur.Seconds(), PosRange: yyDollar[1].item.PositionRange(), + Duration: true, } } - case 235: + case 236: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 236: + case 237: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1920,17 +1973,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 237: + case 238: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 238: + case 239: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 241: + case 242: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1939,17 +1992,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 242: + case 243: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 243: + case 244: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 244: + case 245: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1957,7 +2010,7 @@ yydefault: 
PosRange: yyDollar[1].item.PositionRange(), } } - case 245: + case 246: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1966,11 +2019,112 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 246: + case 247: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil } + case 249: + yyDollar = yyS[yypt-1 : yypt+1] + { + nl := yyDollar[1].node.(*NumberLiteral) + if nl.Val > 1<<63/1e9 || nl.Val < -(1<<63)/1e9 { + yylex.(*parser).addParseErrf(nl.PosRange, "duration out of range") + yyVAL.node = &NumberLiteral{Val: 0} + break + } + yyVAL.node = nl + } + case 250: + yyDollar = yyS[yypt-2 : yypt+1] + { + switch expr := yyDollar[2].node.(type) { + case *NumberLiteral: + if yyDollar[1].item.Typ == SUB { + expr.Val *= -1 + } + if expr.Val > 1<<63/1e9 || expr.Val < -(1<<63)/1e9 { + yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "duration out of range") + yyVAL.node = &NumberLiteral{Val: 0} + break + } + expr.PosRange.Start = yyDollar[1].item.Pos + yyVAL.node = expr + break + case *DurationExpr: + if yyDollar[1].item.Typ == SUB { + yyVAL.node = &DurationExpr{ + Op: SUB, + RHS: expr, + StartPos: yyDollar[1].item.Pos, + } + break + } + yyVAL.node = expr + break + default: + yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "expected number literal or duration expression") + yyVAL.node = &NumberLiteral{Val: 0} + break + } + } + case 251: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) + yyVAL.node = &DurationExpr{Op: ADD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} + } + case 252: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) + yyVAL.node = &DurationExpr{Op: SUB, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} + } + case 253: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) + yyVAL.node = &DurationExpr{Op: MUL, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} + } + case 254: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) + if nl, ok := yyDollar[3].node.(*NumberLiteral); ok && nl.Val == 0 { + yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "division by zero") + yyVAL.node = &NumberLiteral{Val: 0} + break + } + yyVAL.node = &DurationExpr{Op: DIV, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} + } + case 255: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) + if nl, ok := yyDollar[3].node.(*NumberLiteral); ok && nl.Val == 0 { + yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "modulo by zero") + yyVAL.node = &NumberLiteral{Val: 0} + break + } + yyVAL.node = &DurationExpr{Op: MOD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} + } + case 256: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) + yyVAL.node = &DurationExpr{Op: POW, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} + } + case 258: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).experimentalDurationExpr(yyDollar[2].node.(Expr)) + if durationExpr, ok := yyDollar[2].node.(*DurationExpr); ok { + durationExpr.Wrapped = true + yyVAL.node = durationExpr + break + } + yyVAL.node = yyDollar[2].node + } } goto yystack /* stack new state and value */ } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go 
b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go index 7210d51b7b..66522f59da 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go @@ -277,6 +277,7 @@ type Lexer struct { braceOpen bool // Whether a { is opened. bracketOpen bool // Whether a [ is opened. gotColon bool // Whether we got a ':' after [ was opened. + gotDuration bool // Whether we got a duration after [ was opened. stringOpen rune // Quote rune of the string currently being read. // series description variables for internal PromQL testing framework as well as in promtool rules unit tests. @@ -429,11 +430,10 @@ func lexStatements(l *Lexer) stateFn { l.emit(EQL) } case r == '!': - if t := l.next(); t == '=' { - l.emit(NEQ) - } else { + if t := l.next(); t != '=' { return l.errorf("unexpected character after '!': %q", t) } + l.emit(NEQ) case r == '<': if t := l.peek(); t == '=' { l.next() @@ -492,7 +492,7 @@ func lexStatements(l *Lexer) stateFn { skipSpaces(l) } l.bracketOpen = true - return lexNumberOrDuration + return lexDurationExpr case r == ']': if !l.bracketOpen { return l.errorf("unexpected right bracket %q", r) @@ -513,7 +513,7 @@ func lexHistogram(l *Lexer) stateFn { l.histogramState = histogramStateNone l.next() l.emit(TIMES) - return lexNumber + return lexValueSequence case histogramStateAdd: l.histogramState = histogramStateNone l.next() @@ -550,6 +550,8 @@ func lexHistogram(l *Lexer) stateFn { return lexNumber case r == '[': l.bracketOpen = true + l.gotColon = false + l.gotDuration = false l.emit(LEFT_BRACKET) return lexBuckets case r == '}' && l.peek() == '}': @@ -1078,3 +1080,64 @@ func isDigit(r rune) bool { func isAlpha(r rune) bool { return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') } + +// lexDurationExpr scans arithmetic expressions within brackets for duration expressions. +func lexDurationExpr(l *Lexer) stateFn { + switch r := l.next(); { + case r == eof: + return l.errorf("unexpected end of input in duration expression") + case r == ']': + l.emit(RIGHT_BRACKET) + l.bracketOpen = false + l.gotColon = false + return lexStatements + case r == ':': + l.emit(COLON) + if !l.gotDuration { + return l.errorf("unexpected colon before duration in duration expression") + } + if l.gotColon { + return l.errorf("unexpected repeated colon in duration expression") + } + l.gotColon = true + return lexDurationExpr + case r == '(': + l.emit(LEFT_PAREN) + l.parenDepth++ + return lexDurationExpr + case r == ')': + l.emit(RIGHT_PAREN) + l.parenDepth-- + if l.parenDepth < 0 { + return l.errorf("unexpected right parenthesis %q", r) + } + return lexDurationExpr + case isSpace(r): + skipSpaces(l) + return lexDurationExpr + case r == '+': + l.emit(ADD) + return lexDurationExpr + case r == '-': + l.emit(SUB) + return lexDurationExpr + case r == '*': + l.emit(MUL) + return lexDurationExpr + case r == '/': + l.emit(DIV) + return lexDurationExpr + case r == '%': + l.emit(MOD) + return lexDurationExpr + case r == '^': + l.emit(POW) + return lexDurationExpr + case isDigit(r) || (r == '.' 
&& isDigit(l.peek())): + l.backup() + l.gotDuration = true + return lexNumberOrDuration + default: + return l.errorf("unexpected character in duration expression: %q", r) + } +} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 9bf27264a8..5cf85ea350 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -39,6 +39,9 @@ var parserPool = sync.Pool{ }, } +// ExperimentalDurationExpr is a flag to enable experimental duration expression parsing. +var ExperimentalDurationExpr bool + type Parser interface { ParseExpr() (Expr, error) Close() @@ -72,7 +75,7 @@ func WithFunctions(functions map[string]*Function) Opt { } // NewParser returns a new parser. -func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexported-return. +func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexported-return p := parserPool.Get().(*parser) p.functions = Functions @@ -881,9 +884,6 @@ func parseDuration(ds string) (time.Duration, error) { if err != nil { return 0, err } - if dur == 0 { - return 0, errors.New("duration must be greater than 0") - } return time.Duration(dur), nil } @@ -939,11 +939,13 @@ func (p *parser) newMetricNameMatcher(value Item) *labels.Matcher { // addOffset is used to set the offset in the generated parser. func (p *parser) addOffset(e Node, offset time.Duration) { var orgoffsetp *time.Duration + var orgoffsetexprp *DurationExpr var endPosp *posrange.Pos switch s := e.(type) { case *VectorSelector: orgoffsetp = &s.OriginalOffset + orgoffsetexprp = s.OriginalOffsetExpr endPosp = &s.PosRange.End case *MatrixSelector: vs, ok := s.VectorSelector.(*VectorSelector) @@ -952,9 +954,11 @@ func (p *parser) addOffset(e Node, offset time.Duration) { return } orgoffsetp = &vs.OriginalOffset + orgoffsetexprp = vs.OriginalOffsetExpr endPosp = &s.EndPos case *SubqueryExpr: orgoffsetp = &s.OriginalOffset + orgoffsetexprp = s.OriginalOffsetExpr endPosp = &s.EndPos default: p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery") @@ -963,7 +967,7 @@ func (p *parser) addOffset(e Node, offset time.Duration) { // it is already ensured by parseDuration func that there never will be a zero offset modifier switch { - case *orgoffsetp != 0: + case *orgoffsetp != 0 || orgoffsetexprp != nil: p.addParseErrf(e.PositionRange(), "offset may not be set multiple times") case orgoffsetp != nil: *orgoffsetp = offset @@ -972,6 +976,45 @@ func (p *parser) addOffset(e Node, offset time.Duration) { *endPosp = p.lastClosing } +// addOffsetExpr is used to set the offset expression in the generated parser. 
+func (p *parser) addOffsetExpr(e Node, expr *DurationExpr) { + var orgoffsetp *time.Duration + var orgoffsetexprp **DurationExpr + var endPosp *posrange.Pos + + switch s := e.(type) { + case *VectorSelector: + orgoffsetp = &s.OriginalOffset + orgoffsetexprp = &s.OriginalOffsetExpr + endPosp = &s.PosRange.End + case *MatrixSelector: + vs, ok := s.VectorSelector.(*VectorSelector) + if !ok { + p.addParseErrf(e.PositionRange(), "ranges only allowed for vector selectors") + return + } + orgoffsetp = &vs.OriginalOffset + orgoffsetexprp = &vs.OriginalOffsetExpr + endPosp = &s.EndPos + case *SubqueryExpr: + orgoffsetp = &s.OriginalOffset + orgoffsetexprp = &s.OriginalOffsetExpr + endPosp = &s.EndPos + default: + p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery") + return + } + + switch { + case *orgoffsetp != 0 || *orgoffsetexprp != nil: + p.addParseErrf(e.PositionRange(), "offset may not be set multiple times") + case orgoffsetexprp != nil: + *orgoffsetexprp = expr + } + + *endPosp = p.lastClosing +} + // setTimestamp is used to set the timestamp from the @ modifier in the generated parser. func (p *parser) setTimestamp(e Node, ts float64) { if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) || @@ -1045,6 +1088,12 @@ func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *posrange.Pos, b return timestampp, preprocp, endPosp, true } +func (p *parser) experimentalDurationExpr(e Expr) { + if !ExperimentalDurationExpr { + p.addParseErrf(e.PositionRange(), "experimental duration expression is not enabled") + } +} + func MustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher { m, err := labels.NewMatcher(mt, name, val) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go index 9870d6da74..568e65eab5 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go @@ -79,6 +79,22 @@ func (e *BinaryExpr) Pretty(level int) string { return fmt.Sprintf("%s\n%s%s%s%s\n%s", e.LHS.Pretty(level+1), indent(level), e.Op, returnBool, matching, e.RHS.Pretty(level+1)) } +func (e *DurationExpr) Pretty(int) string { + var s string + fmt.Println("e.LHS", e.LHS) + fmt.Println("e.RHS", e.RHS) + if e.LHS == nil { + // This is a unary negative duration expression. + s = fmt.Sprintf("%s %s", e.Op, e.RHS.Pretty(0)) + } else { + s = fmt.Sprintf("%s %s %s", e.LHS.Pretty(0), e.Op, e.RHS.Pretty(0)) + } + if e.Wrapped { + s = fmt.Sprintf("(%s)", s) + } + return s +} + func (e *Call) Pretty(level int) string { s := indent(level) if !needsSplit(e) { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go index afe755e7dd..dc22f8fb52 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go @@ -146,6 +146,24 @@ func (node *BinaryExpr) getMatchingStr() string { return matching } +func (node *DurationExpr) String() string { + var expr string + if node.LHS == nil { + // This is a unary negative duration expression. 
+ expr = fmt.Sprintf("%s%s", node.Op, node.RHS) + } else { + expr = fmt.Sprintf("%s %s %s", node.LHS, node.Op, node.RHS) + } + if node.Wrapped { + return fmt.Sprintf("(%s)", expr) + } + return expr +} + +func (node *DurationExpr) ShortString() string { + return node.Op.String() +} + func (node *Call) String() string { return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args) } @@ -159,6 +177,8 @@ func (node *MatrixSelector) atOffset() (string, string) { vecSelector := node.VectorSelector.(*VectorSelector) offset := "" switch { + case vecSelector.OriginalOffsetExpr != nil: + offset = fmt.Sprintf(" offset %s", vecSelector.OriginalOffsetExpr) case vecSelector.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset)) case vecSelector.OriginalOffset < time.Duration(0): @@ -181,21 +201,30 @@ func (node *MatrixSelector) String() string { // Copy the Vector selector before changing the offset vecSelector := *node.VectorSelector.(*VectorSelector) // Do not print the @ and offset twice. - offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd + offsetVal, offsetExprVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd vecSelector.OriginalOffset = 0 + vecSelector.OriginalOffsetExpr = nil vecSelector.Timestamp = nil vecSelector.StartOrEnd = 0 - str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), model.Duration(node.Range), at, offset) + rangeStr := model.Duration(node.Range).String() + if node.RangeExpr != nil { + rangeStr = node.RangeExpr.String() + } + str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), rangeStr, at, offset) - vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, atVal, preproc + vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, offsetExprVal, atVal, preproc return str } func (node *MatrixSelector) ShortString() string { at, offset := node.atOffset() - return fmt.Sprintf("[%s]%s%s", model.Duration(node.Range), at, offset) + rangeStr := model.Duration(node.Range).String() + if node.RangeExpr != nil { + rangeStr = node.RangeExpr.String() + } + return fmt.Sprintf("[%s]%s%s", rangeStr, at, offset) } func (node *SubqueryExpr) String() string { @@ -211,9 +240,13 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string { step := "" if node.Step != 0 { step = model.Duration(node.Step).String() + } else if node.StepExpr != nil { + step = node.StepExpr.String() } offset := "" switch { + case node.OriginalOffsetExpr != nil: + offset = fmt.Sprintf(" offset %s", node.OriginalOffsetExpr) case node.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) case node.OriginalOffset < time.Duration(0): @@ -228,11 +261,21 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string { case node.StartOrEnd == END: at = " @ end()" } - return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset) + rangeStr := model.Duration(node.Range).String() + if node.RangeExpr != nil { + rangeStr = node.RangeExpr.String() + } + return fmt.Sprintf("[%s:%s]%s%s", rangeStr, step, at, offset) } func (node *NumberLiteral) String() string { - return fmt.Sprint(node.Val) + if node.Duration { + if node.Val < 0 { + return fmt.Sprintf("-%s", model.Duration(-node.Val*1e9).String()) + } + return model.Duration(node.Val * 1e9).String() + } + return strconv.FormatFloat(node.Val, 'f', 
-1, 64) } func (node *ParenExpr) String() string { @@ -265,6 +308,8 @@ func (node *VectorSelector) String() string { } offset := "" switch { + case node.OriginalOffsetExpr != nil: + offset = fmt.Sprintf(" offset %s", node.OriginalOffsetExpr) case node.OriginalOffset > time.Duration(0): offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) case node.OriginalOffset < time.Duration(0): diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go index f3af82487c..f21914cb94 100644 --- a/vendor/github.com/prometheus/prometheus/promql/quantile.go +++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go @@ -448,6 +448,84 @@ func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 return (upperRank - lowerRank) / h.Count } +// BucketFraction is a version of HistogramFraction for classic histograms. +func BucketFraction(lower, upper float64, buckets Buckets) float64 { + slices.SortFunc(buckets, func(a, b Bucket) int { + // We don't expect the bucket boundary to be a NaN. + if a.UpperBound < b.UpperBound { + return -1 + } + if a.UpperBound > b.UpperBound { + return +1 + } + return 0 + }) + if !math.IsInf(buckets[len(buckets)-1].UpperBound, +1) { + return math.NaN() + } + buckets = coalesceBuckets(buckets) + + count := buckets[len(buckets)-1].Count + if count == 0 || math.IsNaN(lower) || math.IsNaN(upper) { + return math.NaN() + } + if lower >= upper { + return 0 + } + + var ( + rank, lowerRank, upperRank float64 + lowerSet, upperSet bool + ) + for i, b := range buckets { + lowerBound := math.Inf(-1) + if i > 0 { + lowerBound = buckets[i-1].UpperBound + } + upperBound := b.UpperBound + + interpolateLinearly := func(v float64) float64 { + return rank + (b.Count-rank)*(v-lowerBound)/(upperBound-lowerBound) + } + + if !lowerSet && lowerBound >= lower { + // We have hit the lower value at the lower bucket boundary. + lowerRank = rank + lowerSet = true + } + if !upperSet && lowerBound >= upper { + // We have hit the upper value at the lower bucket boundary. + upperRank = rank + upperSet = true + } + if lowerSet && upperSet { + break + } + if !lowerSet && lowerBound < lower && upperBound > lower { + // The lower value is in this bucket. + lowerRank = interpolateLinearly(lower) + lowerSet = true + } + if !upperSet && lowerBound < upper && upperBound > upper { + // The upper value is in this bucket. + upperRank = interpolateLinearly(upper) + upperSet = true + } + if lowerSet && upperSet { + break + } + rank = b.Count + } + if !lowerSet || lowerRank > count { + lowerRank = count + } + if !upperSet || upperRank > count { + upperRank = count + } + + return (upperRank - lowerRank) / count +} + // coalesceBuckets merges buckets with the same upper bound. // // The input buckets must be sorted. 
diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go index 9ad9aab093..63391bc7a1 100644 --- a/vendor/github.com/prometheus/prometheus/rules/group.go +++ b/vendor/github.com/prometheus/prometheus/rules/group.go @@ -23,21 +23,19 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.uber.org/atomic" - "github.com/prometheus/prometheus/promql/parser" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/common/promslog" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -1110,9 +1108,6 @@ func buildDependencyMap(rules []Rule) dependencyMap { return dependencies } - inputs := make(map[string][]Rule, len(rules)) - outputs := make(map[string][]Rule, len(rules)) - var indeterminate bool for _, rule := range rules { @@ -1120,26 +1115,46 @@ func buildDependencyMap(rules []Rule) dependencyMap { break } - name := rule.Name() - outputs[name] = append(outputs[name], rule) - - parser.Inspect(rule.Query(), func(node parser.Node, path []parser.Node) error { + parser.Inspect(rule.Query(), func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { + // Find the name matcher for the rule. + var nameMatcher *labels.Matcher + if n.Name != "" { + nameMatcher = labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, n.Name) + } else { + for _, m := range n.LabelMatchers { + if m.Name == model.MetricNameLabel { + nameMatcher = m + break + } + } + } + // A wildcard metric expression means we cannot reliably determine if this rule depends on any other, // which means we cannot safely run any rules concurrently. - if n.Name == "" && len(n.LabelMatchers) > 0 { + if nameMatcher == nil { indeterminate = true return nil } // Rules which depend on "meta-metrics" like ALERTS and ALERTS_FOR_STATE will have undefined behaviour // if they run concurrently. - if n.Name == alertMetricName || n.Name == alertForStateMetricName { + if nameMatcher.Matches(alertMetricName) || nameMatcher.Matches(alertForStateMetricName) { indeterminate = true return nil } - inputs[n.Name] = append(inputs[n.Name], rule) + // Find rules which depend on the output of this rule. + for _, other := range rules { + if other == rule { + continue + } + + otherName := other.Name() + if nameMatcher.Matches(otherName) { + dependencies[other] = append(dependencies[other], rule) + } + } } return nil }) @@ -1149,13 +1164,5 @@ func buildDependencyMap(rules []Rule) dependencyMap { return nil } - for output, outRules := range outputs { - for _, outRule := range outRules { - if inRules, found := inputs[output]; found && len(inRules) > 0 { - dependencies[outRule] = append(dependencies[outRule], inRules...) 
- } - } - } - return dependencies } diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index b1d3e8e3d6..a38be82ebe 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -324,16 +324,16 @@ func (m *Manager) LoadGroups( rules := make([]Rule, 0, len(rg.Rules)) for _, r := range rg.Rules { - expr, err := m.opts.GroupLoader.Parse(r.Expr.Value) + expr, err := m.opts.GroupLoader.Parse(r.Expr) if err != nil { return nil, []error{fmt.Errorf("%s: %w", fn, err)} } mLabels := FromMaps(rg.Labels, r.Labels) - if r.Alert.Value != "" { + if r.Alert != "" { rules = append(rules, NewAlertingRule( - r.Alert.Value, + r.Alert, expr, time.Duration(r.For), time.Duration(r.KeepFiringFor), @@ -347,7 +347,7 @@ func (m *Manager) LoadGroups( continue } rules = append(rules, NewRecordingRule( - r.Record.Value, + r.Record, expr, mLabels, )) @@ -429,7 +429,7 @@ type Sender interface { // SendAlerts implements the rules.NotifyFunc for a Notifier. func SendAlerts(s Sender, externalURL string) NotifyFunc { - return func(ctx context.Context, expr string, alerts ...*Alert) { + return func(_ context.Context, expr string, alerts ...*Alert) { var res []*notifier.Alert for _, alert := range alerts { @@ -508,7 +508,7 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle } } -func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool { +func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool { return c.sema.TryAcquire(1) } @@ -561,7 +561,7 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) return false } -func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { +func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, _ *Group) []ConcurrentRules { return nil } diff --git a/vendor/github.com/prometheus/prometheus/scrape/clientprotobuf.go b/vendor/github.com/prometheus/prometheus/scrape/clientprotobuf.go index e632035b40..6dc22c959f 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/clientprotobuf.go +++ b/vendor/github.com/prometheus/prometheus/scrape/clientprotobuf.go @@ -18,7 +18,6 @@ import ( "encoding/binary" "github.com/gogo/protobuf/proto" - // Intentionally using client model to simulate client in tests. 
dto "github.com/prometheus/client_model/go" ) diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 5ef5dccb99..8f6a7ac7a2 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -176,7 +176,7 @@ func (m *Manager) reload() { m.logger.Error("error reloading target set", "err", "invalid config id:"+setName) continue } - if scrapeConfig.ConvertClassicHistogramsToNHCB && m.opts.EnableCreatedTimestampZeroIngestion { + if scrapeConfig.ConvertClassicHistogramsToNHCBEnabled() && m.opts.EnableCreatedTimestampZeroIngestion { // TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137 m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137") continue diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 2e95ee2282..704726bf41 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -102,6 +102,9 @@ type scrapePool struct { scrapeFailureLogger FailureLogger scrapeFailureLoggerMtx sync.RWMutex + + validationScheme model.ValidationScheme + escapingScheme model.EscapingScheme } type labelLimits struct { @@ -124,7 +127,6 @@ type scrapeLoopOptions struct { timeout time.Duration alwaysScrapeClassicHist bool convertClassicHistToNHCB bool - validationScheme model.ValidationScheme fallbackScrapeProtocol string mrc []*relabel.Config @@ -147,6 +149,16 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed return nil, fmt.Errorf("error creating HTTP client: %w", err) } + validationScheme, err := config.ToValidationScheme(cfg.MetricNameValidationScheme) + if err != nil { + return nil, fmt.Errorf("invalid metric name validation scheme: %w", err) + } + var escapingScheme model.EscapingScheme + escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme) + if err != nil { + return nil, fmt.Errorf("invalid metric name escaping scheme, %w", err) + } + ctx, cancel := context.WithCancel(context.Background()) sp := &scrapePool{ cancel: cancel, @@ -160,6 +172,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger: logger, metrics: metrics, httpOpts: options.HTTPClientOptions, + validationScheme: validationScheme, + escapingScheme: escapingScheme, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
@@ -201,7 +215,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed options.PassMetadataInContext, metrics, options.skipOffsetting, - opts.validationScheme, + sp.validationScheme, + sp.escapingScheme, opts.fallbackScrapeProtocol, ) } @@ -309,6 +324,17 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { sp.config = cfg oldClient := sp.client sp.client = client + validationScheme, err := config.ToValidationScheme(cfg.MetricNameValidationScheme) + if err != nil { + return fmt.Errorf("invalid metric name validation scheme: %w", err) + } + sp.validationScheme = validationScheme + var escapingScheme model.EscapingScheme + escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme) + if err != nil { + return fmt.Errorf("invalid metric name escaping scheme, %w", err) + } + sp.escapingScheme = escapingScheme sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -341,14 +367,9 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { mrc = sp.config.MetricRelabelConfigs fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms - convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB + convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled() ) - validationScheme := model.UTF8Validation - if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { - validationScheme = model.LegacyValidation - } - sp.targetMtx.Lock() forcedErr := sp.refreshTargetLimitErr() @@ -369,7 +390,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { client: sp.client, timeout: targetTimeout, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, sp.escapingScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), metrics: sp.metrics, } @@ -388,7 +409,6 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { cache: cache, interval: targetInterval, timeout: targetTimeout, - validationScheme: validationScheme, fallbackScrapeProtocol: fallbackScrapeProtocol, alwaysScrapeClassicHist: alwaysScrapeClassicHist, convertClassicHistToNHCB: convertClassicHistToNHCB, @@ -456,7 +476,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { for _, t := range targets { // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage. 
nonEmpty := false - t.LabelsRange(func(l labels.Label) { nonEmpty = true }) + t.LabelsRange(func(_ labels.Label) { nonEmpty = true }) switch { case nonEmpty: all = append(all, t) @@ -503,14 +523,9 @@ func (sp *scrapePool) sync(targets []*Target) { mrc = sp.config.MetricRelabelConfigs fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms - convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB + convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled() ) - validationScheme := model.UTF8Validation - if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { - validationScheme = model.LegacyValidation - } - sp.targetMtx.Lock() for _, t := range targets { hash := t.hash() @@ -526,7 +541,7 @@ func (sp *scrapePool) sync(targets []*Target) { client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, sp.escapingScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), metrics: sp.metrics, } @@ -546,7 +561,6 @@ func (sp *scrapePool) sync(targets []*Target) { timeout: timeout, alwaysScrapeClassicHist: alwaysScrapeClassicHist, convertClassicHistToNHCB: convertClassicHistToNHCB, - validationScheme: validationScheme, fallbackScrapeProtocol: fallbackScrapeProtocol, }) if err != nil { @@ -777,13 +791,14 @@ var errBodySizeLimit = errors.New("body size limit exceeded") // acceptHeader transforms preference from the options into specific header values as // https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines. // No validation is here, we expect scrape protocols to be validated already. -func acceptHeader(sps []config.ScrapeProtocol, scheme model.ValidationScheme) string { +func acceptHeader(sps []config.ScrapeProtocol, scheme model.EscapingScheme) string { var vals []string weight := len(config.ScrapeProtocolsHeaders) + 1 for _, sp := range sps { val := config.ScrapeProtocolsHeaders[sp] - if scheme == model.UTF8Validation { - val += ";" + config.UTF8NamesHeader + // Escaping header is only valid for newer versions of the text formats. + if sp == config.PrometheusText1_0_0 || sp == config.OpenMetricsText1_0_0 { + val += ";" + model.EscapingKey + "=" + scheme.String() } val += fmt.Sprintf(";q=0.%d", weight) vals = append(vals, val) @@ -820,7 +835,7 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { return s.client.Do(s.req.WithContext(ctx)) } -func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) { +func (s *targetScraper) readResponse(_ context.Context, resp *http.Response, w io.Writer) (string, error) { defer func() { io.Copy(io.Discard, resp.Body) resp.Body.Close() @@ -911,6 +926,7 @@ type scrapeLoop struct { alwaysScrapeClassicHist bool convertClassicHistToNHCB bool validationScheme model.ValidationScheme + escapingScheme model.EscapingScheme fallbackScrapeProtocol string // Feature flagged options. @@ -1034,8 +1050,6 @@ func (c *scrapeCache) iterDone(flushCache bool) { } } c.metaMtx.Unlock() - - c.iter++ } // Swap current and previous series. 
@@ -1045,6 +1059,8 @@ func (c *scrapeCache) iterDone(flushCache bool) { for k := range c.seriesCur { delete(c.seriesCur, k) } + + c.iter++ } func (c *scrapeCache) get(met []byte) (*cacheEntry, bool, bool) { @@ -1230,6 +1246,7 @@ func newScrapeLoop(ctx context.Context, metrics *scrapeMetrics, skipOffsetting bool, validationScheme model.ValidationScheme, + escapingScheme model.EscapingScheme, fallbackScrapeProtocol string, ) *scrapeLoop { if l == nil { @@ -1284,6 +1301,7 @@ func newScrapeLoop(ctx context.Context, metrics: metrics, skipOffsetting: skipOffsetting, validationScheme: validationScheme, + escapingScheme: escapingScheme, fallbackScrapeProtocol: fallbackScrapeProtocol, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1700,7 +1718,7 @@ loop: t = *parsedTimestamp } - if sl.cache.getDropped(met) { + if sl.cache.getDropped(met) || isHistogram && !sl.enableNativeHistogramIngestion { continue } ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met) @@ -1714,7 +1732,7 @@ loop: lset = ce.lset hash = ce.hash } else { - p.Metric(&lset) + p.Labels(&lset) hash = lset.Hash() // Hash label set as it is seen local to the target. Then add target labels @@ -1747,25 +1765,25 @@ loop: err = storage.ErrDuplicateSampleForTimestamp } else { if sl.enableCTZeroIngestion { - if ctMs := p.CreatedTimestamp(); ctMs != nil { - if isHistogram && sl.enableNativeHistogramIngestion { + if ctMs := p.CreatedTimestamp(); ctMs != 0 { + if isHistogram { if h != nil { - ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, h, nil) + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, ctMs, h, nil) } else { - ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, nil, fh) + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, ctMs, nil, fh) } } else { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + ref, err = app.AppendCTZeroSample(ref, lset, t, ctMs) } if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. // CT is an experimental feature. For now, we don't need to fail the // scrape on errors updating the created timestamp, log debug. - sl.l.Debug("Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + sl.l.Debug("Error when appending CT in scrape loop", "series", string(met), "ct", ctMs, "t", t, "err", err) } } } - if isHistogram && sl.enableNativeHistogramIngestion { + if isHistogram { if h != nil { ref, err = app.AppendHistogram(ref, lset, t, h, nil) } else { @@ -1962,12 +1980,24 @@ func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) boo // Adds samples to the appender, checking the error, and then returns the # of samples added, // whether the caller should continue to process more samples, and any sample or bucket limit errors. +// Switch error cases for Sample and Bucket limits are checked first since they're more common +// during normal operation (e.g., accidental cardinality explosion, sudden traffic spikes). +// Current case ordering prevents exercising other cases when limits are exceeded. +// Remaining error cases typically occur only a few times, often during initial setup. 
func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { switch { case err == nil: return true, nil - case errors.Is(err, storage.ErrNotFound): - return false, storage.ErrNotFound + case errors.Is(err, errSampleLimit): + // Keep on parsing output if we hit the limit, so we report the correct + // total number of samples scraped. + *sampleLimitErr = err + return false, nil + case errors.Is(err, errBucketLimit): + // Keep on parsing output if we hit the limit, so we report the bucket + // total number of samples scraped. + *bucketLimitErr = err + return false, nil case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ sl.l.Debug("Out of order sample", "series", string(met)) @@ -1983,16 +2013,8 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke sl.l.Debug("Out of bounds metric", "series", string(met)) sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil - case errors.Is(err, errSampleLimit): - // Keep on parsing output if we hit the limit, so we report the correct - // total number of samples scraped. - *sampleLimitErr = err - return false, nil - case errors.Is(err, errBucketLimit): - // Keep on parsing output if we hit the limit, so we report the correct - // total number of samples scraped. - *bucketLimitErr = err - return false, nil + case errors.Is(err, storage.ErrNotFound): + return false, storage.ErrNotFound default: return false, err } diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 32b90cc10a..3562802495 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -43,7 +43,6 @@ var ( ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) ErrExemplarsDisabled = errors.New("exemplar storage is disabled or max exemplars is less than or equal to 0") ErrNativeHistogramsDisabled = errors.New("native histograms are disabled") - ErrOOONativeHistogramsDisabled = errors.New("out-of-order native histogram ingestion is disabled") // ErrOutOfOrderCT indicates failed append of CT to the storage // due to CT being older the then newer sample. diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 1953d5df09..bc70ceea55 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -133,7 +133,7 @@ func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { } // Select returns a set of series that matches the given label matchers. 
-func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { +func (q *mergeGenericQuerier) Select(ctx context.Context, _ bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) var limit int if hints != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go index 20ec53d6f6..f06b57c8c4 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go @@ -21,13 +21,12 @@ import ( "sync" "time" - "github.com/grafana/regexp" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/google/uuid" + "github.com/grafana/regexp" ) // Clouds. @@ -349,11 +348,10 @@ func (tokenProvider *tokenProvider) getToken(ctx context.Context) error { func (tokenProvider *tokenProvider) updateRefreshTime(accessToken azcore.AccessToken) error { tokenExpiryTimestamp := accessToken.ExpiresOn.UTC() deltaExpirytime := time.Now().Add(time.Until(tokenExpiryTimestamp) / 2) - if deltaExpirytime.After(time.Now().UTC()) { - tokenProvider.refreshTime = deltaExpirytime - } else { + if !deltaExpirytime.After(time.Now().UTC()) { return errors.New("access token expiry is less than the current time") } + tokenProvider.refreshTime = deltaExpirytime return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index aadf15307c..68891f659e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -42,6 +42,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote/azuread" "github.com/prometheus/prometheus/storage/remote/googleiam" + "github.com/prometheus/prometheus/util/compression" ) const ( @@ -53,17 +54,6 @@ const ( appProtoContentType = "application/x-protobuf" ) -// Compression represents the encoding. Currently remote storage supports only -// one, but we experiment with more, thus leaving the compression scaffolding -// for now. -// NOTE(bwplotka): Keeping it public, as a non-stable help for importers to use. -type Compression string - -const ( - // SnappyBlockCompression represents https://github.com/google/snappy/blob/2c94e11145f0b7b184b831577c93e5a41c4c0346/format_description.txt - SnappyBlockCompression Compression = "snappy" -) - var ( // UserAgent represents Prometheus version to use for user agent header. UserAgent = version.PrometheusUserAgent() @@ -130,7 +120,7 @@ type Client struct { readQueriesDuration prometheus.ObserverVec writeProtoMsg config.RemoteWriteProtoMsg - writeCompression Compression // Not exposed by ClientConfig for now. + writeCompression compression.Type // Not exposed by ClientConfig for now. } // ClientConfig configures a client. @@ -155,8 +145,8 @@ type ReadClient interface { } // NewReadClient creates a new client for remote read. 
-func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) { - httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client") +func NewReadClient(name string, conf *ClientConfig, optFuncs ...config_util.HTTPClientOption) (ReadClient, error) { + httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client", optFuncs...) if err != nil { return nil, err } @@ -232,7 +222,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { retryOnRateLimit: conf.RetryOnRateLimit, timeout: time.Duration(conf.Timeout), writeProtoMsg: writeProtoMsg, - writeCompression: SnappyBlockCompression, + writeCompression: compression.Snappy, }, nil } @@ -269,7 +259,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo return WriteResponseStats{}, err } - httpReq.Header.Add("Content-Encoding", string(c.writeCompression)) + httpReq.Header.Add("Content-Encoding", c.writeCompression) httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg]) httpReq.Header.Set("User-Agent", UserAgent) if c.writeProtoMsg == config.RemoteWriteProtoMsgV1 { @@ -375,7 +365,8 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool) httpReq.Header.Set("User-Agent", UserAgent) httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") - ctx, cancel := context.WithTimeout(ctx, c.timeout) + errTimeout := fmt.Errorf("%w: request timed out after %s", context.DeadlineExceeded, c.timeout) + ctx, cancel := context.WithTimeoutCause(ctx, c.timeout, errTimeout) ctx, span := otel.Tracer("").Start(ctx, "Remote Read", trace.WithSpanKind(trace.SpanKindClient)) defer span.End() @@ -393,7 +384,9 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool) _ = httpResp.Body.Close() cancel() - return nil, fmt.Errorf("remote server %s returned http status %s: %s", c.urlString, httpResp.Status, string(body)) + errStr := strings.Trim(string(body), "\n") + err := errors.New(errStr) + return nil, fmt.Errorf("remote server %s returned http status %s: %w", c.urlString, httpResp.Status, err) } contentType := httpResp.Header.Get("Content-Type") diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/intern.go b/vendor/github.com/prometheus/prometheus/storage/remote/intern.go index 23047acd9b..34edeb370e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/intern.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/intern.go @@ -61,11 +61,15 @@ func (p *pool) intern(s string) string { p.mtx.RLock() interned, ok := p.pool[s] - p.mtx.RUnlock() if ok { + // Increase the reference count while we're still holding the read lock, + // This will prevent the release() from deleting the entry while we're increasing its ref count. 
interned.refs.Inc() + p.mtx.RUnlock() return interned.s } + p.mtx.RUnlock() + p.mtx.Lock() defer p.mtx.Unlock() if interned, ok := p.pool[s]; ok { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go deleted file mode 100644 index b51b5e945a..0000000000 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheus - -import ( - "strings" - "unicode" - - "github.com/prometheus/prometheus/util/strutil" -) - -// Normalizes the specified label to follow Prometheus label names standard. -// -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. -// -// Labels that start with non-letter rune will be prefixed with "key_". -// An exception is made for double-underscores which are allowed. -func NormalizeLabel(label string) string { - // Trivial case. - if len(label) == 0 { - return label - } - - label = strutil.SanitizeLabelName(label) - - // If label starts with a number, prepend with "key_". - if unicode.IsDigit(rune(label[0])) { - label = "key_" + label - } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") { - label = "key" + label - } - - return label -} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go deleted file mode 100644 index 39a42734d7..0000000000 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/unit_to_ucum.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheus - -import "strings" - -var wordToUCUM = map[string]string{ - // Time - "days": "d", - "hours": "h", - "minutes": "min", - "seconds": "s", - "milliseconds": "ms", - "microseconds": "us", - "nanoseconds": "ns", - - // Bytes - "bytes": "By", - "kibibytes": "KiBy", - "mebibytes": "MiBy", - "gibibytes": "GiBy", - "tibibytes": "TiBy", - "kilobytes": "KBy", - "megabytes": "MBy", - "gigabytes": "GBy", - "terabytes": "TBy", - - // SI - "meters": "m", - "volts": "V", - "amperes": "A", - "joules": "J", - "watts": "W", - "grams": "g", - - // Misc - "celsius": "Cel", - "hertz": "Hz", - "ratio": "1", - "percent": "%", -} - -// The map that translates the "per" unit -// Example: per_second (singular) => /s -var perWordToUCUM = map[string]string{ - "second": "s", - "minute": "m", - "hour": "h", - "day": "d", - "week": "w", - "month": "mo", - "year": "y", -} - -// UnitWordToUCUM converts english unit words to UCUM units: -// https://ucum.org/ucum#section-Alphabetic-Index-By-Symbol -// It also handles rates, such as meters_per_second, by translating the first -// word to UCUM, and the "per" word to UCUM. It joins them with a "/" between. -func UnitWordToUCUM(unit string) string { - unitTokens := strings.SplitN(unit, "_per_", 2) - if len(unitTokens) == 0 { - return "" - } - ucumUnit := wordToUCUMOrDefault(unitTokens[0]) - if len(unitTokens) > 1 && unitTokens[1] != "" { - ucumUnit += "/" + perWordToUCUMOrDefault(unitTokens[1]) - } - return ucumUnit -} - -// wordToUCUMOrDefault retrieves the Prometheus "basic" unit corresponding to -// the specified "basic" unit. Returns the specified unit if not found in -// wordToUCUM. -func wordToUCUMOrDefault(unit string) string { - if promUnit, ok := wordToUCUM[unit]; ok { - return promUnit - } - return unit -} - -// perWordToUCUMOrDefault retrieve the Prometheus "per" unit corresponding to -// the specified "per" unit. Returns the specified unit if not found in perWordToUCUM. -func perWordToUCUMOrDefault(perUnit string) string { - if promPerUnit, ok := perWordToUCUM[perUnit]; ok { - return promPerUnit - } - return perUnit -} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 2b2d32f2f7..09be335a8b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -29,6 +29,7 @@ import ( "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" @@ -36,8 +37,6 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/prompb" - - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) const ( @@ -117,7 +116,8 @@ var seps = []byte{'\xff'} // if logOnOverwrite is true, the overwrite is logged. 
Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings, - ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { + ignoreAttrs []string, logOnOverwrite bool, extras ...string, +) []prompb.Label { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) @@ -159,7 +159,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting for _, label := range labels { finalKey := label.Name if !settings.AllowUTF8 { - finalKey = prometheustranslator.NormalizeLabel(finalKey) + finalKey = otlptranslator.NormalizeLabel(finalKey) } if existingValue, alreadyExists := l[finalKey]; alreadyExists { l[finalKey] = existingValue + ";" + label.Value @@ -171,7 +171,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting for _, lbl := range promotedAttrs { normalized := lbl.Name if !settings.AllowUTF8 { - normalized = prometheustranslator.NormalizeLabel(normalized) + normalized = otlptranslator.NormalizeLabel(normalized) } if _, exists := l[normalized]; !exists { l[normalized] = lbl.Value @@ -211,7 +211,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } // internal labels should be maintained if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { - name = prometheustranslator.NormalizeLabel(name) + name = otlptranslator.NormalizeLabel(name) } l[name] = extras[i+1] } @@ -224,21 +224,19 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting return labels } -// isValidAggregationTemporality checks whether an OTel metric has a valid -// aggregation temporality for conversion to a Prometheus metric. -func isValidAggregationTemporality(metric pmetric.Metric) bool { +func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporality, bool, error) { //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: - return true + return 0, false, nil case pmetric.MetricTypeSum: - return metric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityCumulative + return metric.Sum().AggregationTemporality(), true, nil case pmetric.MetricTypeHistogram: - return metric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative + return metric.Histogram().AggregationTemporality(), true, nil case pmetric.MetricTypeExponentialHistogram: - return metric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative + return metric.ExponentialHistogram().AggregationTemporality(), true, nil } - return false + return 0, false, fmt.Errorf("could not get aggregation temporality for %s as it has unsupported metric type %s", metric.Name(), metric.Type()) } // addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series @@ -249,7 +247,8 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. 
func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string) error { + resource pcommon.Resource, settings Settings, baseName string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -273,7 +272,6 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo sumlabels := createLabels(baseName+sumStr, baseLabels) c.addSample(sum, sumlabels) - } // treat count as a sample in an individual TimeSeries @@ -413,7 +411,7 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, return promExemplars, nil } -// mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics +// mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics. func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { var ts pcommon.Timestamp // handle individual metric based on type @@ -449,7 +447,8 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string) error { + settings Settings, baseName string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -563,7 +562,7 @@ func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*promp // addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist. // If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp, // both converted to milliseconds. -func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp pcommon.Timestamp, timestamp pcommon.Timestamp) { +func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp, timestamp pcommon.Timestamp) { ts, created := c.getOrCreateTimeSeries(lbls) if created { ts.Samples = []prompb.Sample{ @@ -632,7 +631,7 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta converter.addSample(sample, labels) } -// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms +// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms. 
func convertTimeStamp(timestamp pcommon.Timestamp) int64 { return int64(timestamp) / 1_000_000 } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 8349d4f907..6a405f104f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -25,6 +25,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/annotations" @@ -35,7 +36,9 @@ const defaultZeroThreshold = 1e-128 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series // as native histogram samples. func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, - resource pcommon.Resource, settings Settings, promName string) (annotations.Annotations, error) { + resource pcommon.Resource, settings Settings, promName string, + temporality pmetric.AggregationTemporality, +) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -44,7 +47,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont pt := dataPoints.At(x) - histogram, ws, err := exponentialToNativeHistogram(pt) + histogram, ws, err := exponentialToNativeHistogram(pt, temporality) annots.Merge(ws) if err != nil { return annots, err @@ -74,7 +77,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont // exponentialToNativeHistogram translates an OTel Exponential Histogram data point // to a Prometheus Native Histogram. -func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) { +func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() if scale < -4 { @@ -89,20 +92,30 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom scale = 8 } - pSpans, pDeltas := convertBucketsLayout(p.Positive(), scaleDown) - nSpans, nDeltas := convertBucketsLayout(p.Negative(), scaleDown) + pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true) + nSpans, nDeltas := convertBucketsLayout(p.Negative().BucketCounts().AsRaw(), p.Negative().Offset(), scaleDown, true) + + // The counter reset detection must be compatible with Prometheus to + // safely set ResetHint to NO. This is not ensured currently. + // Sending a sample that triggers counter reset but with ResetHint==NO + // would lead to Prometheus panic as it does not double check the hint. + // Thus we're explicitly saying UNKNOWN here, which is always safe. + // TODO: using created time stamp should be accurate, but we + // need to know here if it was used for the detection. 
+ // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 + // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 + resetHint := prompb.Histogram_UNKNOWN + + if temporality == pmetric.AggregationTemporalityDelta { + // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. + // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). + // This might be changed to a different hint name as gauge type might be misleading for samples that should be + // summed over time. + resetHint = prompb.Histogram_GAUGE + } h := prompb.Histogram{ - // The counter reset detection must be compatible with Prometheus to - // safely set ResetHint to NO. This is not ensured currently. - // Sending a sample that triggers counter reset but with ResetHint==NO - // would lead to Prometheus panic as it does not double check the hint. - // Thus we're explicitly saying UNKNOWN here, which is always safe. - // TODO: using created time stamp should be accurate, but we - // need to know here if it was used for the detection. - // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 - // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 - ResetHint: prompb.Histogram_UNKNOWN, + ResetHint: resetHint, Schema: scale, ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()}, @@ -133,19 +146,25 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom return h, annots, nil } -// convertBucketsLayout translates OTel Exponential Histogram dense buckets -// representation to Prometheus Native Histogram sparse bucket representation. +// convertBucketsLayout translates OTel Explicit or Exponential Histogram dense buckets +// representation to Prometheus Native Histogram sparse bucket representation. This is used +// for translating Exponential Histograms into Native Histograms, and Explicit Histograms +// into Native Histograms with Custom Buckets. // // The translation logic is taken from the client_golang `histogram.go#makeBuckets` // function, see `makeBuckets` https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go -// The bucket indexes conversion was adjusted, since OTel exp. histogram bucket +// +// scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one. +// +// When converting from OTel Exponential Histograms to Native Histograms, the +// bucket indexes conversion is adjusted, since OTel exp. histogram bucket // index 0 corresponds to the range (1, base] while Prometheus bucket index 0 // to the range (base 1]. // -// scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one. -func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) ([]prompb.BucketSpan, []int64) { - bucketCounts := buckets.BucketCounts() - if bucketCounts.Len() == 0 { +// When converting from OTel Explicit Histograms to Native Histograms with Custom Buckets, +// the bucket indexes are not scaled, and the indices are not adjusted by 1. 
+func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]prompb.BucketSpan, []int64) { + if len(bucketCounts) == 0 { return nil, nil } @@ -164,24 +183,28 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, // Let the compiler figure out that this is const during this function by // moving it into a local variable. - numBuckets := bucketCounts.Len() + numBuckets := len(bucketCounts) + + bucketIdx := offset>>scaleDown + 1 + + initialOffset := offset + if adjustOffset { + initialOffset = initialOffset>>scaleDown + 1 + } - // The offset is scaled and adjusted by 1 as described above. - bucketIdx := buckets.Offset()>>scaleDown + 1 spans = append(spans, prompb.BucketSpan{ - Offset: bucketIdx, + Offset: initialOffset, Length: 0, }) for i := 0; i < numBuckets; i++ { - // The offset is scaled and adjusted by 1 as described above. - nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1 + nextBucketIdx := (int32(i)+offset)>>scaleDown + 1 if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. - count += int64(bucketCounts.At(i)) + count += int64(bucketCounts[i]) continue } if count == 0 { - count = int64(bucketCounts.At(i)) + count = int64(bucketCounts[i]) continue } @@ -202,11 +225,12 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, } } appendDelta(count) - count = int64(bucketCounts.At(i)) + count = int64(bucketCounts[i]) bucketIdx = nextBucketIdx } + // Need to use the last item's index. The offset is scaled and adjusted by 1 as described above. - gap := (int32(numBuckets)+buckets.Offset()-1)>>scaleDown + 1 - bucketIdx + gap := (int32(numBuckets)+offset-1)>>scaleDown + 1 - bucketIdx if gap > 2 { // We have to create a new span, because we have found a gap // of more than two buckets. The constant 2 is copied from the logic in @@ -226,3 +250,112 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, return spans, deltas } + +func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, + resource pcommon.Resource, settings Settings, promName string, + temporality pmetric.AggregationTemporality, +) (annotations.Annotations, error) { + var annots annotations.Annotations + + for x := 0; x < dataPoints.Len(); x++ { + if err := c.everyN.checkContext(ctx); err != nil { + return annots, err + } + + pt := dataPoints.At(x) + + histogram, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality) + annots.Merge(ws) + if err != nil { + return annots, err + } + + lbls := createAttributes( + resource, + pt.Attributes(), + settings, + nil, + true, + model.MetricNameLabel, + promName, + ) + + ts, _ := c.getOrCreateTimeSeries(lbls) + ts.Histograms = append(ts.Histograms, histogram) + + exemplars, err := getPromExemplars[pmetric.HistogramDataPoint](ctx, &c.everyN, pt) + if err != nil { + return annots, err + } + ts.Exemplars = append(ts.Exemplars, exemplars...) 
+ } + + return annots, nil +} + +func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) { + var annots annotations.Annotations + + buckets := p.BucketCounts().AsRaw() + offset := getBucketOffset(buckets) + bucketCounts := buckets[offset:] + positiveSpans, positiveDeltas := convertBucketsLayout(bucketCounts, int32(offset), 0, false) + + // The counter reset detection must be compatible with Prometheus to + // safely set ResetHint to NO. This is not ensured currently. + // Sending a sample that triggers counter reset but with ResetHint==NO + // would lead to Prometheus panic as it does not double check the hint. + // Thus we're explicitly saying UNKNOWN here, which is always safe. + // TODO: using created time stamp should be accurate, but we + // need to know here if it was used for the detection. + // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 + // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 + resetHint := prompb.Histogram_UNKNOWN + + if temporality == pmetric.AggregationTemporalityDelta { + // If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting. + // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/). + // This might be changed to a different hint name as gauge type might be misleading for samples that should be + // summed over time. + resetHint = prompb.Histogram_GAUGE + } + + // TODO(carrieedwards): Add setting to limit maximum bucket count + h := prompb.Histogram{ + ResetHint: resetHint, + Schema: histogram.CustomBucketsSchema, + + PositiveSpans: positiveSpans, + PositiveDeltas: positiveDeltas, + // Note: OTel explicit histograms have an implicit +Inf bucket, which has a lower bound + // of the last element in the explicit_bounds array. + // This is similar to the custom_values array in native histograms with custom buckets. + // Because of this shared property, the OTel explicit histogram's explicit_bounds array + // can be mapped directly to the custom_values array. 
+ // See: https://github.com/open-telemetry/opentelemetry-proto/blob/d7770822d70c7bd47a6891fc9faacc66fc4af3d3/opentelemetry/proto/metrics/v1/metrics.proto#L469 + CustomValues: p.ExplicitBounds().AsRaw(), + + Timestamp: convertTimeStamp(p.Timestamp()), + } + + if p.Flags().NoRecordedValue() { + h.Sum = math.Float64frombits(value.StaleNaN) + h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN} + } else { + if p.HasSum() { + h.Sum = p.Sum() + } + h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + if p.Count() == 0 && h.Sum != 0 { + annots.Add(fmt.Errorf("histogram data point has zero count, but non-zero sum: %f", h.Sum)) + } + } + return h, annots, nil +} + +func getBucketOffset(buckets []uint64) (offset int) { + for offset < len(buckets) && buckets[offset] == 0 { + offset++ + } + return offset +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 1545accf2f..79d127bb80 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -22,12 +22,12 @@ import ( "fmt" "sort" + "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "github.com/prometheus/prometheus/prompb" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" "github.com/prometheus/prometheus/util/annotations" ) @@ -40,6 +40,8 @@ type Settings struct { AllowUTF8 bool PromoteResourceAttributes []string KeepIdentifyingResourceAttributes bool + ConvertHistogramsToNHCB bool + AllowDeltaTemporality bool } // PrometheusConverter converts from OTel write format to Prometheus remote write format. @@ -90,17 +92,27 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric metric := metricSlice.At(k) mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) + temporality, hasTemporality, err := aggregationTemporality(metric) + if err != nil { + errs = multierr.Append(errs, err) + continue + } - if !isValidAggregationTemporality(metric) { + if hasTemporality && + // Cumulative temporality is always valid. + // Delta temporality is also valid if AllowDeltaTemporality is true. + // All other temporality values are invalid. 
+ !(temporality == pmetric.AggregationTemporalityCumulative || + (settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) { errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name())) continue } var promName string if settings.AllowUTF8 { - promName = prometheustranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + promName = otlptranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) } else { - promName = prometheustranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + promName = otlptranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) } c.metadata = append(c.metadata, prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), @@ -142,10 +154,21 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName); err != nil { - errs = multierr.Append(errs, err) - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return + if settings.ConvertHistogramsToNHCB { + ws, err := c.addCustomBucketsHistogramDataPoints(ctx, dataPoints, resource, settings, promName, temporality) + annots.Merge(ws) + if err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + } + } else { + if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName); err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } } } case pmetric.MetricTypeExponentialHistogram: @@ -160,6 +183,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric resource, settings, promName, + temporality, ) annots.Merge(ws) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 6cdab450e1..e89dfd9815 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -29,7 +29,8 @@ import ( ) func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, settings Settings, name string) error { + resource pcommon.Resource, settings Settings, name string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -65,7 +66,8 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) error { + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err diff --git 
a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index 359fc52522..716a6cd6f9 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -31,12 +31,27 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMeta if otelMetric.Sum().IsMonotonic() { metricType = prompb.MetricMetadata_COUNTER } + // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) + // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. + if otelMetric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta { + metricType = prompb.MetricMetadata_UNKNOWN + } return metricType case pmetric.MetricTypeHistogram: + // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) + // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. + if otelMetric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { + return prompb.MetricMetadata_UNKNOWN + } return prompb.MetricMetadata_HISTOGRAM case pmetric.MetricTypeSummary: return prompb.MetricMetadata_SUMMARY case pmetric.MetricTypeExponentialHistogram: + if otelMetric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta { + // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) + // We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now. + return prompb.MetricMetadata_UNKNOWN + } return prompb.MetricMetadata_HISTOGRAM } return prompb.MetricMetadata_UNKNOWN diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index b274707bff..87567fb9c6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -24,7 +24,6 @@ import ( "time" "github.com/gogo/protobuf/proto" - "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" @@ -45,6 +44,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" ) const ( @@ -421,7 +421,7 @@ type QueueManager struct { clientMtx sync.RWMutex storeClient WriteClient protoMsg config.RemoteWriteProtoMsg - enc Compression + compr compression.Type seriesMtx sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder. seriesLabels map[chunks.HeadSeriesRef]labels.Labels @@ -512,7 +512,7 @@ func NewQueueManager( highestRecvTimestamp: highestRecvTimestamp, protoMsg: protoMsg, - enc: SnappyBlockCompression, // Hardcoded for now, but scaffolding exists for likely future use. + compr: compression.Snappy, // Hardcoded for now, but scaffolding exists for likely future use. 
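The queue manager now carries a compression.Type (with Snappy still hardcoded) instead of the old Compression string, and the snappy-specific compressPayload helper further down is replaced by the shared util/compression package. A minimal sketch of the new call shape, assuming only the Encode, Decode and NewSyncEncodeBuffer signatures exactly as they appear in the other hunks of this patch:

import "github.com/prometheus/prometheus/util/compression"

// Compress a marshalled WriteRequest with a reusable encode buffer, then
// decompress it again; both directions go through the same helper package.
func roundTrip(payload []byte) ([]byte, error) {
	buf := compression.NewSyncEncodeBuffer()
	compressed, err := compression.Encode(compression.Snappy, payload, buf)
	if err != nil {
		return nil, err
	}
	return compression.Decode(compression.Snappy, compressed, nil)
}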
} walMetadata := false @@ -574,7 +574,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error { // Build the WriteRequest with no samples (v1 flow). - req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, t.enc) + req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, t.compr) if err != nil { return err } @@ -1502,7 +1502,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { pBuf = proto.NewBuffer(nil) pBufRaw []byte - buf []byte + encBuf = compression.NewSyncEncodeBuffer() ) // TODO(@tpaschalis) Should we also raise the max if we have WAL metadata? if s.qm.sendExemplars { @@ -1534,7 +1534,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { } defer stop() - sendBatch := func(batch []timeSeries, protoMsg config.RemoteWriteProtoMsg, enc Compression, timer bool) { + sendBatch := func(batch []timeSeries, protoMsg config.RemoteWriteProtoMsg, compr compression.Type, timer bool) { switch protoMsg { case config.RemoteWriteProtoMsgV1: nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) @@ -1543,11 +1543,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { s.qm.logger.Debug("runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) } - _ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc) + _ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, encBuf, compr) case config.RemoteWriteProtoMsgV2: nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingDataV2, s.qm.sendExemplars, s.qm.sendNativeHistograms) n := nPendingSamples + nPendingExemplars + nPendingHistograms - _ = s.sendV2Samples(ctx, pendingDataV2[:n], symbolTable.Symbols(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf, enc) + _ = s.sendV2Samples(ctx, pendingDataV2[:n], symbolTable.Symbols(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, encBuf, compr) symbolTable.Reset() } } @@ -1576,7 +1576,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { return } - sendBatch(batch, s.qm.protoMsg, s.qm.enc, false) + sendBatch(batch, s.qm.protoMsg, s.qm.compr, false) // TODO(bwplotka): Previously the return was between popular and send. 
// Consider this when DRY-ing https://github.com/prometheus/prometheus/issues/14409 queue.ReturnForReuse(batch) @@ -1587,7 +1587,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { case <-timer.C: batch := queue.Batch() if len(batch) > 0 { - sendBatch(batch, s.qm.protoMsg, s.qm.enc, true) + sendBatch(batch, s.qm.protoMsg, s.qm.compr, true) } queue.ReturnForReuse(batch) timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) @@ -1636,18 +1636,18 @@ func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sen return nPendingSamples, nPendingExemplars, nPendingHistograms } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf compression.EncodeBuffer, compr compression.Type) error { begin := time.Now() - rs, err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc) + rs, err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, compr) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, rs, time.Since(begin)) return err } // TODO(bwplotka): DRY this (have one logic for both v1 and v2). // See https://github.com/prometheus/prometheus/issues/14409 -func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error { +func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *[]byte, buf compression.EncodeBuffer, compr compression.Type) error { begin := time.Now() - rs, err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc) + rs, err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, compr) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, rs, time.Since(begin)) return err } @@ -1669,7 +1669,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl if err != nil { s.qm.logger.Error("non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) } else if sampleDiff+exemplarDiff+histogramDiff > 0 { - s.qm.logger.Error("we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) + s.qm.logger.Error("we got 2xx status code from the Receiver yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) } // These counters are used to calculate the dynamic sharding, and as such @@ -1689,9 +1689,9 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl } // sendSamplesWithBackoff to the remote storage with backoff for recoverable errors. 
-func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf compression.EncodeBuffer, compr compression.Type) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. - req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc) + req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, nil, buf, compr) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will @@ -1700,7 +1700,6 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti } reqSize := len(req) - *buf = req // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need // to track the total amount of accepted data across the various attempts. @@ -1720,20 +1719,20 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti lowest := s.qm.buildRequestLimitTimestamp.Load() if isSampleOld(currentTime, time.Duration(s.qm.cfg.SampleAgeLimit), lowest) { // This will filter out old samples during retries. - req, _, lowest, err := buildWriteRequest( + req2, _, lowest, err := buildWriteRequest( s.qm.logger, samples, nil, pBuf, - buf, isTimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)), - enc, + buf, + compr, ) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { return err } - *buf = req + req = req2 } ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch") @@ -1761,7 +1760,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti s.qm.metrics.metadataTotal.Add(float64(metadataCount)) // Technically for v1, we will likely have empty response stats, but for // newer Receivers this might be not, so used it in a best effort. - rs, err := s.qm.client().Store(ctx, *buf, try) + rs, err := s.qm.client().Store(ctx, req, try) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial error // so far we don't have those, so it's ok to potentially skew statistics. @@ -1803,9 +1802,9 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti } // sendV2SamplesWithBackoff to the remote storage with backoff for recoverable errors. -func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) { +func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *[]byte, buf compression.EncodeBuffer, compr compression.Type) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. 
- req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc) + req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, nil, buf, compr) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will @@ -1814,7 +1813,6 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 } reqSize := len(req) - *buf = req // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need // to track the total amount of accepted data across the various attempts. @@ -1834,20 +1832,20 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 lowest := s.qm.buildRequestLimitTimestamp.Load() if isSampleOld(currentTime, time.Duration(s.qm.cfg.SampleAgeLimit), lowest) { // This will filter out old samples during retries. - req, _, lowest, err := buildV2WriteRequest( + req2, _, lowest, err := buildV2WriteRequest( s.qm.logger, samples, labels, pBuf, - buf, isV2TimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)), - enc, + buf, + compr, ) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { return err } - *buf = req + req = req2 } ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch") @@ -1873,7 +1871,7 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount)) - rs, err := s.qm.client().Store(ctx, *buf, try) + rs, err := s.qm.client().Store(ctx, req, try) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial error // so far we don't have those, so it's ok to potentially skew statistics. @@ -2114,21 +2112,7 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms } -func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed []byte, _ error) { - switch enc { - case SnappyBlockCompression: - compressed = snappy.Encode(*tmpbuf, inp) - if n := snappy.MaxEncodedLen(len(inp)); n > len(*tmpbuf) { - // grow the buffer for the next time - *tmpbuf = make([]byte, n) - } - return compressed, nil - default: - return compressed, fmt.Errorf("unknown compression scheme [%v]", enc) - } -} - -func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, filter func(prompb.TimeSeries) bool, buf compression.EncodeBuffer, compr compression.Type) (_ []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter) @@ -2146,27 +2130,18 @@ func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, meta } else { pBuf.Reset() } - err := pBuf.Marshal(req) - if err != nil { + if err := pBuf.Marshal(req); err != nil { return nil, highest, lowest, err } - // snappy uses len() to see if it needs to allocate a new slice. 
Make the - // buffer as long as possible. - if buf != nil { - *buf = (*buf)[0:cap(*buf)] - } else { - buf = &[]byte{} - } - - compressed, err = compressPayload(buf, pBuf.Bytes(), enc) + compressed, err := compression.Encode(compr, pBuf.Bytes(), buf) if err != nil { return nil, highest, lowest, err } return compressed, highest, lowest, nil } -func buildV2WriteRequest(logger *slog.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildV2WriteRequest(logger *slog.Logger, samples []writev2.TimeSeries, labels []string, pBuf *[]byte, filter func(writev2.TimeSeries) bool, buf compression.EncodeBuffer, compr compression.Type) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { @@ -2188,15 +2163,7 @@ func buildV2WriteRequest(logger *slog.Logger, samples []writev2.TimeSeries, labe } *pBuf = data - // snappy uses len() to see if it needs to allocate a new slice. Make the - // buffer as long as possible. - if buf != nil { - *buf = (*buf)[0:cap(*buf)] - } else { - buf = &[]byte{} - } - - compressed, err = compressPayload(buf, data, enc) + compressed, err = compression.Encode(compr, *pBuf, buf) if err != nil { return nil, highest, lowest, err } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index 14c3c87d93..ba6d100bdf 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -64,7 +64,7 @@ type Storage struct { } // NewStorage returns a remote.Storage. -func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { +func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage { if l == nil { l = promslog.NewNopLogger() } @@ -76,7 +76,7 @@ func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeC deduper: deduper, localStartTimeCallback: stCallback, } - s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL) + s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm) return s } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index 0363095444..51daeedb72 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -15,7 +15,6 @@ package remote import ( "context" - "errors" "fmt" "log/slog" "math" @@ -67,7 +66,6 @@ type WriteStorage struct { externalLabels labels.Labels dir string queues map[string]*QueueManager - metadataInWAL bool samplesIn *ewmaRate flushDeadline time.Duration interner *pool @@ -79,7 +77,7 @@ type WriteStorage struct { } // NewWriteStorage creates and runs a WriteStorage. 
-func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { +func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage { if logger == nil { logger = promslog.NewNopLogger() } @@ -95,7 +93,6 @@ func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, interner: newPool(), scraper: sm, quit: make(chan struct{}), - metadataInWAL: metadataInWal, highestTimestamp: &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, @@ -149,9 +146,6 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { newQueues := make(map[string]*QueueManager) newHashes := []string{} for _, rwConf := range conf.RemoteWriteConfigs { - if rwConf.ProtobufMessage == config.RemoteWriteProtoMsgV2 && !rws.metadataInWAL { - return errors.New("invalid remote write configuration, if you are using remote write version 2.0 the `--enable-feature=metadata-wal-records` feature flag must be enabled") - } hash, err := toHash(rwConf) if err != nil { return err diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 02585539c0..d43edd78bb 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -24,10 +24,15 @@ import ( "time" "github.com/gogo/protobuf/proto" - "github.com/golang/snappy" + deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/otel/metric/noop" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -38,13 +43,7 @@ import ( writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" - - deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/otel/metric/noop" + "github.com/prometheus/prometheus/util/compression" ) type writeHandler struct { @@ -150,8 +149,8 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Don't break yolo 1.0 clients if not needed. This is similar to what we did // before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62 // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default. 
- } else if enc != string(SnappyBlockCompression) { - err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression) + } else if strings.ToLower(enc) != compression.Snappy { + err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, compression.Snappy) h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } @@ -164,7 +163,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - decompressed, err := snappy.Decode(nil, body) + decompressed, err := compression.Decode(compression.Snappy, body, nil) if err != nil { // TODO(bwplotka): Add more context to responded error? h.logger.Error("Error decompressing remote write request", "err", err.Error()) @@ -250,7 +249,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are // potentially written. Perhaps unify with fixed writeV2 implementation a bit. - if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { + if !ls.Has(labels.MetricName) || !ls.IsValid(model.UTF8Validation) { h.logger.Warn("Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ continue @@ -391,7 +390,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Validate series labels early. // NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose // specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case. - if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { + if !ls.Has(labels.MetricName) || !ls.IsValid(model.UTF8Validation) { badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String())) samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) continue @@ -513,7 +512,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * } // handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. -// It doens't return errors in case of out of order CT. +// It doesn't return errors in case of out of order CT. func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { var err error if hist.IsFloatHistogram() { @@ -527,25 +526,38 @@ func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref stora type OTLPOptions struct { // Convert delta samples to their cumulative equivalent by aggregating in-memory ConvertDelta bool + // Store the raw delta samples as metrics with unknown type (we don't have a proper type for delta yet, therefore + // marking the metric type as unknown for now). + // We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/) + NativeDelta bool } // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. 
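The two delta-handling switches added to OTLPOptions are mutually exclusive: ConvertDelta routes delta series through the deltatocumulative processor, while NativeDelta writes delta samples as-is and types their metadata as unknown. A hedged wiring sketch; only OTLPOptions and NewOTLPWriteHandler come from this patch, the wrapper name is illustrative, and imports (log/slog, net/http, config, storage, storage/remote) are omitted:

func newOTLPHandler(logger *slog.Logger, app storage.Appendable, cfg func() config.Config) http.Handler {
	// Pick at most one of the two modes; NewOTLPWriteHandler panics if both are set.
	return remote.NewOTLPWriteHandler(logger, nil, app, cfg, remote.OTLPOptions{
		ConvertDelta: false, // aggregate delta series into cumulative in memory
		NativeDelta:  true,  // ingest delta samples as-is, typed as unknown in metadata
	})
}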
-func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { + if opts.NativeDelta && opts.ConvertDelta { + // This should be validated when iterating through feature flags, so not expected to fail here. + panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time") + } + ex := &rwExporter{ writeHandler: &writeHandler{ logger: logger, appendable: appendable, }, - config: configFunc, + config: configFunc, + allowDeltaTemporality: opts.NativeDelta, } - wh := &otlpWriteHandler{logger: logger, cumul: ex} + wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex} if opts.ConvertDelta { fac := deltatocumulative.NewFactory() - set := processor.Settings{TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()}} - d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.cumul) + set := processor.Settings{ + ID: component.NewID(fac.Type()), + TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()}, + } + d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.defaultConsumer) if err != nil { // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor], // which only errors if: @@ -555,13 +567,13 @@ func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appenda // both cannot be the case, as we pass a valid *Config and valid TelemetrySettings. // as such, we assume this error to never occur. // if it is, our assumptions are broken in which case a panic seems acceptable. - panic(err) + panic(fmt.Errorf("failed to create metrics processor: %w", err)) } if err := d2c.Start(context.Background(), nil); err != nil { // deltatocumulative does not error on start. 
see above for panic reasoning panic(err) } - wh.delta = d2c + wh.d2cConsumer = d2c } return wh @@ -569,7 +581,8 @@ func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appenda type rwExporter struct { *writeHandler - config func() config.Config + config func() config.Config + allowDeltaTemporality bool } func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { @@ -577,10 +590,12 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er converter := otlptranslator.NewPrometheusConverter() annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ - AddMetricSuffixes: true, - AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, + AddMetricSuffixes: otlpCfg.TranslationStrategy != config.NoTranslation, + AllowUTF8: otlpCfg.TranslationStrategy != config.UnderscoreEscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, + ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB, + AllowDeltaTemporality: rw.allowDeltaTemporality, }) if err != nil { rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) @@ -604,8 +619,8 @@ func (rw *rwExporter) Capabilities() consumer.Capabilities { type otlpWriteHandler struct { logger *slog.Logger - cumul consumer.Metrics // only cumulative - delta consumer.Metrics // delta capable + defaultConsumer consumer.Metrics // stores deltas as-is + d2cConsumer consumer.Metrics // converts deltas to cumulative } func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -617,13 +632,15 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } md := req.Metrics() - // if delta conversion enabled AND delta samples exist, use slower delta capable path - if h.delta != nil && hasDelta(md) { - err = h.delta.ConsumeMetrics(r.Context(), md) + // If deltatocumulative conversion enabled AND delta samples exist, use slower conversion path. + // While deltatocumulative can also accept cumulative metrics (and then just forwards them as-is), it currently + // holds a sync.Mutex when entering ConsumeMetrics. This is slow and not necessary when ingesting cumulative metrics. + if h.d2cConsumer != nil && hasDelta(md) { + err = h.d2cConsumer.ConsumeMetrics(r.Context(), md) } else { - // deltatocumulative currently holds a sync.Mutex when entering ConsumeMetrics. - // This is slow and not necessary when no delta samples exist anyways - err = h.cumul.ConsumeMetrics(r.Context(), md) + // Otherwise use default consumer (alongside cumulative samples, this will accept delta samples and write as-is + // if native-delta-support is enabled). 
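Spelled out, the strategy-to-flag mapping used by the exporter above is: NoTranslation keeps metric-name suffixes off and UTF-8 on, UnderscoreEscapingWithSuffixes keeps suffixes on and UTF-8 off, and NoUTF8EscapingWithSuffixes keeps both on. A hedged sketch of a populated Settings value from the prometheusremotewrite package (aliased otlptranslator in write_handler.go); the field names come from this patch, the values are illustrative:

func converterSettings() prometheusremotewrite.Settings {
	return prometheusremotewrite.Settings{
		AddMetricSuffixes:       true,  // TranslationStrategy != NoTranslation
		AllowUTF8:               false, // TranslationStrategy is UnderscoreEscapingWithSuffixes here
		ConvertHistogramsToNHCB: true,  // send explicit histograms as custom-bucket native histograms
		AllowDeltaTemporality:   false, // true only when native delta ingestion is enabled
	}
}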
+ err = h.defaultConsumer.ConsumeMetrics(r.Context(), md) } switch { diff --git a/vendor/github.com/prometheus/prometheus/storage/series.go b/vendor/github.com/prometheus/prometheus/storage/series.go index a3dbec7088..e61b225937 100644 --- a/vendor/github.com/prometheus/prometheus/storage/series.go +++ b/vendor/github.com/prometheus/prometheus/storage/series.go @@ -65,7 +65,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sampl if err != nil { return &ChunkSeriesEntry{ Lset: lset, - ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator { + ChunkIteratorFn: func(_ chunks.Iterator) chunks.Iterator { return errChunksIterator{err: err} }, } diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 25b65eb577..75a9f33bd2 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -29,12 +29,11 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" + common_templates "github.com/prometheus/common/helpers/templates" "github.com/prometheus/common/model" "golang.org/x/text/cases" "golang.org/x/text/language" - common_templates "github.com/prometheus/common/helpers/templates" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/strutil" ) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 4ffd2463c3..7f7d993800 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -26,8 +26,7 @@ import ( "slices" "sync" - "github.com/oklog/ulid" - + "github.com/oklog/ulid/v2" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" @@ -656,7 +655,7 @@ Outer: func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) { numStones := 0 - if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error { + if err := pb.tombstones.Iter(func(_ storage.SeriesRef, ivs tombstones.Intervals) error { numStones += len(ivs) return nil }); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 63f82e28df..5eb8a649a9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -21,7 +21,7 @@ import ( "math" "os" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/storage" diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go index d2eec6b75a..7f528df8d5 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -262,17 +262,23 @@ func (a *HistogramAppender) Append(int64, float64) { // The method returns an additional boolean set to true if it is not appendable // because of a counter reset. If the given sample is stale, it is always ok to // append. If counterReset is true, okToAppend is always false. +// +// The method returns an additional CounterResetHeader value that indicates the +// status of the counter reset detection. 
But it returns UnknownCounterReset +// when schema or zero threshold changed, because we don't do a full counter +// reset detection. func (a *HistogramAppender) appendable(h *histogram.Histogram) ( positiveInserts, negativeInserts []Insert, backwardPositiveInserts, backwardNegativeInserts []Insert, - okToAppend, counterReset bool, + okToAppend bool, counterResetHint CounterResetHeader, ) { + counterResetHint = NotCounterReset if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType { return } if h.CounterResetHint == histogram.CounterReset { // Always honor the explicit counter reset hint. - counterReset = true + counterResetHint = CounterReset return } if value.IsStaleNaN(h.Sum) { @@ -283,39 +289,45 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) ( if value.IsStaleNaN(a.sum) { // If the last sample was stale, then we can only accept stale // samples in this chunk. + counterResetHint = UnknownCounterReset return } if h.Count < a.cnt { // There has been a counter reset. - counterReset = true + counterResetHint = CounterReset return } if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold { + // This case might or might not go along with a counter reset and + // we do not want to invest the work of a full counter reset detection + // as long as https://github.com/prometheus/prometheus/issues/15346 is still open. + // TODO: consider adding the counter reset detection here once #15346 is fixed. + counterResetHint = UnknownCounterReset return } if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) { - counterReset = true + counterResetHint = CounterReset return } if h.ZeroCount < a.zCnt { // There has been a counter reset since ZeroThreshold didn't change. - counterReset = true + counterResetHint = CounterReset return } var ok bool positiveInserts, backwardPositiveInserts, ok = expandIntSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets) if !ok { - counterReset = true + counterResetHint = CounterReset return } negativeInserts, backwardNegativeInserts, ok = expandIntSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets) if !ok { - counterReset = true + counterResetHint = CounterReset return } @@ -781,21 +793,17 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h case prev != nil: // This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set. _, _, _, _, _, counterReset := prev.appendable(h) - if counterReset { - a.setCounterResetHeader(CounterReset) - } else { - a.setCounterResetHeader(NotCounterReset) - } + a.setCounterResetHeader(counterReset) } return nil, false, a, nil } // Adding counter-like histogram. if h.CounterResetHint != histogram.GaugeType { - pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h) - if !okToAppend || counterReset { + pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterResetHint := a.appendable(h) + if !okToAppend || counterResetHint != NotCounterReset { if appendOnly { - if counterReset { + if counterResetHint == CounterReset { return nil, false, a, errors.New("histogram counter reset") } return nil, false, a, errors.New("histogram schema change") @@ -806,9 +814,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h panic(err) // This should never happen for an empty histogram chunk. 
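With appendable() returning a CounterResetHeader instead of a bare counter-reset bool, callers propagate the hint straight into the new chunk's header rather than re-deriving it. A small illustrative helper (the constant names are from tsdb/chunkenc; the function itself is not part of this patch) summarizing how the hint values are meant to be read:

func describeHint(h chunkenc.CounterResetHeader) string {
	switch h {
	case chunkenc.CounterReset:
		return "counter reset detected: cut a new chunk and mark it CounterReset"
	case chunkenc.UnknownCounterReset:
		return "schema or zero threshold changed: cut a new chunk, reset status unknown"
	case chunkenc.NotCounterReset:
		return "safe to keep appending to the current chunk"
	default:
		return "gauge chunks skip counter reset detection entirely"
	}
}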
} happ := app.(*HistogramAppender) - if counterReset { - happ.setCounterResetHeader(CounterReset) - } + happ.setCounterResetHeader(counterResetHint) happ.appendHistogram(t, h) return newChunk, false, app, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 31b445f227..b66f7eed8f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -25,7 +25,7 @@ import ( "slices" "time" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/promslog" @@ -169,6 +169,8 @@ type LeveledCompactorOptions struct { // EnableOverlappingCompaction enables compaction of overlapping blocks. In Prometheus it is always enabled. // It is useful for downstream projects like Mimir, Cortex, Thanos where they have a separate component that does compaction. EnableOverlappingCompaction bool + // Metrics is set of metrics for Compactor. By default, NewCompactorMetrics would be called to initialize metrics unless it is provided. + Metrics *CompactorMetrics } type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder @@ -214,11 +216,14 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer if pe == nil { pe = index.EncodePostingsRaw } + if opts.Metrics == nil { + opts.Metrics = NewCompactorMetrics(r) + } return &LeveledCompactor{ ranges: ranges, chunkPool: pool, logger: l, - metrics: NewCompactorMetrics(r), + metrics: opts.Metrics, ctx: ctx, maxBlockChunkSegmentSize: maxBlockChunkSegmentSize, mergeFunc: mergeFunc, @@ -470,6 +475,12 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, start := time.Now() for _, d := range dirs { + select { + case <-c.ctx.Done(): + return nil, c.ctx.Err() + default: + } + meta, _, err := readMetaFile(d) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 2d35e3fb00..2d0af5c940 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -30,7 +30,7 @@ import ( "sync" "time" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/promslog" "go.uber.org/atomic" @@ -46,6 +46,7 @@ import ( _ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minimum Go version is met. "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" ) const ( @@ -80,7 +81,7 @@ func DefaultOptions() *Options { MaxBlockDuration: DefaultBlockDuration, NoLockfile: false, SamplesPerChunk: DefaultSamplesPerChunk, - WALCompression: wlog.CompressionNone, + WALCompression: compression.None, StripeSize: DefaultStripeSize, HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, IsolationDisabled: defaultIsolationDisabled, @@ -124,7 +125,7 @@ type Options struct { NoLockfile bool // WALCompression configures the compression type to use on records in the WAL. - WALCompression wlog.CompressionType + WALCompression compression.Type // Maximum number of CPUs that can simultaneously processes WAL replay. // If it is <=0, then GOMAXPROCS is used. @@ -178,12 +179,6 @@ type Options struct { // EnableNativeHistograms enables the ingestion of native histograms. 
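The new LeveledCompactorOptions.Metrics field lets a caller build compactor metrics once and share them across compactors; when it is left nil, NewCompactorMetrics(r) is called as before. A hedged sketch (only the option and constructor names shown in this patch are assumed; the wrapper is illustrative):

func sharedCompactorOptions(reg prometheus.Registerer) tsdb.LeveledCompactorOptions {
	// Register the compactor metrics once and reuse them for every compactor instance.
	return tsdb.LeveledCompactorOptions{
		Metrics:                     tsdb.NewCompactorMetrics(reg),
		EnableOverlappingCompaction: true,
	}
}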
EnableNativeHistograms bool - // EnableOOONativeHistograms enables the ingestion of OOO native histograms. - // It will only take effect if EnableNativeHistograms is set to true and the - // OutOfOrderTimeWindow is > 0. This flag will be removed after testing of - // OOO Native Histogram ingestion is complete. - EnableOOONativeHistograms bool - // OutOfOrderTimeWindow specifies how much out of order is allowed, if any. // This can change during run-time, so this value from here should only be used // while initialising. @@ -966,7 +961,6 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn headOpts.MaxExemplars.Store(opts.MaxExemplars) headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms) - headOpts.EnableOOONativeHistograms.Store(opts.EnableOOONativeHistograms) headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow) headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax) headOpts.EnableSharding = opts.EnableSharding @@ -992,9 +986,14 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.metrics.maxBytes.Set(float64(maxBytes)) db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds()) + // Calling db.reload() calls db.reloadBlocks() which requires cmtx to be locked. + db.cmtx.Lock() if err := db.reload(); err != nil { + db.cmtx.Unlock() return nil, err } + db.cmtx.Unlock() + // Set the min valid time for the ingested samples // to be no lower than the maxt of the last block. minValidTime := int64(math.MinInt64) @@ -1191,16 +1190,6 @@ func (db *DB) DisableNativeHistograms() { db.head.DisableNativeHistograms() } -// EnableOOONativeHistograms enables the ingestion of out-of-order native histograms. -func (db *DB) EnableOOONativeHistograms() { - db.head.EnableOOONativeHistograms() -} - -// DisableOOONativeHistograms disables the ingestion of out-of-order native histograms. -func (db *DB) DisableOOONativeHistograms() { - db.head.DisableOOONativeHistograms() -} - // dbAppender wraps the DB's head appender and triggers compactions on commit // if necessary. type dbAppender struct { @@ -1363,6 +1352,7 @@ func (db *DB) CompactOOOHead(ctx context.Context) error { // Callback for testing. var compactOOOHeadTestingCallback func() +// The db.cmtx mutex should be held before calling this method. func (db *DB) compactOOOHead(ctx context.Context) error { if !db.oooWasEnabled.Load() { return nil @@ -1417,6 +1407,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error { // compactOOO creates a new block per possible block range in the compactor's directory from the OOO Head given. // Each ULID in the result corresponds to a block in a unique time range. +// The db.cmtx mutex should be held before calling this method. func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID, err error) { start := time.Now() @@ -1461,7 +1452,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID } // compactHead compacts the given RangeHead. -// The compaction mutex should be held before calling this method. +// The db.cmtx should be held before calling this method. func (db *DB) compactHead(head *RangeHead) error { uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) if err != nil { @@ -1487,7 +1478,7 @@ func (db *DB) compactHead(head *RangeHead) error { } // compactBlocks compacts all the eligible on-disk blocks. 
-// The compaction mutex should be held before calling this method. +// The db.cmtx should be held before calling this method. func (db *DB) compactBlocks() (err error) { // Check for compactions of multiple blocks. for { @@ -1495,7 +1486,7 @@ func (db *DB) compactBlocks() (err error) { // long enough that we end up with a HEAD block that needs to be written. // Check if that's the case and stop compactions early. if db.head.compactable() && !db.waitingForCompactionDelay() { - db.logger.Warn("aborting block compactions to persit the head block") + db.logger.Warn("aborting block compactions to persist the head block") return nil } @@ -1544,6 +1535,7 @@ func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) { } // reload reloads blocks and truncates the head and its WAL. +// The db.cmtx mutex should be held before calling this method. func (db *DB) reload() error { if err := db.reloadBlocks(); err != nil { return fmt.Errorf("reloadBlocks: %w", err) @@ -1560,6 +1552,7 @@ func (db *DB) reload() error { // reloadBlocks reloads blocks without touching head. // Blocks that are obsolete due to replacement or retention will be deleted. +// The db.cmtx mutex should be held before calling this method. func (db *DB) reloadBlocks() (err error) { defer func() { if err != nil { @@ -1568,13 +1561,9 @@ func (db *DB) reloadBlocks() (err error) { db.metrics.reloads.Inc() }() - // Now that we reload TSDB every minute, there is a high chance for a race condition with a reload - // triggered by CleanTombstones(). We need to lock the reload to avoid the situation where - // a normal reload and CleanTombstones try to delete the same block. - db.mtx.Lock() - defer db.mtx.Unlock() - + db.mtx.RLock() loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool, db.opts.PostingsDecoderFactory) + db.mtx.RUnlock() if err != nil { return err } @@ -1600,11 +1589,13 @@ func (db *DB) reloadBlocks() (err error) { if len(corrupted) > 0 { // Corrupted but no child loaded for it. // Close all new blocks to release the lock for windows. + db.mtx.RLock() for _, block := range loadable { if _, open := getBlock(db.blocks, block.Meta().ULID); !open { block.Close() } } + db.mtx.RUnlock() errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { if err != nil { @@ -1643,8 +1634,10 @@ func (db *DB) reloadBlocks() (err error) { }) // Swap new blocks first for subsequently created readers to be seen. + db.mtx.Lock() oldBlocks := db.blocks db.blocks = toLoad + db.mtx.Unlock() // Only check overlapping blocks when overlapping compaction is enabled. if db.opts.EnableOverlappingCompaction { @@ -2287,10 +2280,9 @@ func (db *DB) CleanTombstones() (err error) { db.logger.Error("failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } - if err != nil { - return fmt.Errorf("reload blocks: %w", err) - } - return nil + + // This should only be reached if an error occurred. + return fmt.Errorf("reload blocks: %w", err) } } return nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go index ff230c44b1..a86ce59bd8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go @@ -25,7 +25,7 @@ import ( type multiError []error // NewMulti returns multiError with provided errors added if not nil. -func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return. 
+func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return m := multiError{} m.Add(errs...) return m diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go index e6ac4ec989..1672a92d4c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go @@ -20,7 +20,7 @@ import ( func DirSize(dir string) (int64, error) { var size int64 - err := filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error { + err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error { if err != nil { return err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go index 5e479f48b9..523f99292c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go @@ -76,7 +76,7 @@ func copyFile(src, dest string) error { func readDirs(src string) ([]string, error) { var files []string - err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error { + err := filepath.Walk(src, func(path string, _ os.FileInfo, _ error) error { relativePath := strings.TrimPrefix(path, src) if len(relativePath) > 0 { files = append(files, relativePath) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 4fbb4b5710..7763d272b7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -26,11 +26,10 @@ import ( "sync" "time" - "github.com/oklog/ulid" - "go.uber.org/atomic" - + "github.com/oklog/ulid/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/promslog" + "go.uber.org/atomic" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -94,11 +93,22 @@ type Head struct { bytesPool zeropool.Pool[[]byte] memChunkPool sync.Pool + // These pools are only used during WAL/WBL replay and are reset at the end. + // NOTE: Adjust resetWLReplayResources() upon changes to the pools. + wlReplaySeriesPool zeropool.Pool[[]record.RefSeries] + wlReplaySamplesPool zeropool.Pool[[]record.RefSample] + wlReplaytStonesPool zeropool.Pool[[]tombstones.Stone] + wlReplayExemplarsPool zeropool.Pool[[]record.RefExemplar] + wlReplayHistogramsPool zeropool.Pool[[]record.RefHistogramSample] + wlReplayFloatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] + wlReplayMetadataPool zeropool.Pool[[]record.RefMetadata] + wlReplayMmapMarkersPool zeropool.Pool[[]record.RefMmapMarker] + // All series addressable by their ID or hash. series *stripeSeries - deletedMtx sync.Mutex - deleted map[chunks.HeadSeriesRef]int // Deleted series, and what WAL segment they must be kept until. + walExpiriesMtx sync.Mutex + walExpiries map[chunks.HeadSeriesRef]int // Series no longer in the head, and what WAL segment they must be kept until. // TODO(codesome): Extend MemPostings to return only OOOPostings, Set OOOStatus, ... Like an additional map of ooo postings. postings *index.MemPostings // Postings lists for terms. @@ -150,11 +160,6 @@ type HeadOptions struct { // EnableNativeHistograms enables the ingestion of native histograms. EnableNativeHistograms atomic.Bool - // EnableOOONativeHistograms enables the ingestion of OOO native histograms. 
- // It will only take effect if EnableNativeHistograms is set to true and the - // OutOfOrderTimeWindow is > 0 - EnableOOONativeHistograms atomic.Bool - ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. ChunkDirRoot string @@ -330,7 +335,7 @@ func (h *Head) resetInMemoryState() error { h.exemplars = es h.postings = index.NewUnorderedMemPostings() h.tombstones = tombstones.NewMemTombstones() - h.deleted = map[chunks.HeadSeriesRef]int{} + h.walExpiries = map[chunks.HeadSeriesRef]int{} h.chunkRange.Store(h.opts.ChunkRange) h.minTime.Store(math.MaxInt64) h.maxTime.Store(math.MinInt64) @@ -341,6 +346,17 @@ func (h *Head) resetInMemoryState() error { return nil } +func (h *Head) resetWLReplayResources() { + h.wlReplaySeriesPool = zeropool.Pool[[]record.RefSeries]{} + h.wlReplaySamplesPool = zeropool.Pool[[]record.RefSample]{} + h.wlReplaytStonesPool = zeropool.Pool[[]tombstones.Stone]{} + h.wlReplayExemplarsPool = zeropool.Pool[[]record.RefExemplar]{} + h.wlReplayHistogramsPool = zeropool.Pool[[]record.RefHistogramSample]{} + h.wlReplayFloatHistogramsPool = zeropool.Pool[[]record.RefFloatHistogramSample]{} + h.wlReplayMetadataPool = zeropool.Pool[[]record.RefMetadata]{} + h.wlReplayMmapMarkersPool = zeropool.Pool[[]record.RefMmapMarker]{} +} + type headMetrics struct { activeAppenders prometheus.Gauge series prometheus.GaugeFunc @@ -369,6 +385,8 @@ type headMetrics struct { snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. oooHistogram prometheus.Histogram mmapChunksTotal prometheus.Counter + walReplayUnknownRefsTotal *prometheus.CounterVec + wblReplayUnknownRefsTotal *prometheus.CounterVec } const ( @@ -500,6 +518,14 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_mmap_chunks_total", Help: "Total number of chunks that were memory-mapped.", }), + walReplayUnknownRefsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_replay_unknown_refs_total", + Help: "Total number of unknown series references encountered during WAL replay.", + }, []string{"type"}), + wblReplayUnknownRefsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wbl_replay_unknown_refs_total", + Help: "Total number of unknown series references encountered during WBL replay.", + }, []string{"type"}), } if r != nil { @@ -567,6 +593,8 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { } return float64(val) }), + m.walReplayUnknownRefsTotal, + m.wblReplayUnknownRefsTotal, ) } return m @@ -613,6 +641,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second // limits the ingested samples to the head min valid time. func (h *Head) Init(minValidTime int64) error { h.minValidTime.Store(minValidTime) + defer h.resetWLReplayResources() defer func() { h.postings.EnsureOrder(h.opts.WALReplayConcurrency) }() @@ -752,7 +781,7 @@ func (h *Head) Init(minValidTime int64) error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. - if err := h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks); err != nil { + if err := h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks, endAt); err != nil { return fmt.Errorf("backfill checkpoint: %w", err) } h.updateWALReplayStatusRead(startFrom) @@ -768,6 +797,7 @@ func (h *Head) Init(minValidTime int64) error { } // Backfill segments from the most recent checkpoint onwards. 
for i := startFrom; i <= endAt; i++ { + walSegmentStart := time.Now() s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wal.Dir(), i)) if err != nil { return fmt.Errorf("open WAL segment: %d: %w", i, err) @@ -785,14 +815,14 @@ func (h *Head) Init(minValidTime int64) error { if err != nil { return fmt.Errorf("segment reader (offset=%d): %w", offset, err) } - err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks) + err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks, endAt) if err := sr.Close(); err != nil { h.logger.Warn("Error while closing the wal segments reader", "err", err) } if err != nil { return err } - h.logger.Info("WAL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WAL segment loaded", "segment", i, "maxSegment", endAt, "duration", time.Since(walSegmentStart)) h.updateWALReplayStatusRead(i) } walReplayDuration := time.Since(walReplayStart) @@ -1019,16 +1049,6 @@ func (h *Head) DisableNativeHistograms() { h.opts.EnableNativeHistograms.Store(false) } -// EnableOOONativeHistograms enables the ingestion of out-of-order native histograms. -func (h *Head) EnableOOONativeHistograms() { - h.opts.EnableOOONativeHistograms.Store(true) -} - -// DisableOOONativeHistograms disables the ingestion of out-of-order native histograms. -func (h *Head) DisableOOONativeHistograms() { - h.opts.EnableOOONativeHistograms.Store(false) -} - // PostingsCardinalityStats returns highest cardinality stats by label and value names. func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats { cacheKey := statsByLabelName + ";" + strconv.Itoa(limit) @@ -1252,6 +1272,34 @@ func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64) return false, false, 0 } +func (h *Head) getWALExpiry(id chunks.HeadSeriesRef) (int, bool) { + h.walExpiriesMtx.Lock() + defer h.walExpiriesMtx.Unlock() + + keepUntil, ok := h.walExpiries[id] + return keepUntil, ok +} + +func (h *Head) setWALExpiry(id chunks.HeadSeriesRef, keepUntil int) { + h.walExpiriesMtx.Lock() + defer h.walExpiriesMtx.Unlock() + + h.walExpiries[id] = keepUntil +} + +// keepSeriesInWALCheckpoint is used to determine whether a series record should be kept in the checkpoint +// last is the last WAL segment that was considered for checkpointing. +func (h *Head) keepSeriesInWALCheckpoint(id chunks.HeadSeriesRef, last int) bool { + // Keep the record if the series exists in the head. + if h.series.getByID(id) != nil { + return true + } + + // Keep the record if the series has an expiry set. + keepUntil, ok := h.getWALExpiry(id) + return ok && keepUntil > last +} + // truncateWAL removes old data before mint from the WAL. 
func (h *Head) truncateWAL(mint int64) error { h.chunkSnapshotMtx.Lock() @@ -1285,17 +1333,8 @@ func (h *Head) truncateWAL(mint int64) error { return nil } - keep := func(id chunks.HeadSeriesRef) bool { - if h.series.getByID(id) != nil { - return true - } - h.deletedMtx.Lock() - keepUntil, ok := h.deleted[id] - h.deletedMtx.Unlock() - return ok && keepUntil > last - } h.metrics.checkpointCreationTotal.Inc() - if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { + if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, h.keepSeriesInWALCheckpoint, mint); err != nil { h.metrics.checkpointCreationFail.Inc() var cerr *chunks.CorruptionErr if errors.As(err, &cerr) { @@ -1310,15 +1349,15 @@ func (h *Head) truncateWAL(mint int64) error { h.logger.Error("truncating segments failed", "err", err) } - // The checkpoint is written and segments before it is truncated, so we no - // longer need to track deleted series that are before it. - h.deletedMtx.Lock() - for ref, segment := range h.deleted { + // The checkpoint is written and segments before it is truncated, so stop + // tracking expired series. + h.walExpiriesMtx.Lock() + for ref, segment := range h.walExpiries { if segment <= last { - delete(h.deleted, ref) + delete(h.walExpiries, ref) } } - h.deletedMtx.Unlock() + h.walExpiriesMtx.Unlock() h.metrics.checkpointDeleteTotal.Inc() if err := wlog.DeleteCheckpoints(h.wal.Dir(), last); err != nil { @@ -1585,7 +1624,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { if h.wal != nil { _, last, _ := wlog.Segments(h.wal.Dir()) - h.deletedMtx.Lock() + h.walExpiriesMtx.Lock() // Keep series records until we're past segment 'last' // because the WAL will still have samples records with // this ref ID. If we didn't keep these series records then @@ -1593,9 +1632,9 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { // that reads the WAL, wouldn't be able to use those // samples since we would have no labels for that ref ID. for ref := range deleted { - h.deleted[chunks.HeadSeriesRef(ref)] = last + h.walExpiries[chunks.HeadSeriesRef(ref)] = last } - h.deletedMtx.Unlock() + h.walExpiriesMtx.Unlock() } return actualInOrderMint, minOOOTime, minMmapFile @@ -1695,7 +1734,7 @@ func (h *Head) String() string { return "head" } -func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, error) { +func (h *Head) getOrCreate(hash uint64, lset labels.Labels, pendingCommit bool) (*memSeries, bool, error) { // Just using `getOrCreateWithID` below would be semantically sufficient, but we'd create // a new series on every sample inserted via Add(), which causes allocations // and makes our series IDs rather random and harder to compress in postings. @@ -1707,17 +1746,17 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e // Optimistically assume that we are the first one to create the series. 
id := chunks.HeadSeriesRef(h.lastSeriesID.Inc()) - return h.getOrCreateWithID(id, hash, lset) + return h.getOrCreateWithID(id, hash, lset, pendingCommit) } -func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { +func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels, pendingCommit bool) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { shardHash := uint64(0) if h.opts.EnableSharding { shardHash = labels.StableHash(lset) } - return newMemSeries(lset, id, shardHash, h.opts.IsolationDisabled) + return newMemSeries(lset, id, shardHash, h.opts.IsolationDisabled, pendingCommit) }) if err != nil { return nil, false, err @@ -2158,12 +2197,13 @@ type memSeriesOOOFields struct { firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]. } -func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled bool) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled, pendingCommit bool) *memSeries { s := &memSeries{ - lset: lset, - ref: id, - nextAt: math.MinInt64, - shardHash: shardHash, + lset: lset, + ref: id, + nextAt: math.MinInt64, + shardHash: shardHash, + pendingCommit: pendingCommit, } if !isolationDisabled { s.txs = newTxRing(0) @@ -2267,6 +2307,10 @@ type memChunk struct { // len returns the length of memChunk list, including the element it was called on. func (mc *memChunk) len() (count int) { + if mc.prev == nil { + return 1 + } + elem := mc for elem != nil { count++ @@ -2278,6 +2322,9 @@ func (mc *memChunk) len() (count int) { // oldest returns the oldest element on the list. // For single element list this will be the same memChunk oldest() was called on. func (mc *memChunk) oldest() (elem *memChunk) { + if mc.prev == nil { + return mc + } elem = mc for elem.prev != nil { elem = elem.prev @@ -2290,6 +2337,9 @@ func (mc *memChunk) atOffset(offset int) (elem *memChunk) { if offset == 0 { return mc } + if offset == 1 { + return mc.prev + } if offset < 0 { return nil } @@ -2303,7 +2353,6 @@ func (mc *memChunk) atOffset(offset int) (elem *memChunk) { break } } - return elem } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index c94c42bc53..03800b2455 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -319,7 +319,8 @@ type headAppender struct { headMaxt int64 // We track it here to not take the lock for every sample appended. oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample. - series []record.RefSeries // New series held by this appender. + seriesRefs []record.RefSeries // New series records held by this appender. + series []*memSeries // New series held by this appender (using corresponding slices indexes from seriesRefs) samples []record.RefSample // New float samples held by this appender. sampleSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). histograms []record.RefHistogramSample // New histogram samples held by this appender. 
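The headAppender hunk above replaces the single series slice with two parallel slices: seriesRefs holds the record.RefSeries entries that get written to the WAL, while series keeps the matching *memSeries pointers so the pendingCommit flag set at creation time can be cleared again in Commit and Rollback. A minimal, self-contained sketch of that parallel-slice pattern, using illustrative stub types rather than the real TSDB structs:

package main

import "fmt"

type seriesRecord struct {
	Ref    uint64
	Labels string
}

type memSeriesStub struct {
	ref           uint64
	pendingCommit bool
}

type appenderSketch struct {
	seriesRefs []seriesRecord   // records destined for the WAL
	series     []*memSeriesStub // same indexes as seriesRefs, kept for post-commit cleanup
}

func (a *appenderSketch) trackNewSeries(ref uint64, labels string, s *memSeriesStub) {
	// Append to both slices together so index i refers to the same series in each.
	a.seriesRefs = append(a.seriesRefs, seriesRecord{Ref: ref, Labels: labels})
	a.series = append(a.series, s)
}

func (a *appenderSketch) unmarkPendingCommit() {
	// Mirrors the cleanup done after Commit/Rollback: the flag only protects
	// newly created series while their first samples are still in flight.
	for _, s := range a.series {
		s.pendingCommit = false
	}
}

func main() {
	a := &appenderSketch{}
	s := &memSeriesStub{ref: 1, pendingCommit: true}
	a.trackNewSeries(s.ref, `{job="demo"}`, s)
	a.unmarkPendingCommit()
	fmt.Println(len(a.seriesRefs), s.pendingCommit) // 1 false
}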
@@ -461,15 +462,16 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo if l, dup := lset.HasDuplicateLabelNames(); dup { return nil, false, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) } - s, created, err = a.head.getOrCreate(lset.Hash(), lset) + s, created, err = a.head.getOrCreate(lset.Hash(), lset, true) if err != nil { return nil, false, err } if created { - a.series = append(a.series, record.RefSeries{ + a.seriesRefs = append(a.seriesRefs, record.RefSeries{ Ref: s.ref, Labels: lset, }) + a.series = append(a.series, s) } return s, created, nil } @@ -523,7 +525,7 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi // appendableHistogram checks whether the given histogram sample is valid for appending to the series. (if we return false and no error) // The sample belongs to the out of order chunk if we return true and no error. // An error signifies the sample cannot be handled. -func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram, headMaxt, minValidTime, oooTimeWindow int64, oooHistogramsEnabled bool) (isOOO bool, oooDelta int64, err error) { +func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { // Check if we can append in the in-order chunk. if t >= minValidTime { if s.headChunks == nil { @@ -549,9 +551,6 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram, headMax // The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk. if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow { - if !oooHistogramsEnabled { - return true, headMaxt - t, storage.ErrOOONativeHistogramsDisabled - } return true, headMaxt - t, nil } @@ -568,7 +567,7 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram, headMax // appendableFloatHistogram checks whether the given float histogram sample is valid for appending to the series. (if we return false and no error) // The sample belongs to the out of order chunk if we return true and no error. // An error signifies the sample cannot be handled. -func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram, headMaxt, minValidTime, oooTimeWindow int64, oooHistogramsEnabled bool) (isOOO bool, oooDelta int64, err error) { +func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { // Check if we can append in the in-order chunk. if t >= minValidTime { if s.headChunks == nil { @@ -594,9 +593,6 @@ func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogr // The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk. if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow { - if !oooHistogramsEnabled { - return true, headMaxt - t, storage.ErrOOONativeHistogramsDisabled - } return true, headMaxt - t, nil } @@ -654,7 +650,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // Fail fast if OOO is disabled and the sample is out of bounds. // Otherwise a full check will be done later to decide if the sample is in-order or out-of-order. 
- if (a.oooTimeWindow == 0 || !a.head.opts.EnableOOONativeHistograms.Load()) && t < a.minValidTime { + if a.oooTimeWindow == 0 && t < a.minValidTime { a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() return 0, storage.ErrOutOfBounds } @@ -694,7 +690,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. - _, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + _, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err != nil { s.pendingCommit = true } @@ -705,8 +701,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels if err != nil { switch { case errors.Is(err, storage.ErrOutOfOrderSample): - fallthrough - case errors.Is(err, storage.ErrOOONativeHistogramsDisabled): a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() case errors.Is(err, storage.ErrTooOldSample): a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() @@ -731,7 +725,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. - _, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + _, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err == nil { s.pendingCommit = true } @@ -742,8 +736,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels if err != nil { switch { case errors.Is(err, storage.ErrOutOfOrderSample): - fallthrough - case errors.Is(err, storage.ErrOOONativeHistogramsDisabled): a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() case errors.Is(err, storage.ErrTooOldSample): a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() @@ -799,9 +791,9 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l s.lastHistogramValue = zeroHistogram } - // Although we call `appendableHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // For CTZeroSamples OOO is not allowed. // We set it to true to make this implementation as close as possible to the float implementation. - isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) + isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err != nil { s.Unlock() if errors.Is(err, storage.ErrOutOfOrderSample) { @@ -833,9 +825,8 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l s.lastFloatHistogramValue = zeroFloatHistogram } - // Although we call `appendableFloatHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. // We set it to true to make this implementation as close as possible to the float implementation. - isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) // OOO is not allowed for CTZeroSamples. 
+ isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow) // OOO is not allowed for CTZeroSamples. if err != nil { s.Unlock() if errors.Is(err, storage.ErrOutOfOrderSample) { @@ -918,8 +909,8 @@ func (a *headAppender) log() error { var rec []byte var enc record.Encoder - if len(a.series) > 0 { - rec = enc.Series(a.series, buf) + if len(a.seriesRefs) > 0 { + rec = enc.Series(a.seriesRefs, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { @@ -1256,7 +1247,7 @@ func (a *headAppender) commitHistograms(acc *appenderCommitContext) { series = a.histogramSeries[i] series.Lock() - oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err != nil { handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) } @@ -1344,7 +1335,7 @@ func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { series = a.floatHistogramSeries[i] series.Lock() - oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err != nil { handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) } @@ -1437,6 +1428,14 @@ func (a *headAppender) commitMetadata() { } } +func (a *headAppender) unmarkCreatedSeriesAsPendingCommit() { + for _, s := range a.series { + s.Lock() + s.pendingCommit = false + s.Unlock() + } +} + // Commit writes to the WAL and adds the data to the Head. // TODO(codesome): Refactor this method to reduce indentation and make it more readable. func (a *headAppender) Commit() (err error) { @@ -1490,6 +1489,8 @@ func (a *headAppender) Commit() (err error) { a.commitHistograms(acc) a.commitFloatHistograms(acc) a.commitMetadata() + // Unmark all series as pending commit after all samples have been committed. + a.unmarkCreatedSeriesAsPendingCommit() a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) @@ -1963,6 +1964,7 @@ func (a *headAppender) Rollback() (err error) { defer a.head.metrics.activeAppenders.Dec() defer a.head.iso.closeAppend(a.appendID) defer a.head.putSeriesBuffer(a.sampleSeries) + defer a.unmarkCreatedSeriesAsPendingCommit() var series *memSeries for i := range a.samples { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go index c73872c12e..45bb2285f0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go @@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels { } // RebuildSymbolTable is a no-op when not using dedupelabels. 
-func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { +func (h *Head) RebuildSymbolTable(_ *slog.Logger) *labels.SymbolTable { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index b95257c28a..f37fd17d60 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -117,15 +117,19 @@ func (h *headIndexReader) PostingsForAllLabelValues(ctx context.Context, name st func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { series := make([]*memSeries, 0, 128) + notFoundSeriesCount := 0 // Fetch all the series only once. for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - h.head.logger.Debug("Looked up series not found") + notFoundSeriesCount++ } else { series = append(series, s) } } + if notFoundSeriesCount > 0 { + h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount) + } if err := p.Err(); err != nil { return index.ErrPostings(fmt.Errorf("expand postings: %w", err)) } @@ -150,11 +154,12 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou } out := make([]storage.SeriesRef, 0, 128) + notFoundSeriesCount := 0 for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - h.head.logger.Debug("Looked up series not found") + notFoundSeriesCount++ continue } @@ -165,6 +170,9 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou out = append(out, storage.SeriesRef(s.ref)) } + if notFoundSeriesCount > 0 { + h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount) + } return index.NewListPostings(out) } @@ -481,7 +489,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi // oooChunk returns the chunk for the HeadChunkID by m-mapping it from the disk. // It never returns the head OOO chunk. -func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) { +func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, _ *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) { // ix represents the index of chunk in the s.ooo.oooMmappedChunks slice. The chunk id's are // incremented by 1 when new chunk is created, hence (id - firstOOOChunkID) gives the slice index. 
ix := int(id) - int(s.ooo.firstOOOChunkID) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 0afe84a875..926af84603 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -16,6 +16,7 @@ package tsdb import ( "errors" "fmt" + "maps" "math" "os" "path/filepath" @@ -24,6 +25,7 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -39,7 +41,6 @@ import ( "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/wlog" - "github.com/prometheus/prometheus/util/zeropool" ) // histogramRecord combines both RefHistogramSample and RefFloatHistogramSample @@ -51,13 +52,39 @@ type histogramRecord struct { fh *histogram.FloatHistogram } -func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { - // Track number of samples that referenced a series we don't know about +type seriesRefSet struct { + refs map[chunks.HeadSeriesRef]struct{} + mtx sync.Mutex +} + +func (s *seriesRefSet) merge(other map[chunks.HeadSeriesRef]struct{}) { + s.mtx.Lock() + defer s.mtx.Unlock() + maps.Copy(s.refs, other) +} + +func (s *seriesRefSet) count() int { + s.mtx.Lock() + defer s.mtx.Unlock() + return len(s.refs) +} + +func counterAddNonZero(v *prometheus.CounterVec, value float64, lvs ...string) { + if value > 0 { + v.WithLabelValues(lvs...).Add(value) + } +} + +func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk, lastSegment int) (err error) { + // Track number of missing series records that were referenced by other records. + unknownSeriesRefs := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{}), mtx: sync.Mutex{}} + // Track number of different records that referenced a series we don't know about // for error reporting. - var unknownRefs atomic.Uint64 + var unknownSampleRefs atomic.Uint64 var unknownExemplarRefs atomic.Uint64 var unknownHistogramRefs atomic.Uint64 var unknownMetadataRefs atomic.Uint64 + var unknownTombstoneRefs atomic.Uint64 // Track number of series records that had overlapping m-map chunks. 
var mmapOverlappingChunks atomic.Uint64 @@ -73,14 +100,6 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch decoded = make(chan interface{}, 10) decodeErr, seriesCreationErr error - - seriesPool zeropool.Pool[[]record.RefSeries] - samplesPool zeropool.Pool[[]record.RefSample] - tstonesPool zeropool.Pool[[]tombstones.Stone] - exemplarsPool zeropool.Pool[[]record.RefExemplar] - histogramsPool zeropool.Pool[[]record.RefHistogramSample] - floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] - metadataPool zeropool.Pool[[]record.RefMetadata] ) defer func() { @@ -100,8 +119,9 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch processors[i].setup() go func(wp *walSubsetProcessor) { - unknown, unknownHistograms, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks) - unknownRefs.Add(unknown) + missingSeries, unknownSamples, unknownHistograms, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks) + unknownSeriesRefs.merge(missingSeries) + unknownSampleRefs.Add(unknownSamples) mmapOverlappingChunks.Add(overlapping) unknownHistogramRefs.Add(unknownHistograms) wg.Done() @@ -111,16 +131,14 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch wg.Add(1) exemplarsInput = make(chan record.RefExemplar, 300) go func(input <-chan record.RefExemplar) { + missingSeries := make(map[chunks.HeadSeriesRef]struct{}) var err error defer wg.Done() for e := range input { ms := h.series.getByID(e.Ref) if ms == nil { unknownExemplarRefs.Inc() - continue - } - - if e.T < h.minValidTime.Load() { + missingSeries[e.Ref] = struct{}{} continue } // At the moment the only possible error here is out of order exemplars, which we shouldn't see when @@ -130,6 +148,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch h.logger.Warn("Unexpected error when replaying WAL on exemplar record", "err", err) } } + unknownSeriesRefs.merge(missingSeries) }(exemplarsInput) go func() { @@ -137,11 +156,10 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch var err error dec := record.NewDecoder(syms) for r.Next() { - rec := r.Record() - switch dec.Type(rec) { + switch dec.Type(r.Record()) { case record.Series: - series := seriesPool.Get()[:0] - series, err = dec.Series(rec, series) + series := h.wlReplaySeriesPool.Get()[:0] + series, err = dec.Series(r.Record(), series) if err != nil { decodeErr = &wlog.CorruptionErr{ Err: fmt.Errorf("decode series: %w", err), @@ -152,8 +170,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- series case record.Samples: - samples := samplesPool.Get()[:0] - samples, err = dec.Samples(rec, samples) + samples := h.wlReplaySamplesPool.Get()[:0] + samples, err = dec.Samples(r.Record(), samples) if err != nil { decodeErr = &wlog.CorruptionErr{ Err: fmt.Errorf("decode samples: %w", err), @@ -164,8 +182,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- samples case record.Tombstones: - tstones := tstonesPool.Get()[:0] - tstones, err = dec.Tombstones(rec, tstones) + tstones := h.wlReplaytStonesPool.Get()[:0] + tstones, err = dec.Tombstones(r.Record(), tstones) if err != nil { decodeErr = &wlog.CorruptionErr{ Err: fmt.Errorf("decode tombstones: %w", err), @@ -176,8 +194,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- tstones case record.Exemplars: - exemplars := 
exemplarsPool.Get()[:0] - exemplars, err = dec.Exemplars(rec, exemplars) + exemplars := h.wlReplayExemplarsPool.Get()[:0] + exemplars, err = dec.Exemplars(r.Record(), exemplars) if err != nil { decodeErr = &wlog.CorruptionErr{ Err: fmt.Errorf("decode exemplars: %w", err), @@ -188,8 +206,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- exemplars case record.HistogramSamples, record.CustomBucketsHistogramSamples: - hists := histogramsPool.Get()[:0] - hists, err = dec.HistogramSamples(rec, hists) + hists := h.wlReplayHistogramsPool.Get()[:0] + hists, err = dec.HistogramSamples(r.Record(), hists) if err != nil { decodeErr = &wlog.CorruptionErr{ Err: fmt.Errorf("decode histograms: %w", err), @@ -200,8 +218,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- hists case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: - hists := floatHistogramsPool.Get()[:0] - hists, err = dec.FloatHistogramSamples(rec, hists) + hists := h.wlReplayFloatHistogramsPool.Get()[:0] + hists, err = dec.FloatHistogramSamples(r.Record(), hists) if err != nil { decodeErr = &wlog.CorruptionErr{ Err: fmt.Errorf("decode float histograms: %w", err), @@ -212,8 +230,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- hists case record.Metadata: - meta := metadataPool.Get()[:0] - meta, err := dec.Metadata(rec, meta) + meta := h.wlReplayMetadataPool.Get()[:0] + meta, err := dec.Metadata(r.Record(), meta) if err != nil { decodeErr = &wlog.CorruptionErr{ Err: fmt.Errorf("decode metadata: %w", err), @@ -230,12 +248,13 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch }() // The records are always replayed from the oldest to the newest. + missingSeries := make(map[chunks.HeadSeriesRef]struct{}) Outer: for d := range decoded { switch v := d.(type) { case []record.RefSeries: for _, walSeries := range v { - mSeries, created, err := h.getOrCreateWithID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels) + mSeries, created, err := h.getOrCreateWithID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels, false) if err != nil { seriesCreationErr = err break Outer @@ -246,12 +265,14 @@ Outer: } if !created { multiRef[walSeries.Ref] = mSeries.ref + // Set the WAL expiry for the duplicate series, so it is kept in subsequent WAL checkpoints. 
+ h.setWALExpiry(walSeries.Ref, lastSegment) } idx := uint64(mSeries.ref) % uint64(concurrency) processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries} } - seriesPool.Put(v) + h.wlReplaySeriesPool.Put(v) case []record.RefSample: samples := v minValidTime := h.minValidTime.Load() @@ -287,26 +308,36 @@ Outer: } samples = samples[m:] } - samplesPool.Put(v) + h.wlReplaySamplesPool.Put(v) case []tombstones.Stone: for _, s := range v { for _, itv := range s.Intervals { if itv.Maxt < h.minValidTime.Load() { continue } + if r, ok := multiRef[chunks.HeadSeriesRef(s.Ref)]; ok { + s.Ref = storage.SeriesRef(r) + } if m := h.series.getByID(chunks.HeadSeriesRef(s.Ref)); m == nil { - unknownRefs.Inc() + unknownTombstoneRefs.Inc() + missingSeries[chunks.HeadSeriesRef(s.Ref)] = struct{}{} continue } h.tombstones.AddInterval(s.Ref, itv) } } - tstonesPool.Put(v) + h.wlReplaytStonesPool.Put(v) case []record.RefExemplar: for _, e := range v { + if e.T < h.minValidTime.Load() { + continue + } + if r, ok := multiRef[e.Ref]; ok { + e.Ref = r + } exemplarsInput <- e } - exemplarsPool.Put(v) + h.wlReplayExemplarsPool.Put(v) case []record.RefHistogramSample: samples := v minValidTime := h.minValidTime.Load() @@ -342,7 +373,7 @@ Outer: } samples = samples[m:] } - histogramsPool.Put(v) + h.wlReplayHistogramsPool.Put(v) case []record.RefFloatHistogramSample: samples := v minValidTime := h.minValidTime.Load() @@ -378,12 +409,16 @@ Outer: } samples = samples[m:] } - floatHistogramsPool.Put(v) + h.wlReplayFloatHistogramsPool.Put(v) case []record.RefMetadata: for _, m := range v { + if r, ok := multiRef[m.Ref]; ok { + m.Ref = r + } s := h.series.getByID(m.Ref) if s == nil { unknownMetadataRefs.Inc() + missingSeries[m.Ref] = struct{}{} continue } s.meta = &metadata.Metadata{ @@ -392,11 +427,12 @@ Outer: Help: m.Help, } } - metadataPool.Put(v) + h.wlReplayMetadataPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } } + unknownSeriesRefs.merge(missingSeries) if decodeErr != nil { return decodeErr @@ -419,14 +455,23 @@ Outer: return fmt.Errorf("read records: %w", err) } - if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { + if unknownSampleRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load()+unknownTombstoneRefs.Load() > 0 { h.logger.Warn( "Unknown series references", - "samples", unknownRefs.Load(), + "series", unknownSeriesRefs.count(), + "samples", unknownSampleRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "histograms", unknownHistogramRefs.Load(), "metadata", unknownMetadataRefs.Load(), + "tombstones", unknownTombstoneRefs.Load(), ) + + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownSeriesRefs.count()), "series") + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownSampleRefs.Load()), "samples") + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownExemplarRefs.Load()), "exemplars") + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownHistogramRefs.Load()), "histograms") + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownMetadataRefs.Load()), "metadata") + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownTombstoneRefs.Load()), "tombstones") } if count := mmapOverlappingChunks.Load(); count > 0 { h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count) @@ -556,10 +601,13 @@ func (wp 
*walSubsetProcessor) reuseHistogramBuf() []histogramRecord { // processWALSamples adds the samples it receives to the head and passes // the buffer received to an output channel for reuse. // Samples before the minValidTime timestamp are discarded. -func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, unknownHistogramRefs, mmapOverlappingChunks uint64) { +func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (map[chunks.HeadSeriesRef]struct{}, uint64, uint64, uint64) { defer close(wp.output) defer close(wp.histogramsOutput) + missingSeries := make(map[chunks.HeadSeriesRef]struct{}) + var unknownSampleRefs, unknownHistogramRefs, mmapOverlappingChunks uint64 + minValidTime := h.minValidTime.Load() mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) appendChunkOpts := chunkOpts{ @@ -581,7 +629,8 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp for _, s := range in.samples { ms := h.series.getByID(s.Ref) if ms == nil { - unknownRefs++ + unknownSampleRefs++ + missingSeries[s.Ref] = struct{}{} continue } if s.T <= ms.mmMaxTime { @@ -611,6 +660,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp ms := h.series.getByID(s.ref) if ms == nil { unknownHistogramRefs++ + missingSeries[s.ref] = struct{}{} continue } if s.t <= ms.mmMaxTime { @@ -641,13 +691,15 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp } h.updateMinMaxTime(mint, maxt) - return unknownRefs, unknownHistogramRefs, mmapOverlappingChunks + return missingSeries, unknownSampleRefs, unknownHistogramRefs, mmapOverlappingChunks } func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { - // Track number of samples, histogram samples, m-map markers, that referenced a series we don't know about + // Track number of missing series records that were referenced by other records. + unknownSeriesRefs := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{}), mtx: sync.Mutex{}} + // Track number of samples, histogram samples, and m-map markers that referenced a series we don't know about // for error reporting. - var unknownRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64 + var unknownSampleRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64 lastSeq, lastOff := lastMmapRef.Unpack() // Start workers that each process samples for a partition of the series ID space. 
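Both loadWAL and loadWBL in the hunks above now collect the refs of missing series in per-worker maps, merge them into a shared mutex-guarded seriesRefSet so each duplicate ref is counted once, and report only non-zero totals through counterAddNonZero into the new replay unknown-refs counters. A small standalone sketch of that merge pattern; refSet below is an illustrative stand-in, not the actual seriesRefSet type:

package main

import (
	"fmt"
	"sync"
)

type refSet struct {
	mtx  sync.Mutex
	refs map[uint64]struct{}
}

// merge folds a worker-local set into the shared one; workers never touch the
// shared map directly, so only this short critical section is serialized.
func (s *refSet) merge(local map[uint64]struct{}) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	for r := range local {
		s.refs[r] = struct{}{}
	}
}

func (s *refSet) count() int {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return len(s.refs)
}

func main() {
	shared := &refSet{refs: map[uint64]struct{}{}}
	var wg sync.WaitGroup
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func(w int) {
			defer wg.Done()
			local := map[uint64]struct{}{} // per-worker scratch set, no locking needed
			local[uint64(w)] = struct{}{}
			local[42] = struct{}{} // the same ref seen by several workers collapses in the shared set
			shared.merge(local)
		}(w)
	}
	wg.Wait()
	fmt.Println(shared.count()) // 5: refs 0..3 plus the shared 42
}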
@@ -659,12 +711,8 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch shards = make([][]record.RefSample, concurrency) histogramShards = make([][]histogramRecord, concurrency) - decodedCh = make(chan interface{}, 10) - decodeErr error - samplesPool zeropool.Pool[[]record.RefSample] - markersPool zeropool.Pool[[]record.RefMmapMarker] - histogramSamplesPool zeropool.Pool[[]record.RefHistogramSample] - floatHistogramSamplesPool zeropool.Pool[[]record.RefFloatHistogramSample] + decodedCh = make(chan interface{}, 10) + decodeErr error ) defer func() { @@ -685,8 +733,9 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch processors[i].setup() go func(wp *wblSubsetProcessor) { - unknown, unknownHistograms := wp.processWBLSamples(h) - unknownRefs.Add(unknown) + missingSeries, unknownSamples, unknownHistograms := wp.processWBLSamples(h) + unknownSeriesRefs.merge(missingSeries) + unknownSampleRefs.Add(unknownSamples) unknownHistogramRefs.Add(unknownHistograms) wg.Done() }(&processors[i]) @@ -700,7 +749,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch rec := r.Record() switch dec.Type(rec) { case record.Samples: - samples := samplesPool.Get()[:0] + samples := h.wlReplaySamplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -712,7 +761,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- samples case record.MmapMarkers: - markers := markersPool.Get()[:0] + markers := h.wlReplayMmapMarkersPool.Get()[:0] markers, err = dec.MmapMarkers(rec, markers) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -724,7 +773,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- markers case record.HistogramSamples, record.CustomBucketsHistogramSamples: - hists := histogramSamplesPool.Get()[:0] + hists := h.wlReplayHistogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -736,7 +785,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- hists case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: - hists := floatHistogramSamplesPool.Get()[:0] + hists := h.wlReplayFloatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -754,6 +803,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch }() // The records are always replayed from the oldest to the newest. 
+ missingSeries := make(map[chunks.HeadSeriesRef]struct{}) for d := range decodedCh { switch v := d.(type) { case []record.RefSample: @@ -787,7 +837,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - samplesPool.Put(v) + h.wlReplaySamplesPool.Put(v) case []record.RefMmapMarker: markers := v for _, rm := range markers { @@ -806,6 +856,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch ms := h.series.getByID(rm.Ref) if ms == nil { mmapMarkerUnknownRefs.Inc() + missingSeries[rm.Ref] = struct{}{} continue } idx := uint64(ms.ref) % uint64(concurrency) @@ -842,7 +893,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - histogramSamplesPool.Put(v) + h.wlReplayHistogramsPool.Put(v) case []record.RefFloatHistogramSample: samples := v // We split up the samples into chunks of 5000 samples or less. @@ -874,11 +925,12 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - floatHistogramSamplesPool.Put(v) + h.wlReplayFloatHistogramsPool.Put(v) default: panic(fmt.Errorf("unexpected decodedCh type: %T", d)) } } + unknownSeriesRefs.merge(missingSeries) if decodeErr != nil { return decodeErr @@ -894,9 +946,21 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return fmt.Errorf("read records: %w", err) } - if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { - h.logger.Warn("Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) + if unknownSampleRefs.Load()+unknownHistogramRefs.Load()+mmapMarkerUnknownRefs.Load() > 0 { + h.logger.Warn( + "Unknown series references for ooo WAL replay", + "series", unknownSeriesRefs.count(), + "samples", unknownSampleRefs.Load(), + "histograms", unknownHistogramRefs.Load(), + "mmap_markers", mmapMarkerUnknownRefs.Load(), + ) + + counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownSeriesRefs.count()), "series") + counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownSampleRefs.Load()), "samples") + counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(unknownHistogramRefs.Load()), "histograms") + counterAddNonZero(h.metrics.wblReplayUnknownRefsTotal, float64(mmapMarkerUnknownRefs.Load()), "mmap_markers") } + return nil } @@ -964,10 +1028,13 @@ func (wp *wblSubsetProcessor) reuseHistogramBuf() []histogramRecord { // processWBLSamples adds the samples it receives to the head and passes // the buffer received to an output channel for reuse. -func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHistogramRefs uint64) { +func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (map[chunks.HeadSeriesRef]struct{}, uint64, uint64) { defer close(wp.output) defer close(wp.histogramsOutput) + missingSeries := make(map[chunks.HeadSeriesRef]struct{}) + var unknownSampleRefs, unknownHistogramRefs uint64 + oooCapMax := h.opts.OutOfOrderCapMax.Load() // We don't check for minValidTime for ooo samples. 
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) @@ -984,7 +1051,8 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHi for _, s := range in.samples { ms := h.series.getByID(s.Ref) if ms == nil { - unknownRefs++ + unknownSampleRefs++ + missingSeries[s.Ref] = struct{}{} continue } ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax, h.logger) @@ -1009,6 +1077,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHi ms := h.series.getByID(s.ref) if ms == nil { unknownHistogramRefs++ + missingSeries[s.ref] = struct{}{} continue } var chunkCreated bool @@ -1039,7 +1108,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHi h.updateMinOOOMaxOOOTime(mint, maxt) - return unknownRefs, unknownHistogramRefs + return missingSeries, unknownSampleRefs, unknownHistogramRefs } const ( @@ -1508,7 +1577,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie localRefSeries := shardedRefSeries[idx] for csr := range rc { - series, _, err := h.getOrCreateWithID(csr.ref, csr.lset.Hash(), csr.lset) + series, _, err := h.getOrCreateWithID(csr.ref, csr.lset.Hash(), csr.lset, false) if err != nil { errChan <- err return diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 911b1a6ecc..42ecd7245d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -1473,7 +1473,7 @@ func (r *Reader) Close() error { return r.c.Close() } -func (r *Reader) lookupSymbol(ctx context.Context, o uint32) (string, error) { +func (r *Reader) lookupSymbol(_ context.Context, o uint32) (string, error) { if s, ok := r.nameSymbols[o]; ok { return s, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go index 86330f36e4..1035991e74 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go @@ -275,12 +275,11 @@ func (txr *txRing) cleanupAppendIDsBelow(bound uint64) { pos := int(txr.txIDFirst) for txr.txIDCount > 0 { - if txr.txIDs[pos] < bound { - txr.txIDFirst++ - txr.txIDCount-- - } else { + if txr.txIDs[pos] >= bound { break } + txr.txIDFirst++ + txr.txIDCount-- pos++ if pos == len(txr.txIDs) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go index 0ed9f36484..a3d6b3567b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go @@ -72,7 +72,7 @@ func (o *OOOChunk) NumSamples() int { // ToEncodedChunks returns chunks with the samples in the OOOChunk. // -//nolint:revive // unexported-return. 
+//nolint:revive func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) { if len(o.samples) == 0 { return nil, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go index 2a1a44d18e..5eb63edfd5 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -20,7 +20,7 @@ import ( "math" "slices" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -484,15 +484,15 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks) } -func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { +func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, _ bool, _ ...*labels.Matcher) (index.Postings, error) { return nil, errors.New("not implemented") } @@ -504,7 +504,7 @@ func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.S return "", errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(_ context.Context, _ index.Postings) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index ce99df6a13..5d9801f2b8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -20,7 +20,7 @@ import ( "math" "slices" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er }, nil } -func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, err := q.index.SortedLabelValues(ctx, name, matchers...) 
return res, nil, err } -func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *blockBaseQuerier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, err := q.index.LabelNames(ctx, matchers...) return res, nil, err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 4d2a52b9af..692976cdf8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -204,7 +204,7 @@ type Decoder struct { builder labels.ScratchBuilder } -func NewDecoder(t *labels.SymbolTable) Decoder { // FIXME remove t +func NewDecoder(_ *labels.SymbolTable) Decoder { // FIXME remove t return Decoder{builder: labels.NewScratchBuilder(0)} } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/testutil.go b/vendor/github.com/prometheus/prometheus/tsdb/testutil.go index e957b0307b..4dac8c29ff 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/testutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/testutil.go @@ -16,16 +16,14 @@ package tsdb import ( "testing" - "github.com/prometheus/prometheus/tsdb/tsdbutil" - prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) const ( diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go index 7228dbafed..5a335989c7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go @@ -60,11 +60,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat for _, c := range cases { t.Run(fmt.Sprintf("%+v", c), func(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "test") - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, os.RemoveAll(tmpdir)) - }) + tmpdir := t.TempDir() // Test preconditions (file already exists + lockfile option) if c.fileAlreadyExists { @@ -82,7 +78,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat // Check that the lockfile is always deleted if !c.lockFileDisabled { - _, err = os.Stat(locker.path) + _, err := os.Stat(locker.path) require.True(t, os.IsNotExist(err), "lockfile was not deleted") } }) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index 5c607d7030..2c1b0c0534 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -93,7 +93,7 @@ const CheckpointPrefix = "checkpoint." // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. 
-func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef, last int) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser @@ -181,7 +181,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He // Drop irrelevant series in place. repl := series[:0] for _, s := range series { - if keep(s.Ref) { + if keep(s.Ref, to) { repl = append(repl, s) } } @@ -323,7 +323,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He // Only keep reference to the latest found metadata for each refID. repl := 0 for _, m := range metadata { - if keep(m.Ref) { + if keep(m.Ref, to) { if _, ok := latestMetadataMap[m.Ref]; !ok { repl++ } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go index a017d362d1..04f24387bf 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go @@ -22,9 +22,9 @@ import ( "io" "log/slog" - "github.com/golang/snappy" - "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/util/compression" ) // LiveReaderMetrics holds all metrics exposed by the LiveReader. @@ -51,14 +51,11 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { // NewLiveReader returns a new live reader. func NewLiveReader(logger *slog.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { - // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. - zstdReader, _ := zstd.NewReader(nil) - lr := &LiveReader{ - logger: logger, - rdr: r, - zstdReader: zstdReader, - metrics: metrics, + logger: logger, + rdr: r, + decBuf: compression.NewSyncDecodeBuffer(), + metrics: metrics, // Until we understand how they come about, make readers permissive // to records spanning pages. @@ -72,12 +69,13 @@ func NewLiveReader(logger *slog.Logger, metrics *LiveReaderMetrics, r io.Reader) // that are still in the process of being written, and returns records as soon // as they can be read. type LiveReader struct { - logger *slog.Logger - rdr io.Reader - err error - rec []byte - compressBuf []byte - zstdReader *zstd.Decoder + logger *slog.Logger + rdr io.Reader + err error + rec []byte + + precomprBuf []byte + decBuf compression.DecodeBuffer hdr [recordHeaderSize]byte buf [pageSize]byte readIndex int // Index in buf to start at for next read. @@ -195,39 +193,29 @@ func (r *LiveReader) buildRecord() (bool, error) { rt := recTypeFromHeader(r.hdr[0]) if rt == recFirst || rt == recFull { - r.rec = r.rec[:0] - r.compressBuf = r.compressBuf[:0] + r.precomprBuf = r.precomprBuf[:0] } - isSnappyCompressed := r.hdr[0]&snappyMask == snappyMask - isZstdCompressed := r.hdr[0]&zstdMask == zstdMask - - if isSnappyCompressed || isZstdCompressed { - r.compressBuf = append(r.compressBuf, temp...) - } else { - r.rec = append(r.rec, temp...) + // Segment format has only 2 bits, so it's either of those 3 options. 
+ // https://github.com/prometheus/prometheus/blob/main/tsdb/docs/format/wal.md#records-encoding + compr := compression.None + if r.hdr[0]&snappyMask == snappyMask { + compr = compression.Snappy + } else if r.hdr[0]&zstdMask == zstdMask { + compr = compression.Zstd } + r.precomprBuf = append(r.precomprBuf, temp...) + if err := validateRecord(rt, r.index); err != nil { r.index = 0 return false, err } if rt == recLast || rt == recFull { r.index = 0 - if isSnappyCompressed && len(r.compressBuf) > 0 { - // The snappy library uses `len` to calculate if we need a new buffer. - // In order to allocate as few buffers as possible make the length - // equal to the capacity. - r.rec = r.rec[:cap(r.rec)] - r.rec, err = snappy.Decode(r.rec, r.compressBuf) - if err != nil { - return false, err - } - } else if isZstdCompressed && len(r.compressBuf) > 0 { - r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0]) - if err != nil { - return false, err - } + r.rec, err = compression.Decode(compr, r.precomprBuf, r.decBuf) + if err != nil { + return false, err } return true, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go index a744b0cc4b..c559d85b89 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go @@ -21,17 +21,17 @@ import ( "hash/crc32" "io" - "github.com/golang/snappy" - "github.com/klauspost/compress/zstd" + "github.com/prometheus/prometheus/util/compression" ) // Reader reads WAL records from an io.Reader. type Reader struct { - rdr io.Reader - err error - rec []byte - compressBuf []byte - zstdReader *zstd.Decoder + rdr io.Reader + err error + rec []byte + + precomprBuf []byte + decBuf compression.DecodeBuffer buf [pageSize]byte total int64 // Total bytes processed. curRecTyp recType // Used for checking that the last record is not torn. @@ -39,15 +39,13 @@ type Reader struct { // NewReader returns a new reader. func NewReader(r io.Reader) *Reader { - // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. - zstdReader, _ := zstd.NewReader(nil) - return &Reader{rdr: r, zstdReader: zstdReader} + return &Reader{rdr: r, decBuf: compression.NewSyncDecodeBuffer()} } // Next advances the reader to the next records and returns true if it exists. // It must not be called again after it returned false. func (r *Reader) Next() bool { - err := r.next() + err := r.nextNew() if err != nil && errors.Is(err, io.EOF) { // The last WAL segment record shouldn't be torn(should be full or last). // The last record would be torn after a crash just before @@ -61,14 +59,13 @@ func (r *Reader) Next() bool { return r.err == nil } -func (r *Reader) next() (err error) { +func (r *Reader) nextNew() (err error) { // We have to use r.buf since allocating byte arrays here fails escape // analysis and ends up on the heap, even though it seemingly should not. 
hdr := r.buf[:recordHeaderSize] buf := r.buf[recordHeaderSize:] - r.rec = r.rec[:0] - r.compressBuf = r.compressBuf[:0] + r.precomprBuf = r.precomprBuf[:0] i := 0 for { @@ -77,8 +74,13 @@ func (r *Reader) next() (err error) { } r.total++ r.curRecTyp = recTypeFromHeader(hdr[0]) - isSnappyCompressed := hdr[0]&snappyMask == snappyMask - isZstdCompressed := hdr[0]&zstdMask == zstdMask + + compr := compression.None + if hdr[0]&snappyMask == snappyMask { + compr = compression.Snappy + } else if hdr[0]&zstdMask == zstdMask { + compr = compression.Zstd + } // Gobble up zero bytes. if r.curRecTyp == recPageTerm { @@ -133,29 +135,14 @@ func (r *Reader) next() (err error) { if c := crc32.Checksum(buf[:length], castagnoliTable); c != crc { return fmt.Errorf("unexpected checksum %x, expected %x", c, crc) } - - if isSnappyCompressed || isZstdCompressed { - r.compressBuf = append(r.compressBuf, buf[:length]...) - } else { - r.rec = append(r.rec, buf[:length]...) - } - if err := validateRecord(r.curRecTyp, i); err != nil { return err } + + r.precomprBuf = append(r.precomprBuf, buf[:length]...) if r.curRecTyp == recLast || r.curRecTyp == recFull { - if isSnappyCompressed && len(r.compressBuf) > 0 { - // The snappy library uses `len` to calculate if we need a new buffer. - // In order to allocate as few buffers as possible make the length - // equal to the capacity. - r.rec = r.rec[:cap(r.rec)] - r.rec, err = snappy.Decode(r.rec, r.compressBuf) - return err - } else if isZstdCompressed && len(r.compressBuf) > 0 { - r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0]) - return err - } - return nil + r.rec, err = compression.Decode(compr, r.precomprBuf, r.decBuf) + return err } // Only increment i for non-zero records since we use it diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index ca74a9ceaf..f171a8bdc1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -491,12 +491,13 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { metadata []record.RefMetadata ) for r.Next() && !isClosed(w.quit) { + var err error rec := r.Record() w.recordsReadMetric.WithLabelValues(dec.Type(rec).String()).Inc() switch dec.Type(rec) { case record.Series: - series, err := dec.Series(rec, series[:0]) + series, err = dec.Series(rec, series[:0]) if err != nil { w.recordDecodeFailsMetric.Inc() return err @@ -509,7 +510,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !tail { break } - samples, err := dec.Samples(rec, samples[:0]) + samples, err = dec.Samples(rec, samples[:0]) if err != nil { w.recordDecodeFailsMetric.Inc() return err @@ -539,7 +540,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !tail { break } - exemplars, err := dec.Exemplars(rec, exemplars[:0]) + exemplars, err = dec.Exemplars(rec, exemplars[:0]) if err != nil { w.recordDecodeFailsMetric.Inc() return err @@ -554,7 +555,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !tail { break } - histograms, err := dec.HistogramSamples(rec, histograms[:0]) + histograms, err = dec.HistogramSamples(rec, histograms[:0]) if err != nil { w.recordDecodeFailsMetric.Inc() return err @@ -582,7 +583,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !tail { break } - floatHistograms, err := 
dec.FloatHistogramSamples(rec, floatHistograms[:0]) + floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms[:0]) if err != nil { w.recordDecodeFailsMetric.Inc() return err @@ -606,12 +607,12 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendMetadata { break } - meta, err := dec.Metadata(rec, metadata[:0]) + metadata, err = dec.Metadata(rec, metadata[:0]) if err != nil { w.recordDecodeFailsMetric.Inc() return err } - w.writer.StoreMetadata(meta) + w.writer.StoreMetadata(metadata) case record.Unknown: // Could be corruption, or reading from a WAL from a newer Prometheus. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go index 54c257d61a..dec41ad2c7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go @@ -29,12 +29,12 @@ import ( "sync" "time" - "github.com/golang/snappy" - "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/util/compression" ) const ( @@ -169,26 +169,6 @@ func OpenReadSegment(fn string) (*Segment, error) { return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil } -type CompressionType string - -const ( - CompressionNone CompressionType = "none" - CompressionSnappy CompressionType = "snappy" - CompressionZstd CompressionType = "zstd" -) - -// ParseCompressionType parses the two compression-related configuration values and returns the CompressionType. If -// compression is enabled but the compressType is unrecognized, we default to Snappy compression. -func ParseCompressionType(compress bool, compressType string) CompressionType { - if compress { - if compressType == "zstd" { - return CompressionZstd - } - return CompressionSnappy - } - return CompressionNone -} - // WL is a write log that stores records in segment files. // It must be read from start to end once before logging new data. // If an error occurs during read, the repair procedure must be called @@ -210,9 +190,8 @@ type WL struct { stopc chan chan struct{} actorc chan func() closed bool // To allow calling Close() more than once without blocking. 
- compress CompressionType - compressBuf []byte - zstdWriter *zstd.Encoder + compress compression.Type + cEnc compression.EncodeBuffer WriteNotified WriteNotified @@ -220,14 +199,17 @@ type WL struct { } type wlMetrics struct { - fsyncDuration prometheus.Summary - pageFlushes prometheus.Counter - pageCompletions prometheus.Counter - truncateFail prometheus.Counter - truncateTotal prometheus.Counter - currentSegment prometheus.Gauge - writesFailed prometheus.Counter - walFileSize prometheus.GaugeFunc + fsyncDuration prometheus.Summary + pageFlushes prometheus.Counter + pageCompletions prometheus.Counter + truncateFail prometheus.Counter + truncateTotal prometheus.Counter + currentSegment prometheus.Gauge + writesFailed prometheus.Counter + walFileSize prometheus.GaugeFunc + recordPartWrites prometheus.Counter + recordPartBytes prometheus.Counter + recordBytesSaved *prometheus.CounterVec r prometheus.Registerer } @@ -244,78 +226,78 @@ func (w *wlMetrics) Unregister() { w.r.Unregister(w.currentSegment) w.r.Unregister(w.writesFailed) w.r.Unregister(w.walFileSize) + w.r.Unregister(w.recordPartWrites) + w.r.Unregister(w.recordPartBytes) + w.r.Unregister(w.recordBytesSaved) } func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { - m := &wlMetrics{ + return &wlMetrics{ r: r, + fsyncDuration: promauto.With(r).NewSummary(prometheus.SummaryOpts{ + Name: "fsync_duration_seconds", + Help: "Duration of write log fsync.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }), + pageFlushes: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "page_flushes_total", + Help: "Total number of page flushes.", + }), + pageCompletions: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "completed_pages_total", + Help: "Total number of completed pages.", + }), + truncateFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "truncations_failed_total", + Help: "Total number of write log truncations that failed.", + }), + truncateTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "truncations_total", + Help: "Total number of write log truncations attempted.", + }), + currentSegment: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "segment_current", + Help: "Write log segment index that TSDB is currently writing to.", + }), + writesFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "writes_failed_total", + Help: "Total number of write log writes that failed.", + }), + walFileSize: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "storage_size_bytes", + Help: "Size of the write log directory.", + }, func() float64 { + val, err := w.Size() + if err != nil { + w.logger.Error("Failed to calculate size of \"wal\" dir", "err", err.Error()) + } + return float64(val) + }), + recordPartWrites: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "record_part_writes_total", + Help: "Total number of record parts written before flushing.", + }), + recordPartBytes: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "record_parts_bytes_written_total", + Help: "Total number of record part bytes written before flushing, including" + + " CRC and compression headers.", + }), + recordBytesSaved: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "record_bytes_saved_total", + Help: "Total number of bytes saved by the optional record compression." 
+ + " Use this metric to learn about the effectiveness compression.", + }, []string{"compression"}), } - - m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "fsync_duration_seconds", - Help: "Duration of write log fsync.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }) - m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "page_flushes_total", - Help: "Total number of page flushes.", - }) - m.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "completed_pages_total", - Help: "Total number of completed pages.", - }) - m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "truncations_failed_total", - Help: "Total number of write log truncations that failed.", - }) - m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "truncations_total", - Help: "Total number of write log truncations attempted.", - }) - m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "segment_current", - Help: "Write log segment index that TSDB is currently writing to.", - }) - m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "writes_failed_total", - Help: "Total number of write log writes that failed.", - }) - m.walFileSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Name: "storage_size_bytes", - Help: "Size of the write log directory.", - }, func() float64 { - val, err := w.Size() - if err != nil { - w.logger.Error("Failed to calculate size of \"wal\" dir", - "err", err.Error()) - } - return float64(val) - }) - - if r != nil { - r.MustRegister( - m.fsyncDuration, - m.pageFlushes, - m.pageCompletions, - m.truncateFail, - m.truncateTotal, - m.currentSegment, - m.writesFailed, - m.walFileSize, - ) - } - - return m } // New returns a new WAL over the given directory. -func New(logger *slog.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { +func New(logger *slog.Logger, reg prometheus.Registerer, dir string, compress compression.Type) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new write log over the given directory. // New segments are created with the specified size. 
-func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { +func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress compression.Type) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -326,15 +308,6 @@ func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segment logger = promslog.NewNopLogger() } - var zstdWriter *zstd.Encoder - if compress == CompressionZstd { - var err error - zstdWriter, err = zstd.NewWriter(nil) - if err != nil { - return nil, err - } - } - w := &WL{ dir: dir, logger: logger, @@ -343,7 +316,7 @@ func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segment actorc: make(chan func(), 100), stopc: make(chan chan struct{}), compress: compress, - zstdWriter: zstdWriter, + cEnc: compression.NewSyncEncodeBuffer(), } prefix := "prometheus_tsdb_wal_" if filepath.Base(dir) == WblDirName { @@ -382,22 +355,16 @@ func Open(logger *slog.Logger, dir string) (*WL, error) { if logger == nil { logger = promslog.NewNopLogger() } - zstdWriter, err := zstd.NewWriter(nil) - if err != nil { - return nil, err - } w := &WL{ - dir: dir, - logger: logger, - zstdWriter: zstdWriter, + dir: dir, + logger: logger, } - return w, nil } // CompressionType returns if compression is enabled on this WAL. -func (w *WL) CompressionType() CompressionType { +func (w *WL) CompressionType() compression.Type { return w.compress } @@ -715,26 +682,23 @@ func (w *WL) log(rec []byte, final bool) error { } // Compress the record before calculating if a new segment is needed. - compressed := false - if w.compress == CompressionSnappy && len(rec) > 0 { - // If MaxEncodedLen is less than 0 the record is too large to be compressed. - if len(rec) > 0 && snappy.MaxEncodedLen(len(rec)) >= 0 { - // The snappy library uses `len` to calculate if we need a new buffer. - // In order to allocate as few buffers as possible make the length - // equal to the capacity. - w.compressBuf = w.compressBuf[:cap(w.compressBuf)] - w.compressBuf = snappy.Encode(w.compressBuf, rec) - if len(w.compressBuf) < len(rec) { - rec = w.compressBuf - compressed = true - } - } - } else if w.compress == CompressionZstd && len(rec) > 0 { - w.compressBuf = w.zstdWriter.EncodeAll(rec, w.compressBuf[:0]) - if len(w.compressBuf) < len(rec) { - rec = w.compressBuf - compressed = true + finalCompression := w.compress + enc, err := compression.Encode(w.compress, rec, w.cEnc) + if err != nil { + return err + } + if w.compress != compression.None { + savedBytes := len(rec) - len(enc) + + // Even if the compression was applied, skip it, if there's no benefit + // in the WAL record size (we have a choice). For small records e.g. snappy + // compression can yield larger records than the uncompressed. + if savedBytes <= 0 { + enc = rec + finalCompression = compression.None + savedBytes = 0 } + w.metrics.recordBytesSaved.WithLabelValues(w.compress).Add(float64(savedBytes)) } // If the record is too big to fit within the active page in the current @@ -743,7 +707,7 @@ func (w *WL) log(rec []byte, final bool) error { left := w.page.remaining() - recordHeaderSize // Free space in the active page. left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment. 
- if len(rec) > left { + if len(enc) > left { if _, err := w.nextSegment(true); err != nil { return err } @@ -751,32 +715,36 @@ func (w *WL) log(rec []byte, final bool) error { // Populate as many pages as necessary to fit the record. // Be careful to always do one pass to ensure we write zero-length records. - for i := 0; i == 0 || len(rec) > 0; i++ { + for i := 0; i == 0 || len(enc) > 0; i++ { p := w.page // Find how much of the record we can fit into the page. var ( - l = min(len(rec), (pageSize-p.alloc)-recordHeaderSize) - part = rec[:l] + l = min(len(enc), (pageSize-p.alloc)-recordHeaderSize) + part = enc[:l] buf = p.buf[p.alloc:] typ recType ) switch { - case i == 0 && len(part) == len(rec): + case i == 0 && len(part) == len(enc): typ = recFull - case len(part) == len(rec): + case len(part) == len(enc): typ = recLast case i == 0: typ = recFirst default: typ = recMiddle } - if compressed { - if w.compress == CompressionSnappy { + + if finalCompression != compression.None { + switch finalCompression { + case compression.Snappy: typ |= snappyMask - } else if w.compress == CompressionZstd { + case compression.Zstd: typ |= zstdMask + default: + return fmt.Errorf("unsupported compression type: %v", finalCompression) } } @@ -788,6 +756,9 @@ func (w *WL) log(rec []byte, final bool) error { copy(buf[recordHeaderSize:], part) p.alloc += len(part) + recordHeaderSize + w.metrics.recordPartWrites.Inc() + w.metrics.recordPartBytes.Add(float64(len(part) + recordHeaderSize)) + if w.page.full() { if err := w.flushPage(true); err != nil { // TODO When the flushing fails at this point and the record has not been @@ -796,7 +767,7 @@ func (w *WL) log(rec []byte, final bool) error { return err } } - rec = rec[l:] + enc = enc[l:] } // If it's the final record of the batch and the page is not empty, flush it. diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index 5b2fde152b..95783957a7 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -143,6 +143,7 @@ var ( NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning) MixedExponentialCustomHistogramsWarning = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning) IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning) + IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) @@ -295,9 +296,20 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit } } +// NewHistogramIgnoredInMixedRangeInfo is used when a histogram is ignored +// in a range vector which contains mix of floats and histograms. 
func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName), } } + +// NewIncompatibleBucketLayoutInBinOpWarning is used if binary operators act on a +// combination of two incompatible histograms. +func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %s", IncompatibleBucketLayoutInBinOpWarning, operator), + } +} diff --git a/vendor/github.com/prometheus/prometheus/util/compression/buffers.go b/vendor/github.com/prometheus/prometheus/util/compression/buffers.go new file mode 100644 index 0000000000..765bc64c0b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/compression/buffers.go @@ -0,0 +1,142 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compression + +import ( + "sync" + + "github.com/klauspost/compress/zstd" +) + +type EncodeBuffer interface { + zstdEncBuf() *zstd.Encoder + get() []byte + set([]byte) +} + +type syncEBuffer struct { + onceZstd sync.Once + w *zstd.Encoder + buf []byte +} + +// NewSyncEncodeBuffer returns synchronous buffer that can only be used +// on one encoding goroutine at once. Notably, the encoded byte slice returned +// by Encode is valid only until the next Encode call. +func NewSyncEncodeBuffer() EncodeBuffer { + return &syncEBuffer{} +} + +func (b *syncEBuffer) zstdEncBuf() *zstd.Encoder { + b.onceZstd.Do(func() { + // Without params this never returns error. + b.w, _ = zstd.NewWriter(nil) + }) + return b.w +} + +func (b *syncEBuffer) get() []byte { + return b.buf +} + +func (b *syncEBuffer) set(buf []byte) { + b.buf = buf +} + +type concurrentEBuffer struct { + onceZstd sync.Once + w *zstd.Encoder +} + +// NewConcurrentEncodeBuffer returns a buffer that can be used concurrently. +// NOTE: For Zstd compression, a concurrency limit equal to GOMAXPROCS is implied. +func NewConcurrentEncodeBuffer() EncodeBuffer { + return &concurrentEBuffer{} +} + +func (b *concurrentEBuffer) zstdEncBuf() *zstd.Encoder { + b.onceZstd.Do(func() { + // Without params this never returns error. + b.w, _ = zstd.NewWriter(nil) + }) + return b.w +} + +// TODO(bwplotka): We could use pool, but putting it back into the pool needs to be +// on the caller side, so no pool for now. +func (b *concurrentEBuffer) get() []byte { + return nil +} + +func (b *concurrentEBuffer) set([]byte) {} + +type DecodeBuffer interface { + zstdDecBuf() *zstd.Decoder + get() []byte + set([]byte) +} + +type syncDBuffer struct { + onceZstd sync.Once + r *zstd.Decoder + buf []byte +} + +// NewSyncDecodeBuffer returns synchronous buffer that can only be used +// on one decoding goroutine at once. Notably, the decoded byte slice returned +// by Decode is valid only until the next Decode call. 
+func NewSyncDecodeBuffer() DecodeBuffer { + return &syncDBuffer{} +} + +func (b *syncDBuffer) zstdDecBuf() *zstd.Decoder { + b.onceZstd.Do(func() { + // Without params this never returns error. + b.r, _ = zstd.NewReader(nil) + }) + return b.r +} + +func (b *syncDBuffer) get() []byte { + return b.buf +} + +func (b *syncDBuffer) set(buf []byte) { + b.buf = buf +} + +type concurrentDBuffer struct { + onceZstd sync.Once + r *zstd.Decoder +} + +// NewConcurrentDecodeBuffer returns a buffer that can be used concurrently. +// NOTE: For Zstd compression a concurrency limit, equal to GOMAXPROCS is implied. +func NewConcurrentDecodeBuffer() DecodeBuffer { + return &concurrentDBuffer{} +} + +func (b *concurrentDBuffer) zstdDecBuf() *zstd.Decoder { + b.onceZstd.Do(func() { + // Without params this never returns error. + b.r, _ = zstd.NewReader(nil) + }) + return b.r +} + +func (b *concurrentDBuffer) get() []byte { + return nil +} + +func (b *concurrentDBuffer) set([]byte) {} diff --git a/vendor/github.com/prometheus/prometheus/util/compression/compression.go b/vendor/github.com/prometheus/prometheus/util/compression/compression.go new file mode 100644 index 0000000000..a1e9b7e530 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/compression/compression.go @@ -0,0 +1,122 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compression + +import ( + "errors" + "fmt" + + "github.com/golang/snappy" +) + +// Type represents a valid compression type supported by this package. +type Type = string + +const ( + // None represents no compression case. + // None is the default when Type is empty. + None Type = "none" + // Snappy represents snappy block format. + Snappy Type = "snappy" + // Zstd represents "speed" mode of Zstd (Zstandard https://facebook.github.io/zstd/). + // This is roughly equivalent to the default Zstandard mode (level 3). + Zstd Type = "zstd" +) + +func Types() []Type { return []Type{None, Snappy, Zstd} } + +// Encode returns the encoded form of src for the given compression type. +// For None or empty message the encoding is not attempted. +// +// The buf allows passing various buffer implementations that make encoding more +// efficient. See NewSyncEncodeBuffer and NewConcurrentEncodeBuffer for further +// details. For non-zstd compression types, it is valid to pass nil buf. +// +// Encode is concurrency-safe, however note the concurrency limits for the +// buffer of your choice. +func Encode(t Type, src []byte, buf EncodeBuffer) (ret []byte, err error) { + if len(src) == 0 || t == "" || t == None { + return src, nil + } + if t == Snappy { + // If MaxEncodedLen is less than 0 the record is too large to be compressed. + if snappy.MaxEncodedLen(len(src)) < 0 { + return src, fmt.Errorf("compression: Snappy can't encode such a large message: %v", len(src)) + } + var b []byte + if buf != nil { + b = buf.get() + defer func() { + buf.set(ret) + }() + } + + // The snappy library uses `len` to calculate if we need a new buffer. 
+ // In order to allocate as few buffers as possible make the length + // equal to the capacity. + b = b[:cap(b)] + return snappy.Encode(b, src), nil + } + if t == Zstd { + if buf == nil { + return nil, errors.New("zstd requested but EncodeBuffer was not provided") + } + b := buf.get() + defer func() { + buf.set(ret) + }() + + return buf.zstdEncBuf().EncodeAll(src, b[:0]), nil + } + return nil, fmt.Errorf("unsupported compression type: %s", t) +} + +// Decode returns the decoded form of src for the given compression type. +// +// The buf allows passing various buffer implementations that make decoding more +// efficient. See NewSyncDecodeBuffer and NewConcurrentDecodeBuffer for further +// details. For non-zstd compression types, it is valid to pass nil buf. +// +// Decode is concurrency-safe, however note the concurrency limits for the +// buffer of your choice. +func Decode(t Type, src []byte, buf DecodeBuffer) (ret []byte, err error) { + if len(src) == 0 || t == "" || t == None { + return src, nil + } + if t == Snappy { + var b []byte + if buf != nil { + b = buf.get() + defer func() { + buf.set(ret) + }() + } + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. + b = b[:cap(b)] + return snappy.Decode(b, src) + } + if t == Zstd { + if buf == nil { + return nil, errors.New("zstd requested but DecodeBuffer was not provided") + } + b := buf.get() + defer func() { + buf.set(ret) + }() + return buf.zstdDecBuf().DecodeAll(src, b[:0]) + } + return nil, fmt.Errorf("unsupported compression type: %s", t) +} diff --git a/vendor/github.com/prometheus/prometheus/util/httputil/compression.go b/vendor/github.com/prometheus/prometheus/util/httputil/compression.go index 9a8a666453..d5bedb7fa9 100644 --- a/vendor/github.com/prometheus/prometheus/util/httputil/compression.go +++ b/vendor/github.com/prometheus/prometheus/util/httputil/compression.go @@ -56,8 +56,13 @@ func (c *compressedResponseWriter) Close() { // Constructs a new compressedResponseWriter based on client request headers. 
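
Editor's note: the `util/compression` package added above centralizes the snappy/zstd handling previously inlined in the WAL reader and writer. A minimal usage sketch against the API introduced in this patch (the record contents and wiring are illustrative, not taken from the patch):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/prometheus/prometheus/util/compression"
)

func main() {
	// Sync buffers reuse one backing slice each, so they must stay on a single
	// goroutine and their results are only valid until the next call.
	encBuf := compression.NewSyncEncodeBuffer()
	decBuf := compression.NewSyncDecodeBuffer()

	rec := bytes.Repeat([]byte("sample WAL record payload "), 64)

	enc, err := compression.Encode(compression.Snappy, rec, encBuf)
	if err != nil {
		panic(err)
	}
	// Copy enc first if it must outlive the next Encode call on the same buffer.

	dec, err := compression.Decode(compression.Snappy, enc, decBuf)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(rec), len(enc), bytes.Equal(rec, dec))
}
```
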
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { - encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") - for _, encoding := range encodings { + raw := req.Header.Get(acceptEncodingHeader) + var ( + encoding string + commaFound bool + ) + for { + encoding, raw, commaFound = strings.Cut(raw, ",") switch strings.TrimSpace(encoding) { case gzipEncoding: writer.Header().Set(contentEncodingHeader, gzipEncoding) @@ -72,6 +77,9 @@ func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) writer: zlib.NewWriter(writer), } } + if !commaFound { + break + } } return &compressedResponseWriter{ ResponseWriter: writer, diff --git a/vendor/github.com/prometheus/prometheus/util/logging/file.go b/vendor/github.com/prometheus/prometheus/util/logging/file.go index 27fdec2758..3f97b17f09 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/file.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/file.go @@ -45,7 +45,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { return nil, fmt.Errorf("can't create json log file: %w", err) } - jsonFmt := &promslog.AllowedFormat{} + jsonFmt := promslog.NewFormat() _ = jsonFmt.Set("json") return &JSONFileLogger{ handler: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}).Handler(), diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 1b8798feb8..c924c9092c 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -262,7 +262,7 @@ func NewAPI( statsRenderer StatsRenderer, rwEnabled bool, acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg, - otlpEnabled, otlpDeltaToCumulative bool, + otlpEnabled, otlpDeltaToCumulative, otlpNativeDeltaIngestion bool, ctZeroIngestionEnabled bool, ) *API { a := &API{ @@ -310,7 +310,7 @@ func NewAPI( a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled) } if otlpEnabled { - a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ConvertDelta: otlpDeltaToCumulative}) + a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ConvertDelta: otlpDeltaToCumulative, NativeDelta: otlpNativeDeltaIngestion}) } return a @@ -1021,6 +1021,7 @@ type ScrapePoolsDiscovery struct { type DroppedTarget struct { // Labels before any processing. DiscoveredLabels labels.Labels `json:"discoveredLabels"` + ScrapePool string `json:"scrapePool"` } // TargetDiscovery has all the active targets. 
@@ -1105,15 +1106,15 @@ func (api *API) scrapePools(r *http.Request) apiFuncResult { } func (api *API) targets(r *http.Request) apiFuncResult { - sortKeys := func(targets map[string][]*scrape.Target) ([]string, int) { + getSortedPools := func(targets map[string][]*scrape.Target) ([]string, int) { var n int - keys := make([]string, 0, len(targets)) - for k := range targets { - keys = append(keys, k) - n += len(targets[k]) + pools := make([]string, 0, len(targets)) + for p, t := range targets { + pools = append(pools, p) + n += len(t) } - slices.Sort(keys) - return keys, n + slices.Sort(pools) + return pools, n } scrapePool := r.URL.Query().Get("scrapePool") @@ -1125,14 +1126,14 @@ func (api *API) targets(r *http.Request) apiFuncResult { if showActive { targetsActive := api.targetRetriever(r.Context()).TargetsActive() - activeKeys, numTargets := sortKeys(targetsActive) + activePools, numTargets := getSortedPools(targetsActive) res.ActiveTargets = make([]*Target, 0, numTargets) - for _, key := range activeKeys { - if scrapePool != "" && key != scrapePool { + for _, pool := range activePools { + if scrapePool != "" && pool != scrapePool { continue } - for _, target := range targetsActive[key] { + for _, target := range targetsActive[pool] { lastErrStr := "" lastErr := target.LastError() if lastErr != nil { @@ -1144,7 +1145,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { res.ActiveTargets = append(res.ActiveTargets, &Target{ DiscoveredLabels: target.DiscoveredLabels(builder), Labels: target.Labels(builder), - ScrapePool: key, + ScrapePool: pool, ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), LastError: func() string { @@ -1170,18 +1171,18 @@ func (api *API) targets(r *http.Request) apiFuncResult { } if showDropped { res.DroppedTargetCounts = api.targetRetriever(r.Context()).TargetsDroppedCounts() - } - if showDropped { + targetsDropped := api.targetRetriever(r.Context()).TargetsDropped() - droppedKeys, numTargets := sortKeys(targetsDropped) + droppedPools, numTargets := getSortedPools(targetsDropped) res.DroppedTargets = make([]*DroppedTarget, 0, numTargets) - for _, key := range droppedKeys { - if scrapePool != "" && key != scrapePool { + for _, pool := range droppedPools { + if scrapePool != "" && pool != scrapePool { continue } - for _, target := range targetsDropped[key] { + for _, target := range targetsDropped[pool] { res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{ DiscoveredLabels: target.DiscoveredLabels(builder), + ScrapePool: pool, }) } } diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/.gitignore b/vendor/github.com/puzpuzpuz/xsync/v3/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md b/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md new file mode 100644 index 0000000000..aaa72fa863 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md @@ -0,0 +1,133 @@ +# xsync benchmarks + +If you're interested in `MapOf` comparison with some of the popular concurrent hash maps written in Go, check [this](https://github.com/cornelk/hashmap/pull/70) and 
[this](https://github.com/alphadose/haxmap/pull/22) PRs. + +The below results were obtained for xsync v2.3.1 on a c6g.metal EC2 instance (64 CPU, 128GB RAM) running Linux and Go 1.19.3. I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks. + +The following commands were used to run the benchmarks: +```bash +$ go test -run='^$' -cpu=1,2,4,8,16,32,64 -bench . -count=30 -timeout=0 | tee bench.txt +$ benchstat bench.txt | tee benchstat.txt +``` + +The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output. + +Please note that `MapOf` got a number of optimizations since v2.3.1, so the current result is likely to be different. + +### Counter vs. atomic int64 + +``` +name time/op +Counter 27.3ns ± 1% +Counter-2 27.2ns ±11% +Counter-4 15.3ns ± 8% +Counter-8 7.43ns ± 7% +Counter-16 3.70ns ±10% +Counter-32 1.77ns ± 3% +Counter-64 0.96ns ±10% +AtomicInt64 7.60ns ± 0% +AtomicInt64-2 12.6ns ±13% +AtomicInt64-4 13.5ns ±14% +AtomicInt64-8 12.7ns ± 9% +AtomicInt64-16 12.8ns ± 8% +AtomicInt64-32 13.0ns ± 6% +AtomicInt64-64 12.9ns ± 7% +``` + +Here `time/op` stands for average time spent on operation. If you divide `10^9` by the result in nanoseconds per operation, you'd get the throughput in operations per second. Thus, the ideal theoretical scalability of a concurrent data structure implies that the reported `time/op` decreases proportionally with the increased number of CPU cores. On the contrary, if the measured time per operation increases when run on more cores, it means performance degradation. + +### MapOf vs. sync.Map + +1,000 `[int, int]` entries with a warm-up, 100% Loads: +``` +IntegerMapOf_WarmUp/reads=100% 24.0ns ± 0% +IntegerMapOf_WarmUp/reads=100%-2 12.0ns ± 0% +IntegerMapOf_WarmUp/reads=100%-4 6.02ns ± 0% +IntegerMapOf_WarmUp/reads=100%-8 3.01ns ± 0% +IntegerMapOf_WarmUp/reads=100%-16 1.50ns ± 0% +IntegerMapOf_WarmUp/reads=100%-32 0.75ns ± 0% +IntegerMapOf_WarmUp/reads=100%-64 0.38ns ± 0% +IntegerMapStandard_WarmUp/reads=100% 55.3ns ± 0% +IntegerMapStandard_WarmUp/reads=100%-2 27.6ns ± 0% +IntegerMapStandard_WarmUp/reads=100%-4 16.1ns ± 3% +IntegerMapStandard_WarmUp/reads=100%-8 8.35ns ± 7% +IntegerMapStandard_WarmUp/reads=100%-16 4.24ns ± 7% +IntegerMapStandard_WarmUp/reads=100%-32 2.18ns ± 6% +IntegerMapStandard_WarmUp/reads=100%-64 1.11ns ± 3% +``` + +1,000 `[int, int]` entries with a warm-up, 99% Loads, 0.5% Stores, 0.5% Deletes: +``` +IntegerMapOf_WarmUp/reads=99% 31.0ns ± 0% +IntegerMapOf_WarmUp/reads=99%-2 16.4ns ± 1% +IntegerMapOf_WarmUp/reads=99%-4 8.42ns ± 0% +IntegerMapOf_WarmUp/reads=99%-8 4.41ns ± 0% +IntegerMapOf_WarmUp/reads=99%-16 2.38ns ± 2% +IntegerMapOf_WarmUp/reads=99%-32 1.37ns ± 4% +IntegerMapOf_WarmUp/reads=99%-64 0.85ns ± 2% +IntegerMapStandard_WarmUp/reads=99% 121ns ± 1% +IntegerMapStandard_WarmUp/reads=99%-2 109ns ± 3% +IntegerMapStandard_WarmUp/reads=99%-4 115ns ± 4% +IntegerMapStandard_WarmUp/reads=99%-8 114ns ± 2% +IntegerMapStandard_WarmUp/reads=99%-16 105ns ± 2% +IntegerMapStandard_WarmUp/reads=99%-32 97.0ns ± 3% +IntegerMapStandard_WarmUp/reads=99%-64 98.0ns ± 2% +``` + +1,000 `[int, int]` entries with a warm-up, 75% Loads, 12.5% Stores, 12.5% Deletes: +``` +IntegerMapOf_WarmUp/reads=75%-reads 46.2ns ± 1% +IntegerMapOf_WarmUp/reads=75%-reads-2 36.7ns ± 2% +IntegerMapOf_WarmUp/reads=75%-reads-4 22.0ns ± 1% +IntegerMapOf_WarmUp/reads=75%-reads-8 12.8ns ± 2% +IntegerMapOf_WarmUp/reads=75%-reads-16 7.69ns ± 1% 
+IntegerMapOf_WarmUp/reads=75%-reads-32 5.16ns ± 1% +IntegerMapOf_WarmUp/reads=75%-reads-64 4.91ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads 156ns ± 0% +IntegerMapStandard_WarmUp/reads=75%-reads-2 177ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-4 197ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-8 221ns ± 2% +IntegerMapStandard_WarmUp/reads=75%-reads-16 242ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-32 258ns ± 1% +IntegerMapStandard_WarmUp/reads=75%-reads-64 264ns ± 1% +``` + +### MPMCQueue vs. Go channels + +Concurrent producers and consumers (1:1), queue/channel size 1,000, some work done by both producers and consumers: +``` +QueueProdConsWork100 252ns ± 0% +QueueProdConsWork100-2 206ns ± 5% +QueueProdConsWork100-4 136ns ±12% +QueueProdConsWork100-8 110ns ± 6% +QueueProdConsWork100-16 108ns ± 2% +QueueProdConsWork100-32 102ns ± 2% +QueueProdConsWork100-64 101ns ± 0% +ChanProdConsWork100 283ns ± 0% +ChanProdConsWork100-2 406ns ±21% +ChanProdConsWork100-4 549ns ± 7% +ChanProdConsWork100-8 754ns ± 7% +ChanProdConsWork100-16 828ns ± 7% +ChanProdConsWork100-32 810ns ± 8% +ChanProdConsWork100-64 832ns ± 4% +``` + +### RBMutex vs. sync.RWMutex + +The writer locks on each 100,000 iteration with some work in the critical section for both readers and the writer: +``` +RBMutexWorkWrite100000 146ns ± 0% +RBMutexWorkWrite100000-2 73.3ns ± 0% +RBMutexWorkWrite100000-4 36.7ns ± 0% +RBMutexWorkWrite100000-8 18.6ns ± 0% +RBMutexWorkWrite100000-16 9.83ns ± 3% +RBMutexWorkWrite100000-32 5.53ns ± 0% +RBMutexWorkWrite100000-64 4.04ns ± 3% +RWMutexWorkWrite100000 121ns ± 0% +RWMutexWorkWrite100000-2 128ns ± 1% +RWMutexWorkWrite100000-4 124ns ± 2% +RWMutexWorkWrite100000-8 101ns ± 1% +RWMutexWorkWrite100000-16 92.9ns ± 1% +RWMutexWorkWrite100000-32 89.9ns ± 1% +RWMutexWorkWrite100000-64 88.4ns ± 1% +``` diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE b/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/README.md b/vendor/github.com/puzpuzpuz/xsync/v3/README.md new file mode 100644 index 0000000000..3971553ae7 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/README.md @@ -0,0 +1,195 @@ +[![GoDoc reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v3) +[![GoReport](https://goreportcard.com/badge/github.com/puzpuzpuz/xsync/v3)](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v3) +[![codecov](https://codecov.io/gh/puzpuzpuz/xsync/branch/main/graph/badge.svg)](https://codecov.io/gh/puzpuzpuz/xsync) + +# xsync + +Concurrent data structures for Go. Aims to provide more scalable alternatives for some of the data structures from the standard `sync` package, but not only. + +Covered with tests following the approach described [here](https://puzpuzpuz.dev/testing-concurrent-code-for-fun-and-profit). + +## Benchmarks + +Benchmark results may be found [here](BENCHMARKS.md). 
I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks on a beefy multicore machine. + +Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashMap](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ConcurrentHashMap.html) and `xsync.MapOf` is available [here](https://puzpuzpuz.dev/concurrent-map-in-go-vs-java-yet-another-meaningless-benchmark). + +## Usage + +The latest xsync major version is v3, so `/v3` suffix should be used when importing the library: + +```go +import ( + "github.com/puzpuzpuz/xsync/v3" +) +``` + +*Note for pre-v3 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial. + +### Counter + +A `Counter` is a striped `int64` counter inspired by the `j.u.c.a.LongAdder` class from the Java standard library. + +```go +c := xsync.NewCounter() +// increment and decrement the counter +c.Inc() +c.Dec() +// read the current value +v := c.Value() +``` + +Works better in comparison with a single atomically updated `int64` counter in high contention scenarios. + +### Map + +A `Map` is like a concurrent hash table-based map. It follows the interface of `sync.Map` with a number of valuable extensions like `Compute` or `Size`. + +```go +m := xsync.NewMap() +m.Store("foo", "bar") +v, ok := m.Load("foo") +s := m.Size() +``` + +`Map` uses a modified version of Cache-Line Hash Table (CLHT) data structure: https://github.com/LPD-EPFL/CLHT + +CLHT is built around the idea of organizing the hash table in cache-line-sized buckets, so that on all modern CPUs update operations complete with minimal cache-line transfer. Also, `Get` operations are obstruction-free and involve no writes to shared memory, hence no mutexes or any other sort of locks. Due to this design, in all considered scenarios `Map` outperforms `sync.Map`. + +One important difference with `sync.Map` is that only string keys are supported. That's because Golang standard library does not expose the built-in hash functions for `interface{}` values. + +`MapOf[K, V]` is an implementation with parametrized key and value types. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, less GC pressure and fewer atomic operations on reads. + +```go +m := xsync.NewMapOf[string, string]() +m.Store("foo", "bar") +v, ok := m.Load("foo") +``` + +Apart from CLHT, `MapOf` borrows ideas from Java's `j.u.c.ConcurrentHashMap` (immutable K/V pair structs instead of atomic snapshots) and C++'s `absl::flat_hash_map` (meta memory and SWAR-based lookups). It also has more dense memory layout when compared with `Map`. Long story short, `MapOf` should be preferred over `Map` when possible. + +An important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types: + +```go +type Point struct { + x int32 + y int32 +} +m := NewMapOf[Point, int]() +m.Store(Point{42, 42}, 42) +v, ok := m.Load(point{42, 42}) +``` + +Apart from `Range` method available for map iteration, there are also `ToPlainMap`/`ToPlainMapOf` utility functions to convert a `Map`/`MapOf` to a built-in Go's `map`: +```go +m := xsync.NewMapOf[int, int]() +m.Store(42, 42) +pm := xsync.ToPlainMapOf(m) +``` + +Both `Map` and `MapOf` use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. 
However, for smaller keys this hash function has some overhead. So, if you don't need DDoS protection, you may provide a custom hash function when creating a `MapOf`. For instance, the Murmur3 finalizer does a decent job for integer keys:
+
+```go
+m := xsync.NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
+	h := uint64(i)
+	h = (h ^ (h >> 33)) * 0xff51afd7ed558ccd
+	h = (h ^ (h >> 33)) * 0xc4ceb9fe1a85ec53
+	return h ^ (h >> 33)
+})
+```
+
+When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into consideration.
+
+### SPSCQueue
+
+An `SPSCQueue` is a bounded single-producer single-consumer concurrent queue: at most one goroutine may publish items to the queue, and at most one goroutine may consume them.
+
+```go
+q := xsync.NewSPSCQueue(1024)
+// producer inserts an item into the queue
+// optimistic insertion attempt; doesn't block
+inserted := q.TryEnqueue("bar")
+// consumer obtains an item from the queue
+// optimistic obtain attempt; doesn't block
+item, ok := q.TryDequeue() // interface{} pointing to a string
+```
+
+`SPSCQueueOf[I]` is an implementation with a parametrized item type. It is available for Go 1.19 or later.
+
+```go
+q := xsync.NewSPSCQueueOf[string](1024)
+inserted := q.TryEnqueue("foo")
+item, ok := q.TryDequeue() // string
+```
+
+The queue is based on the data structure from this [article](https://rigtorp.se/ringbuffer). The idea is to reduce CPU cache coherency traffic by keeping cached copies of the read and write indexes used by the producer and consumer, respectively.
+
+### MPMCQueue
+
+An `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue.
+
+```go
+q := xsync.NewMPMCQueue(1024)
+// producer optimistically inserts an item into the queue
+// optimistic insertion attempt; doesn't block
+inserted := q.TryEnqueue("bar")
+// consumer obtains an item from the queue
+// optimistic obtain attempt; doesn't block
+item, ok := q.TryDequeue() // interface{} pointing to a string
+```
+
+`MPMCQueueOf[I]` is an implementation with a parametrized item type. It is available for Go 1.19 or later.
+
+```go
+q := xsync.NewMPMCQueueOf[string](1024)
+inserted := q.TryEnqueue("foo")
+item, ok := q.TryDequeue() // string
+```
+
+The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library, which in turn references D. Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, and has blocking producers and consumers.
+
+The idea of the algorithm is to allow parallelism for concurrent producers and consumers by introducing the notion of tickets, i.e. the values of two counters, one for producers and one for consumers. An atomic increment of one of those counters is the only noticeable contention point in queue operations. The rest of the operation avoids contention on writes thanks to the turn-based read/write access for each of the queue items.
+
+In essence, `MPMCQueue` is a specialized queue for scenarios where there are multiple concurrent producers and consumers of a single queue running on a large multicore machine.
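+
+Since the blocking `Enqueue`/`Dequeue` methods are deprecated in favor of `TryEnqueue`/`TryDequeue` combined with `runtime.Gosched()`, a typical multi-producer/multi-consumer setup spins on the `Try*` methods and yields between attempts. Here is a minimal, illustrative sketch of that pattern; the goroutine counts, item values, and the `fmt`/`runtime`/`sync` imports are assumptions made for the example, not part of the library:
+
+```go
+q := xsync.NewMPMCQueue(1024)
+var wg sync.WaitGroup
+// a few producers publishing items concurrently
+for p := 0; p < 4; p++ {
+	wg.Add(1)
+	go func(p int) {
+		defer wg.Done()
+		for i := 0; i < 100; i++ {
+			// spin until a slot frees up instead of using the deprecated blocking Enqueue
+			for !q.TryEnqueue(fmt.Sprintf("item-%d-%d", p, i)) {
+				runtime.Gosched()
+			}
+		}
+	}(p)
+}
+// a few consumers draining the queue concurrently
+for c := 0; c < 4; c++ {
+	go func() {
+		for {
+			if item, ok := q.TryDequeue(); ok {
+				_ = item // process the item
+			} else {
+				runtime.Gosched()
+			}
+		}
+	}()
+}
+wg.Wait() // waits for the producers; a real program would also signal the consumers to stop
+```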
+ +To get the optimal performance, you may want to set the queue size to be large enough, say, an order of magnitude greater than the number of producers/consumers, to allow producers and consumers to progress with their queue operations in parallel most of the time. + +### RBMutex + +A `RBMutex` is a reader-biased reader/writer mutual exclusion lock. The lock can be held by many readers or a single writer. + +```go +mu := xsync.NewRBMutex() +// reader lock calls return a token +t := mu.RLock() +// the token must be later used to unlock the mutex +mu.RUnlock(t) +// writer locks are the same as in sync.RWMutex +mu.Lock() +mu.Unlock() +``` + +`RBMutex` is based on a modified version of BRAVO (Biased Locking for Reader-Writer Locks) algorithm: https://arxiv.org/pdf/1810.01553.pdf + +The idea of the algorithm is to build on top of an existing reader-writer mutex and introduce a fast path for readers. On the fast path, reader lock attempts are sharded over an internal array based on the reader identity (a token in the case of Golang). This means that readers do not contend over a single atomic counter like it's done in, say, `sync.RWMutex` allowing for better scalability in terms of cores. + +Hence, by the design `RBMutex` is a specialized mutex for scenarios, such as caches, where the vast majority of locks are acquired by readers and write lock acquire attempts are infrequent. In such scenarios, `RBMutex` should perform better than the `sync.RWMutex` on large multicore machines. + +`RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods. + +Apart from blocking methods, `RBMutex` also has methods for optimistic locking: +```go +mu := xsync.NewRBMutex() +if locked, t := mu.TryRLock(); locked { + // critical reader section... + mu.RUnlock(t) +} +if mu.TryLock() { + // critical writer section... + mu.Unlock() +} +``` + +## License + +Licensed under MIT. diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/counter.go b/vendor/github.com/puzpuzpuz/xsync/v3/counter.go new file mode 100644 index 0000000000..4d4dc87d21 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/counter.go @@ -0,0 +1,99 @@ +package xsync + +import ( + "sync" + "sync/atomic" +) + +// pool for P tokens +var ptokenPool sync.Pool + +// a P token is used to point at the current OS thread (P) +// on which the goroutine is run; exact identity of the thread, +// as well as P migration tolerance, is not important since +// it's used to as a best effort mechanism for assigning +// concurrent operations (goroutines) to different stripes of +// the counter +type ptoken struct { + idx uint32 + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - 4]byte +} + +// A Counter is a striped int64 counter. +// +// Should be preferred over a single atomically updated int64 +// counter in high contention scenarios. +// +// A Counter must not be copied after first use. +type Counter struct { + stripes []cstripe + mask uint32 +} + +type cstripe struct { + c int64 + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - 8]byte +} + +// NewCounter creates a new Counter instance. +func NewCounter() *Counter { + nstripes := nextPowOf2(parallelism()) + c := Counter{ + stripes: make([]cstripe, nstripes), + mask: nstripes - 1, + } + return &c +} + +// Inc increments the counter by 1. +func (c *Counter) Inc() { + c.Add(1) +} + +// Dec decrements the counter by 1. 
+func (c *Counter) Dec() { + c.Add(-1) +} + +// Add adds the delta to the counter. +func (c *Counter) Add(delta int64) { + t, ok := ptokenPool.Get().(*ptoken) + if !ok { + t = new(ptoken) + t.idx = runtime_fastrand() + } + for { + stripe := &c.stripes[t.idx&c.mask] + cnt := atomic.LoadInt64(&stripe.c) + if atomic.CompareAndSwapInt64(&stripe.c, cnt, cnt+delta) { + break + } + // Give a try with another randomly selected stripe. + t.idx = runtime_fastrand() + } + ptokenPool.Put(t) +} + +// Value returns the current counter value. +// The returned value may not include all of the latest operations in +// presence of concurrent modifications of the counter. +func (c *Counter) Value() int64 { + v := int64(0) + for i := 0; i < len(c.stripes); i++ { + stripe := &c.stripes[i] + v += atomic.LoadInt64(&stripe.c) + } + return v +} + +// Reset resets the counter to zero. +// This method should only be used when it is known that there are +// no concurrent modifications of the counter. +func (c *Counter) Reset() { + for i := 0; i < len(c.stripes); i++ { + stripe := &c.stripes[i] + atomic.StoreInt64(&stripe.c, 0) + } +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/map.go b/vendor/github.com/puzpuzpuz/xsync/v3/map.go new file mode 100644 index 0000000000..c7837e90b9 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/map.go @@ -0,0 +1,917 @@ +package xsync + +import ( + "fmt" + "math" + "runtime" + "strings" + "sync" + "sync/atomic" + "unsafe" +) + +type mapResizeHint int + +const ( + mapGrowHint mapResizeHint = 0 + mapShrinkHint mapResizeHint = 1 + mapClearHint mapResizeHint = 2 +) + +const ( + // number of Map entries per bucket; 3 entries lead to size of 64B + // (one cache line) on 64-bit machines + entriesPerMapBucket = 3 + // threshold fraction of table occupation to start a table shrinking + // when deleting the last entry in a bucket chain + mapShrinkFraction = 128 + // map load factor to trigger a table resize during insertion; + // a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen + // key-value pairs (this is a soft limit) + mapLoadFactor = 0.75 + // minimal table size, i.e. number of buckets; thus, minimal map + // capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen + defaultMinMapTableLen = 32 + // minimum counter stripes to use + minMapCounterLen = 8 + // maximum counter stripes to use; stands for around 4KB of memory + maxMapCounterLen = 32 +) + +var ( + topHashMask = uint64((1<<20)-1) << 44 + topHashEntryMasks = [3]uint64{ + topHashMask, + topHashMask >> 20, + topHashMask >> 40, + } +) + +// Map is like a Go map[string]interface{} but is safe for concurrent +// use by multiple goroutines without additional locking or +// coordination. It follows the interface of sync.Map with +// a number of valuable extensions like Compute or Size. +// +// A Map must not be copied after first use. +// +// Map uses a modified version of Cache-Line Hash Table (CLHT) +// data structure: https://github.com/LPD-EPFL/CLHT +// +// CLHT is built around idea to organize the hash table in +// cache-line-sized buckets, so that on all modern CPUs update +// operations complete with at most one cache-line transfer. +// Also, Get operations involve no write to memory, as well as no +// mutexes or any other sort of locks. Due to this design, in all +// considered scenarios Map outperforms sync.Map. +// +// One important difference with sync.Map is that only string keys +// are supported. 
That's because Golang standard library does not +// expose the built-in hash functions for interface{} values. +type Map struct { + totalGrowths int64 + totalShrinks int64 + resizing int64 // resize in progress flag; updated atomically + resizeMu sync.Mutex // only used along with resizeCond + resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) + table unsafe.Pointer // *mapTable + minTableLen int + growOnly bool +} + +type mapTable struct { + buckets []bucketPadded + // striped counter for number of table entries; + // used to determine if a table shrinking is needed + // occupies min(buckets_memory/1024, 64KB) of memory + size []counterStripe + seed uint64 +} + +type counterStripe struct { + c int64 + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - 8]byte +} + +type bucketPadded struct { + //lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs + pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte + bucket +} + +type bucket struct { + next unsafe.Pointer // *bucketPadded + keys [entriesPerMapBucket]unsafe.Pointer + values [entriesPerMapBucket]unsafe.Pointer + // topHashMutex is a 2-in-1 value. + // + // It contains packed top 20 bits (20 MSBs) of hash codes for keys + // stored in the bucket: + // | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex | + // | 20 bits | 20 bits | 20 bits | 3 bits | 1 bit | + // + // The least significant bit is used for the mutex (TTAS spinlock). + topHashMutex uint64 +} + +type rangeEntry struct { + key unsafe.Pointer + value unsafe.Pointer +} + +// MapConfig defines configurable Map/MapOf options. +type MapConfig struct { + sizeHint int + growOnly bool +} + +// WithPresize configures new Map/MapOf instance with capacity enough +// to hold sizeHint entries. The capacity is treated as the minimal +// capacity meaning that the underlying hash table will never shrink +// to a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +func WithPresize(sizeHint int) func(*MapConfig) { + return func(c *MapConfig) { + c.sizeHint = sizeHint + } +} + +// WithGrowOnly configures new Map/MapOf instance to be grow-only. +// This means that the underlying hash table grows in capacity when +// new keys are added, but does not shrink when keys are deleted. +// The only exception to this rule is the Clear method which +// shrinks the hash table back to the initial capacity. +func WithGrowOnly() func(*MapConfig) { + return func(c *MapConfig) { + c.growOnly = true + } +} + +// NewMap creates a new Map instance configured with the given +// options. +func NewMap(options ...func(*MapConfig)) *Map { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapBucket, + } + for _, o := range options { + o(c) + } + + m := &Map{} + m.resizeCond = *sync.NewCond(&m.resizeMu) + var table *mapTable + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { + table = newMapTable(defaultMinMapTableLen) + } else { + tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor)) + table = newMapTable(int(tableLen)) + } + m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly + atomic.StorePointer(&m.table, unsafe.Pointer(table)) + return m +} + +// NewMapPresized creates a new Map instance with capacity enough to hold +// sizeHint entries. The capacity is treated as the minimal capacity +// meaning that the underlying hash table will never shrink to +// a smaller capacity. 
If sizeHint is zero or negative, the value +// is ignored. +// +// Deprecated: use NewMap in combination with WithPresize. +func NewMapPresized(sizeHint int) *Map { + return NewMap(WithPresize(sizeHint)) +} + +func newMapTable(minTableLen int) *mapTable { + buckets := make([]bucketPadded, minTableLen) + counterLen := minTableLen >> 10 + if counterLen < minMapCounterLen { + counterLen = minMapCounterLen + } else if counterLen > maxMapCounterLen { + counterLen = maxMapCounterLen + } + counter := make([]counterStripe, counterLen) + t := &mapTable{ + buckets: buckets, + size: counter, + seed: makeSeed(), + } + return t +} + +// ToPlainMap returns a native map with a copy of xsync Map's +// contents. The copied xsync Map should not be modified while +// this call is made. If the copied Map is modified, the copying +// behavior is the same as in the Range method. +func ToPlainMap(m *Map) map[string]interface{} { + pm := make(map[string]interface{}) + if m != nil { + m.Range(func(key string, value interface{}) bool { + pm[key] = value + return true + }) + } + return pm +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map) Load(key string) (value interface{}, ok bool) { + table := (*mapTable)(atomic.LoadPointer(&m.table)) + hash := hashString(key, table.seed) + bidx := uint64(len(table.buckets)-1) & hash + b := &table.buckets[bidx] + for { + topHashes := atomic.LoadUint64(&b.topHashMutex) + for i := 0; i < entriesPerMapBucket; i++ { + if !topHashMatch(hash, topHashes, i) { + continue + } + atomic_snapshot: + // Start atomic snapshot. + vp := atomic.LoadPointer(&b.values[i]) + kp := atomic.LoadPointer(&b.keys[i]) + if kp != nil && vp != nil { + if key == derefKey(kp) { + if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) { + // Atomic snapshot succeeded. + return derefValue(vp), true + } + // Concurrent update/remove. Go for another spin. + goto atomic_snapshot + } + } + } + bptr := atomic.LoadPointer(&b.next) + if bptr == nil { + return + } + b = (*bucketPadded)(bptr) + } +} + +// Store sets the value for a key. +func (m *Map) Store(key string, value interface{}) { + m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return value, false + }, + true, + false, + ) +} + +// LoadAndStore returns the existing value for the key if present, +// while setting the new value for the key. +// It stores the new value and returns the existing one, if present. +// The loaded result is true if the existing value was loaded, +// false otherwise. +func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrCompute returns the existing value for the key if present. +// Otherwise, it computes the value using the provided function, and +// then stores and returns the computed value. The loaded result is +// true if the value was loaded, false if computed. 
+// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + return valueFn(), false + }, + true, + false, + ) +} + +// LoadOrTryCompute returns the existing value for the key if present. +// Otherwise, it tries to compute the value using the provided function +// and, if successful, stores and returns the computed value. The loaded +// result is true if the value was loaded, or false if computed (whether +// successfully or not). If the compute attempt was cancelled (due to an +// error, for example), a nil value will be returned. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *Map) LoadOrTryCompute( + key string, + valueFn func() (newValue interface{}, cancel bool), +) (value interface{}, loaded bool) { + return m.doCompute( + key, + func(interface{}, bool) (interface{}, bool) { + nv, c := valueFn() + if !c { + return nv, false + } + return nil, true + }, + true, + false, + ) +} + +// Compute either sets the computed new value for the key or deletes +// the value for the key. When the delete result of the valueFn function +// is set to true, the value will be deleted, if it exists. When delete +// is set to false, the value is updated to the newValue. +// The ok result indicates whether value was computed and stored, thus, is +// present in the map. The actual result contains the new value in cases where +// the value was computed and stored. See the example for a few use cases. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *Map) Compute( + key string, + valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool), +) (actual interface{}, ok bool) { + return m.doCompute(key, valueFn, false, true) +} + +// LoadAndDelete deletes the value for a key, returning the previous +// value if any. The loaded result reports whether the key was +// present. +func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) { + return m.doCompute( + key, + func(value interface{}, loaded bool) (interface{}, bool) { + return value, true + }, + false, + false, + ) +} + +// Delete deletes the value for a key. +func (m *Map) Delete(key string) { + m.doCompute( + key, + func(value interface{}, loaded bool) (interface{}, bool) { + return value, true + }, + false, + false, + ) +} + +func (m *Map) doCompute( + key string, + valueFn func(oldValue interface{}, loaded bool) (interface{}, bool), + loadIfExists, computeOnly bool, +) (interface{}, bool) { + // Read-only path. + if loadIfExists { + if v, ok := m.Load(key); ok { + return v, !computeOnly + } + } + // Write path. 
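+	// The write path below works as follows: lock the root bucket of the
+	// chain (a TTAS spinlock stored in the bucket's topHashMutex word),
+	// re-check that no resize is in progress and no newer table has been
+	// published, then scan the bucket chain for a matching key or an
+	// empty slot. If neither is found, either the table is grown and the
+	// attempt is retried (when the load factor is exceeded), or a new
+	// bucket is appended to the chain.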
+ for { + compute_attempt: + var ( + emptyb *bucketPadded + emptyidx int + hintNonEmpty int + ) + table := (*mapTable)(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + hash := hashString(key, table.seed) + bidx := uint64(len(table.buckets)-1) & hash + rootb := &table.buckets[bidx] + lockBucket(&rootb.topHashMutex) + // The following two checks must go in reverse to what's + // in the resize method. + if m.resizeInProgress() { + // Resize is in progress. Wait, then go for another attempt. + unlockBucket(&rootb.topHashMutex) + m.waitForResize() + goto compute_attempt + } + if m.newerTableExists(table) { + // Someone resized the table. Go for another attempt. + unlockBucket(&rootb.topHashMutex) + goto compute_attempt + } + b := rootb + for { + topHashes := atomic.LoadUint64(&b.topHashMutex) + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] == nil { + if emptyb == nil { + emptyb = b + emptyidx = i + } + continue + } + if !topHashMatch(hash, topHashes, i) { + hintNonEmpty++ + continue + } + if key == derefKey(b.keys[i]) { + vp := b.values[i] + if loadIfExists { + unlockBucket(&rootb.topHashMutex) + return derefValue(vp), !computeOnly + } + // In-place update/delete. + // We get a copy of the value via an interface{} on each call, + // thus the live value pointers are unique. Otherwise atomic + // snapshot won't be correct in case of multiple Store calls + // using the same value. + oldValue := derefValue(vp) + newValue, del := valueFn(oldValue, true) + if del { + // Deletion. + // First we update the value, then the key. + // This is important for atomic snapshot states. + atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i)) + atomic.StorePointer(&b.values[i], nil) + atomic.StorePointer(&b.keys[i], nil) + leftEmpty := false + if hintNonEmpty == 0 { + leftEmpty = isEmptyBucket(b) + } + unlockBucket(&rootb.topHashMutex) + table.addSize(bidx, -1) + // Might need to shrink the table. + if leftEmpty { + m.resize(table, mapShrinkHint) + } + return oldValue, !computeOnly + } + nvp := unsafe.Pointer(&newValue) + if assertionsEnabled && vp == nvp { + panic("non-unique value pointer") + } + atomic.StorePointer(&b.values[i], nvp) + unlockBucket(&rootb.topHashMutex) + if computeOnly { + // Compute expects the new value to be returned. + return newValue, true + } + // LoadAndStore expects the old value to be returned. + return oldValue, true + } + hintNonEmpty++ + } + if b.next == nil { + if emptyb != nil { + // Insertion into an existing bucket. + var zeroV interface{} + newValue, del := valueFn(zeroV, false) + if del { + unlockBucket(&rootb.topHashMutex) + return zeroV, false + } + // First we update the value, then the key. + // This is important for atomic snapshot states. + topHashes = atomic.LoadUint64(&emptyb.topHashMutex) + atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx)) + atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue)) + atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key)) + unlockBucket(&rootb.topHashMutex) + table.addSize(bidx, 1) + return newValue, computeOnly + } + growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor + if table.sumSize() > int64(growThreshold) { + // Need to grow the table. Then go for another attempt. + unlockBucket(&rootb.topHashMutex) + m.resize(table, mapGrowHint) + goto compute_attempt + } + // Insertion into a new bucket. 
+ var zeroV interface{} + newValue, del := valueFn(zeroV, false) + if del { + unlockBucket(&rootb.topHashMutex) + return newValue, false + } + // Create and append a bucket. + newb := new(bucketPadded) + newb.keys[0] = unsafe.Pointer(&key) + newb.values[0] = unsafe.Pointer(&newValue) + newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0) + atomic.StorePointer(&b.next, unsafe.Pointer(newb)) + unlockBucket(&rootb.topHashMutex) + table.addSize(bidx, 1) + return newValue, computeOnly + } + b = (*bucketPadded)(b.next) + } + } +} + +func (m *Map) newerTableExists(table *mapTable) bool { + curTablePtr := atomic.LoadPointer(&m.table) + return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table)) +} + +func (m *Map) resizeInProgress() bool { + return atomic.LoadInt64(&m.resizing) == 1 +} + +func (m *Map) waitForResize() { + m.resizeMu.Lock() + for m.resizeInProgress() { + m.resizeCond.Wait() + } + m.resizeMu.Unlock() +} + +func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) { + knownTableLen := len(knownTable.buckets) + // Fast path for shrink attempts. + if hint == mapShrinkHint { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) { + return + } + } + // Slow path. + if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) { + // Someone else started resize. Wait for it to finish. + m.waitForResize() + return + } + var newTable *mapTable + table := (*mapTable)(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + switch hint { + case mapGrowHint: + // Grow the table with factor of 2. + atomic.AddInt64(&m.totalGrowths, 1) + newTable = newMapTable(tableLen << 1) + case mapShrinkHint: + shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction) + if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold { + // Shrink the table with factor of 2. + atomic.AddInt64(&m.totalShrinks, 1) + newTable = newMapTable(tableLen >> 1) + } else { + // No need to shrink. Wake up all waiters and give up. + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() + return + } + case mapClearHint: + newTable = newMapTable(m.minTableLen) + default: + panic(fmt.Sprintf("unexpected resize hint: %d", hint)) + } + // Copy the data only if we're not clearing the map. + if hint != mapClearHint { + for i := 0; i < tableLen; i++ { + copied := copyBucket(&table.buckets[i], newTable) + newTable.addSizePlain(uint64(i), copied) + } + } + // Publish the new table and wake up all waiters. 
+ atomic.StorePointer(&m.table, unsafe.Pointer(newTable)) + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() +} + +func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) { + rootb := b + lockBucket(&rootb.topHashMutex) + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] != nil { + k := derefKey(b.keys[i]) + hash := hashString(k, destTable.seed) + bidx := uint64(len(destTable.buckets)-1) & hash + destb := &destTable.buckets[bidx] + appendToBucket(hash, b.keys[i], b.values[i], destb) + copied++ + } + } + if b.next == nil { + unlockBucket(&rootb.topHashMutex) + return + } + b = (*bucketPadded)(b.next) + } +} + +func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) { + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] == nil { + b.keys[i] = keyPtr + b.values[i] = valPtr + b.topHashMutex = storeTopHash(hash, b.topHashMutex, i) + return + } + } + if b.next == nil { + newb := new(bucketPadded) + newb.keys[0] = keyPtr + newb.values[0] = valPtr + newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0) + b.next = unsafe.Pointer(newb) + return + } + b = (*bucketPadded)(b.next) + } +} + +func isEmptyBucket(rootb *bucketPadded) bool { + b := rootb + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] != nil { + return false + } + } + if b.next == nil { + return true + } + b = (*bucketPadded)(b.next) + } +} + +// Range calls f sequentially for each key and value present in the +// map. If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot +// of the Map's contents: no key will be visited more than once, but +// if the value for any key is stored or deleted concurrently, Range +// may reflect any mapping for that key from any point during the +// Range call. +// +// It is safe to modify the map while iterating it, including entry +// creation, modification and deletion. However, the concurrent +// modification rule apply, i.e. the changes may be not reflected +// in the subsequently iterated entries. +func (m *Map) Range(f func(key string, value interface{}) bool) { + var zeroEntry rangeEntry + // Pre-allocate array big enough to fit entries for most hash tables. + bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket) + tablep := atomic.LoadPointer(&m.table) + table := *(*mapTable)(tablep) + for i := range table.buckets { + rootb := &table.buckets[i] + b := rootb + // Prevent concurrent modifications and copy all entries into + // the intermediate slice. + lockBucket(&rootb.topHashMutex) + for { + for i := 0; i < entriesPerMapBucket; i++ { + if b.keys[i] != nil { + bentries = append(bentries, rangeEntry{ + key: b.keys[i], + value: b.values[i], + }) + } + } + if b.next == nil { + unlockBucket(&rootb.topHashMutex) + break + } + b = (*bucketPadded)(b.next) + } + // Call the function for all copied entries. + for j := range bentries { + k := derefKey(bentries[j].key) + v := derefValue(bentries[j].value) + if !f(k, v) { + return + } + // Remove the reference to avoid preventing the copied + // entries from being GCed until this method finishes. + bentries[j] = zeroEntry + } + bentries = bentries[:0] + } +} + +// Clear deletes all keys and values currently stored in the map. +func (m *Map) Clear() { + table := (*mapTable)(atomic.LoadPointer(&m.table)) + m.resize(table, mapClearHint) +} + +// Size returns current size of the map. 
+func (m *Map) Size() int { + table := (*mapTable)(atomic.LoadPointer(&m.table)) + return int(table.sumSize()) +} + +func derefKey(keyPtr unsafe.Pointer) string { + return *(*string)(keyPtr) +} + +func derefValue(valuePtr unsafe.Pointer) interface{} { + return *(*interface{})(valuePtr) +} + +func lockBucket(mu *uint64) { + for { + var v uint64 + for { + v = atomic.LoadUint64(mu) + if v&1 != 1 { + break + } + runtime.Gosched() + } + if atomic.CompareAndSwapUint64(mu, v, v|1) { + return + } + runtime.Gosched() + } +} + +func unlockBucket(mu *uint64) { + v := atomic.LoadUint64(mu) + atomic.StoreUint64(mu, v&^1) +} + +func topHashMatch(hash, topHashes uint64, idx int) bool { + if topHashes&(1<<(idx+1)) == 0 { + // Entry is not present. + return false + } + hash = hash & topHashMask + topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx) + return hash == topHashes +} + +func storeTopHash(hash, topHashes uint64, idx int) uint64 { + // Zero out top hash at idx. + topHashes = topHashes &^ topHashEntryMasks[idx] + // Chop top 20 MSBs of the given hash and position them at idx. + hash = (hash & topHashMask) >> (20 * idx) + // Store the MSBs. + topHashes = topHashes | hash + // Mark the entry as present. + return topHashes | (1 << (idx + 1)) +} + +func eraseTopHash(topHashes uint64, idx int) uint64 { + return topHashes &^ (1 << (idx + 1)) +} + +func (table *mapTable) addSize(bucketIdx uint64, delta int) { + cidx := uint64(len(table.size)-1) & bucketIdx + atomic.AddInt64(&table.size[cidx].c, int64(delta)) +} + +func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) { + cidx := uint64(len(table.size)-1) & bucketIdx + table.size[cidx].c += int64(delta) +} + +func (table *mapTable) sumSize() int64 { + sum := int64(0) + for i := range table.size { + sum += atomic.LoadInt64(&table.size[i].c) + } + return sum +} + +// MapStats is Map/MapOf statistics. +// +// Warning: map statistics are intented to be used for diagnostic +// purposes, not for production code. This means that breaking changes +// may be introduced into this struct even between minor releases. +type MapStats struct { + // RootBuckets is the number of root buckets in the hash table. + // Each bucket holds a few entries. + RootBuckets int + // TotalBuckets is the total number of buckets in the hash table, + // including root and their chained buckets. Each bucket holds + // a few entries. + TotalBuckets int + // EmptyBuckets is the number of buckets that hold no entries. + EmptyBuckets int + // Capacity is the Map/MapOf capacity, i.e. the total number of + // entries that all buckets can physically hold. This number + // does not consider the load factor. + Capacity int + // Size is the exact number of entries stored in the map. + Size int + // Counter is the number of entries stored in the map according + // to the internal atomic counter. In case of concurrent map + // modifications this number may be different from Size. + Counter int + // CounterLen is the number of internal atomic counter stripes. + // This number may grow with the map capacity to improve + // multithreaded scalability. + CounterLen int + // MinEntries is the minimum number of entries per a chain of + // buckets, i.e. a root bucket and its chained buckets. + MinEntries int + // MinEntries is the maximum number of entries per a chain of + // buckets, i.e. a root bucket and its chained buckets. + MaxEntries int + // TotalGrowths is the number of times the hash table grew. 
+ TotalGrowths int64 + // TotalGrowths is the number of times the hash table shrinked. + TotalShrinks int64 +} + +// ToString returns string representation of map stats. +func (s *MapStats) ToString() string { + var sb strings.Builder + sb.WriteString("MapStats{\n") + sb.WriteString(fmt.Sprintf("RootBuckets: %d\n", s.RootBuckets)) + sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets)) + sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets)) + sb.WriteString(fmt.Sprintf("Capacity: %d\n", s.Capacity)) + sb.WriteString(fmt.Sprintf("Size: %d\n", s.Size)) + sb.WriteString(fmt.Sprintf("Counter: %d\n", s.Counter)) + sb.WriteString(fmt.Sprintf("CounterLen: %d\n", s.CounterLen)) + sb.WriteString(fmt.Sprintf("MinEntries: %d\n", s.MinEntries)) + sb.WriteString(fmt.Sprintf("MaxEntries: %d\n", s.MaxEntries)) + sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths)) + sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks)) + sb.WriteString("}\n") + return sb.String() +} + +// Stats returns statistics for the Map. Just like other map +// methods, this one is thread-safe. Yet it's an O(N) operation, +// so it should be used only for diagnostics or debugging purposes. +func (m *Map) Stats() MapStats { + stats := MapStats{ + TotalGrowths: atomic.LoadInt64(&m.totalGrowths), + TotalShrinks: atomic.LoadInt64(&m.totalShrinks), + MinEntries: math.MaxInt32, + } + table := (*mapTable)(atomic.LoadPointer(&m.table)) + stats.RootBuckets = len(table.buckets) + stats.Counter = int(table.sumSize()) + stats.CounterLen = len(table.size) + for i := range table.buckets { + nentries := 0 + b := &table.buckets[i] + stats.TotalBuckets++ + for { + nentriesLocal := 0 + stats.Capacity += entriesPerMapBucket + for i := 0; i < entriesPerMapBucket; i++ { + if atomic.LoadPointer(&b.keys[i]) != nil { + stats.Size++ + nentriesLocal++ + } + } + nentries += nentriesLocal + if nentriesLocal == 0 { + stats.EmptyBuckets++ + } + if b.next == nil { + break + } + b = (*bucketPadded)(atomic.LoadPointer(&b.next)) + stats.TotalBuckets++ + } + if nentries < stats.MinEntries { + stats.MinEntries = nentries + } + if nentries > stats.MaxEntries { + stats.MaxEntries = nentries + } + } + return stats +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go new file mode 100644 index 0000000000..d1ce9b2e2d --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go @@ -0,0 +1,738 @@ +package xsync + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "unsafe" +) + +const ( + // number of MapOf entries per bucket; 5 entries lead to size of 64B + // (one cache line) on 64-bit machines + entriesPerMapOfBucket = 5 + defaultMeta uint64 = 0x8080808080808080 + metaMask uint64 = 0xffffffffff + defaultMetaMasked uint64 = defaultMeta & metaMask + emptyMetaSlot uint8 = 0x80 +) + +// MapOf is like a Go map[K]V but is safe for concurrent +// use by multiple goroutines without additional locking or +// coordination. It follows the interface of sync.Map with +// a number of valuable extensions like Compute or Size. +// +// A MapOf must not be copied after first use. +// +// MapOf uses a modified version of Cache-Line Hash Table (CLHT) +// data structure: https://github.com/LPD-EPFL/CLHT +// +// CLHT is built around idea to organize the hash table in +// cache-line-sized buckets, so that on all modern CPUs update +// operations complete with at most one cache-line transfer. 
+// Also, Get operations involve no write to memory, as well as no +// mutexes or any other sort of locks. Due to this design, in all +// considered scenarios MapOf outperforms sync.Map. +// +// MapOf also borrows ideas from Java's j.u.c.ConcurrentHashMap +// (immutable K/V pair structs instead of atomic snapshots) +// and C++'s absl::flat_hash_map (meta memory and SWAR-based +// lookups). +type MapOf[K comparable, V any] struct { + totalGrowths int64 + totalShrinks int64 + resizing int64 // resize in progress flag; updated atomically + resizeMu sync.Mutex // only used along with resizeCond + resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) + table unsafe.Pointer // *mapOfTable + hasher func(K, uint64) uint64 + minTableLen int + growOnly bool +} + +type mapOfTable[K comparable, V any] struct { + buckets []bucketOfPadded + // striped counter for number of table entries; + // used to determine if a table shrinking is needed + // occupies min(buckets_memory/1024, 64KB) of memory + size []counterStripe + seed uint64 +} + +// bucketOfPadded is a CL-sized map bucket holding up to +// entriesPerMapOfBucket entries. +type bucketOfPadded struct { + //lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs + pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte + bucketOf +} + +type bucketOf struct { + meta uint64 + entries [entriesPerMapOfBucket]unsafe.Pointer // *entryOf + next unsafe.Pointer // *bucketOfPadded + mu sync.Mutex +} + +// entryOf is an immutable map entry. +type entryOf[K comparable, V any] struct { + key K + value V +} + +// NewMapOf creates a new MapOf instance configured with the given +// options. +func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] { + return NewMapOfWithHasher[K, V](defaultHasher[K](), options...) +} + +// NewMapOfWithHasher creates a new MapOf instance configured with +// the given hasher and options. The hash function is used instead +// of the built-in hash function configured when a map is created +// with the NewMapOf function. +func NewMapOfWithHasher[K comparable, V any]( + hasher func(K, uint64) uint64, + options ...func(*MapConfig), +) *MapOf[K, V] { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapOfBucket, + } + for _, o := range options { + o(c) + } + + m := &MapOf[K, V]{} + m.resizeCond = *sync.NewCond(&m.resizeMu) + m.hasher = hasher + var table *mapOfTable[K, V] + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapOfBucket { + table = newMapOfTable[K, V](defaultMinMapTableLen) + } else { + tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapOfBucket) / mapLoadFactor)) + table = newMapOfTable[K, V](int(tableLen)) + } + m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly + atomic.StorePointer(&m.table, unsafe.Pointer(table)) + return m +} + +// NewMapOfPresized creates a new MapOf instance with capacity enough +// to hold sizeHint entries. The capacity is treated as the minimal capacity +// meaning that the underlying hash table will never shrink to +// a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +// +// Deprecated: use NewMapOf in combination with WithPresize. 
+func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] { + return NewMapOf[K, V](WithPresize(sizeHint)) +} + +func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] { + buckets := make([]bucketOfPadded, minTableLen) + for i := range buckets { + buckets[i].meta = defaultMeta + } + counterLen := minTableLen >> 10 + if counterLen < minMapCounterLen { + counterLen = minMapCounterLen + } else if counterLen > maxMapCounterLen { + counterLen = maxMapCounterLen + } + counter := make([]counterStripe, counterLen) + t := &mapOfTable[K, V]{ + buckets: buckets, + size: counter, + seed: makeSeed(), + } + return t +} + +// ToPlainMapOf returns a native map with a copy of xsync Map's +// contents. The copied xsync Map should not be modified while +// this call is made. If the copied Map is modified, the copying +// behavior is the same as in the Range method. +func ToPlainMapOf[K comparable, V any](m *MapOf[K, V]) map[K]V { + pm := make(map[K]V) + if m != nil { + m.Range(func(key K, value V) bool { + pm[key] = value + return true + }) + } + return pm +} + +// Load returns the value stored in the map for a key, or zero value +// of type V if no value is present. +// The ok result indicates whether value was found in the map. +func (m *MapOf[K, V]) Load(key K) (value V, ok bool) { + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + hash := m.hasher(key, table.seed) + h1 := h1(hash) + h2w := broadcast(h2(hash)) + bidx := uint64(len(table.buckets)-1) & h1 + b := &table.buckets[bidx] + for { + metaw := atomic.LoadUint64(&b.meta) + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + eptr := atomic.LoadPointer(&b.entries[idx]) + if eptr != nil { + e := (*entryOf[K, V])(eptr) + if e.key == key { + return e.value, true + } + } + markedw &= markedw - 1 + } + bptr := atomic.LoadPointer(&b.next) + if bptr == nil { + return + } + b = (*bucketOfPadded)(bptr) + } +} + +// Store sets the value for a key. +func (m *MapOf[K, V]) Store(key K, value V) { + m.doCompute( + key, + func(V, bool) (V, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + return value, false + }, + true, + false, + ) +} + +// LoadAndStore returns the existing value for the key if present, +// while setting the new value for the key. +// It stores the new value and returns the existing one, if present. +// The loaded result is true if the existing value was loaded, +// false otherwise. +func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + return value, false + }, + false, + false, + ) +} + +// LoadOrCompute returns the existing value for the key if present. +// Otherwise, it computes the value using the provided function, and +// then stores and returns the computed value. The loaded result is +// true if the value was loaded, false if computed. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. 
+func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + return valueFn(), false + }, + true, + false, + ) +} + +// LoadOrTryCompute returns the existing value for the key if present. +// Otherwise, it tries to compute the value using the provided function +// and, if successful, stores and returns the computed value. The loaded +// result is true if the value was loaded, or false if computed (whether +// successfully or not). If the compute attempt was cancelled (due to an +// error, for example), a zero value of type V will be returned. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *MapOf[K, V]) LoadOrTryCompute( + key K, + valueFn func() (newValue V, cancel bool), +) (value V, loaded bool) { + return m.doCompute( + key, + func(V, bool) (V, bool) { + nv, c := valueFn() + if !c { + return nv, false + } + return nv, true // nv is ignored + }, + true, + false, + ) +} + +// Compute either sets the computed new value for the key or deletes +// the value for the key. When the delete result of the valueFn function +// is set to true, the value will be deleted, if it exists. When delete +// is set to false, the value is updated to the newValue. +// The ok result indicates whether value was computed and stored, thus, is +// present in the map. The actual result contains the new value in cases where +// the value was computed and stored. See the example for a few use cases. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (m *MapOf[K, V]) Compute( + key K, + valueFn func(oldValue V, loaded bool) (newValue V, delete bool), +) (actual V, ok bool) { + return m.doCompute(key, valueFn, false, true) +} + +// LoadAndDelete deletes the value for a key, returning the previous +// value if any. The loaded result reports whether the key was +// present. +func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + return m.doCompute( + key, + func(value V, loaded bool) (V, bool) { + return value, true + }, + false, + false, + ) +} + +// Delete deletes the value for a key. +func (m *MapOf[K, V]) Delete(key K) { + m.doCompute( + key, + func(value V, loaded bool) (V, bool) { + return value, true + }, + false, + false, + ) +} + +func (m *MapOf[K, V]) doCompute( + key K, + valueFn func(oldValue V, loaded bool) (V, bool), + loadIfExists, computeOnly bool, +) (V, bool) { + // Read-only path. + if loadIfExists { + if v, ok := m.Load(key); ok { + return v, !computeOnly + } + } + // Write path. + for { + compute_attempt: + var ( + emptyb *bucketOfPadded + emptyidx int + ) + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + hash := m.hasher(key, table.seed) + h1 := h1(hash) + h2 := h2(hash) + h2w := broadcast(h2) + bidx := uint64(len(table.buckets)-1) & h1 + rootb := &table.buckets[bidx] + rootb.mu.Lock() + // The following two checks must go in reverse to what's + // in the resize method. + if m.resizeInProgress() { + // Resize is in progress. Wait, then go for another attempt. 
+ rootb.mu.Unlock() + m.waitForResize() + goto compute_attempt + } + if m.newerTableExists(table) { + // Someone resized the table. Go for another attempt. + rootb.mu.Unlock() + goto compute_attempt + } + b := rootb + for { + metaw := b.meta + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + eptr := b.entries[idx] + if eptr != nil { + e := (*entryOf[K, V])(eptr) + if e.key == key { + if loadIfExists { + rootb.mu.Unlock() + return e.value, !computeOnly + } + // In-place update/delete. + // We get a copy of the value via an interface{} on each call, + // thus the live value pointers are unique. Otherwise atomic + // snapshot won't be correct in case of multiple Store calls + // using the same value. + oldv := e.value + newv, del := valueFn(oldv, true) + if del { + // Deletion. + // First we update the hash, then the entry. + newmetaw := setByte(metaw, emptyMetaSlot, idx) + atomic.StoreUint64(&b.meta, newmetaw) + atomic.StorePointer(&b.entries[idx], nil) + rootb.mu.Unlock() + table.addSize(bidx, -1) + // Might need to shrink the table if we left bucket empty. + if newmetaw == defaultMeta { + m.resize(table, mapShrinkHint) + } + return oldv, !computeOnly + } + newe := new(entryOf[K, V]) + newe.key = key + newe.value = newv + atomic.StorePointer(&b.entries[idx], unsafe.Pointer(newe)) + rootb.mu.Unlock() + if computeOnly { + // Compute expects the new value to be returned. + return newv, true + } + // LoadAndStore expects the old value to be returned. + return oldv, true + } + } + markedw &= markedw - 1 + } + if emptyb == nil { + // Search for empty entries (up to 5 per bucket). + emptyw := metaw & defaultMetaMasked + if emptyw != 0 { + idx := firstMarkedByteIndex(emptyw) + emptyb = b + emptyidx = idx + } + } + if b.next == nil { + if emptyb != nil { + // Insertion into an existing bucket. + var zeroV V + newValue, del := valueFn(zeroV, false) + if del { + rootb.mu.Unlock() + return zeroV, false + } + newe := new(entryOf[K, V]) + newe.key = key + newe.value = newValue + // First we update meta, then the entry. + atomic.StoreUint64(&emptyb.meta, setByte(emptyb.meta, h2, emptyidx)) + atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe)) + rootb.mu.Unlock() + table.addSize(bidx, 1) + return newValue, computeOnly + } + growThreshold := float64(tableLen) * entriesPerMapOfBucket * mapLoadFactor + if table.sumSize() > int64(growThreshold) { + // Need to grow the table. Then go for another attempt. + rootb.mu.Unlock() + m.resize(table, mapGrowHint) + goto compute_attempt + } + // Insertion into a new bucket. + var zeroV V + newValue, del := valueFn(zeroV, false) + if del { + rootb.mu.Unlock() + return newValue, false + } + // Create and append a bucket. 
+ newb := new(bucketOfPadded) + newb.meta = setByte(defaultMeta, h2, 0) + newe := new(entryOf[K, V]) + newe.key = key + newe.value = newValue + newb.entries[0] = unsafe.Pointer(newe) + atomic.StorePointer(&b.next, unsafe.Pointer(newb)) + rootb.mu.Unlock() + table.addSize(bidx, 1) + return newValue, computeOnly + } + b = (*bucketOfPadded)(b.next) + } + } +} + +func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool { + curTablePtr := atomic.LoadPointer(&m.table) + return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table)) +} + +func (m *MapOf[K, V]) resizeInProgress() bool { + return atomic.LoadInt64(&m.resizing) == 1 +} + +func (m *MapOf[K, V]) waitForResize() { + m.resizeMu.Lock() + for m.resizeInProgress() { + m.resizeCond.Wait() + } + m.resizeMu.Unlock() +} + +func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) { + knownTableLen := len(knownTable.buckets) + // Fast path for shrink attempts. + if hint == mapShrinkHint { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapOfBucket)/mapShrinkFraction) { + return + } + } + // Slow path. + if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) { + // Someone else started resize. Wait for it to finish. + m.waitForResize() + return + } + var newTable *mapOfTable[K, V] + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + tableLen := len(table.buckets) + switch hint { + case mapGrowHint: + // Grow the table with factor of 2. + atomic.AddInt64(&m.totalGrowths, 1) + newTable = newMapOfTable[K, V](tableLen << 1) + case mapShrinkHint: + shrinkThreshold := int64((tableLen * entriesPerMapOfBucket) / mapShrinkFraction) + if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold { + // Shrink the table with factor of 2. + atomic.AddInt64(&m.totalShrinks, 1) + newTable = newMapOfTable[K, V](tableLen >> 1) + } else { + // No need to shrink. Wake up all waiters and give up. + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() + return + } + case mapClearHint: + newTable = newMapOfTable[K, V](m.minTableLen) + default: + panic(fmt.Sprintf("unexpected resize hint: %d", hint)) + } + // Copy the data only if we're not clearing the map. + if hint != mapClearHint { + for i := 0; i < tableLen; i++ { + copied := copyBucketOf(&table.buckets[i], newTable, m.hasher) + newTable.addSizePlain(uint64(i), copied) + } + } + // Publish the new table and wake up all waiters. + atomic.StorePointer(&m.table, unsafe.Pointer(newTable)) + m.resizeMu.Lock() + atomic.StoreInt64(&m.resizing, 0) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() +} + +func copyBucketOf[K comparable, V any]( + b *bucketOfPadded, + destTable *mapOfTable[K, V], + hasher func(K, uint64) uint64, +) (copied int) { + rootb := b + rootb.mu.Lock() + for { + for i := 0; i < entriesPerMapOfBucket; i++ { + if b.entries[i] != nil { + e := (*entryOf[K, V])(b.entries[i]) + hash := hasher(e.key, destTable.seed) + bidx := uint64(len(destTable.buckets)-1) & h1(hash) + destb := &destTable.buckets[bidx] + appendToBucketOf(h2(hash), b.entries[i], destb) + copied++ + } + } + if b.next == nil { + rootb.mu.Unlock() + return + } + b = (*bucketOfPadded)(b.next) + } +} + +// Range calls f sequentially for each key and value present in the +// map. If f returns false, range stops the iteration. 
+// +// Range does not necessarily correspond to any consistent snapshot +// of the Map's contents: no key will be visited more than once, but +// if the value for any key is stored or deleted concurrently, Range +// may reflect any mapping for that key from any point during the +// Range call. +// +// It is safe to modify the map while iterating it, including entry +// creation, modification and deletion. However, the concurrent +// modification rule apply, i.e. the changes may be not reflected +// in the subsequently iterated entries. +func (m *MapOf[K, V]) Range(f func(key K, value V) bool) { + var zeroPtr unsafe.Pointer + // Pre-allocate array big enough to fit entries for most hash tables. + bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapOfBucket) + tablep := atomic.LoadPointer(&m.table) + table := *(*mapOfTable[K, V])(tablep) + for i := range table.buckets { + rootb := &table.buckets[i] + b := rootb + // Prevent concurrent modifications and copy all entries into + // the intermediate slice. + rootb.mu.Lock() + for { + for i := 0; i < entriesPerMapOfBucket; i++ { + if b.entries[i] != nil { + bentries = append(bentries, b.entries[i]) + } + } + if b.next == nil { + rootb.mu.Unlock() + break + } + b = (*bucketOfPadded)(b.next) + } + // Call the function for all copied entries. + for j := range bentries { + entry := (*entryOf[K, V])(bentries[j]) + if !f(entry.key, entry.value) { + return + } + // Remove the reference to avoid preventing the copied + // entries from being GCed until this method finishes. + bentries[j] = zeroPtr + } + bentries = bentries[:0] + } +} + +// Clear deletes all keys and values currently stored in the map. +func (m *MapOf[K, V]) Clear() { + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + m.resize(table, mapClearHint) +} + +// Size returns current size of the map. +func (m *MapOf[K, V]) Size() int { + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + return int(table.sumSize()) +} + +func appendToBucketOf(h2 uint8, entryPtr unsafe.Pointer, b *bucketOfPadded) { + for { + for i := 0; i < entriesPerMapOfBucket; i++ { + if b.entries[i] == nil { + b.meta = setByte(b.meta, h2, i) + b.entries[i] = entryPtr + return + } + } + if b.next == nil { + newb := new(bucketOfPadded) + newb.meta = setByte(defaultMeta, h2, 0) + newb.entries[0] = entryPtr + b.next = unsafe.Pointer(newb) + return + } + b = (*bucketOfPadded)(b.next) + } +} + +func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) { + cidx := uint64(len(table.size)-1) & bucketIdx + atomic.AddInt64(&table.size[cidx].c, int64(delta)) +} + +func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) { + cidx := uint64(len(table.size)-1) & bucketIdx + table.size[cidx].c += int64(delta) +} + +func (table *mapOfTable[K, V]) sumSize() int64 { + sum := int64(0) + for i := range table.size { + sum += atomic.LoadInt64(&table.size[i].c) + } + return sum +} + +func h1(h uint64) uint64 { + return h >> 7 +} + +func h2(h uint64) uint8 { + return uint8(h & 0x7f) +} + +// Stats returns statistics for the MapOf. Just like other map +// methods, this one is thread-safe. Yet it's an O(N) operation, +// so it should be used only for diagnostics or debugging purposes. 
+func (m *MapOf[K, V]) Stats() MapStats { + stats := MapStats{ + TotalGrowths: atomic.LoadInt64(&m.totalGrowths), + TotalShrinks: atomic.LoadInt64(&m.totalShrinks), + MinEntries: math.MaxInt32, + } + table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) + stats.RootBuckets = len(table.buckets) + stats.Counter = int(table.sumSize()) + stats.CounterLen = len(table.size) + for i := range table.buckets { + nentries := 0 + b := &table.buckets[i] + stats.TotalBuckets++ + for { + nentriesLocal := 0 + stats.Capacity += entriesPerMapOfBucket + for i := 0; i < entriesPerMapOfBucket; i++ { + if atomic.LoadPointer(&b.entries[i]) != nil { + stats.Size++ + nentriesLocal++ + } + } + nentries += nentriesLocal + if nentriesLocal == 0 { + stats.EmptyBuckets++ + } + if b.next == nil { + break + } + b = (*bucketOfPadded)(atomic.LoadPointer(&b.next)) + stats.TotalBuckets++ + } + if nentries < stats.MinEntries { + stats.MinEntries = nentries + } + if nentries > stats.MaxEntries { + stats.MaxEntries = nentries + } + } + return stats +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go new file mode 100644 index 0000000000..c5fd262379 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go @@ -0,0 +1,125 @@ +package xsync + +import ( + "runtime" + "sync/atomic" + "unsafe" +) + +// A MPMCQueue is a bounded multi-producer multi-consumer concurrent +// queue. +// +// MPMCQueue instances must be created with NewMPMCQueue function. +// A MPMCQueue must not be copied after first use. +// +// Based on the data structure from the following C++ library: +// https://github.com/rigtorp/MPMCQueue +type MPMCQueue struct { + cap uint64 + head uint64 + //lint:ignore U1000 prevents false sharing + hpad [cacheLineSize - 8]byte + tail uint64 + //lint:ignore U1000 prevents false sharing + tpad [cacheLineSize - 8]byte + slots []slotPadded +} + +type slotPadded struct { + slot + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - unsafe.Sizeof(slot{})]byte +} + +type slot struct { + turn uint64 + item interface{} +} + +// NewMPMCQueue creates a new MPMCQueue instance with the given +// capacity. +func NewMPMCQueue(capacity int) *MPMCQueue { + if capacity < 1 { + panic("capacity must be positive number") + } + return &MPMCQueue{ + cap: uint64(capacity), + slots: make([]slotPadded, capacity), + } +} + +// Enqueue inserts the given item into the queue. +// Blocks, if the queue is full. +// +// Deprecated: use TryEnqueue in combination with runtime.Gosched(). +func (q *MPMCQueue) Enqueue(item interface{}) { + head := atomic.AddUint64(&q.head, 1) - 1 + slot := &q.slots[q.idx(head)] + turn := q.turn(head) * 2 + for atomic.LoadUint64(&slot.turn) != turn { + runtime.Gosched() + } + slot.item = item + atomic.StoreUint64(&slot.turn, turn+1) +} + +// Dequeue retrieves and removes the item from the head of the queue. +// Blocks, if the queue is empty. +// +// Deprecated: use TryDequeue in combination with runtime.Gosched(). +func (q *MPMCQueue) Dequeue() interface{} { + tail := atomic.AddUint64(&q.tail, 1) - 1 + slot := &q.slots[q.idx(tail)] + turn := q.turn(tail)*2 + 1 + for atomic.LoadUint64(&slot.turn) != turn { + runtime.Gosched() + } + item := slot.item + slot.item = nil + atomic.StoreUint64(&slot.turn, turn+1) + return item +} + +// TryEnqueue inserts the given item into the queue. Does not block +// and returns immediately. The result indicates that the queue isn't +// full and the item was inserted. 
+func (q *MPMCQueue) TryEnqueue(item interface{}) bool { + head := atomic.LoadUint64(&q.head) + slot := &q.slots[q.idx(head)] + turn := q.turn(head) * 2 + if atomic.LoadUint64(&slot.turn) == turn { + if atomic.CompareAndSwapUint64(&q.head, head, head+1) { + slot.item = item + atomic.StoreUint64(&slot.turn, turn+1) + return true + } + } + return false +} + +// TryDequeue retrieves and removes the item from the head of the +// queue. Does not block and returns immediately. The ok result +// indicates that the queue isn't empty and an item was retrieved. +func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) { + tail := atomic.LoadUint64(&q.tail) + slot := &q.slots[q.idx(tail)] + turn := q.turn(tail)*2 + 1 + if atomic.LoadUint64(&slot.turn) == turn { + if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) { + item = slot.item + ok = true + slot.item = nil + atomic.StoreUint64(&slot.turn, turn+1) + return + } + } + return +} + +func (q *MPMCQueue) idx(i uint64) uint64 { + return i % q.cap +} + +func (q *MPMCQueue) turn(i uint64) uint64 { + return i / q.cap +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go new file mode 100644 index 0000000000..3f7e4ccc11 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go @@ -0,0 +1,138 @@ +//go:build go1.19 +// +build go1.19 + +package xsync + +import ( + "runtime" + "sync/atomic" + "unsafe" +) + +// A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent +// queue. It's a generic version of MPMCQueue. +// +// MPMCQueueOf instances must be created with NewMPMCQueueOf function. +// A MPMCQueueOf must not be copied after first use. +// +// Based on the data structure from the following C++ library: +// https://github.com/rigtorp/MPMCQueue +type MPMCQueueOf[I any] struct { + cap uint64 + head uint64 + //lint:ignore U1000 prevents false sharing + hpad [cacheLineSize - 8]byte + tail uint64 + //lint:ignore U1000 prevents false sharing + tpad [cacheLineSize - 8]byte + slots []slotOfPadded[I] +} + +type slotOfPadded[I any] struct { + slotOf[I] + // Unfortunately, proper padding like the below one: + // + // pad [cacheLineSize - (unsafe.Sizeof(slotOf[I]{}) % cacheLineSize)]byte + // + // won't compile, so here we add a best-effort padding for items up to + // 56 bytes size. + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte +} + +type slotOf[I any] struct { + // atomic.Uint64 is used here to get proper 8 byte alignment on + // 32-bit archs. + turn atomic.Uint64 + item I +} + +// NewMPMCQueueOf creates a new MPMCQueueOf instance with the given +// capacity. +func NewMPMCQueueOf[I any](capacity int) *MPMCQueueOf[I] { + if capacity < 1 { + panic("capacity must be positive number") + } + return &MPMCQueueOf[I]{ + cap: uint64(capacity), + slots: make([]slotOfPadded[I], capacity), + } +} + +// Enqueue inserts the given item into the queue. +// Blocks, if the queue is full. +// +// Deprecated: use TryEnqueue in combination with runtime.Gosched(). +func (q *MPMCQueueOf[I]) Enqueue(item I) { + head := atomic.AddUint64(&q.head, 1) - 1 + slot := &q.slots[q.idx(head)] + turn := q.turn(head) * 2 + for slot.turn.Load() != turn { + runtime.Gosched() + } + slot.item = item + slot.turn.Store(turn + 1) +} + +// Dequeue retrieves and removes the item from the head of the queue. +// Blocks, if the queue is empty. +// +// Deprecated: use TryDequeue in combination with runtime.Gosched(). 
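+//
+// The non-blocking replacement suggested above can be sketched as follows
+// (illustrative only; q is an existing MPMCQueueOf instance):
+//
+//	item, ok := q.TryDequeue()
+//	for !ok {
+//		runtime.Gosched()
+//		item, ok = q.TryDequeue()
+//	}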
+func (q *MPMCQueueOf[I]) Dequeue() I { + var zeroI I + tail := atomic.AddUint64(&q.tail, 1) - 1 + slot := &q.slots[q.idx(tail)] + turn := q.turn(tail)*2 + 1 + for slot.turn.Load() != turn { + runtime.Gosched() + } + item := slot.item + slot.item = zeroI + slot.turn.Store(turn + 1) + return item +} + +// TryEnqueue inserts the given item into the queue. Does not block +// and returns immediately. The result indicates that the queue isn't +// full and the item was inserted. +func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool { + head := atomic.LoadUint64(&q.head) + slot := &q.slots[q.idx(head)] + turn := q.turn(head) * 2 + if slot.turn.Load() == turn { + if atomic.CompareAndSwapUint64(&q.head, head, head+1) { + slot.item = item + slot.turn.Store(turn + 1) + return true + } + } + return false +} + +// TryDequeue retrieves and removes the item from the head of the +// queue. Does not block and returns immediately. The ok result +// indicates that the queue isn't empty and an item was retrieved. +func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) { + tail := atomic.LoadUint64(&q.tail) + slot := &q.slots[q.idx(tail)] + turn := q.turn(tail)*2 + 1 + if slot.turn.Load() == turn { + if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) { + var zeroI I + item = slot.item + ok = true + slot.item = zeroI + slot.turn.Store(turn + 1) + return + } + } + return +} + +func (q *MPMCQueueOf[I]) idx(i uint64) uint64 { + return i % q.cap +} + +func (q *MPMCQueueOf[I]) turn(i uint64) uint64 { + return i / q.cap +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go b/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go new file mode 100644 index 0000000000..4cbd9c41d9 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go @@ -0,0 +1,188 @@ +package xsync + +import ( + "runtime" + "sync" + "sync/atomic" + "time" +) + +// slow-down guard +const nslowdown = 7 + +// pool for reader tokens +var rtokenPool sync.Pool + +// RToken is a reader lock token. +type RToken struct { + slot uint32 + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - 4]byte +} + +// A RBMutex is a reader biased reader/writer mutual exclusion lock. +// The lock can be held by an many readers or a single writer. +// The zero value for a RBMutex is an unlocked mutex. +// +// A RBMutex must not be copied after first use. +// +// RBMutex is based on a modified version of BRAVO +// (Biased Locking for Reader-Writer Locks) algorithm: +// https://arxiv.org/pdf/1810.01553.pdf +// +// RBMutex is a specialized mutex for scenarios, such as caches, +// where the vast majority of locks are acquired by readers and write +// lock acquire attempts are infrequent. In such scenarios, RBMutex +// performs better than sync.RWMutex on large multicore machines. +// +// RBMutex extends sync.RWMutex internally and uses it as the "reader +// bias disabled" fallback, so the same semantics apply. The only +// noticeable difference is in reader tokens returned from the +// RLock/RUnlock methods. +type RBMutex struct { + rslots []rslot + rmask uint32 + rbias int32 + inhibitUntil time.Time + rw sync.RWMutex +} + +type rslot struct { + mu int32 + //lint:ignore U1000 prevents false sharing + pad [cacheLineSize - 4]byte +} + +// NewRBMutex creates a new RBMutex instance. +func NewRBMutex() *RBMutex { + nslots := nextPowOf2(parallelism()) + mu := RBMutex{ + rslots: make([]rslot, nslots), + rmask: nslots - 1, + rbias: 1, + } + return &mu +} + +// TryRLock tries to lock m for reading without blocking. 
+// When TryRLock succeeds, it returns true and a reader token. +// In case of a failure, a false is returned. +func (mu *RBMutex) TryRLock() (bool, *RToken) { + if t := mu.fastRlock(); t != nil { + return true, t + } + // Optimistic slow path. + if mu.rw.TryRLock() { + if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) { + atomic.StoreInt32(&mu.rbias, 1) + } + return true, nil + } + return false, nil +} + +// RLock locks m for reading and returns a reader token. The +// token must be used in the later RUnlock call. +// +// Should not be used for recursive read locking; a blocked Lock +// call excludes new readers from acquiring the lock. +func (mu *RBMutex) RLock() *RToken { + if t := mu.fastRlock(); t != nil { + return t + } + // Slow path. + mu.rw.RLock() + if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) { + atomic.StoreInt32(&mu.rbias, 1) + } + return nil +} + +func (mu *RBMutex) fastRlock() *RToken { + if atomic.LoadInt32(&mu.rbias) == 1 { + t, ok := rtokenPool.Get().(*RToken) + if !ok { + t = new(RToken) + t.slot = runtime_fastrand() + } + // Try all available slots to distribute reader threads to slots. + for i := 0; i < len(mu.rslots); i++ { + slot := t.slot + uint32(i) + rslot := &mu.rslots[slot&mu.rmask] + rslotmu := atomic.LoadInt32(&rslot.mu) + if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) { + if atomic.LoadInt32(&mu.rbias) == 1 { + // Hot path succeeded. + t.slot = slot + return t + } + // The mutex is no longer reader biased. Roll back. + atomic.AddInt32(&rslot.mu, -1) + rtokenPool.Put(t) + return nil + } + // Contention detected. Give a try with the next slot. + } + } + return nil +} + +// RUnlock undoes a single RLock call. A reader token obtained from +// the RLock call must be provided. RUnlock does not affect other +// simultaneous readers. A panic is raised if m is not locked for +// reading on entry to RUnlock. +func (mu *RBMutex) RUnlock(t *RToken) { + if t == nil { + mu.rw.RUnlock() + return + } + if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 { + panic("invalid reader state detected") + } + rtokenPool.Put(t) +} + +// TryLock tries to lock m for writing without blocking. +func (mu *RBMutex) TryLock() bool { + if mu.rw.TryLock() { + if atomic.LoadInt32(&mu.rbias) == 1 { + atomic.StoreInt32(&mu.rbias, 0) + for i := 0; i < len(mu.rslots); i++ { + if atomic.LoadInt32(&mu.rslots[i].mu) > 0 { + // There is a reader. Roll back. + atomic.StoreInt32(&mu.rbias, 1) + mu.rw.Unlock() + return false + } + } + } + return true + } + return false +} + +// Lock locks m for writing. If the lock is already locked for +// reading or writing, Lock blocks until the lock is available. +func (mu *RBMutex) Lock() { + mu.rw.Lock() + if atomic.LoadInt32(&mu.rbias) == 1 { + atomic.StoreInt32(&mu.rbias, 0) + start := time.Now() + for i := 0; i < len(mu.rslots); i++ { + for atomic.LoadInt32(&mu.rslots[i].mu) > 0 { + runtime.Gosched() + } + } + mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown) + } +} + +// Unlock unlocks m for writing. A panic is raised if m is not locked +// for writing on entry to Unlock. +// +// As with RWMutex, a locked RBMutex is not associated with a +// particular goroutine. One goroutine may RLock (Lock) a RBMutex and +// then arrange for another goroutine to RUnlock (Unlock) it. 
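+//
+// A compact usage sketch covering both the read and write paths
+// (illustrative; mu and sharedValue are assumed to exist):
+//
+//	t := mu.RLock()
+//	v := sharedValue // read-side critical section
+//	mu.RUnlock(t)
+//
+//	mu.Lock()
+//	sharedValue = v + 1 // write-side critical section
+//	mu.Unlock()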
+func (mu *RBMutex) Unlock() { + mu.rw.Unlock() +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go new file mode 100644 index 0000000000..6e4f84bc0c --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go @@ -0,0 +1,92 @@ +package xsync + +import ( + "sync/atomic" +) + +// A SPSCQueue is a bounded single-producer single-consumer concurrent +// queue. This means that not more than a single goroutine must be +// publishing items to the queue while not more than a single goroutine +// must be consuming those items. +// +// SPSCQueue instances must be created with NewSPSCQueue function. +// A SPSCQueue must not be copied after first use. +// +// Based on the data structure from the following article: +// https://rigtorp.se/ringbuffer/ +type SPSCQueue struct { + cap uint64 + pidx uint64 + //lint:ignore U1000 prevents false sharing + pad0 [cacheLineSize - 8]byte + pcachedIdx uint64 + //lint:ignore U1000 prevents false sharing + pad1 [cacheLineSize - 8]byte + cidx uint64 + //lint:ignore U1000 prevents false sharing + pad2 [cacheLineSize - 8]byte + ccachedIdx uint64 + //lint:ignore U1000 prevents false sharing + pad3 [cacheLineSize - 8]byte + items []interface{} +} + +// NewSPSCQueue creates a new SPSCQueue instance with the given +// capacity. +func NewSPSCQueue(capacity int) *SPSCQueue { + if capacity < 1 { + panic("capacity must be positive number") + } + return &SPSCQueue{ + cap: uint64(capacity + 1), + items: make([]interface{}, capacity+1), + } +} + +// TryEnqueue inserts the given item into the queue. Does not block +// and returns immediately. The result indicates that the queue isn't +// full and the item was inserted. +func (q *SPSCQueue) TryEnqueue(item interface{}) bool { + // relaxed memory order would be enough here + idx := atomic.LoadUint64(&q.pidx) + nextIdx := idx + 1 + if nextIdx == q.cap { + nextIdx = 0 + } + cachedIdx := q.ccachedIdx + if nextIdx == cachedIdx { + cachedIdx = atomic.LoadUint64(&q.cidx) + q.ccachedIdx = cachedIdx + if nextIdx == cachedIdx { + return false + } + } + q.items[idx] = item + atomic.StoreUint64(&q.pidx, nextIdx) + return true +} + +// TryDequeue retrieves and removes the item from the head of the +// queue. Does not block and returns immediately. The ok result +// indicates that the queue isn't empty and an item was retrieved. +func (q *SPSCQueue) TryDequeue() (item interface{}, ok bool) { + // relaxed memory order would be enough here + idx := atomic.LoadUint64(&q.cidx) + cachedIdx := q.pcachedIdx + if idx == cachedIdx { + cachedIdx = atomic.LoadUint64(&q.pidx) + q.pcachedIdx = cachedIdx + if idx == cachedIdx { + return + } + } + item = q.items[idx] + q.items[idx] = nil + ok = true + nextIdx := idx + 1 + if nextIdx == q.cap { + nextIdx = 0 + } + atomic.StoreUint64(&q.cidx, nextIdx) + return +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go new file mode 100644 index 0000000000..3ae132e503 --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go @@ -0,0 +1,96 @@ +//go:build go1.19 +// +build go1.19 + +package xsync + +import ( + "sync/atomic" +) + +// A SPSCQueueOf is a bounded single-producer single-consumer concurrent +// queue. This means that not more than a single goroutine must be +// publishing items to the queue while not more than a single goroutine +// must be consuming those items. +// +// SPSCQueueOf instances must be created with NewSPSCQueueOf function. 
+// A SPSCQueueOf must not be copied after first use. +// +// Based on the data structure from the following article: +// https://rigtorp.se/ringbuffer/ +type SPSCQueueOf[I any] struct { + cap uint64 + pidx uint64 + //lint:ignore U1000 prevents false sharing + pad0 [cacheLineSize - 8]byte + pcachedIdx uint64 + //lint:ignore U1000 prevents false sharing + pad1 [cacheLineSize - 8]byte + cidx uint64 + //lint:ignore U1000 prevents false sharing + pad2 [cacheLineSize - 8]byte + ccachedIdx uint64 + //lint:ignore U1000 prevents false sharing + pad3 [cacheLineSize - 8]byte + items []I +} + +// NewSPSCQueueOf creates a new SPSCQueueOf instance with the given +// capacity. +func NewSPSCQueueOf[I any](capacity int) *SPSCQueueOf[I] { + if capacity < 1 { + panic("capacity must be positive number") + } + return &SPSCQueueOf[I]{ + cap: uint64(capacity + 1), + items: make([]I, capacity+1), + } +} + +// TryEnqueue inserts the given item into the queue. Does not block +// and returns immediately. The result indicates that the queue isn't +// full and the item was inserted. +func (q *SPSCQueueOf[I]) TryEnqueue(item I) bool { + // relaxed memory order would be enough here + idx := atomic.LoadUint64(&q.pidx) + next_idx := idx + 1 + if next_idx == q.cap { + next_idx = 0 + } + cached_idx := q.ccachedIdx + if next_idx == cached_idx { + cached_idx = atomic.LoadUint64(&q.cidx) + q.ccachedIdx = cached_idx + if next_idx == cached_idx { + return false + } + } + q.items[idx] = item + atomic.StoreUint64(&q.pidx, next_idx) + return true +} + +// TryDequeue retrieves and removes the item from the head of the +// queue. Does not block and returns immediately. The ok result +// indicates that the queue isn't empty and an item was retrieved. +func (q *SPSCQueueOf[I]) TryDequeue() (item I, ok bool) { + // relaxed memory order would be enough here + idx := atomic.LoadUint64(&q.cidx) + cached_idx := q.pcachedIdx + if idx == cached_idx { + cached_idx = atomic.LoadUint64(&q.pidx) + q.pcachedIdx = cached_idx + if idx == cached_idx { + return + } + } + var zeroI I + item = q.items[idx] + q.items[idx] = zeroI + ok = true + next_idx := idx + 1 + if next_idx == q.cap { + next_idx = 0 + } + atomic.StoreUint64(&q.cidx, next_idx) + return +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/util.go b/vendor/github.com/puzpuzpuz/xsync/v3/util.go new file mode 100644 index 0000000000..769270895d --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/util.go @@ -0,0 +1,66 @@ +package xsync + +import ( + "math/bits" + "runtime" + _ "unsafe" +) + +// test-only assert()-like flag +var assertionsEnabled = false + +const ( + // cacheLineSize is used in paddings to prevent false sharing; + // 64B are used instead of 128B as a compromise between + // memory footprint and performance; 128B usage may give ~30% + // improvement on NUMA machines. + cacheLineSize = 64 +) + +// nextPowOf2 computes the next highest power of 2 of 32-bit v. 
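+// For example (illustrative): nextPowOf2(0) returns 1, nextPowOf2(5) returns 8,
+// and nextPowOf2(8) returns 8.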
+// Source: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 +func nextPowOf2(v uint32) uint32 { + if v == 0 { + return 1 + } + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + return v +} + +func parallelism() uint32 { + maxProcs := uint32(runtime.GOMAXPROCS(0)) + numCores := uint32(runtime.NumCPU()) + if maxProcs < numCores { + return maxProcs + } + return numCores +} + +//go:noescape +//go:linkname runtime_fastrand runtime.fastrand +func runtime_fastrand() uint32 + +func broadcast(b uint8) uint64 { + return 0x101010101010101 * uint64(b) +} + +func firstMarkedByteIndex(w uint64) int { + return bits.TrailingZeros64(w) >> 3 +} + +// SWAR byte search: may produce false positives, e.g. for 0x0100, +// so make sure to double-check bytes found by this function. +func markZeroBytes(w uint64) uint64 { + return ((w - 0x0101010101010101) & (^w) & 0x8080808080808080) +} + +func setByte(w uint64, b uint8, idx int) uint64 { + shift := idx << 3 + return (w &^ (0xff << shift)) | (uint64(b) << shift) +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go b/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go new file mode 100644 index 0000000000..9aa65972df --- /dev/null +++ b/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go @@ -0,0 +1,77 @@ +package xsync + +import ( + "reflect" + "unsafe" +) + +// makeSeed creates a random seed. +func makeSeed() uint64 { + var s1 uint32 + for { + s1 = runtime_fastrand() + // We use seed 0 to indicate an uninitialized seed/hash, + // so keep trying until we get a non-zero seed. + if s1 != 0 { + break + } + } + s2 := runtime_fastrand() + return uint64(s1)<<32 | uint64(s2) +} + +// hashString calculates a hash of s with the given seed. +func hashString(s string, seed uint64) uint64 { + if s == "" { + return seed + } + strh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + return uint64(runtime_memhash(unsafe.Pointer(strh.Data), uintptr(seed), uintptr(strh.Len))) +} + +//go:noescape +//go:linkname runtime_memhash runtime.memhash +func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr + +// defaultHasher creates a fast hash function for the given comparable type. +// The only limitation is that the type should not contain interfaces inside +// based on runtime.typehash. 
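+//
+// Illustrative internal usage (sketch only; the seed comes from makeSeed):
+//
+//	hash := defaultHasher[string]()
+//	h := hash("some key", makeSeed())
+//	_ = h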
+func defaultHasher[T comparable]() func(T, uint64) uint64 { + var zero T + + if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface { + return func(value T, seed uint64) uint64 { + iValue := any(value) + i := (*iface)(unsafe.Pointer(&iValue)) + return runtime_typehash64(i.typ, i.word, seed) + } + } else { + var iZero any = zero + i := (*iface)(unsafe.Pointer(&iZero)) + return func(value T, seed uint64) uint64 { + return runtime_typehash64(i.typ, unsafe.Pointer(&value), seed) + } + } +} + +// how interface is represented in memory +type iface struct { + typ uintptr + word unsafe.Pointer +} + +// same as runtime_typehash, but always returns a uint64 +// see: maphash.rthash function for details +func runtime_typehash64(t uintptr, p unsafe.Pointer, seed uint64) uint64 { + if unsafe.Sizeof(uintptr(0)) == 8 { + return uint64(runtime_typehash(t, p, uintptr(seed))) + } + + lo := runtime_typehash(t, p, uintptr(seed)) + hi := runtime_typehash(t, p, uintptr(seed>>32)) + return uint64(hi)<<32 | uint64(lo) +} + +//go:noescape +//go:linkname runtime_typehash runtime.typehash +func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr diff --git a/vendor/go.opentelemetry.io/collector/component/component.go b/vendor/go.opentelemetry.io/collector/component/component.go index 0a0c160fc5..5a32c5041d 100644 --- a/vendor/go.opentelemetry.io/collector/component/component.go +++ b/vendor/go.opentelemetry.io/collector/component/component.go @@ -78,31 +78,20 @@ func (f ShutdownFunc) Shutdown(ctx context.Context) error { } // Kind represents component kinds. -type Kind int +type Kind struct { + name string +} -const ( - _ Kind = iota // skip 0, start types from 1. - KindReceiver - KindProcessor - KindExporter - KindExtension - KindConnector +var ( + KindReceiver = Kind{name: "Receiver"} + KindProcessor = Kind{name: "Processor"} + KindExporter = Kind{name: "Exporter"} + KindExtension = Kind{name: "Extension"} + KindConnector = Kind{name: "Connector"} ) func (k Kind) String() string { - switch k { - case KindReceiver: - return "Receiver" - case KindProcessor: - return "Processor" - case KindExporter: - return "Exporter" - case KindExtension: - return "Extension" - case KindConnector: - return "Connector" - } - return "" + return k.name } // StabilityLevel represents the stability level of the component created by the factory. diff --git a/vendor/go.opentelemetry.io/collector/component/config.go b/vendor/go.opentelemetry.io/collector/component/config.go index 599b9be323..ca33da36a6 100644 --- a/vendor/go.opentelemetry.io/collector/component/config.go +++ b/vendor/go.opentelemetry.io/collector/component/config.go @@ -3,93 +3,11 @@ package component // import "go.opentelemetry.io/collector/component" -import ( - "reflect" - - "go.uber.org/multierr" -) - // Config defines the configuration for a component.Component. // // Implementations and/or any sub-configs (other types embedded or included in the Config implementation) -// MUST implement the ConfigValidator if any validation is required for that part of the configuration +// MUST implement xconfmap.Validator if any validation is required for that part of the configuration // (e.g. check if a required field is present). // // A valid implementation MUST pass the check componenttest.CheckConfigStruct (return nil error). type Config any - -// As interface types are only used for static typing, a common idiom to find the reflection Type -// for an interface type Foo is to use a *Foo value. 
-var configValidatorType = reflect.TypeOf((*ConfigValidator)(nil)).Elem() - -// ConfigValidator defines an optional interface for configurations to implement to do validation. -type ConfigValidator interface { - // Validate the configuration and returns an error if invalid. - Validate() error -} - -// ValidateConfig validates a config, by doing this: -// - Call Validate on the config itself if the config implements ConfigValidator. -func ValidateConfig(cfg Config) error { - return validate(reflect.ValueOf(cfg)) -} - -func validate(v reflect.Value) error { - // Validate the value itself. - switch v.Kind() { - case reflect.Invalid: - return nil - case reflect.Ptr: - return validate(v.Elem()) - case reflect.Struct: - var errs error - errs = multierr.Append(errs, callValidateIfPossible(v)) - // Reflect on the pointed data and check each of its fields. - for i := 0; i < v.NumField(); i++ { - if !v.Type().Field(i).IsExported() { - continue - } - errs = multierr.Append(errs, validate(v.Field(i))) - } - return errs - case reflect.Slice, reflect.Array: - var errs error - errs = multierr.Append(errs, callValidateIfPossible(v)) - // Reflect on the pointed data and check each of its fields. - for i := 0; i < v.Len(); i++ { - errs = multierr.Append(errs, validate(v.Index(i))) - } - return errs - case reflect.Map: - var errs error - errs = multierr.Append(errs, callValidateIfPossible(v)) - iter := v.MapRange() - for iter.Next() { - errs = multierr.Append(errs, validate(iter.Key())) - errs = multierr.Append(errs, validate(iter.Value())) - } - return errs - default: - return callValidateIfPossible(v) - } -} - -func callValidateIfPossible(v reflect.Value) error { - // If the value type implements ConfigValidator just call Validate - if v.Type().Implements(configValidatorType) { - return v.Interface().(ConfigValidator).Validate() - } - - // If the pointer type implements ConfigValidator call Validate on the pointer to the current value. - if reflect.PointerTo(v.Type()).Implements(configValidatorType) { - // If not addressable, then create a new *V pointer and set the value to current v. - if !v.CanAddr() { - pv := reflect.New(reflect.PointerTo(v.Type()).Elem()) - pv.Elem().Set(v) - v = pv.Elem() - } - return v.Addr().Interface().(ConfigValidator).Validate() - } - - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/component/identifiable.go b/vendor/go.opentelemetry.io/collector/component/identifiable.go index 63b890b47a..6b81476816 100644 --- a/vendor/go.opentelemetry.io/collector/component/identifiable.go +++ b/vendor/go.opentelemetry.io/collector/component/identifiable.go @@ -123,23 +123,19 @@ func (id ID) MarshalText() (text []byte, err error) { // UnmarshalText implements the encoding.TextUnmarshaler interface. func (id *ID) UnmarshalText(text []byte) error { idStr := string(text) - items := strings.SplitN(idStr, typeAndNameSeparator, 2) - var typeStr, nameStr string - if len(items) >= 1 { - typeStr = strings.TrimSpace(items[0]) - } - - if len(items) == 1 && typeStr == "" { - return errors.New("id must not be empty") - } + typeStr, nameStr, hasName := strings.Cut(idStr, typeAndNameSeparator) + typeStr = strings.TrimSpace(typeStr) if typeStr == "" { - return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator) + if hasName { + return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator) + } + return errors.New("id must not be empty") } - if len(items) > 1 { + if hasName { // "name" part is present. 
- nameStr = strings.TrimSpace(items[1]) + nameStr = strings.TrimSpace(nameStr) if nameStr == "" { return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator) } diff --git a/vendor/go.opentelemetry.io/collector/component/telemetry.go b/vendor/go.opentelemetry.io/collector/component/telemetry.go index 359562e5f9..461dead4b3 100644 --- a/vendor/go.opentelemetry.io/collector/component/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/component/telemetry.go @@ -4,31 +4,8 @@ package component // import "go.opentelemetry.io/collector/component" import ( - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/internal/telemetry" ) // TelemetrySettings provides components with APIs to report telemetry. -type TelemetrySettings struct { - // Logger that the factory can use during creation and can pass to the created - // component to be used later as well. - Logger *zap.Logger - - // TracerProvider that the factory can pass to other instrumented third-party libraries. - TracerProvider trace.TracerProvider - - // MeterProvider that the factory can pass to other instrumented third-party libraries. - MeterProvider metric.MeterProvider - - // MetricsLevel represents the configuration value set when the collector - // is configured. Components may use this level to decide whether it is - // appropriate to avoid computationally expensive calculations. - MetricsLevel configtelemetry.Level - - // Resource contains the resource attributes for the collector's telemetry. - Resource pcommon.Resource -} +type TelemetrySettings = telemetry.TelemetrySettings diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go b/vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go deleted file mode 100644 index b8c0967c90..0000000000 --- a/vendor/go.opentelemetry.io/collector/config/configtelemetry/configtelemetry.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package configtelemetry // import "go.opentelemetry.io/collector/config/configtelemetry" - -import ( - "errors" - "fmt" - "strings" -) - -const ( - // LevelNone indicates that no telemetry data should be collected. - LevelNone Level = iota - 1 - // LevelBasic is the recommended and covers the basics of the service telemetry. - LevelBasic - // LevelNormal adds some other indicators on top of basic. - LevelNormal - // LevelDetailed adds dimensions and views to the previous levels. - LevelDetailed - - levelNoneStr = "None" - levelBasicStr = "Basic" - levelNormalStr = "Normal" - levelDetailedStr = "Detailed" -) - -// Level is the level of internal telemetry (metrics, logs, traces about the component itself) -// that every component should generate. -type Level int32 - -func (l Level) String() string { - switch l { - case LevelNone: - return levelNoneStr - case LevelBasic: - return levelBasicStr - case LevelNormal: - return levelNormalStr - case LevelDetailed: - return levelDetailedStr - } - return "" -} - -// MarshalText marshals Level to text. -func (l Level) MarshalText() (text []byte, err error) { - return []byte(l.String()), nil -} - -// UnmarshalText unmarshalls text to a Level. 
-func (l *Level) UnmarshalText(text []byte) error { - if l == nil { - return errors.New("cannot unmarshal to a nil *Level") - } - - str := strings.ToLower(string(text)) - switch str { - case strings.ToLower(levelNoneStr): - *l = LevelNone - return nil - case strings.ToLower(levelBasicStr): - *l = LevelBasic - return nil - case strings.ToLower(levelNormalStr): - *l = LevelNormal - return nil - case strings.ToLower(levelDetailedStr): - *l = LevelDetailed - return nil - } - return fmt.Errorf("unknown metrics level %q", str) -} diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go b/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go deleted file mode 100644 index 646aeb2d7c..0000000000 --- a/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package configtelemetry defines various telemetry level for configuration. -// It enables every component to have access to telemetry level -// to enable metrics only when necessary. -// -// This document provides guidance on which telemetry level to adopt for Collector metrics. -// When adopting a telemetry level, component authors are expected to rely on this guidance to -// justify their choice of telemetry level. -// -// 1. configtelemetry.None -// -// No telemetry data is recorded. -// -// 2. configtelemetry.Basic -// -// Telemetry associated with this level provides essential coverage of the collector telemetry. -// It should only be used for internal collector telemetry generated by the collector core API. Components outside of -// the core API MUST NOT record additional telemetry at this level. -// -// 3. configtelemetry.Normal -// -// Telemetry associated with this level provides complete coverage of the collector telemetry. -// It should be the default for component authors. -// -// Component authors using this telemetry level can use this guidance: -// -// - The signals associated with this level must control cardinality. -// It is acceptable at this level for cardinality to scale linearly with the monitored resources. -// -// - The signals associated with this level must represent a controlled data volume. Examples follow: -// -// a. A max cardinality (total possible combinations of dimension values) for a given metric of at most 100. -// -// b. At most 5 spans actively recording simultaneously per active request. -// -// This is the default level recommended when running the Collector. -// -// 4. configtelemetry.Detailed -// -// Telemetry associated with this level provides complete coverage of the collector telemetry. -// -// The signals associated with this level may exhibit high cardinality and/or high dimensionality. -// -// There is no limit on data volume. 
-package configtelemetry // import "go.opentelemetry.io/collector/config/configtelemetry" diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/LICENSE b/vendor/go.opentelemetry.io/collector/confmap/LICENSE similarity index 100% rename from vendor/go.opentelemetry.io/collector/config/configtelemetry/LICENSE rename to vendor/go.opentelemetry.io/collector/confmap/LICENSE diff --git a/vendor/go.opentelemetry.io/collector/confmap/Makefile b/vendor/go.opentelemetry.io/collector/confmap/Makefile new file mode 100644 index 0000000000..39734bfaeb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/README.md b/vendor/go.opentelemetry.io/collector/confmap/README.md new file mode 100644 index 0000000000..ca11a97295 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/README.md @@ -0,0 +1,284 @@ +# Confmap + + +| Status | | +| ------------- |-----------| +| Stability | [stable]: logs, metrics, traces | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Apkg%2Fconfmap%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Apkg%2Fconfmap) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Apkg%2Fconfmap%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Apkg%2Fconfmap) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mx-psi](https://www.github.com/mx-psi), [@evan-bradley](https://www.github.com/evan-bradley) | + +[stable]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stable + + +# High Level Design + +## Conf + +The [Conf](confmap.go) represents the raw configuration for a service (e.g. OpenTelemetry Collector). + +## Provider + +The [Provider](provider.go) provides configuration, and allows to watch/monitor for changes. Any `Provider` +has a `` associated with it, and will provide configs for `configURI` that follow the ":" format. +This format is compatible with the URI definition (see [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986)). +The `` MUST be always included in the `configURI`. The scheme for any `Provider` MUST be at least 2 +characters long to avoid conflicting with a driver-letter identifier as specified in +[file URI syntax](https://datatracker.ietf.org/doc/html/rfc8089#section-2). + +## Converter + +The [Converter](converter.go) allows implementing conversion logic for the provided configuration. One of the most +common use-case is to migrate/transform the configuration after a backwards incompatible change. + +## Resolver + +The `Resolver` handles the use of multiple [Providers](#provider) and [Converters](#converter) +simplifying configuration parsing, monitoring for updates, and the overall life-cycle of the used config providers. +The `Resolver` provides two main functionalities: [Configuration Resolving](#configuration-resolving) and +[Watching for Updates](#watching-for-updates). 
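+
+As a rough sketch of how the resulting `Conf` is typically consumed (illustrative
+only; the struct, field names, and values below are made up, and the resolver
+wiring is elided), a component unmarshals it into a typed configuration struct:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/collector/confmap"
+)
+
+type exampleConfig struct {
+	Endpoint string        `mapstructure:"endpoint"`
+	Timeout  time.Duration `mapstructure:"timeout"`
+}
+
+func main() {
+	// In real use the Conf comes from a Resolver; NewFromStringMap stands in here.
+	conf := confmap.NewFromStringMap(map[string]any{
+		"endpoint": "localhost:4317",
+		"timeout":  "5s",
+	})
+
+	var cfg exampleConfig
+	if err := conf.Unmarshal(&cfg); err != nil {
+		panic(err)
+	}
+	fmt.Println(cfg.Endpoint, cfg.Timeout) // localhost:4317 5s
+}
+```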
+ +### Configuration Resolving + +The `Resolver` receives as input a set of `Providers`, a list of `Converters`, and a list of configuration identifier +`configURI` that will be used to generate the resulting, or effective, configuration in the form of a `Conf`, +that can be used by code that is oblivious to the usage of `Providers` and `Converters`. + +`Providers` are used to provide an entire configuration when the `configURI` is given directly to the `Resolver`, +or an individual value (partial configuration) when the `configURI` is embedded into the `Conf` as a values using +the syntax `${configURI}`. + +**Limitation:** +- When embedding a `${configURI}` the uri cannot contain dollar sign ("$") character unless it embeds another uri. +- The number of URIs is limited to 100. + +```terminal + Resolver Provider + Resolve │ │ +────────────────►│ │ + │ │ + ┌─ │ Retrieve │ + │ ├─────────────────────────►│ + │ │ Conf │ + │ │◄─────────────────────────┤ + foreach │ │ │ + configURI │ ├───┐ │ + │ │ │Merge │ + │ │◄──┘ │ + └─ │ │ + ┌─ │ Retrieve │ + │ ├─────────────────────────►│ + │ │ Partial Conf Value │ + │ │◄─────────────────────────┤ + foreach │ │ │ + embedded │ │ │ + configURI │ ├───┐ │ + │ │ │Replace │ + │ │◄──┘ │ + └─ │ │ + │ Converter │ + ┌─ │ Convert │ │ + │ ├───────────────►│ │ + foreach │ │ │ │ + Converter │ │◄───────────────┤ │ + └─ │ │ + │ │ +◄────────────────┤ │ +``` + +The `Resolve` method proceeds in the following steps: + +1. Start with an empty "result" of `Conf` type. +2. For each config URI retrieves individual configurations, and merges it into the "result". +3. For each embedded config URI retrieves individual value, and replaces it into the "result". +4. For each "Converter", call "Convert" for the "result". +5. Return the "result", aka effective, configuration. + +#### (Experimental) Append merging strategy for lists + +You can opt-in to experimentally combine slices instead of discarding the existing ones by enabling the `confmap.enableMergeAppendOption` feature flag. Lists are appended in the order in which they appear in their configuration sources. +This will **not** become the default in the future, we are still deciding how this should be configured and want your feedback on [this issue](https://github.com/open-telemetry/opentelemetry-collector/issues/8754). 
+ +##### Example +Consider the following configs, + +```yaml +# main.yaml +receivers: + otlp/in: +processors: + attributes/example: + actions: + - key: key + value: "value" + action: upsert + +exporters: + otlp/out: +extensions: + file_storage: + +service: + pipelines: + traces: + receivers: [ otlp/in ] + processors: [ attributes/example ] + exporters: [ otlp/out ] + extensions: [ file_storage ] +``` + + +```yaml +# extra_extension.yaml +processors: + batch: +extensions: + healthcheckv2: + +service: + extensions: [ healthcheckv2 ] + pipelines: + traces: + processors: [ batch ] +``` + +If you run the Collector with following command, +``` +otelcol --config=main.yaml --config=extra_extension.yaml --feature-gates=confmap.enableMergeAppendOption +``` +then the final configuration after config resolution will look like following: + +```yaml +# main.yaml +receivers: + otlp/in: +processors: + attributes/example: + actions: + - key: key + value: "value" + action: upsert + batch: +exporters: + otlp/out: +extensions: + file_storage: + healthcheckv2: + +service: + pipelines: + traces: + receivers: [ otlp/in ] + processors: [ attributes/example, batch ] + exporters: [ otlp/out ] + extensions: [ file_storage, healthcheckv2 ] +``` + +Notice that the `service::extensions` list is a combination of both configurations. By default, the value of the last configuration source passed, `extra_extension`, would be used, so the extensions list would be: `service::extensions: [healthcheckv2]`. + +> [!NOTE] +> By enabling this feature gate, all the lists in the given configuration will be merged. + +### Watching for Updates +After the configuration was processed, the `Resolver` can be used as a single point to watch for updates in the +configuration retrieved via the `Provider` used to retrieve the “initial” configuration and to generate the “effective” one. + +```terminal + Resolver Provider + │ │ + Watch │ │ +───────────►│ │ + │ │ + . . + . . + . . + │ onChange │ + │◄────────────────────┤ +◄───────────┤ │ + +``` + +The `Resolver` does that by passing an `onChange` func to each `Provider.Retrieve` call and capturing all watch events. + +Calling the `onChange` func from a provider triggers the collector to re-resolve new configuration: + +```terminal + Resolver Provider + │ │ + Watch │ │ +───────────►│ │ + │ │ + . . + . . + . . + │ onChange │ + │◄────────────────────┤ +◄───────────┤ │ + | | + Resolve │ │ +───────────►│ │ + │ │ + │ Retrieve │ + ├────────────────────►│ + │ Conf │ + │◄────────────────────┤ +◄───────────┤ │ +``` + +An example of a `Provider` with an `onChange` func that periodically gets notified can be found in provider_test.go as UpdatingProvider + +## Troubleshooting + +### Null Maps + +Due to how our underlying merge library, [koanf](https://github.com/knadh/koanf), behaves, configuration resolution +will treat configuration such as + +```yaml +processors: +``` + +as null, which is a valid value. 
As a result if you have configuration `A`: + +```yaml +receivers: + nop: + +processors: + nop: + +exporters: + nop: + +extensions: + nop: + +service: + extensions: [nop] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [nop] +``` + +and configuration `B`: + +```yaml +processors: +``` + +and do `./otelcorecol --config A.yaml --config B.yaml` + +The result will be an error: + +``` +Error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured +2024/06/10 14:37:14 collector server run finished with error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured +``` + +This happens because configuration `B` sets `processors` to null, removing the `nop` processor defined in configuration `A`, +so the `nop` processor referenced in configuration `A`'s pipeline no longer exists. + +This situation can be remedied 2 ways: +1. Use `{}` when you want to represent an empty map, such as `processors: {}` instead of `processors:`. +2. Omit configuration like `processors:` from your configuration. diff --git a/vendor/go.opentelemetry.io/collector/confmap/confmap.go b/vendor/go.opentelemetry.io/collector/confmap/confmap.go new file mode 100644 index 0000000000..ec6669b219 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/confmap.go @@ -0,0 +1,583 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package confmap // import "go.opentelemetry.io/collector/confmap" + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "slices" + "strings" + + "github.com/go-viper/mapstructure/v2" + "github.com/knadh/koanf/maps" + "github.com/knadh/koanf/providers/confmap" + "github.com/knadh/koanf/v2" + + encoder "go.opentelemetry.io/collector/confmap/internal/mapstructure" +) + +const ( + // KeyDelimiter is used as the default key delimiter in the default koanf instance. + KeyDelimiter = "::" +) + +const ( + // MapstructureTag is the struct field tag used to record marshaling/unmarshaling settings. + // See https://pkg.go.dev/github.com/go-viper/mapstructure/v2 for supported values. + MapstructureTag = "mapstructure" +) + +// New creates a new empty confmap.Conf instance. +func New() *Conf { + return &Conf{k: koanf.New(KeyDelimiter)} +} + +// NewFromStringMap creates a confmap.Conf from a map[string]any. +func NewFromStringMap(data map[string]any) *Conf { + p := New() + // Cannot return error because the koanf instance is empty. + _ = p.k.Load(confmap.Provider(data, KeyDelimiter), nil) + return p +} + +// Conf represents the raw configuration map for the OpenTelemetry Collector. +// The confmap.Conf can be unmarshalled into the Collector's config using the "service" package. +type Conf struct { + k *koanf.Koanf + // If true, upon unmarshaling do not call the Unmarshal function on the struct + // if it implements Unmarshaler and is the top-level struct. + // This avoids running into an infinite recursion where Unmarshaler.Unmarshal and + // Conf.Unmarshal would call each other. + skipTopLevelUnmarshaler bool +} + +// AllKeys returns all keys holding a value, regardless of where they are set. +// Nested keys are returned with a KeyDelimiter separator. 
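+//
+// Illustrative sketch (hypothetical keys):
+//
+//	conf := NewFromStringMap(map[string]any{
+//		"exporters": map[string]any{
+//			"otlp": map[string]any{"endpoint": "localhost:4317"},
+//		},
+//	})
+//	_ = conf.AllKeys() // []string{"exporters::otlp::endpoint"}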
+func (l *Conf) AllKeys() []string { + return l.k.Keys() +} + +type UnmarshalOption interface { + apply(*unmarshalOption) +} + +type unmarshalOption struct { + ignoreUnused bool +} + +// WithIgnoreUnused sets an option to ignore errors if existing +// keys in the original Conf were unused in the decoding process +// (extra keys). +func WithIgnoreUnused() UnmarshalOption { + return unmarshalOptionFunc(func(uo *unmarshalOption) { + uo.ignoreUnused = true + }) +} + +type unmarshalOptionFunc func(*unmarshalOption) + +func (fn unmarshalOptionFunc) apply(set *unmarshalOption) { + fn(set) +} + +// Unmarshal unmarshalls the config into a struct using the given options. +// Tags on the fields of the structure must be properly set. +func (l *Conf) Unmarshal(result any, opts ...UnmarshalOption) error { + set := unmarshalOption{} + for _, opt := range opts { + opt.apply(&set) + } + return decodeConfig(l, result, !set.ignoreUnused, l.skipTopLevelUnmarshaler) +} + +type marshalOption struct{} + +type MarshalOption interface { + apply(*marshalOption) +} + +// Marshal encodes the config and merges it into the Conf. +func (l *Conf) Marshal(rawVal any, _ ...MarshalOption) error { + enc := encoder.New(encoderConfig(rawVal)) + data, err := enc.Encode(rawVal) + if err != nil { + return err + } + out, ok := data.(map[string]any) + if !ok { + return errors.New("invalid config encoding") + } + return l.Merge(NewFromStringMap(out)) +} + +func (l *Conf) unsanitizedGet(key string) any { + return l.k.Get(key) +} + +// sanitize recursively removes expandedValue references from the given data. +// It uses the expandedValue.Value field to replace the expandedValue references. +func sanitize(a any) any { + return sanitizeExpanded(a, false) +} + +// sanitizeToStringMap recursively removes expandedValue references from the given data. +// It uses the expandedValue.Original field to replace the expandedValue references. +func sanitizeToStr(a any) any { + return sanitizeExpanded(a, true) +} + +func sanitizeExpanded(a any, useOriginal bool) any { + switch m := a.(type) { + case map[string]any: + c := maps.Copy(m) + for k, v := range m { + c[k] = sanitizeExpanded(v, useOriginal) + } + return c + case []any: + var newSlice []any + for _, e := range m { + newSlice = append(newSlice, sanitizeExpanded(e, useOriginal)) + } + return newSlice + case expandedValue: + if useOriginal { + return m.Original + } + return m.Value + } + return a +} + +// Get can retrieve any value given the key to use. +func (l *Conf) Get(key string) any { + val := l.unsanitizedGet(key) + return sanitizeExpanded(val, false) +} + +// IsSet checks to see if the key has been set in any of the data locations. +func (l *Conf) IsSet(key string) bool { + return l.k.Exists(key) +} + +// Merge merges the input given configuration into the existing config. +// Note that the given map may be modified. +func (l *Conf) Merge(in *Conf) error { + return l.k.Merge(in.k) +} + +// mergeAppend merges the input given configuration into the existing config. +// Note that the given map may be modified. +// Additionally, mergeAppend performs deduplication when merging lists. +// For example, if listA = [extension1, extension2] and listB = [extension1, extension3], +// the resulting list will be [extension1, extension2, extension3]. +func (l *Conf) mergeAppend(in *Conf) error { + return l.k.Load(confmap.Provider(in.ToStringMap(), ""), nil, koanf.WithMergeFunc(mergeAppend)) +} + +// Sub returns new Conf instance representing a sub-config of this instance. 
+// It returns an error is the sub-config is not a map[string]any (use Get()), and an empty Map if none exists. +func (l *Conf) Sub(key string) (*Conf, error) { + // Code inspired by the koanf "Cut" func, but returns an error instead of empty map for unsupported sub-config type. + data := l.unsanitizedGet(key) + if data == nil { + return New(), nil + } + + switch v := data.(type) { + case map[string]any: + return NewFromStringMap(v), nil + case expandedValue: + if m, ok := v.Value.(map[string]any); ok { + return NewFromStringMap(m), nil + } + } + + return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind()) +} + +func (l *Conf) toStringMapWithExpand() map[string]any { + m := maps.Unflatten(l.k.All(), KeyDelimiter) + return m +} + +// ToStringMap creates a map[string]any from a Parser. +func (l *Conf) ToStringMap() map[string]any { + return sanitize(l.toStringMapWithExpand()).(map[string]any) +} + +// decodeConfig decodes the contents of the Conf into the result argument, using a +// mapstructure decoder with the following notable behaviors. Ensures that maps whose +// values are nil pointer structs resolved to the zero value of the target struct (see +// expandNilStructPointers). Converts string to []string by splitting on ','. Ensures +// uniqueness of component IDs (see mapKeyStringToMapKeyTextUnmarshalerHookFunc). +// Decodes time.Duration from strings. Allows custom unmarshaling for structs implementing +// encoding.TextUnmarshaler. Allows custom unmarshaling for structs implementing confmap.Unmarshaler. +func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler bool) error { + dc := &mapstructure.DecoderConfig{ + ErrorUnused: errorUnused, + Result: result, + TagName: MapstructureTag, + WeaklyTypedInput: false, + MatchName: caseSensitiveMatchName, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + useExpandValue(), + expandNilStructPointersHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapKeyStringToMapKeyTextUnmarshalerHookFunc(), + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.TextUnmarshallerHookFunc(), + unmarshalerHookFunc(result, skipTopLevelUnmarshaler), + // after the main unmarshaler hook is called, + // we unmarshal the embedded structs if present to merge with the result: + unmarshalerEmbeddedStructsHookFunc(), + zeroSliceHookFunc(), + ), + } + decoder, err := mapstructure.NewDecoder(dc) + if err != nil { + return err + } + if err = decoder.Decode(m.toStringMapWithExpand()); err != nil { + if strings.HasPrefix(err.Error(), "error decoding ''") { + return errors.Unwrap(err) + } + return err + } + return nil +} + +// encoderConfig returns a default encoder.EncoderConfig that includes +// an EncodeHook that handles both TextMarshaller and Marshaler +// interfaces. +func encoderConfig(rawVal any) *encoder.EncoderConfig { + return &encoder.EncoderConfig{ + EncodeHook: mapstructure.ComposeDecodeHookFunc( + encoder.YamlMarshalerHookFunc(), + encoder.TextMarshalerHookFunc(), + marshalerHookFunc(rawVal), + ), + } +} + +// case-sensitive version of the callback to be used in the MatchName property +// of the DecoderConfig. The default for MatchEqual is to use strings.EqualFold, +// which is case-insensitive. +func caseSensitiveMatchName(a, b string) bool { + return a == b +} + +func castTo(exp expandedValue, useOriginal bool) any { + // If the target field is a string, use `exp.Original` or fail if not available. 
+ if useOriginal { + return exp.Original + } + // Otherwise, use the parsed value (previous behavior). + return exp.Value +} + +// Check if a reflect.Type is of the form T, where: +// X is any type or interface +// T = string | map[X]T | []T | [n]T +func isStringyStructure(t reflect.Type) bool { + if t.Kind() == reflect.String { + return true + } + if t.Kind() == reflect.Map { + return isStringyStructure(t.Elem()) + } + if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { + return isStringyStructure(t.Elem()) + } + return false +} + +// When a value has been loaded from an external source via a provider, we keep both the +// parsed value and the original string value. This allows us to expand the value to its +// original string representation when decoding into a string field, and use the original otherwise. +func useExpandValue() mapstructure.DecodeHookFuncType { + return func( + _ reflect.Type, + to reflect.Type, + data any, + ) (any, error) { + if exp, ok := data.(expandedValue); ok { + v := castTo(exp, to.Kind() == reflect.String) + // See https://github.com/open-telemetry/opentelemetry-collector/issues/10949 + // If the `to.Kind` is not a string, then expandValue's original value is useless and + // the casted-to value will be nil. In that scenario, we need to use the default value of `to`'s kind. + if v == nil { + return reflect.Zero(to).Interface(), nil + } + return v, nil + } + + switch to.Kind() { + case reflect.Array, reflect.Slice, reflect.Map: + if isStringyStructure(to) { + // If the target field is a stringy structure, sanitize to use the original string value everywhere. + return sanitizeToStr(data), nil + } + // Otherwise, sanitize to use the parsed value everywhere. + return sanitize(data), nil + } + return data, nil + } +} + +// In cases where a config has a mapping of something to a struct pointers +// we want nil values to resolve to a pointer to the zero value of the +// underlying struct just as we want nil values of a mapping of something +// to a struct to resolve to the zero value of that struct. +// +// e.g. given a config type: +// type Config struct { Thing *SomeStruct `mapstructure:"thing"` } +// +// and yaml of: +// config: +// +// thing: +// +// we want an unmarshaled Config to be equivalent to +// Config{Thing: &SomeStruct{}} instead of Config{Thing: nil} +func expandNilStructPointersHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, to reflect.Value) (any, error) { + // ensure we are dealing with map to map comparison + if from.Kind() == reflect.Map && to.Kind() == reflect.Map { + toElem := to.Type().Elem() + // ensure that map values are pointers to a struct + // (that may be nil and require manual setting w/ zero value) + if toElem.Kind() == reflect.Ptr && toElem.Elem().Kind() == reflect.Struct { + fromRange := from.MapRange() + for fromRange.Next() { + fromKey := fromRange.Key() + fromValue := fromRange.Value() + // ensure that we've run into a nil pointer instance + if fromValue.IsNil() { + newFromValue := reflect.New(toElem.Elem()) + from.SetMapIndex(fromKey, newFromValue) + } + } + } + } + return from.Interface(), nil + } +} + +// mapKeyStringToMapKeyTextUnmarshalerHookFunc returns a DecodeHookFuncType that checks that a conversion from +// map[string]any to map[encoding.TextUnmarshaler]any does not overwrite keys, +// when UnmarshalText produces equal elements from different strings (e.g. trims whitespaces). 
+// +// This is needed in combination with ComponentID, which may produce equal IDs for different strings, +// and an error needs to be returned in that case, otherwise the last equivalent ID overwrites the previous one. +func mapKeyStringToMapKeyTextUnmarshalerHookFunc() mapstructure.DecodeHookFuncType { + return func(from reflect.Type, to reflect.Type, data any) (any, error) { + if from.Kind() != reflect.Map || from.Key().Kind() != reflect.String { + return data, nil + } + + if to.Kind() != reflect.Map { + return data, nil + } + + // Checks that the key type of to implements the TextUnmarshaler interface. + if _, ok := reflect.New(to.Key()).Interface().(encoding.TextUnmarshaler); !ok { + return data, nil + } + + // Create a map with key value of to's key to bool. + fieldNameSet := reflect.MakeMap(reflect.MapOf(to.Key(), reflect.TypeOf(true))) + for k := range data.(map[string]any) { + // Create a new value of the to's key type. + tKey := reflect.New(to.Key()) + + // Use tKey to unmarshal the key of the map. + if err := tKey.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(k)); err != nil { + return nil, err + } + // Checks if the key has already been decoded in a previous iteration. + if fieldNameSet.MapIndex(reflect.Indirect(tKey)).IsValid() { + return nil, fmt.Errorf("duplicate name %q after unmarshaling %v", k, tKey) + } + fieldNameSet.SetMapIndex(reflect.Indirect(tKey), reflect.ValueOf(true)) + } + return data, nil + } +} + +// unmarshalerEmbeddedStructsHookFunc provides a mechanism for embedded structs to define their own unmarshal logic, +// by implementing the Unmarshaler interface. +func unmarshalerEmbeddedStructsHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, to reflect.Value) (any, error) { + if to.Type().Kind() != reflect.Struct { + return from.Interface(), nil + } + fromAsMap, ok := from.Interface().(map[string]any) + if !ok { + return from.Interface(), nil + } + for i := 0; i < to.Type().NumField(); i++ { + // embedded structs passed in via `squash` cannot be pointers. We just check if they are structs: + f := to.Type().Field(i) + if f.IsExported() && slices.Contains(strings.Split(f.Tag.Get(MapstructureTag), ","), "squash") { + if unmarshaler, ok := to.Field(i).Addr().Interface().(Unmarshaler); ok { + c := NewFromStringMap(fromAsMap) + c.skipTopLevelUnmarshaler = true + if err := unmarshaler.Unmarshal(c); err != nil { + return nil, err + } + // the struct we receive from this unmarshaling only contains fields related to the embedded struct. + // we merge this partially unmarshaled struct with the rest of the result. + // note we already unmarshaled the main struct earlier, and therefore merge with it. + conf := New() + if err := conf.Marshal(unmarshaler); err != nil { + return nil, err + } + resultMap := conf.ToStringMap() + for k, v := range resultMap { + fromAsMap[k] = v + } + } + } + } + return fromAsMap, nil + } +} + +// Provides a mechanism for individual structs to define their own unmarshal logic, +// by implementing the Unmarshaler interface, unless skipTopLevelUnmarshaler is +// true and the struct matches the top level object being unmarshaled. 
+func unmarshalerHookFunc(result any, skipTopLevelUnmarshaler bool) mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, to reflect.Value) (any, error) { + if !to.CanAddr() { + return from.Interface(), nil + } + + toPtr := to.Addr().Interface() + // Need to ignore the top structure to avoid running into an infinite recursion + // where Unmarshaler.Unmarshal and Conf.Unmarshal would call each other. + if toPtr == result && skipTopLevelUnmarshaler { + return from.Interface(), nil + } + + unmarshaler, ok := toPtr.(Unmarshaler) + if !ok { + return from.Interface(), nil + } + + if _, ok = from.Interface().(map[string]any); !ok { + return from.Interface(), nil + } + + // Use the current object if not nil (to preserve other configs in the object), otherwise zero initialize. + if to.Addr().IsNil() { + unmarshaler = reflect.New(to.Type()).Interface().(Unmarshaler) + } + + c := NewFromStringMap(from.Interface().(map[string]any)) + c.skipTopLevelUnmarshaler = true + if err := unmarshaler.Unmarshal(c); err != nil { + return nil, err + } + + return unmarshaler, nil + } +} + +// marshalerHookFunc returns a DecodeHookFuncValue that checks structs that aren't +// the original to see if they implement the Marshaler interface. +func marshalerHookFunc(orig any) mapstructure.DecodeHookFuncValue { + origType := reflect.TypeOf(orig) + return func(from reflect.Value, _ reflect.Value) (any, error) { + if from.Kind() != reflect.Struct { + return from.Interface(), nil + } + + // ignore original to avoid infinite loop. + if from.Type() == origType && reflect.DeepEqual(from.Interface(), orig) { + return from.Interface(), nil + } + marshaler, ok := from.Interface().(Marshaler) + if !ok { + return from.Interface(), nil + } + conf := New() + if err := marshaler.Marshal(conf); err != nil { + return nil, err + } + return conf.ToStringMap(), nil + } +} + +// Unmarshaler interface may be implemented by types to customize their behavior when being unmarshaled from a Conf. +type Unmarshaler interface { + // Unmarshal a Conf into the struct in a custom way. + // The Conf for this specific component may be nil or empty if no config available. + // This method should only be called by decoding hooks when calling Conf.Unmarshal. + Unmarshal(component *Conf) error +} + +// Marshaler defines an optional interface for custom configuration marshaling. +// A configuration struct can implement this interface to override the default +// marshaling. +type Marshaler interface { + // Marshal the config into a Conf in a custom way. + // The Conf will be empty and can be merged into. + Marshal(component *Conf) error +} + +// This hook is used to solve the issue: https://github.com/open-telemetry/opentelemetry-collector/issues/4001 +// We adopt the suggestion provided in this issue: https://github.com/mitchellh/mapstructure/issues/74#issuecomment-279886492 +// We should empty every slice before unmarshalling unless user provided slice is nil. +// Assume that we had a struct with a field of type slice called `keys`, which has default values of ["a", "b"] +// +// type Config struct { +// Keys []string `mapstructure:"keys"` +// } +// +// The configuration provided by users may have following cases +// 1. configuration have `keys` field and have a non-nil values for this key, the output should be overridden +// - for example, input is {"keys", ["c"]}, then output is Config{ Keys: ["c"]} +// +// 2. 
configuration have `keys` field and have an empty slice for this key, the output should be overridden by empty slices +// - for example, input is {"keys", []}, then output is Config{ Keys: []} +// +// 3. configuration have `keys` field and have nil value for this key, the output should be default config +// - for example, input is {"keys": nil}, then output is Config{ Keys: ["a", "b"]} +// +// 4. configuration have no `keys` field specified, the output should be default config +// - for example, input is {}, then output is Config{ Keys: ["a", "b"]} +func zeroSliceHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, to reflect.Value) (any, error) { + if to.CanSet() && to.Kind() == reflect.Slice && from.Kind() == reflect.Slice { + to.Set(reflect.MakeSlice(to.Type(), from.Len(), from.Cap())) + } + + return from.Interface(), nil + } +} + +type moduleFactory[T any, S any] interface { + Create(s S) T +} + +type createConfmapFunc[T any, S any] func(s S) T + +type confmapModuleFactory[T any, S any] struct { + f createConfmapFunc[T, S] +} + +func (c confmapModuleFactory[T, S]) Create(s S) T { + return c.f(s) +} + +func newConfmapModuleFactory[T any, S any](f createConfmapFunc[T, S]) moduleFactory[T, S] { + return confmapModuleFactory[T, S]{ + f: f, + } +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter.go b/vendor/go.opentelemetry.io/collector/confmap/converter.go new file mode 100644 index 0000000000..0e3a5c03f2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/converter.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package confmap // import "go.opentelemetry.io/collector/confmap" + +import ( + "context" + + "go.uber.org/zap" +) + +// ConverterSettings are the settings to initialize a Converter. +type ConverterSettings struct { + // Logger is a zap.Logger that will be passed to Converters. + // Converters should be able to rely on the Logger being non-nil; + // when instantiating a Converter with a ConverterFactory, + // nil Logger references should be replaced with a no-op Logger. + Logger *zap.Logger +} + +// ConverterFactory defines a factory that can be used to instantiate +// new instances of a Converter. +type ConverterFactory = moduleFactory[Converter, ConverterSettings] + +// CreateConverterFunc is a function that creates a Converter instance. +type CreateConverterFunc = createConfmapFunc[Converter, ConverterSettings] + +// NewConverterFactory can be used to create a ConverterFactory. +func NewConverterFactory(f CreateConverterFunc) ConverterFactory { + return newConfmapModuleFactory(f) +} + +// Converter is a converter interface for the confmap.Conf that allows distributions +// (in the future components as well) to build backwards compatible config converters. +type Converter interface { + // Convert applies the conversion logic to the given "conf". + Convert(ctx context.Context, conf *Conf) error +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/expand.go b/vendor/go.opentelemetry.io/collector/confmap/expand.go new file mode 100644 index 0000000000..42f3b6296d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/expand.go @@ -0,0 +1,237 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package confmap // import "go.opentelemetry.io/collector/confmap" + +import ( + "context" + "errors" + "fmt" + "regexp" + "strings" +) + +// schemePattern defines the regexp pattern for scheme names. 
+// Scheme names consist of a sequence of characters beginning with a letter and followed by any
+// combination of letters, digits, plus ("+"), period ("."), or hyphen ("-").
+const schemePattern = `[A-Za-z][A-Za-z0-9+.-]+`
+
+var (
+	// Need to match new line as well in the OpaqueValue, so setting the "s" flag. See https://pkg.go.dev/regexp/syntax.
+	uriRegexp = regexp.MustCompile(`(?s:^(?P<Scheme>` + schemePattern + `):(?P<OpaqueValue>.*)$)`)
+
+	errTooManyRecursiveExpansions = errors.New("too many recursive expansions")
+)
+
+func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, error) {
+	for i := 0; i < 1000; i++ {
+		val, changed, err := mr.expandValue(ctx, value)
+		if err != nil {
+			return nil, err
+		}
+		if !changed {
+			return val, nil
+		}
+		value = val
+	}
+	return nil, errTooManyRecursiveExpansions
+}
+
+func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, error) {
+	switch v := value.(type) {
+	case expandedValue:
+		expanded, changed, err := mr.expandValue(ctx, v.Value)
+		if err != nil {
+			return nil, false, err
+		}
+
+		switch exp := expanded.(type) {
+		case expandedValue, string:
+			// Return expanded values or strings verbatim.
+			return exp, changed, nil
+		}
+
+		// At this point we don't know the target field type, so we need to expand the original representation as well.
+		originalExpanded, originalChanged, err := mr.expandValue(ctx, v.Original)
+		if err != nil {
+			// The original representation is not valid, return the expanded value.
+			return expanded, changed, nil
+		}
+
+		if originalExpanded, ok := originalExpanded.(string); ok {
+			// If the original representation is a string, return the expanded value with the original representation.
+			return expandedValue{
+				Value:    expanded,
+				Original: originalExpanded,
+			}, changed || originalChanged, nil
+		}
+
+		return expanded, changed, nil
+	case string:
+		if !strings.Contains(v, "${") || !strings.Contains(v, "}") {
+			// No URIs to expand.
+			return value, false, nil
+		}
+		// Embedded or nested URIs.
+		return mr.findAndExpandURI(ctx, v)
+	case []any:
+		nslice := make([]any, 0, len(v))
+		nchanged := false
+		for _, vint := range v {
+			val, changed, err := mr.expandValue(ctx, vint)
+			if err != nil {
+				return nil, false, err
+			}
+			nslice = append(nslice, val)
+			nchanged = nchanged || changed
+		}
+		return nslice, nchanged, nil
+	case map[string]any:
+		nmap := map[string]any{}
+		nchanged := false
+		for mk, mv := range v {
+			val, changed, err := mr.expandValue(ctx, mv)
+			if err != nil {
+				return nil, false, err
+			}
+			nmap[mk] = val
+			nchanged = nchanged || changed
+		}
+		return nmap, nchanged, nil
+	}
+	return value, false, nil
+}
+
+// findURI attempts to find the first potentially expandable URI in input. It returns a potentially expandable
+// URI, or an empty string if none are found.
+// Note: findURI is only called when input contains a closing bracket.
+// We do not support escaping nested URIs (such as ${env:$${FOO}}), since that would result in an invalid outer URI (${env:${FOO}}).
+func (mr *Resolver) findURI(input string) string {
+	closeIndex := strings.Index(input, "}")
+	remaining := input[closeIndex+1:]
+	openIndex := strings.LastIndex(input[:closeIndex+1], "${")
+
+	// if there is any of:
+	// - a missing "${"
+	// - there is no default scheme AND no scheme is detected because no `:` is found.
+	// then check the next URI.
+ if openIndex < 0 || (mr.defaultScheme == "" && !strings.Contains(input[openIndex:closeIndex+1], ":")) { + // if remaining does not contain "}", there are no URIs left: stop recursion. + if !strings.Contains(remaining, "}") { + return "" + } + return mr.findURI(remaining) + } + + index := openIndex - 1 + currentRune := '$' + count := 0 + for index >= 0 && currentRune == '$' { + currentRune = rune(input[index]) + if currentRune == '$' { + count++ + } + index-- + } + // if we found an odd number of immediately $ preceding ${, then the expansion is escaped + if count%2 == 1 { + return "" + } + + return input[openIndex : closeIndex+1] +} + +// expandedValue holds the YAML parsed value and original representation of a value. +// It keeps track of the original representation to be used by the 'useExpandValue' hook +// if the target field is a string. We need to keep both representations because we don't know +// what the target field type is until `Unmarshal` is called. +type expandedValue struct { + // Value is the expanded value. + Value any + // Original is the original representation of the value. + Original string +} + +// findAndExpandURI attempts to find and expand the first occurrence of an expandable URI in input. If an expandable URI is found it +// returns the input with the URI expanded, true and nil. Otherwise, it returns the unchanged input, false and the expanding error. +// This method expects input to start with ${ and end with } +func (mr *Resolver) findAndExpandURI(ctx context.Context, input string) (any, bool, error) { + uri := mr.findURI(input) + if uri == "" { + // No URI found, return. + return input, false, nil + } + if uri == input { + // If the value is a single URI, then the return value can be anything. + // This is the case `foo: ${file:some_extra_config.yml}`. 
+ ret, err := mr.expandURI(ctx, input) + if err != nil { + return input, false, err + } + + val, err := ret.AsRaw() + if err != nil { + return input, false, err + } + + if asStr, err2 := ret.AsString(); err2 == nil { + return expandedValue{ + Value: val, + Original: asStr, + }, true, nil + } + + return val, true, nil + } + expanded, err := mr.expandURI(ctx, uri) + if err != nil { + return input, false, err + } + + repl, err := expanded.AsString() + if err != nil { + return input, false, fmt.Errorf("expanding %v: %w", uri, err) + } + return strings.ReplaceAll(input, uri, repl), true, err +} + +func (mr *Resolver) expandURI(ctx context.Context, input string) (*Retrieved, error) { + // strip ${ and } + uri := input[2 : len(input)-1] + + if !strings.Contains(uri, ":") { + uri = fmt.Sprintf("%s:%s", mr.defaultScheme, uri) + } + + lURI, err := newLocation(uri) + if err != nil { + return nil, err + } + + if strings.Contains(lURI.opaqueValue, "$") { + return nil, fmt.Errorf("the uri %q contains unsupported characters ('$')", lURI.asString()) + } + ret, err := mr.retrieveValue(ctx, lURI) + if err != nil { + return nil, err + } + mr.closers = append(mr.closers, ret.Close) + return ret, nil +} + +type location struct { + scheme string + opaqueValue string +} + +func (c location) asString() string { + return c.scheme + ":" + c.opaqueValue +} + +func newLocation(uri string) (location, error) { + submatches := uriRegexp.FindStringSubmatch(uri) + if len(submatches) != 3 { + return location{}, fmt.Errorf("invalid uri: %q", uri) + } + return location{scheme: submatches[1], opaqueValue: submatches[2]}, nil +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go new file mode 100644 index 0000000000..ffc0bdc298 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go @@ -0,0 +1,261 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package mapstructure // import "go.opentelemetry.io/collector/confmap/internal/mapstructure" + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/go-viper/mapstructure/v2" + "gopkg.in/yaml.v3" +) + +const ( + tagNameMapStructure = "mapstructure" + optionSeparator = "," + optionOmitEmpty = "omitempty" + optionSquash = "squash" + optionRemain = "remain" + optionSkip = "-" +) + +var errNonStringEncodedKey = errors.New("non string-encoded key") + +// tagInfo stores the mapstructure tag details. +type tagInfo struct { + name string + omitEmpty bool + squash bool +} + +// An Encoder takes structured data and converts it into an +// interface following the mapstructure tags. +type Encoder struct { + config *EncoderConfig +} + +// EncoderConfig is the configuration used to create a new encoder. +type EncoderConfig struct { + // EncodeHook, if set, is a way to provide custom encoding. It + // will be called before structs and primitive types. + EncodeHook mapstructure.DecodeHookFunc +} + +// New returns a new encoder for the configuration. +func New(cfg *EncoderConfig) *Encoder { + return &Encoder{config: cfg} +} + +// Encode takes the input and uses reflection to encode it to +// an interface based on the mapstructure spec. +func (e *Encoder) Encode(input any) (any, error) { + return e.encode(reflect.ValueOf(input)) +} + +// encode processes the value based on the reflect.Kind. 
+func (e *Encoder) encode(value reflect.Value) (any, error) { + if value.IsValid() { + switch value.Kind() { + case reflect.Interface, reflect.Ptr: + return e.encode(value.Elem()) + case reflect.Map: + return e.encodeMap(value) + case reflect.Slice: + return e.encodeSlice(value) + case reflect.Struct: + return e.encodeStruct(value) + default: + return e.encodeHook(value) + } + } + return nil, nil +} + +// encodeHook calls the EncodeHook in the EncoderConfig with the value passed in. +// This is called before processing structs and for primitive data types. +func (e *Encoder) encodeHook(value reflect.Value) (any, error) { + if e.config != nil && e.config.EncodeHook != nil { + out, err := mapstructure.DecodeHookExec(e.config.EncodeHook, value, value) + if err != nil { + return nil, fmt.Errorf("error running encode hook: %w", err) + } + return out, nil + } + return value.Interface(), nil +} + +// encodeStruct encodes the struct by iterating over the fields, getting the +// mapstructure tagInfo for each exported field, and encoding the value. +func (e *Encoder) encodeStruct(value reflect.Value) (any, error) { + if value.Kind() != reflect.Struct { + return nil, &reflect.ValueError{ + Method: "encodeStruct", + Kind: value.Kind(), + } + } + out, err := e.encodeHook(value) + if err != nil { + return nil, err + } + value = reflect.ValueOf(out) + // if the output of encodeHook is no longer a struct, + // call encode against it. + if value.Kind() != reflect.Struct { + return e.encode(value) + } + result := make(map[string]any) + for i := 0; i < value.NumField(); i++ { + field := value.Field(i) + if field.CanInterface() { + info := getTagInfo(value.Type().Field(i)) + if (info.omitEmpty && field.IsZero()) || info.name == optionSkip { + continue + } + encoded, err := e.encode(field) + if err != nil { + return nil, fmt.Errorf("error encoding field %q: %w", info.name, err) + } + if info.squash { + if m, ok := encoded.(map[string]any); ok { + for k, v := range m { + result[k] = v + } + } + } else { + result[info.name] = encoded + } + } + } + return result, nil +} + +// encodeSlice iterates over the slice and encodes each of the elements. +func (e *Encoder) encodeSlice(value reflect.Value) (any, error) { + if value.Kind() != reflect.Slice { + return nil, &reflect.ValueError{ + Method: "encodeSlice", + Kind: value.Kind(), + } + } + result := make([]any, value.Len()) + for i := 0; i < value.Len(); i++ { + var err error + if result[i], err = e.encode(value.Index(i)); err != nil { + return nil, fmt.Errorf("error encoding element in slice at index %d: %w", i, err) + } + } + return result, nil +} + +// encodeMap encodes a map by encoding the key and value. Returns errNonStringEncodedKey +// if the key is not encoded into a string. 
+func (e *Encoder) encodeMap(value reflect.Value) (any, error) { + if value.Kind() != reflect.Map { + return nil, &reflect.ValueError{ + Method: "encodeMap", + Kind: value.Kind(), + } + } + result := make(map[string]any) + iterator := value.MapRange() + for iterator.Next() { + encoded, err := e.encode(iterator.Key()) + if err != nil { + return nil, fmt.Errorf("error encoding key: %w", err) + } + + v := reflect.ValueOf(encoded) + var key string + + switch v.Kind() { + case reflect.String: + key = v.String() + default: + return nil, fmt.Errorf("%w, key: %q, kind: %v, type: %T", errNonStringEncodedKey, iterator.Key().Interface(), iterator.Key().Kind(), encoded) + } + + if _, ok := result[key]; ok { + return nil, fmt.Errorf("duplicate key %q while encoding", key) + } + if result[key], err = e.encode(iterator.Value()); err != nil { + return nil, fmt.Errorf("error encoding map value for key %q: %w", key, err) + } + } + return result, nil +} + +// getTagInfo looks up the mapstructure tag and uses that if available. +// Uses the lowercase field if not found. Checks for omitempty and squash. +func getTagInfo(field reflect.StructField) *tagInfo { + info := tagInfo{} + if tag, ok := field.Tag.Lookup(tagNameMapStructure); ok { + options := strings.Split(tag, optionSeparator) + info.name = options[0] + if len(options) > 1 { + for _, option := range options[1:] { + switch option { + case optionOmitEmpty: + info.omitEmpty = true + case optionSquash, optionRemain: + info.squash = true + } + } + } + } else { + info.name = strings.ToLower(field.Name) + } + return &info +} + +// TextMarshalerHookFunc returns a DecodeHookFuncValue that checks +// for the encoding.TextMarshaler interface and calls the MarshalText +// function if found. +func TextMarshalerHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + marshaler, ok := from.Interface().(encoding.TextMarshaler) + if !ok { + return from.Interface(), nil + } + out, err := marshaler.MarshalText() + if err != nil { + return nil, err + } + return string(out), nil + } +} + +// YamlMarshalerHookFunc returns a DecodeHookFuncValue that checks for structs +// that have yaml tags but no mapstructure tags. If found, it will convert the struct +// to map[string]any using the yaml package, which respects the yaml tags. Ultimately, +// this allows mapstructure to later marshal the map[string]any in a generic way. +func YamlMarshalerHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if from.Kind() == reflect.Struct { + for i := 0; i < from.NumField(); i++ { + if _, ok := from.Type().Field(i).Tag.Lookup("mapstructure"); ok { + // The struct has at least one mapstructure tag so don't do anything. + return from.Interface(), nil + } + + if _, ok := from.Type().Field(i).Tag.Lookup("yaml"); ok { + // The struct has at least one yaml tag, so convert it to map[string]any using yaml. 
+ yamlBytes, err := yaml.Marshal(from.Interface()) + if err != nil { + return nil, err + } + var m map[string]any + err = yaml.Unmarshal(yamlBytes, &m) + if err != nil { + return nil, err + } + return m, nil + } + } + } + return from.Interface(), nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/merge.go b/vendor/go.opentelemetry.io/collector/confmap/merge.go new file mode 100644 index 0000000000..3fcfb74e0d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/merge.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package confmap // import "go.opentelemetry.io/collector/confmap" + +import ( + "reflect" +) + +func mergeAppend(src, dest map[string]any) error { + // mergeAppend recursively merges the src map into the dest map (left to right), + // modifying and expanding the dest map in the process. + // This function does not overwrite lists, and ensures that the final value is a name-aware + // copy of lists from src and dest. + + for sKey, sVal := range src { + dVal, dOk := dest[sKey] + if !dOk { + // key is not present in destination config. Hence, add it to destination map + dest[sKey] = sVal + continue + } + + srcVal := reflect.ValueOf(sVal) + destVal := reflect.ValueOf(dVal) + + if destVal.Kind() != srcVal.Kind() { + // different kinds. Override the destination map + dest[sKey] = sVal + continue + } + + switch srcVal.Kind() { + case reflect.Array, reflect.Slice: + // both of them are array. Merge them + dest[sKey] = mergeSlice(srcVal, destVal) + case reflect.Map: + // both of them are maps. Recursively call the mergeAppend + _ = mergeAppend(sVal.(map[string]any), dVal.(map[string]any)) + default: + // any other datatype. Override the destination map + dest[sKey] = sVal + } + } + + return nil +} + +func mergeSlice(src, dest reflect.Value) any { + slice := reflect.MakeSlice(src.Type(), 0, src.Cap()+dest.Cap()) + for i := 0; i < dest.Len(); i++ { + slice = reflect.Append(slice, dest.Index(i)) + } + + for i := 0; i < src.Len(); i++ { + if isPresent(slice, src.Index(i)) { + continue + } + slice = reflect.Append(slice, src.Index(i)) + } + return slice.Interface() +} + +func isPresent(slice reflect.Value, val reflect.Value) bool { + for i := 0; i < slice.Len(); i++ { + if slice.Index(i).Equal(val) { + return true + } + } + return false +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/metadata.yaml b/vendor/go.opentelemetry.io/collector/confmap/metadata.yaml new file mode 100644 index 0000000000..d250015098 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/metadata.yaml @@ -0,0 +1,11 @@ +type: confmap +github_project: open-telemetry/opentelemetry-collector + +status: + codeowners: + active: + - mx-psi + - evan-bradley + class: pkg + stability: + stable: [logs, metrics, traces] diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider.go new file mode 100644 index 0000000000..9718b69201 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/provider.go @@ -0,0 +1,261 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package confmap // import "go.opentelemetry.io/collector/confmap" + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + "gopkg.in/yaml.v3" +) + +// ProviderSettings are the settings to initialize a Provider. +type ProviderSettings struct { + // Logger is a zap.Logger that will be passed to Providers. 
+ // Providers should be able to rely on the Logger being non-nil; + // when instantiating a Provider with a ProviderFactory, + // nil Logger references should be replaced with a no-op Logger. + Logger *zap.Logger +} + +// ProviderFactory defines a factory that can be used to instantiate +// new instances of a Provider. +type ProviderFactory = moduleFactory[Provider, ProviderSettings] + +// CreateProviderFunc is a function that creates a Provider instance. +type CreateProviderFunc = createConfmapFunc[Provider, ProviderSettings] + +// NewProviderFactory can be used to create a ProviderFactory. +func NewProviderFactory(f CreateProviderFunc) ProviderFactory { + return newConfmapModuleFactory(f) +} + +// Provider is an interface that helps to retrieve a config map and watch for any +// changes to the config map. Implementations may load the config from a file, +// a database or any other source. +// +// The typical usage is the following: +// +// r, err := provider.Retrieve("file:/path/to/config") +// // Use r.Map; wait for watcher to be called. +// r.Close() +// r, err = provider.Retrieve("file:/path/to/config") +// // Use r.Map; wait for watcher to be called. +// r.Close() +// // repeat retrieve/wait/close cycle until it is time to shut down the Collector process. +// // ... +// provider.Shutdown() +type Provider interface { + // Retrieve goes to the configuration source and retrieves the selected data which + // contains the value to be injected in the configuration and the corresponding watcher that + // will be used to monitor for updates of the retrieved value. + // + // `uri` must follow the ":" format. This format is compatible + // with the URI definition (see https://datatracker.ietf.org/doc/html/rfc3986). The "" + // must be always included in the `uri`. The "" supported by any provider: + // - MUST consist of a sequence of characters beginning with a letter and followed by any + // combination of letters, digits, plus ("+"), period ("."), or hyphen ("-"). + // See https://datatracker.ietf.org/doc/html/rfc3986#section-3.1. + // - MUST be at least 2 characters long to avoid conflicting with a driver-letter identifier as specified + // in https://tools.ietf.org/id/draft-kerwin-file-scheme-07.html#syntax. + // - For testing, all implementation MUST check that confmaptest.ValidateProviderScheme returns no error. + // + // `watcher` callback is called when the config changes. watcher may be called from + // a different go routine. After watcher is called, Provider.Retrieve should be called + // to get the new config. See description of Retrieved for more details. + // watcher may be nil, which indicates that the caller is not interested in + // knowing about the changes. + // + // If ctx is cancelled should return immediately with an error. + // Should never be called concurrently with itself or with Shutdown. + Retrieve(ctx context.Context, uri string, watcher WatcherFunc) (*Retrieved, error) + + // Scheme returns the location scheme used by Retrieve. + Scheme() string + + // Shutdown signals that the configuration for which this Provider was used to + // retrieve values is no longer in use and the Provider should close and release + // any resources that it may have created. + // + // This method must be called when the Collector service ends, either in case of + // success or error. Retrieve cannot be called after Shutdown. + // + // Should never be called concurrently with itself or with Retrieve. + // If ctx is cancelled should return immediately with an error. 
+ Shutdown(ctx context.Context) error +} + +type WatcherFunc func(*ChangeEvent) + +// ChangeEvent describes the particular change event that happened with the config. +type ChangeEvent struct { + // Error is nil if the config is changed and needs to be re-fetched. + // Any non-nil error indicates that there was a problem with watching the config changes. + Error error +} + +// Retrieved holds the result of a call to the Retrieve method of a Provider object. +type Retrieved struct { + rawConf any + errorHint error + closeFunc CloseFunc + + stringRepresentation string + isSetString bool +} + +type retrievedSettings struct { + errorHint error + stringRepresentation string + isSetString bool + closeFunc CloseFunc +} + +// RetrievedOption options to customize Retrieved values. +type RetrievedOption interface { + apply(*retrievedSettings) +} + +type retrievedOptionFunc func(*retrievedSettings) + +func (of retrievedOptionFunc) apply(e *retrievedSettings) { + of(e) +} + +// WithRetrievedClose overrides the default Retrieved.Close function. +// The default Retrieved.Close function does nothing and always returns nil. +func WithRetrievedClose(closeFunc CloseFunc) RetrievedOption { + return retrievedOptionFunc(func(settings *retrievedSettings) { + settings.closeFunc = closeFunc + }) +} + +func withStringRepresentation(stringRepresentation string) RetrievedOption { + return retrievedOptionFunc(func(settings *retrievedSettings) { + settings.stringRepresentation = stringRepresentation + settings.isSetString = true + }) +} + +func withErrorHint(errorHint error) RetrievedOption { + return retrievedOptionFunc(func(settings *retrievedSettings) { + settings.errorHint = errorHint + }) +} + +// NewRetrievedFromYAML returns a new Retrieved instance that contains the deserialized data from the yaml bytes. +// * yamlBytes the yaml bytes that will be deserialized. +// * opts specifies options associated with this Retrieved value, such as CloseFunc. +func NewRetrievedFromYAML(yamlBytes []byte, opts ...RetrievedOption) (*Retrieved, error) { + var rawConf any + if err := yaml.Unmarshal(yamlBytes, &rawConf); err != nil { + // If the string is not valid YAML, we try to use it verbatim as a string. + strRep := string(yamlBytes) + return NewRetrieved(strRep, append(opts, + withStringRepresentation(strRep), + withErrorHint(fmt.Errorf("assuming string type since contents are not valid YAML: %w", err)), + )...) + } + + switch rawConf.(type) { + case string: + val := string(yamlBytes) + return NewRetrieved(val, append(opts, withStringRepresentation(val))...) + default: + opts = append(opts, withStringRepresentation(string(yamlBytes))) + } + + return NewRetrieved(rawConf, opts...) +} + +// NewRetrieved returns a new Retrieved instance that contains the data from the raw deserialized config. +// The rawConf can be one of the following types: +// - Primitives: int, int32, int64, float32, float64, bool, string; +// - []any; +// - map[string]any; +func NewRetrieved(rawConf any, opts ...RetrievedOption) (*Retrieved, error) { + if err := checkRawConfType(rawConf); err != nil { + return nil, err + } + set := retrievedSettings{} + for _, opt := range opts { + opt.apply(&set) + } + return &Retrieved{ + rawConf: rawConf, + errorHint: set.errorHint, + closeFunc: set.closeFunc, + stringRepresentation: set.stringRepresentation, + isSetString: set.isSetString, + }, nil +} + +// AsConf returns the retrieved configuration parsed as a Conf. 
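A short usage sketch of the constructors above (error handling elided; the YAML inputs are arbitrary examples, not taken from this change):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/confmap"
    )

    func main() {
        // Scalar YAML: the parsed value and the string representation are both kept.
        num, _ := confmap.NewRetrievedFromYAML([]byte("8080"))
        raw, _ := num.AsRaw()    // int 8080
        str, _ := num.AsString() // "8080"
        fmt.Println(raw, str)

        // Mapping YAML: usable as a Conf.
        m, _ := confmap.NewRetrievedFromYAML([]byte("endpoint: localhost:4317"))
        conf, _ := m.AsConf()
        fmt.Println(conf.Get("endpoint")) // localhost:4317
    }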
+func (r *Retrieved) AsConf() (*Conf, error) { + if r.rawConf == nil { + return New(), nil + } + val, ok := r.rawConf.(map[string]any) + if !ok { + if r.errorHint != nil { + return nil, fmt.Errorf("retrieved value (type=%T) cannot be used as a Conf: %w", r.rawConf, r.errorHint) + } + return nil, fmt.Errorf("retrieved value (type=%T) cannot be used as a Conf", r.rawConf) + } + return NewFromStringMap(val), nil +} + +// AsRaw returns the retrieved configuration parsed as an any which can be one of the following types: +// - Primitives: int, int32, int64, float32, float64, bool, string; +// - []any - every member follows the same rules as the given any; +// - map[string]any - every value follows the same rules as the given any; +func (r *Retrieved) AsRaw() (any, error) { + return r.rawConf, nil +} + +// AsString returns the retrieved configuration as a string. +// If the retrieved configuration is not convertible to a string unambiguously, an error is returned. +// If the retrieved configuration is a string, the string is returned. +// This method is used to resolve ${} references in inline position. +func (r *Retrieved) AsString() (string, error) { + if !r.isSetString { + if str, ok := r.rawConf.(string); ok { + return str, nil + } + return "", fmt.Errorf("retrieved value does not have unambiguous string representation: %v", r.rawConf) + } + return r.stringRepresentation, nil +} + +// Close and release any watchers that Provider.Retrieve may have created. +// +// Should block until all resources are closed, and guarantee that `onChange` is not +// going to be called after it returns except when `ctx` is cancelled. +// +// Should never be called concurrently with itself. +func (r *Retrieved) Close(ctx context.Context) error { + if r.closeFunc == nil { + return nil + } + return r.closeFunc(ctx) +} + +// CloseFunc a function equivalent to Retrieved.Close. +type CloseFunc func(context.Context) error + +func checkRawConfType(rawConf any) error { + if rawConf == nil { + return nil + } + switch rawConf.(type) { + case int, int32, int64, float32, float64, bool, string, []any, map[string]any, time.Time: + return nil + default: + return fmt.Errorf( + "unsupported type=%T for retrieved config,"+ + " ensure that values are wrapped in quotes", rawConf) + } +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/resolver.go b/vendor/go.opentelemetry.io/collector/confmap/resolver.go new file mode 100644 index 0000000000..039d4aea1b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/resolver.go @@ -0,0 +1,285 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package confmap // import "go.opentelemetry.io/collector/confmap" + +import ( + "context" + "errors" + "fmt" + "regexp" + "strings" + + "go.uber.org/multierr" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/featuregate" +) + +var enableMergeAppendOption = featuregate.GlobalRegistry().MustRegister( + "confmap.enableMergeAppendOption", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.120.0"), + featuregate.WithRegisterDescription("Combines lists when resolving configs from different sources. 
This feature gate will not be stabilized 'as is'; the current behavior will remain the default."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/8754"), +) + +// follows drive-letter specification: +// https://datatracker.ietf.org/doc/html/draft-kerwin-file-scheme-07.html#section-2.2 +var driverLetterRegexp = regexp.MustCompile("^[A-z]:") + +// Resolver resolves a configuration as a Conf. +type Resolver struct { + uris []location + providers map[string]Provider + defaultScheme string + converters []Converter + + closers []CloseFunc + watcher chan error +} + +// ResolverSettings are the settings to configure the behavior of the Resolver. +type ResolverSettings struct { + // URIs locations from where the Conf is retrieved, and merged in the given order. + // It is required to have at least one location. + URIs []string + + // ProviderFactories is a slice of Provider factories. + // It is required to have at least one factory. + ProviderFactories []ProviderFactory + + // DefaultScheme is the scheme that is used if ${} syntax is used but no schema is provided. + // If no DefaultScheme is set, ${} with no schema will not be expanded. + // It is strongly recommended to set "env" as the default scheme to align with the + // OpenTelemetry Configuration Specification + DefaultScheme string + + // ProviderSettings contains settings that will be passed to Provider + // factories when instantiating Providers. + ProviderSettings ProviderSettings + + // ConverterFactories is a slice of Converter creation functions. + ConverterFactories []ConverterFactory + + // ConverterSettings contains settings that will be passed to Converter + // factories when instantiating Converters. + ConverterSettings ConverterSettings +} + +// NewResolver returns a new Resolver that resolves configuration from multiple URIs. +// +// To resolve a configuration the following steps will happen: +// 1. Retrieves individual configurations from all given "URIs", and merge them in the retrieve order. +// 2. Once the Conf is merged, apply the converters in the given order. +// +// After the configuration was resolved the `Resolver` can be used as a single point to watch for updates in +// the configuration data retrieved via the config providers used to process the "initial" configuration and to generate +// the "effective" one. The typical usage is the following: +// +// Resolver.Resolve(ctx) +// Resolver.Watch() // wait for an event. +// Resolver.Resolve(ctx) +// Resolver.Watch() // wait for an event. +// // repeat Resolve/Watch cycle until it is time to shut down the Collector process. +// Resolver.Shutdown(ctx) +// +// `uri` must follow the ":" format. This format is compatible with the URI definition +// (see https://datatracker.ietf.org/doc/html/rfc3986). An empty "" defaults to "file" schema. 
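As a usage sketch of the resolver described above, assuming the separate fileprovider and envprovider modules (go.opentelemetry.io/collector/confmap/provider/fileprovider and go.opentelemetry.io/collector/confmap/provider/envprovider), which are not part of this diff; the file name and environment variable are illustrative:

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/collector/confmap"
        "go.opentelemetry.io/collector/confmap/provider/envprovider"
        "go.opentelemetry.io/collector/confmap/provider/fileprovider"
    )

    func main() {
        res, err := confmap.NewResolver(confmap.ResolverSettings{
            // Later URIs are merged on top of earlier ones.
            URIs: []string{"file:config.yaml", "env:EXTRA_CONFIG"},
            ProviderFactories: []confmap.ProviderFactory{
                fileprovider.NewFactory(),
                envprovider.NewFactory(),
            },
            // Lets bare ${VAR} references fall back to the env provider.
            DefaultScheme: "env",
        })
        if err != nil {
            panic(err)
        }
        conf, err := res.Resolve(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Println(conf.AllKeys())
        _ = res.Shutdown(context.Background())
    }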
+func NewResolver(set ResolverSettings) (*Resolver, error) { + if len(set.URIs) == 0 { + return nil, errors.New("invalid 'confmap.ResolverSettings' configuration: no URIs") + } + + if len(set.ProviderFactories) == 0 { + return nil, errors.New("invalid 'confmap.ResolverSettings' configuration: no Providers") + } + + if set.ProviderSettings.Logger == nil { + set.ProviderSettings.Logger = zap.NewNop() + } + + if set.ConverterSettings.Logger == nil { + set.ConverterSettings.Logger = zap.NewNop() + } + + providers := make(map[string]Provider, len(set.ProviderFactories)) + for _, factory := range set.ProviderFactories { + provider := factory.Create(set.ProviderSettings) + scheme := provider.Scheme() + // Check that the scheme follows the pattern. + if !regexp.MustCompile(schemePattern).MatchString(scheme) { + return nil, fmt.Errorf("invalid 'confmap.Provider' scheme %q", scheme) + } + // Check that the scheme is unique. + if _, ok := providers[scheme]; ok { + return nil, fmt.Errorf("duplicate 'confmap.Provider' scheme %q", scheme) + } + + providers[scheme] = provider + } + + if set.DefaultScheme != "" { + _, ok := providers[set.DefaultScheme] + if !ok { + return nil, errors.New("invalid 'confmap.ResolverSettings' configuration: DefaultScheme not found in providers list") + } + } + + converters := make([]Converter, len(set.ConverterFactories)) + for i, factory := range set.ConverterFactories { + converters[i] = factory.Create(set.ConverterSettings) + } + + // Safe copy, ensures the slices and maps cannot be changed from the caller. + uris := make([]location, len(set.URIs)) + for i, uri := range set.URIs { + // For backwards compatibility: + // - empty url scheme means "file". + // - "^[A-z]:" also means "file" + if driverLetterRegexp.MatchString(uri) || !strings.Contains(uri, ":") { + uris[i] = location{scheme: "file", opaqueValue: uri} + continue + } + lURI, err := newLocation(uri) + if err != nil { + return nil, err + } + if _, ok := providers[lURI.scheme]; !ok { + return nil, fmt.Errorf("unsupported scheme on URI %q", uri) + } + uris[i] = lURI + } + + return &Resolver{ + uris: uris, + providers: providers, + defaultScheme: set.DefaultScheme, + converters: converters, + watcher: make(chan error, 1), + }, nil +} + +// Resolve returns the configuration as a Conf, or error otherwise. +// Should never be called concurrently with itself, Watch or Shutdown. +func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) { + // First check if already an active watching, close that if any. + if err := mr.closeIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("cannot close previous watch: %w", err) + } + + // Retrieves individual configurations from all URIs in the given order, and merge them in retMap. + retMap := New() + for _, uri := range mr.uris { + ret, err := mr.retrieveValue(ctx, uri) + if err != nil { + return nil, fmt.Errorf("cannot retrieve the configuration: %w", err) + } + mr.closers = append(mr.closers, ret.Close) + retCfgMap, err := ret.AsConf() + if err != nil { + return nil, err + } + if enableMergeAppendOption.IsEnabled() { + // only use MergeAppend when enableMergeAppendOption featuregate is enabled. 
+ err = retMap.mergeAppend(retCfgMap) + } else { + err = retMap.Merge(retCfgMap) + } + if err != nil { + return nil, err + } + } + + cfgMap := make(map[string]any) + for _, k := range retMap.AllKeys() { + val, err := mr.expandValueRecursively(ctx, retMap.unsanitizedGet(k)) + if err != nil { + return nil, err + } + cfgMap[k] = escapeDollarSigns(val) + } + retMap = NewFromStringMap(cfgMap) + + // Apply the converters in the given order. + for _, confConv := range mr.converters { + if err := confConv.Convert(ctx, retMap); err != nil { + return nil, fmt.Errorf("cannot convert the confmap.Conf: %w", err) + } + } + + return retMap, nil +} + +func escapeDollarSigns(val any) any { + switch v := val.(type) { + case string: + return strings.ReplaceAll(v, "$$", "$") + case expandedValue: + v.Original = strings.ReplaceAll(v.Original, "$$", "$") + v.Value = escapeDollarSigns(v.Value) + return v + case []any: + nslice := make([]any, len(v)) + for i, x := range v { + nslice[i] = escapeDollarSigns(x) + } + return nslice + case map[string]any: + nmap := make(map[string]any, len(v)) + for k, x := range v { + nmap[k] = escapeDollarSigns(x) + } + return nmap + default: + return val + } +} + +// Watch blocks until any configuration change was detected or an unrecoverable error +// happened during monitoring the configuration changes. +// +// Error is nil if the configuration is changed and needs to be re-fetched. Any non-nil +// error indicates that there was a problem with watching the configuration changes. +// +// Should never be called concurrently with itself or Get. +func (mr *Resolver) Watch() <-chan error { + return mr.watcher +} + +// Shutdown signals that the provider is no longer in use and the that should close +// and release any resources that it may have created. It terminates the Watch channel. +// +// Should never be called concurrently with itself or Get. +func (mr *Resolver) Shutdown(ctx context.Context) error { + close(mr.watcher) + + var errs error + errs = multierr.Append(errs, mr.closeIfNeeded(ctx)) + for _, p := range mr.providers { + errs = multierr.Append(errs, p.Shutdown(ctx)) + } + + return errs +} + +func (mr *Resolver) onChange(event *ChangeEvent) { + mr.watcher <- event.Error +} + +func (mr *Resolver) closeIfNeeded(ctx context.Context) error { + var err error + for _, ret := range mr.closers { + err = multierr.Append(err, ret(ctx)) + } + mr.closers = nil + return err +} + +func (mr *Resolver) retrieveValue(ctx context.Context, uri location) (*Retrieved, error) { + p, ok := mr.providers[uri.scheme] + if !ok { + return nil, fmt.Errorf("scheme %q is not supported for uri %q", uri.scheme, uri.asString()) + } + return p.Retrieve(ctx, uri.asString(), mr.onChange) +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/xconfmap/LICENSE b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/Makefile b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/config/configtelemetry/Makefile rename to vendor/go.opentelemetry.io/collector/confmap/xconfmap/Makefile diff --git a/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go new file mode 100644 index 0000000000..b9e8edbbaa --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconfmap // import "go.opentelemetry.io/collector/confmap/xconfmap" + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "go.opentelemetry.io/collector/confmap" +) + +// As interface types are only used for static typing, a common idiom to find the reflection Type +// for an interface type Foo is to use a *Foo value. +var configValidatorType = reflect.TypeOf((*Validator)(nil)).Elem() + +// Validator defines an optional interface for configurations to implement to do validation. +type Validator interface { + // Validate the configuration and returns an error if invalid. + Validate() error +} + +// Validate validates a config, by doing this: +// - Call Validate on the config itself if the config implements ConfigValidator. +func Validate(cfg any) error { + var err error + + for _, validationErr := range validate(reflect.ValueOf(cfg)) { + err = errors.Join(err, validationErr) + } + + return err +} + +type pathError struct { + err error + path []string +} + +func (pe pathError) Error() string { + if len(pe.path) > 0 { + var path string + sb := strings.Builder{} + + _, _ = sb.WriteString(pe.path[len(pe.path)-1]) + for i := len(pe.path) - 2; i >= 0; i-- { + _, _ = sb.WriteString(confmap.KeyDelimiter) + _, _ = sb.WriteString(pe.path[i]) + } + path = sb.String() + + return fmt.Sprintf("%s: %s", path, pe.err) + } + + return pe.err.Error() +} + +func (pe pathError) Unwrap() error { + return pe.err +} + +func validate(v reflect.Value) []pathError { + errs := []pathError{} + // Validate the value itself. + switch v.Kind() { + case reflect.Invalid: + return nil + case reflect.Ptr, reflect.Interface: + return validate(v.Elem()) + case reflect.Struct: + err := callValidateIfPossible(v) + if err != nil { + errs = append(errs, pathError{err: err}) + } + + // Reflect on the pointed data and check each of its fields. + for i := 0; i < v.NumField(); i++ { + if !v.Type().Field(i).IsExported() { + continue + } + field := v.Type().Field(i) + path := fieldName(field) + + subpathErrs := validate(v.Field(i)) + for _, err := range subpathErrs { + errs = append(errs, pathError{ + err: err.err, + path: append(err.path, path), + }) + } + } + return errs + case reflect.Slice, reflect.Array: + err := callValidateIfPossible(v) + if err != nil { + errs = append(errs, pathError{err: err}) + } + + // Reflect on the pointed data and check each of its fields. 
+ for i := 0; i < v.Len(); i++ { + subPathErrs := validate(v.Index(i)) + + for _, err := range subPathErrs { + errs = append(errs, pathError{ + err: err.err, + path: append(err.path, strconv.Itoa(i)), + }) + } + } + return errs + case reflect.Map: + err := callValidateIfPossible(v) + if err != nil { + errs = append(errs, pathError{err: err}) + } + + iter := v.MapRange() + for iter.Next() { + keyErrs := validate(iter.Key()) + valueErrs := validate(iter.Value()) + key := stringifyMapKey(iter.Key()) + + for _, err := range keyErrs { + errs = append(errs, pathError{err: err.err, path: append(err.path, key)}) + } + + for _, err := range valueErrs { + errs = append(errs, pathError{err: err.err, path: append(err.path, key)}) + } + } + return errs + default: + err := callValidateIfPossible(v) + if err != nil { + return []pathError{{err: err}} + } + + return nil + } +} + +func callValidateIfPossible(v reflect.Value) error { + // If the value type implements ConfigValidator just call Validate + if v.Type().Implements(configValidatorType) { + return v.Interface().(Validator).Validate() + } + + // If the pointer type implements ConfigValidator call Validate on the pointer to the current value. + if reflect.PointerTo(v.Type()).Implements(configValidatorType) { + // If not addressable, then create a new *V pointer and set the value to current v. + if !v.CanAddr() { + pv := reflect.New(reflect.PointerTo(v.Type()).Elem()) + pv.Elem().Set(v) + v = pv.Elem() + } + return v.Addr().Interface().(Validator).Validate() + } + + return nil +} + +func fieldName(field reflect.StructField) string { + var fieldName string + if tag, ok := field.Tag.Lookup(confmap.MapstructureTag); ok { + tags := strings.Split(tag, ",") + if len(tags) > 0 { + fieldName = tags[0] + } + } + // Even if the mapstructure tag exists, the field name may not + // be available, so set it if it is still blank. + if len(fieldName) == 0 { + fieldName = strings.ToLower(field.Name) + } + + return fieldName +} + +func stringifyMapKey(val reflect.Value) string { + var key string + + if str, ok := val.Interface().(string); ok { + key = str + } else if stringer, ok := val.Interface().(fmt.Stringer); ok { + key = stringer.String() + } else { + switch val.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + key = fmt.Sprintf("[%T key]", val.Interface()) + default: + key = fmt.Sprintf("%v", val.Interface()) + } + } + + return key +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/logs.go b/vendor/go.opentelemetry.io/collector/consumer/logs.go index 15166ef119..701ce5f35c 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/logs.go +++ b/vendor/go.opentelemetry.io/collector/consumer/logs.go @@ -14,7 +14,8 @@ import ( // as needed, and sends it to the next processing node if any or to the destination. type Logs interface { internal.BaseConsumer - // ConsumeLogs receives plog.Logs for consumption. + // ConsumeLogs processes the logs. After the function returns, the logs are no longer accessible, + // and accessing them is considered undefined behavior. 
ConsumeLogs(ctx context.Context, ld plog.Logs) error } diff --git a/vendor/go.opentelemetry.io/collector/consumer/metrics.go b/vendor/go.opentelemetry.io/collector/consumer/metrics.go index 47897f9363..6a636a6cae 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/metrics.go +++ b/vendor/go.opentelemetry.io/collector/consumer/metrics.go @@ -14,7 +14,8 @@ import ( // as needed, and sends it to the next processing node if any or to the destination. type Metrics interface { internal.BaseConsumer - // ConsumeMetrics receives pmetric.Metrics for consumption. + // ConsumeMetrics processes the metrics. After the function returns, the metrics are no longer accessible, + // and accessing them is considered undefined behavior. ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error } diff --git a/vendor/go.opentelemetry.io/collector/consumer/traces.go b/vendor/go.opentelemetry.io/collector/consumer/traces.go index 60df2d0453..1fed65656f 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/traces.go +++ b/vendor/go.opentelemetry.io/collector/consumer/traces.go @@ -14,7 +14,8 @@ import ( // as needed, and sends it to the next processing node if any or to the destination. type Traces interface { internal.BaseConsumer - // ConsumeTraces receives ptrace.Traces for consumption. + // ConsumeTraces processes the traces. After the function returns, the traces are no longer accessible, + // and accessing them is considered undefined behavior. ConsumeTraces(ctx context.Context, td ptrace.Traces) error } diff --git a/vendor/go.opentelemetry.io/collector/featuregate/LICENSE b/vendor/go.opentelemetry.io/collector/featuregate/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/featuregate/Makefile b/vendor/go.opentelemetry.io/collector/featuregate/Makefile new file mode 100644 index 0000000000..39734bfaeb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/featuregate/README.md b/vendor/go.opentelemetry.io/collector/featuregate/README.md new file mode 100644 index 0000000000..d3e3c802d6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/README.md @@ -0,0 +1,77 @@ +# Collector Feature Gates + +This package provides a mechanism that allows operators to enable and disable +experimental or transitional features at deployment time. These flags should +be able to govern the behavior of the application starting as early as possible +and should be available to every component such that decisions may be made +based on flags at the component level. 
+ +## Usage + +Feature gates must be defined and registered with the global registry in +an `init()` function. This makes the `Gate` available to be configured and +queried with the default value defined by its [`Stage`](#feature-lifecycle). +A `Gate` can have a list of associated issues that allow users to refer to +the issue and report any additional problems or understand the context of the `Gate`. +Once a `Gate` has been marked as `Stable`, it must have a `RemovalVersion` set. + +```go +var myFeatureGate = featuregate.GlobalRegistry().MustRegister( + "namespaced.uniqueIdentifier", + featuregate.Stable, + featuregate.WithRegisterFromVersion("v0.65.0"), + featuregate.WithRegisterDescription("A brief description of what the gate controls"), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/6167"), + featuregate.WithRegisterToVersion("v0.70.0")) +``` + +The status of the gate may later be checked by interrogating the global +feature gate registry: + +```go +if myFeatureGate.IsEnabled() { + setupNewFeature() +} +``` + +Note that `IsEnabled()` is a cheap atomic read on the `Gate` itself, so keep a +reference to the `Gate` returned by registration rather than looking it up in +the registry on every check. + +## Controlling Gates + +Feature gates can be enabled or disabled via the CLI, with the +`--feature-gates` flag. When using the CLI flag, gate +identifiers must be presented as a comma-delimited list. Gate identifiers +prefixed with `-` will disable the gate and prefixing with `+` or with no +prefix will enable the gate. + +```shell +otelcol --config=config.yaml --feature-gates=gate1,-gate2,+gate3 +``` + +This will enable `gate1` and `gate3` and disable `gate2`. + +## Feature Lifecycle + +Features controlled by a `Gate` should follow a three-stage lifecycle, +modeled after the [system used by Kubernetes](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages): + +1. An `alpha` stage where the feature is disabled by default and must be enabled + through a `Gate`. +2. A `beta` stage where the feature has been well tested and is enabled by + default but can be disabled through a `Gate`. +3. A generally available or `stable` stage where the feature is permanently enabled. At this stage + the gate should no longer be explicitly used. Disabling the gate will produce an error and + explicitly enabling will produce a warning log. +4. A `stable` feature gate will be removed in the version specified by its `ToVersion` value. + +Features that prove unworkable in the `alpha` stage may be discontinued +without proceeding to the `beta` stage. Instead, they will proceed to the +`deprecated` stage, in which the feature is permanently disabled. A feature gate will +be removed once it has been `deprecated` for at least 2 releases of the collector. + +Features that make it to the `beta` stage are intended to reach general availability but may still be discontinued. +If, after wider use, it is determined that the gate should be discontinued, it will be reverted to the `alpha` stage +for 2 releases and then proceed to the `deprecated` stage. If instead it is ready for general availability it will +proceed to the `stable` stage.
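The vendored README above shows registration and status checks in isolation. As a minimal, hedged sketch of how the pieces fit together in a consuming binary (this is not part of the vendored code; the gate ID `example.myFeature`, the flag-set name, and the description string are made up for illustration), one might wire the global registry's `--feature-gates` flag into a `flag.FlagSet` and check the gate afterwards:

```go
package main

import (
	"flag"
	"fmt"

	"go.opentelemetry.io/collector/featuregate"
)

// Register the gate up front (package-level var or init()) so it exists
// before command-line flags are parsed. StageAlpha gates default to disabled.
// The ID "example.myFeature" is hypothetical, chosen to satisfy the
// alphanumeric-plus-dots ID rule.
var myGate = featuregate.GlobalRegistry().MustRegister(
	"example.myFeature",
	featuregate.StageAlpha,
	featuregate.WithRegisterDescription("Enables an experimental code path"),
)

func main() {
	// Expose the global registry's --feature-gates flag on a flag set.
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	featuregate.GlobalRegistry().RegisterFlags(fs)

	// Equivalent to a user passing --feature-gates=+example.myFeature on the CLI.
	if err := fs.Parse([]string{"--feature-gates=example.myFeature"}); err != nil {
		panic(err)
	}

	if myGate.IsEnabled() {
		fmt.Println("experimental path enabled")
	} else {
		fmt.Println("experimental path disabled")
	}
}
```

Registering the gate in a package-level `var` (or an `init()` function) mirrors the README's requirement that gates exist before `--feature-gates` is parsed; since `StageAlpha` gates start disabled, the gate only reports enabled here because the parsed flag turns it on.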
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/flag.go b/vendor/go.opentelemetry.io/collector/featuregate/flag.go new file mode 100644 index 0000000000..1c6f3a5e87 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/flag.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package featuregate // import "go.opentelemetry.io/collector/featuregate" + +import ( + "flag" + "strings" + + "go.uber.org/multierr" +) + +const ( + featureGatesFlag = "feature-gates" + featureGatesFlagDescription = "Comma-delimited list of feature gate identifiers. Prefix with '-' to disable the feature. '+' or no prefix will enable the feature." +) + +// RegisterFlagsOption is an option for RegisterFlags. +type RegisterFlagsOption interface { + private() +} + +// RegisterFlags that directly applies feature gate statuses to a Registry. +func (r *Registry) RegisterFlags(flagSet *flag.FlagSet, _ ...RegisterFlagsOption) { + flagSet.Var(&flagValue{reg: r}, featureGatesFlag, featureGatesFlagDescription) +} + +// flagValue implements the flag.Value interface and directly applies feature gate statuses to a Registry. +type flagValue struct { + reg *Registry +} + +func (f *flagValue) String() string { + // This function can be called by isZeroValue https://github.com/golang/go/blob/go1.23.3/src/flag/flag.go#L630 + // which creates an instance of flagValue using reflect.New. In this case, the field `reg` is nil. + if f.reg == nil { + return "" + } + + var ids []string + f.reg.VisitAll(func(g *Gate) { + id := g.ID() + if !g.IsEnabled() { + id = "-" + id + } + ids = append(ids, id) + }) + return strings.Join(ids, ",") +} + +func (f *flagValue) Set(s string) error { + if s == "" { + return nil + } + + var errs error + ids := strings.Split(s, ",") + for i := range ids { + id := ids[i] + val := true + switch id[0] { + case '-': + id = id[1:] + val = false + case '+': + id = id[1:] + } + errs = multierr.Append(errs, f.reg.Set(id, val)) + } + return errs +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/gate.go b/vendor/go.opentelemetry.io/collector/featuregate/gate.go new file mode 100644 index 0000000000..a250ceb9a8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/gate.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package featuregate // import "go.opentelemetry.io/collector/featuregate" + +import ( + "fmt" + "sync/atomic" + + "github.com/hashicorp/go-version" +) + +// Gate is an immutable object that is owned by the Registry and represents an individual feature that +// may be enabled or disabled based on the lifecycle state of the feature and CLI flags specified by the user. +type Gate struct { + id string + description string + referenceURL string + fromVersion *version.Version + toVersion *version.Version + stage Stage + enabled *atomic.Bool +} + +// ID returns the id of the Gate. +func (g *Gate) ID() string { + return g.id +} + +// IsEnabled returns true if the feature described by the Gate is enabled. +func (g *Gate) IsEnabled() bool { + return g.enabled.Load() +} + +// Description returns the description for the Gate. +func (g *Gate) Description() string { + return g.description +} + +// Stage returns the Gate's lifecycle stage. +func (g *Gate) Stage() Stage { + return g.stage +} + +// ReferenceURL returns the URL to the contextual information about the Gate. 
+func (g *Gate) ReferenceURL() string { + return g.referenceURL +} + +// FromVersion returns the version information when the Gate was added. +func (g *Gate) FromVersion() string { + return fmt.Sprintf("v%s", g.fromVersion) +} + +// ToVersion returns the version information for when the Gate is in StageStable. +func (g *Gate) ToVersion() string { + return fmt.Sprintf("v%s", g.toVersion) +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/registry.go b/vendor/go.opentelemetry.io/collector/featuregate/registry.go new file mode 100644 index 0000000000..9309024c38 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/registry.go @@ -0,0 +1,211 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package featuregate // import "go.opentelemetry.io/collector/featuregate" + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "sort" + "sync" + "sync/atomic" + + "github.com/hashicorp/go-version" +) + +var ( + globalRegistry = NewRegistry() + + // idRegexp is used to validate the ID of a Gate. + // IDs' characters must be alphanumeric or dots. + idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`) +) + +// ErrAlreadyRegistered is returned when adding a Gate that is already registered. +var ErrAlreadyRegistered = errors.New("gate is already registered") + +// GlobalRegistry returns the global Registry. +func GlobalRegistry() *Registry { + return globalRegistry +} + +type Registry struct { + gates sync.Map +} + +// NewRegistry returns a new empty Registry. +func NewRegistry() *Registry { + return &Registry{} +} + +// RegisterOption allows configuring additional information about a Gate during registration. +type RegisterOption interface { + apply(g *Gate) error +} + +type registerOptionFunc func(g *Gate) error + +func (ro registerOptionFunc) apply(g *Gate) error { + return ro(g) +} + +// WithRegisterDescription adds a description for the Gate. +func WithRegisterDescription(description string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + g.description = description + return nil + }) +} + +// WithRegisterReferenceURL adds a URL that has all the contextual information about the Gate. +// referenceURL must be a valid URL as defined by `net/url.Parse`. +func WithRegisterReferenceURL(referenceURL string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + if _, err := url.Parse(referenceURL); err != nil { + return fmt.Errorf("WithRegisterReferenceURL: invalid reference URL %q: %w", referenceURL, err) + } + + g.referenceURL = referenceURL + return nil + }) +} + +// WithRegisterFromVersion is used to set the Gate "FromVersion". +// The "FromVersion" contains the Collector release when a feature is introduced. +// fromVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease]. +// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters. +func WithRegisterFromVersion(fromVersion string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + from, err := version.NewVersion(fromVersion) + if err != nil { + return fmt.Errorf("WithRegisterFromVersion: invalid version %q: %w", fromVersion, err) + } + + g.fromVersion = from + return nil + }) +} + +// WithRegisterToVersion is used to set the Gate "ToVersion". +// The "ToVersion", if not empty, contains the last Collector release in which you can still use a feature gate.
+// If the feature stage is either "Deprecated" or "Stable", the "ToVersion" is the Collector release when the feature is removed. +// toVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease]. +// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters. +func WithRegisterToVersion(toVersion string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + to, err := version.NewVersion(toVersion) + if err != nil { + return fmt.Errorf("WithRegisterToVersion: invalid version %q: %w", toVersion, err) + } + + g.toVersion = to + return nil + }) +} + +// MustRegister like Register but panics if an invalid ID or gate options are provided. +func (r *Registry) MustRegister(id string, stage Stage, opts ...RegisterOption) *Gate { + g, err := r.Register(id, stage, opts...) + if err != nil { + panic(err) + } + return g +} + +func validateID(id string) error { + if id == "" { + return errors.New("empty ID") + } + + if !idRegexp.MatchString(id) { + return errors.New("invalid character(s) in ID") + } + return nil +} + +// Register a Gate and return it. The returned Gate can be used to check if is enabled or not. +// id must be an ASCII alphanumeric nonempty string. Dots are allowed for namespacing. +func (r *Registry) Register(id string, stage Stage, opts ...RegisterOption) (*Gate, error) { + if err := validateID(id); err != nil { + return nil, fmt.Errorf("invalid ID %q: %w", id, err) + } + + g := &Gate{ + id: id, + stage: stage, + } + for _, opt := range opts { + err := opt.apply(g) + if err != nil { + return nil, fmt.Errorf("failed to apply option: %w", err) + } + } + switch g.stage { + case StageAlpha, StageDeprecated: + g.enabled = &atomic.Bool{} + case StageBeta, StageStable: + enabled := &atomic.Bool{} + enabled.Store(true) + g.enabled = enabled + default: + return nil, fmt.Errorf("unknown stage value %q for gate %q", stage, id) + } + if (g.stage == StageStable || g.stage == StageDeprecated) && g.toVersion == nil { + return nil, fmt.Errorf("no removal version set for %v gate %q", g.stage.String(), id) + } + + if g.fromVersion != nil && g.toVersion != nil && g.toVersion.LessThan(g.fromVersion) { + return nil, fmt.Errorf("toVersion %q is before fromVersion %q", g.toVersion, g.fromVersion) + } + + if _, loaded := r.gates.LoadOrStore(id, g); loaded { + return nil, fmt.Errorf("failed to register %q: %w", id, ErrAlreadyRegistered) + } + return g, nil +} + +// Set the enabled valued for a Gate identified by the given id. +func (r *Registry) Set(id string, enabled bool) error { + v, ok := r.gates.Load(id) + if !ok { + validGates := []string{} + r.VisitAll(func(g *Gate) { + validGates = append(validGates, g.ID()) + }) + return fmt.Errorf("no such feature gate %q. valid gates: %v", id, validGates) + } + g := v.(*Gate) + + switch g.stage { + case StageStable: + if !enabled { + return fmt.Errorf("feature gate %q is stable, can not be disabled", id) + } + fmt.Printf("Feature gate %q is stable and already enabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion) + case StageDeprecated: + if enabled { + return fmt.Errorf("feature gate %q is deprecated, can not be enabled", id) + } + fmt.Printf("Feature gate %q is deprecated and already disabled. 
It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion) + default: + g.enabled.Store(enabled) + } + return nil +} + +// VisitAll visits all the gates in lexicographical order, calling fn for each. +func (r *Registry) VisitAll(fn func(*Gate)) { + var gates []*Gate + r.gates.Range(func(_, value any) bool { + gates = append(gates, value.(*Gate)) + return true + }) + sort.Slice(gates, func(i, j int) bool { + return gates[i].ID() < gates[j].ID() + }) + for i := range gates { + fn(gates[i]) + } +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/stage.go b/vendor/go.opentelemetry.io/collector/featuregate/stage.go new file mode 100644 index 0000000000..f2be1b248d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/stage.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package featuregate // import "go.opentelemetry.io/collector/featuregate" + +// Stage represents the Gate's lifecycle and what is the expected state of it. +type Stage int8 + +const ( + // StageAlpha is used when creating a new feature and the Gate must be explicitly enabled + // by the operator. + // + // The Gate will be disabled by default. + StageAlpha Stage = iota + // StageBeta is used when the feature gate is well tested and is enabled by default, + // but can be disabled by a Gate. + // + // The Gate will be enabled by default. + StageBeta + // StageStable is used when feature is permanently enabled and can not be disabled by a Gate. + // This value is used to provide feedback to the user that the gate will be removed in the next versions. + // + // The Gate will be enabled by default and will return an error if disabled. + StageStable + // StageDeprecated is used when feature is permanently disabled and can not be enabled by a Gate. + // This value is used to provide feedback to the user that the gate will be removed in the next versions. + // + // The Gate will be disabled by default and will return an error if modified. + StageDeprecated +) + +func (s Stage) String() string { + switch s { + case StageAlpha: + return "Alpha" + case StageBeta: + return "Beta" + case StageStable: + return "Stable" + case StageDeprecated: + return "Deprecated" + } + return "Unknown" +} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/LICENSE b/vendor/go.opentelemetry.io/collector/internal/telemetry/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/Makefile b/vendor/go.opentelemetry.io/collector/internal/telemetry/Makefile new file mode 100644 index 0000000000..ded7a36092 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go new file mode 100644 index 0000000000..a246af4da3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" + +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" +) + +const ( + ComponentKindKey = "otelcol.component.kind" + ComponentIDKey = "otelcol.component.id" + PipelineIDKey = "otelcol.pipeline.id" + SignalKey = "otelcol.signal" + SignalOutputKey = "otelcol.signal.output" +) + +func RemoveAttributes(attrs attribute.Set, fields ...string) attribute.Set { + attrs, _ = attribute.NewSetWithFiltered(attrs.ToSlice(), func(kv attribute.KeyValue) bool { + return !slices.Contains(fields, string(kv.Key)) + }) + return attrs +} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_provider.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_provider.go new file mode 100644 index 0000000000..5aad00f246 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_provider.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" + +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/log" +) + +type loggerProviderWithAttributes struct { + log.LoggerProvider + attrs []attribute.KeyValue +} + +// LoggerProviderWithAttributes creates a LoggerProvider with a new set of injected instrumentation scope attributes. +func LoggerProviderWithAttributes(lp log.LoggerProvider, attrs attribute.Set) log.LoggerProvider { + if lpwa, ok := lp.(loggerProviderWithAttributes); ok { + lp = lpwa.LoggerProvider + } + return loggerProviderWithAttributes{ + LoggerProvider: lp, + attrs: attrs.ToSlice(), + } +} + +func (lpwa loggerProviderWithAttributes) Logger(name string, opts ...log.LoggerOption) log.Logger { + conf := log.NewLoggerConfig(opts...) + attrSet := conf.InstrumentationAttributes() + // prepend our attributes so they can be overwritten + newAttrs := append(slices.Clone(lpwa.attrs), attrSet.ToSlice()...) + // append our attribute set option to overwrite the old one + opts = append(opts, log.WithInstrumentationAttributes(newAttrs...)) + return lpwa.LoggerProvider.Logger(name, opts...) 
+} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go new file mode 100644 index 0000000000..e20a4e5fd6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go @@ -0,0 +1,146 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" + +import ( + "go.opentelemetry.io/contrib/bridges/otelzap" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// Interface for Zap cores that support setting and resetting a set of component attributes. +// +// There are three wrappers that implement this interface: +// +// - [NewConsoleCoreWithAttributes] injects component attributes as Zap fields. +// +// This is used for the Collector's console output. +// +// - [NewOTelTeeCoreWithAttributes] copies logs to a [log.LoggerProvider] using [otelzap]. For the +// copied logs, component attributes are injected as instrumentation scope attributes. +// +// This is used when service::telemetry::logs::processors is configured. +// +// - [NewWrapperCoreWithAttributes] applies a wrapper function to a core, similar to +// [zap.WrapCore]. It allows setting component attributes on the inner core and reapplying the +// wrapper function when needed. +// +// This is used when adding [zapcore.NewSamplerWithOptions] to our logger stack. +type coreWithAttributes interface { + zapcore.Core + withAttributeSet(attribute.Set) zapcore.Core +} + +// Tries setting the component attribute set for a Zap core. +// +// Does nothing if the core does not implement [coreWithAttributes]. +func tryWithAttributeSet(c zapcore.Core, attrs attribute.Set) zapcore.Core { + if cwa, ok := c.(coreWithAttributes); ok { + return cwa.withAttributeSet(attrs) + } + zap.New(c).Debug("Logger core does not support injecting component attributes") + return c +} + +type consoleCoreWithAttributes struct { + zapcore.Core + from zapcore.Core +} + +var _ coreWithAttributes = (*consoleCoreWithAttributes)(nil) + +// NewConsoleCoreWithAttributes wraps a Zap core in order to inject component attributes as Zap fields. +// +// This is used for the Collector's console output. +func NewConsoleCoreWithAttributes(c zapcore.Core, attrs attribute.Set) zapcore.Core { + var fields []zap.Field + for _, kv := range attrs.ToSlice() { + fields = append(fields, zap.String(string(kv.Key), kv.Value.AsString())) + } + return &consoleCoreWithAttributes{ + Core: c.With(fields), + from: c, + } +} + +func (ccwa *consoleCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core { + return NewConsoleCoreWithAttributes(ccwa.from, attrs) +} + +type otelTeeCoreWithAttributes struct { + zapcore.Core + consoleCore zapcore.Core + lp log.LoggerProvider + scopeName string + level zapcore.Level +} + +var _ coreWithAttributes = (*otelTeeCoreWithAttributes)(nil) + +// NewOTelTeeCoreWithAttributes wraps a Zap core in order to copy logs to a [log.LoggerProvider] using [otelzap]. For the copied +// logs, component attributes are injected as instrumentation scope attributes. +// +// This is used when service::telemetry::logs::processors is configured. 
+func NewOTelTeeCoreWithAttributes(consoleCore zapcore.Core, lp log.LoggerProvider, scopeName string, level zapcore.Level, attrs attribute.Set) zapcore.Core { + // TODO: Use `otelzap.WithAttributes` and remove `LoggerProviderWithAttributes` + // once we've upgraded to otelzap v0.11.0. + lpwa := LoggerProviderWithAttributes(lp, attrs) + otelCore, err := zapcore.NewIncreaseLevelCore(otelzap.NewCore( + scopeName, + otelzap.WithLoggerProvider(lpwa), + ), zap.NewAtomicLevelAt(level)) + if err != nil { + panic(err) + } + + return &otelTeeCoreWithAttributes{ + Core: zapcore.NewTee(consoleCore, otelCore), + consoleCore: consoleCore, + lp: lp, + scopeName: scopeName, + level: level, + } +} + +func (ocwa *otelTeeCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core { + return NewOTelTeeCoreWithAttributes( + tryWithAttributeSet(ocwa.consoleCore, attrs), + ocwa.lp, ocwa.scopeName, ocwa.level, + attrs, + ) +} + +type wrapperCoreWithAttributes struct { + zapcore.Core + from zapcore.Core + wrapper func(zapcore.Core) zapcore.Core +} + +var _ coreWithAttributes = (*wrapperCoreWithAttributes)(nil) + +// NewWrapperCoreWithAttributes applies a wrapper function to a core, similar to [zap.WrapCore]. The resulting wrapped core +// allows setting component attributes on the inner core and reapplying the wrapper function when +// needed. +// +// This is used when adding [zapcore.NewSamplerWithOptions] to our logger stack. +func NewWrapperCoreWithAttributes(from zapcore.Core, wrapper func(zapcore.Core) zapcore.Core) zapcore.Core { + return &wrapperCoreWithAttributes{ + Core: wrapper(from), + from: from, + wrapper: wrapper, + } +} + +func (wcwa *wrapperCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core { + return NewWrapperCoreWithAttributes(tryWithAttributeSet(wcwa.from, attrs), wcwa.wrapper) +} + +// ZapLoggerWithAttributes creates a Zap Logger with a new set of injected component attributes. +func ZapLoggerWithAttributes(logger *zap.Logger, attrs attribute.Set) *zap.Logger { + return logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { + return tryWithAttributeSet(c, attrs) + })) +} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go new file mode 100644 index 0000000000..d17732dde5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" + +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +type meterProviderWithAttributes struct { + metric.MeterProvider + attrs []attribute.KeyValue +} + +// MeterProviderWithAttributes creates a MeterProvider with a new set of injected instrumentation scope attributes. +func MeterProviderWithAttributes(mp metric.MeterProvider, attrs attribute.Set) metric.MeterProvider { + if mpwa, ok := mp.(meterProviderWithAttributes); ok { + mp = mpwa.MeterProvider + } + return meterProviderWithAttributes{ + MeterProvider: mp, + attrs: attrs.ToSlice(), + } +} + +func (mpwa meterProviderWithAttributes) Meter(name string, opts ...metric.MeterOption) metric.Meter { + conf := metric.NewMeterConfig(opts...) 
+ attrSet := conf.InstrumentationAttributes() + // prepend our attributes so they can be overwritten + newAttrs := append(slices.Clone(mpwa.attrs), attrSet.ToSlice()...) + // append our attribute set option to overwrite the old one + opts = append(opts, metric.WithInstrumentationAttributes(newAttrs...)) + return mpwa.MeterProvider.Meter(name, opts...) +} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go new file mode 100644 index 0000000000..de77ab0eed --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute" + +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" + sdkTrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" +) + +type tracerProviderWithAttributes struct { + trace.TracerProvider + attrs []attribute.KeyValue +} + +// Necessary for components that use SDK-only methods, such as zpagesextension +type tracerProviderWithAttributesSdk struct { + *sdkTrace.TracerProvider + attrs []attribute.KeyValue +} + +// TracerProviderWithAttributes creates a TracerProvider with a new set of injected instrumentation scope attributes. +func TracerProviderWithAttributes(tp trace.TracerProvider, attrs attribute.Set) trace.TracerProvider { + if tpwa, ok := tp.(tracerProviderWithAttributesSdk); ok { + tp = tpwa.TracerProvider + } else if tpwa, ok := tp.(tracerProviderWithAttributes); ok { + tp = tpwa.TracerProvider + } + if tpSdk, ok := tp.(*sdkTrace.TracerProvider); ok { + return tracerProviderWithAttributesSdk{ + TracerProvider: tpSdk, + attrs: attrs.ToSlice(), + } + } + return tracerProviderWithAttributes{ + TracerProvider: tp, + attrs: attrs.ToSlice(), + } +} + +func tracerWithAttributes(tp trace.TracerProvider, attrs []attribute.KeyValue, name string, opts ...trace.TracerOption) trace.Tracer { + conf := trace.NewTracerConfig(opts...) + attrSet := conf.InstrumentationAttributes() + // prepend our attributes so they can be overwritten + newAttrs := append(slices.Clone(attrs), attrSet.ToSlice()...) + // append our attribute set option to overwrite the old one + opts = append(opts, trace.WithInstrumentationAttributes(newAttrs...)) + return tp.Tracer(name, opts...) +} + +func (tpwa tracerProviderWithAttributes) Tracer(name string, options ...trace.TracerOption) trace.Tracer { + return tracerWithAttributes(tpwa.TracerProvider, tpwa.attrs, name, options...) +} + +func (tpwa tracerProviderWithAttributesSdk) Tracer(name string, options ...trace.TracerOption) trace.Tracer { + return tracerWithAttributes(tpwa.TracerProvider, tpwa.attrs, name, options...) 
+} diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go new file mode 100644 index 0000000000..2a00afebda --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/collector/internal/telemetry" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/internal/telemetry/componentattribute" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +var NewPipelineTelemetryGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.newPipelineTelemetry", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.123.0"), + featuregate.WithRegisterDescription("Instruments Collector pipelines and injects component-identifying attributes"), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/rfcs/component-universal-telemetry.md"), +) + +// IMPORTANT: This struct is reexported as part of the public API of +// go.opentelemetry.io/collector/component, a stable module. +// DO NOT MAKE BREAKING CHANGES TO EXPORTED FIELDS. +type TelemetrySettings struct { + // Logger that the factory can use during creation and can pass to the created + // component to be used later as well. + Logger *zap.Logger + + // TracerProvider that the factory can pass to other instrumented third-party libraries. + TracerProvider trace.TracerProvider + + // MeterProvider that the factory can pass to other instrumented third-party libraries. + MeterProvider metric.MeterProvider + + // Resource contains the resource attributes for the collector's telemetry. + Resource pcommon.Resource + + // Extra attributes added to instrumentation scopes + extraAttributes attribute.Set +} + +// The publicization of this API is tracked in https://github.com/open-telemetry/opentelemetry-collector/issues/12405 + +func WithoutAttributes(ts TelemetrySettings, fields ...string) TelemetrySettings { + if !NewPipelineTelemetryGate.IsEnabled() { + return ts + } + return WithAttributeSet(ts, componentattribute.RemoveAttributes(ts.extraAttributes, fields...)) +} + +func WithAttributeSet(ts TelemetrySettings, attrs attribute.Set) TelemetrySettings { + if !NewPipelineTelemetryGate.IsEnabled() { + return ts + } + ts.extraAttributes = attrs + ts.Logger = componentattribute.ZapLoggerWithAttributes(ts.Logger, ts.extraAttributes) + ts.TracerProvider = componentattribute.TracerProviderWithAttributes(ts.TracerProvider, ts.extraAttributes) + ts.MeterProvider = componentattribute.MeterProviderWithAttributes(ts.MeterProvider, ts.extraAttributes) + return ts +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go index aa2d3d0d0a..7ffb7ddf4f 100644 --- a/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go +++ b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go @@ -72,23 +72,19 @@ func (i ID) MarshalText() (text []byte, err error) { // UnmarshalText implements the encoding.TextUnmarshaler interface. 
func (i *ID) UnmarshalText(text []byte) error { idStr := string(text) - items := strings.SplitN(idStr, typeAndNameSeparator, 2) - var signalStr, nameStr string - if len(items) >= 1 { - signalStr = strings.TrimSpace(items[0]) - } - - if len(items) == 1 && signalStr == "" { - return errors.New("id must not be empty") - } + signalStr, nameStr, hasName := strings.Cut(idStr, typeAndNameSeparator) + signalStr = strings.TrimSpace(signalStr) if signalStr == "" { - return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator) + if hasName { + return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator) + } + return errors.New("id must not be empty") } - if len(items) > 1 { + if hasName { // "name" part is present. - nameStr = strings.TrimSpace(items[1]) + nameStr = strings.TrimSpace(nameStr) if nameStr == "" { return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator) } diff --git a/vendor/go.opentelemetry.io/collector/processor/internal/err.go b/vendor/go.opentelemetry.io/collector/processor/internal/err.go new file mode 100644 index 0000000000..aa412bba43 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/internal/err.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/processor/internal" + +import ( + "fmt" + + "go.opentelemetry.io/collector/component" +) + +func ErrIDMismatch(id component.ID, typ component.Type) error { + return fmt.Errorf("component type mismatch: component ID %q does not have type %q", id, typ) +} diff --git a/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go b/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go new file mode 100644 index 0000000000..c96fbe5e9e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/processor/internal" + +const ( + MetricNameSep = "_" + + // ProcessorKey is the key used to identify processors in metrics and traces. + ProcessorKey = "processor" + + ProcessorMetricPrefix = ProcessorKey + MetricNameSep +) diff --git a/vendor/go.opentelemetry.io/collector/processor/processor.go b/vendor/go.opentelemetry.io/collector/processor/processor.go index 8ecd4d497c..c54154fe9f 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processor.go +++ b/vendor/go.opentelemetry.io/collector/processor/processor.go @@ -5,11 +5,11 @@ package processor // import "go.opentelemetry.io/collector/processor" import ( "context" - "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/processor/internal" ) // Traces is a processor that can consume traces. 
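For reference, a minimal standalone sketch of the `strings.Cut`-based parsing that the refactored `pipeline.ID.UnmarshalText` hunk above performs (not part of the patch; the literal `/` separator is assumed to match the package's `typeAndNameSeparator`):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseID splits an ID of the form "signal" or "signal/name",
// mirroring the validation in the UnmarshalText change above.
func parseID(idStr string) (string, string, error) {
	signalStr, nameStr, hasName := strings.Cut(idStr, "/")
	signalStr = strings.TrimSpace(signalStr)
	if signalStr == "" {
		if hasName {
			return "", "", fmt.Errorf("in %q id: the part before / should not be empty", idStr)
		}
		return "", "", errors.New("id must not be empty")
	}
	if hasName {
		// "name" part is present.
		nameStr = strings.TrimSpace(nameStr)
		if nameStr == "" {
			return "", "", fmt.Errorf("in %q id: the part after / should not be empty", idStr)
		}
	}
	return signalStr, nameStr, nil
}

func main() {
	fmt.Println(parseID("traces/backend")) // traces backend <nil>
	fmt.Println(parseID("metrics"))        // metrics  <nil>
}
```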
@@ -96,12 +96,12 @@ func (f factoryOptionFunc) applyOption(o *factory) { type factory struct { cfgType component.Type component.CreateDefaultConfigFunc - CreateTracesFunc - tracesStabilityLevel component.StabilityLevel - CreateMetricsFunc + createTracesFunc CreateTracesFunc + tracesStabilityLevel component.StabilityLevel + createMetricsFunc CreateMetricsFunc metricsStabilityLevel component.StabilityLevel - CreateLogsFunc - logsStabilityLevel component.StabilityLevel + createLogsFunc CreateLogsFunc + logsStabilityLevel component.StabilityLevel } func (f *factory) Type() component.Type { @@ -122,44 +122,56 @@ func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } -// CreateTracesFunc is the equivalent of Factory.CreateTraces(). -type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) - -// CreateTraces implements Factory.CreateTraces. -func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { - if f == nil { +func (f *factory) CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { + if f.createTracesFunc == nil { return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, next) -} -// CreateMetricsFunc is the equivalent of Factory.CreateMetrics(). -type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) + if set.ID.Type() != f.Type() { + return nil, internal.ErrIDMismatch(set.ID, f.Type()) + } -// CreateMetrics implements Factory.CreateMetrics. -func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) { - if f == nil { + return f.createTracesFunc(ctx, set, cfg, next) +} + +func (f *factory) CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) { + if f.createMetricsFunc == nil { return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, next) -} -// CreateLogsFunc is the equivalent of Factory.CreateLogs. -type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) + if set.ID.Type() != f.Type() { + return nil, internal.ErrIDMismatch(set.ID, f.Type()) + } + + return f.createMetricsFunc(ctx, set, cfg, next) +} -// CreateLogs implements Factory.CreateLogs(). -func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { - if f == nil { +func (f *factory) CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { + if f.createLogsFunc == nil { return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, next) + + if set.ID.Type() != f.Type() { + return nil, internal.ErrIDMismatch(set.ID, f.Type()) + } + + return f.createLogsFunc(ctx, set, cfg, next) } +// CreateTracesFunc is the equivalent of Factory.CreateTraces(). +type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) + +// CreateMetricsFunc is the equivalent of Factory.CreateMetrics(). +type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) + +// CreateLogsFunc is the equivalent of Factory.CreateLogs. 
+type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) + // WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level. func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.tracesStabilityLevel = sl - o.CreateTracesFunc = createTraces + o.createTracesFunc = createTraces }) } @@ -167,7 +179,7 @@ func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) Fact func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.metricsStabilityLevel = sl - o.CreateMetricsFunc = createMetrics + o.createMetricsFunc = createMetrics }) } @@ -175,7 +187,7 @@ func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) F func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.logsStabilityLevel = sl - o.CreateLogsFunc = createLogs + o.createLogsFunc = createLogs }) } @@ -190,16 +202,3 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa } return f } - -// MakeFactoryMap takes a list of factories and returns a map with Factory type as keys. -// It returns a non-nil error when there are factories with duplicate type. -func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { - fMap := map[component.Type]Factory{} - for _, f := range factories { - if _, ok := fMap[f.Type()]; ok { - return fMap, fmt.Errorf("duplicate processor factory %q", f.Type()) - } - fMap[f.Type()] = f - } - return fMap, nil -} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md new file mode 100644 index 0000000000..5565260ae5 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md @@ -0,0 +1,3 @@ +# OpenTelemetry Zap Log Bridge + +[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/contrib/bridges/otelzap.svg)](https://pkg.go.dev/go.opentelemetry.io/contrib/bridges/otelzap) diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go new file mode 100644 index 0000000000..6f64c794b7 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go @@ -0,0 +1,123 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/logutil/convert.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +import ( + "fmt" + "math" + "reflect" + "strconv" + "time" + + "go.opentelemetry.io/otel/log" +) + +// convertValue converts various types to log.Value. +func convertValue(v any) log.Value { + // Handling the most common types without reflect is a small perf win. + switch val := v.(type) { + case bool: + return log.BoolValue(val) + case string: + return log.StringValue(val) + case int: + return log.Int64Value(int64(val)) + case int8: + return log.Int64Value(int64(val)) + case int16: + return log.Int64Value(int64(val)) + case int32: + return log.Int64Value(int64(val)) + case int64: + return log.Int64Value(val) + case uint: + return convertUintValue(uint64(val)) + case uint8: + return log.Int64Value(int64(val)) + case uint16: + return log.Int64Value(int64(val)) + case uint32: + return log.Int64Value(int64(val)) + case uint64: + return convertUintValue(val) + case uintptr: + return convertUintValue(uint64(val)) + case float32: + return log.Float64Value(float64(val)) + case float64: + return log.Float64Value(val) + case time.Duration: + return log.Int64Value(val.Nanoseconds()) + case complex64: + r := log.Float64("r", real(complex128(val))) + i := log.Float64("i", imag(complex128(val))) + return log.MapValue(r, i) + case complex128: + r := log.Float64("r", real(val)) + i := log.Float64("i", imag(val)) + return log.MapValue(r, i) + case time.Time: + return log.Int64Value(val.UnixNano()) + case []byte: + return log.BytesValue(val) + case error: + return log.StringValue(val.Error()) + } + + t := reflect.TypeOf(v) + if t == nil { + return log.Value{} + } + val := reflect.ValueOf(v) + switch t.Kind() { + case reflect.Struct: + return log.StringValue(fmt.Sprintf("%+v", v)) + case reflect.Slice, reflect.Array: + items := make([]log.Value, 0, val.Len()) + for i := 0; i < val.Len(); i++ { + items = append(items, convertValue(val.Index(i).Interface())) + } + return log.SliceValue(items...) + case reflect.Map: + kvs := make([]log.KeyValue, 0, val.Len()) + for _, k := range val.MapKeys() { + var key string + switch k.Kind() { + case reflect.String: + key = k.String() + default: + key = fmt.Sprintf("%+v", k.Interface()) + } + kvs = append(kvs, log.KeyValue{ + Key: key, + Value: convertValue(val.MapIndex(k).Interface()), + }) + } + return log.MapValue(kvs...) + case reflect.Ptr, reflect.Interface: + if val.IsNil() { + return log.Value{} + } + return convertValue(val.Elem().Interface()) + } + + // Try to handle this as gracefully as possible. + // + // Don't panic here. it is preferable to have user's open issue + // asking why their attributes have a "unhandled: " prefix than + // say that their code is panicking. + return log.StringValue(fmt.Sprintf("unhandled: (%s) %+v", t, v)) +} + +// convertUintValue converts a uint64 to a log.Value. +// If the value is too large to fit in an int64, it is converted to a string. 
+func convertUintValue(v uint64) log.Value { + if v > math.MaxInt64 { + return log.StringValue(strconv.FormatUint(v, 10)) + } + return log.Int64Value(int64(v)) +} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go new file mode 100644 index 0000000000..e3564247ef --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go @@ -0,0 +1,262 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelzap provides a bridge between the [go.uber.org/zap] and +// [OpenTelemetry]. +// +// # Record Conversion +// +// The [zapcore.Entry] and [zapcore.Field] are converted to OpenTelemetry [log.Record] in the following +// way: +// +// - Time is set as the Timestamp. +// - Message is set as the Body using a [log.StringValue]. +// - Level is transformed and set as the Severity. The SeverityText is also +// set. +// - Fields are transformed and set as the Attributes. +// - Field value of type [context.Context] is used as context when emitting log records. +// - For named loggers, LoggerName is used to access [log.Logger] from [log.LoggerProvider] +// +// The Level is transformed to the OpenTelemetry Severity types in the following way. +// +// - [zapcore.DebugLevel] is transformed to [log.SeverityDebug] +// - [zapcore.InfoLevel] is transformed to [log.SeverityInfo] +// - [zapcore.WarnLevel] is transformed to [log.SeverityWarn] +// - [zapcore.ErrorLevel] is transformed to [log.SeverityError] +// - [zapcore.DPanicLevel] is transformed to [log.SeverityFatal1] +// - [zapcore.PanicLevel] is transformed to [log.SeverityFatal2] +// - [zapcore.FatalLevel] is transformed to [log.SeverityFatal3] +// +// Fields are transformed based on their type into log attributes, or +// into a string value encoded using [fmt.Sprintf] if there is no matching type. +// +// [OpenTelemetry]: https://opentelemetry.io/docs/concepts/signals/logs/ +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +import ( + "context" + "slices" + + "go.uber.org/zap/zapcore" + + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/global" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type config struct { + provider log.LoggerProvider + version string + schemaURL string +} + +func newConfig(options []Option) config { + var c config + for _, opt := range options { + c = opt.apply(c) + } + + if c.provider == nil { + c.provider = global.GetLoggerProvider() + } + + return c +} + +// Option configures a [Core]. +type Option interface { + apply(config) config +} + +type optFunc func(config) config + +func (f optFunc) apply(c config) config { return f(c) } + +// WithVersion returns an [Option] that configures the version of the +// [log.Logger] used by a [Core]. The version should be the version of the +// package that is being logged. +func WithVersion(version string) Option { + return optFunc(func(c config) config { + c.version = version + return c + }) +} + +// WithSchemaURL returns an [Option] that configures the semantic convention +// schema URL of the [log.Logger] used by a [Core]. The schemaURL should be +// the schema URL for the semantic conventions used in log records. +func WithSchemaURL(schemaURL string) Option { + return optFunc(func(c config) config { + c.schemaURL = schemaURL + return c + }) +} + +// WithLoggerProvider returns an [Option] that configures [log.LoggerProvider] +// used by a [Core] to create its [log.Logger]. 
+// +// By default if this Option is not provided, the Handler will use the global +// LoggerProvider. +func WithLoggerProvider(provider log.LoggerProvider) Option { + return optFunc(func(c config) config { + c.provider = provider + return c + }) +} + +// Core is a [zapcore.Core] that sends logging records to OpenTelemetry. +type Core struct { + provider log.LoggerProvider + logger log.Logger + opts []log.LoggerOption + attr []log.KeyValue + ctx context.Context +} + +// Compile-time check *Core implements zapcore.Core. +var _ zapcore.Core = (*Core)(nil) + +// NewCore creates a new [zapcore.Core] that can be used with [go.uber.org/zap.New]. +// The name should be the package import path that is being logged. +// The name is ignored for named loggers created using [go.uber.org/zap.Logger.Named]. +func NewCore(name string, opts ...Option) *Core { + cfg := newConfig(opts) + + var loggerOpts []log.LoggerOption + if cfg.version != "" { + loggerOpts = append(loggerOpts, log.WithInstrumentationVersion(cfg.version)) + } + if cfg.schemaURL != "" { + loggerOpts = append(loggerOpts, log.WithSchemaURL(cfg.schemaURL)) + } + + logger := cfg.provider.Logger(name, loggerOpts...) + + return &Core{ + provider: cfg.provider, + logger: logger, + opts: loggerOpts, + ctx: context.Background(), + } +} + +// Enabled decides whether a given logging level is enabled when logging a message. +func (o *Core) Enabled(level zapcore.Level) bool { + param := log.EnabledParameters{Severity: convertLevel(level)} + return o.logger.Enabled(context.Background(), param) +} + +// With adds structured context to the Core. +func (o *Core) With(fields []zapcore.Field) zapcore.Core { + cloned := o.clone() + if len(fields) > 0 { + ctx, attrbuf := convertField(fields) + if ctx != nil { + cloned.ctx = ctx + } + cloned.attr = append(cloned.attr, attrbuf...) + } + return cloned +} + +func (o *Core) clone() *Core { + return &Core{ + provider: o.provider, + opts: o.opts, + logger: o.logger, + attr: slices.Clone(o.attr), + ctx: o.ctx, + } +} + +// Sync flushes buffered logs (if any). +func (o *Core) Sync() error { + return nil +} + +// Check determines whether the supplied Entry should be logged. +// If the entry should be logged, the Core adds itself to the CheckedEntry and returns the result. +func (o *Core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + param := log.EnabledParameters{Severity: convertLevel(ent.Level)} + + logger := o.logger + if ent.LoggerName != "" { + logger = o.provider.Logger(ent.LoggerName, o.opts...) + } + + if logger.Enabled(context.Background(), param) { + return ce.AddCore(ent, o) + } + return ce +} + +// Write method encodes zap fields to OTel logs and emits them. +func (o *Core) Write(ent zapcore.Entry, fields []zapcore.Field) error { + r := log.Record{} + r.SetTimestamp(ent.Time) + r.SetBody(log.StringValue(ent.Message)) + r.SetSeverity(convertLevel(ent.Level)) + r.SetSeverityText(ent.Level.String()) + + r.AddAttributes(o.attr...) + if ent.Caller.Defined { + r.AddAttributes( + log.String(string(semconv.CodeFilepathKey), ent.Caller.File), + log.Int(string(semconv.CodeLineNumberKey), ent.Caller.Line), + log.String(string(semconv.CodeFunctionKey), ent.Caller.Function), + ) + } + if ent.Stack != "" { + r.AddAttributes(log.String(string(semconv.CodeStacktraceKey), ent.Stack)) + } + if len(fields) > 0 { + ctx, attrbuf := convertField(fields) + if ctx != nil { + o.ctx = ctx + } + r.AddAttributes(attrbuf...) 
+ } + + logger := o.logger + if ent.LoggerName != "" { + logger = o.provider.Logger(ent.LoggerName, o.opts...) + } + logger.Emit(o.ctx, r) + return nil +} + +func convertField(fields []zapcore.Field) (context.Context, []log.KeyValue) { + var ctx context.Context + enc := newObjectEncoder(len(fields)) + for _, field := range fields { + if ctxFld, ok := field.Interface.(context.Context); ok { + ctx = ctxFld + continue + } + field.AddTo(enc) + } + + enc.calculate(enc.root) + return ctx, enc.root.attrs +} + +func convertLevel(level zapcore.Level) log.Severity { + switch level { + case zapcore.DebugLevel: + return log.SeverityDebug + case zapcore.InfoLevel: + return log.SeverityInfo + case zapcore.WarnLevel: + return log.SeverityWarn + case zapcore.ErrorLevel: + return log.SeverityError + case zapcore.DPanicLevel: + return log.SeverityFatal1 + case zapcore.PanicLevel: + return log.SeverityFatal2 + case zapcore.FatalLevel: + return log.SeverityFatal3 + default: + return log.SeverityUndefined + } +} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go new file mode 100644 index 0000000000..8147576ae7 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go @@ -0,0 +1,274 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +import ( + "time" + + "go.uber.org/zap/zapcore" + + "go.opentelemetry.io/otel/log" +) + +var ( + _ zapcore.ObjectEncoder = (*objectEncoder)(nil) + _ zapcore.ArrayEncoder = (*arrayEncoder)(nil) +) + +type namespace struct { + name string + attrs []log.KeyValue + next *namespace +} + +// objectEncoder implements zapcore.ObjectEncoder. +// It encodes given fields to OTel key-values. +type objectEncoder struct { + // root is a pointer to the default namespace + root *namespace + // cur is a pointer to the namespace we're currently writing to. + cur *namespace +} + +func newObjectEncoder(n int) *objectEncoder { + keyval := make([]log.KeyValue, 0, n) + m := &namespace{ + attrs: keyval, + } + return &objectEncoder{ + root: m, + cur: m, + } +} + +// It iterates to the end of the linked list and appends namespace data. +// Run this function before accessing complete result. +func (m *objectEncoder) calculate(o *namespace) { + if o.next == nil { + return + } + m.calculate(o.next) + o.attrs = append(o.attrs, log.Map(o.next.name, o.next.attrs...)) +} + +func (m *objectEncoder) AddArray(key string, v zapcore.ArrayMarshaler) error { + arr := newArrayEncoder() + err := v.MarshalLogArray(arr) + m.cur.attrs = append(m.cur.attrs, log.Slice(key, arr.elems...)) + return err +} + +func (m *objectEncoder) AddObject(k string, v zapcore.ObjectMarshaler) error { + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. 
+ newobj := newObjectEncoder(2) + err := v.MarshalLogObject(newobj) + newobj.calculate(newobj.root) + m.cur.attrs = append(m.cur.attrs, log.Map(k, newobj.root.attrs...)) + return err +} + +func (m *objectEncoder) AddBinary(k string, v []byte) { + m.cur.attrs = append(m.cur.attrs, log.Bytes(k, v)) +} + +func (m *objectEncoder) AddByteString(k string, v []byte) { + m.cur.attrs = append(m.cur.attrs, log.String(k, string(v))) +} + +func (m *objectEncoder) AddBool(k string, v bool) { + m.cur.attrs = append(m.cur.attrs, log.Bool(k, v)) +} + +func (m *objectEncoder) AddDuration(k string, v time.Duration) { + m.AddInt64(k, v.Nanoseconds()) +} + +func (m *objectEncoder) AddComplex128(k string, v complex128) { + r := log.Float64("r", real(v)) + i := log.Float64("i", imag(v)) + m.cur.attrs = append(m.cur.attrs, log.Map(k, r, i)) +} + +func (m *objectEncoder) AddFloat64(k string, v float64) { + m.cur.attrs = append(m.cur.attrs, log.Float64(k, v)) +} + +func (m *objectEncoder) AddInt64(k string, v int64) { + m.cur.attrs = append(m.cur.attrs, log.Int64(k, v)) +} + +func (m *objectEncoder) AddInt(k string, v int) { + m.cur.attrs = append(m.cur.attrs, log.Int(k, v)) +} + +func (m *objectEncoder) AddString(k string, v string) { + m.cur.attrs = append(m.cur.attrs, log.String(k, v)) +} + +func (m *objectEncoder) AddUint64(k string, v uint64) { + m.cur.attrs = append(m.cur.attrs, + log.KeyValue{ + Key: k, + Value: assignUintValue(v), + }) +} + +func (m *objectEncoder) AddReflected(k string, v interface{}) error { + m.cur.attrs = append(m.cur.attrs, + log.KeyValue{ + Key: k, + Value: convertValue(v), + }) + return nil +} + +// OpenNamespace opens an isolated namespace where all subsequent fields will +// be added. +func (m *objectEncoder) OpenNamespace(k string) { + keyValue := make([]log.KeyValue, 0, 5) + s := &namespace{ + name: k, + attrs: keyValue, + } + m.cur.next = s + m.cur = s +} + +func (m *objectEncoder) AddComplex64(k string, v complex64) { + m.AddComplex128(k, complex128(v)) +} + +func (m *objectEncoder) AddTime(k string, v time.Time) { + m.AddInt64(k, v.UnixNano()) +} + +func (m *objectEncoder) AddFloat32(k string, v float32) { + m.AddFloat64(k, float64(v)) +} + +func (m *objectEncoder) AddInt32(k string, v int32) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddInt16(k string, v int16) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddInt8(k string, v int8) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint(k string, v uint) { + m.AddUint64(k, uint64(v)) +} + +func (m *objectEncoder) AddUint32(k string, v uint32) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint16(k string, v uint16) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint8(k string, v uint8) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUintptr(k string, v uintptr) { + m.AddUint64(k, uint64(v)) +} + +func assignUintValue(v uint64) log.Value { + const maxInt64 = ^uint64(0) >> 1 + if v > maxInt64 { + return log.Float64Value(float64(v)) + } + return log.Int64Value(int64(v)) // nolint:gosec // Overflow checked above. +} + +// arrayEncoder implements [zapcore.ArrayEncoder]. +type arrayEncoder struct { + elems []log.Value +} + +func newArrayEncoder() *arrayEncoder { + return &arrayEncoder{ + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. 
+ elems: make([]log.Value, 0, 2), + } +} + +func (a *arrayEncoder) AppendArray(v zapcore.ArrayMarshaler) error { + arr := newArrayEncoder() + err := v.MarshalLogArray(arr) + a.elems = append(a.elems, log.SliceValue(arr.elems...)) + return err +} + +func (a *arrayEncoder) AppendObject(v zapcore.ObjectMarshaler) error { + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. + m := newObjectEncoder(2) + err := v.MarshalLogObject(m) + m.calculate(m.root) + a.elems = append(a.elems, log.MapValue(m.root.attrs...)) + return err +} + +func (a *arrayEncoder) AppendReflected(v interface{}) error { + a.elems = append(a.elems, convertValue(v)) + return nil +} + +func (a *arrayEncoder) AppendByteString(v []byte) { + a.elems = append(a.elems, log.StringValue(string(v))) +} + +func (a *arrayEncoder) AppendBool(v bool) { + a.elems = append(a.elems, log.BoolValue(v)) +} + +func (a *arrayEncoder) AppendFloat64(v float64) { + a.elems = append(a.elems, log.Float64Value(v)) +} + +func (a *arrayEncoder) AppendFloat32(v float32) { + a.AppendFloat64(float64(v)) +} + +func (a *arrayEncoder) AppendInt(v int) { + a.elems = append(a.elems, log.IntValue(v)) +} + +func (a *arrayEncoder) AppendInt64(v int64) { + a.elems = append(a.elems, log.Int64Value(v)) +} + +func (a *arrayEncoder) AppendString(v string) { + a.elems = append(a.elems, log.StringValue(v)) +} + +func (a *arrayEncoder) AppendComplex128(v complex128) { + r := log.Float64("r", real(v)) + i := log.Float64("i", imag(v)) + a.elems = append(a.elems, log.MapValue(r, i)) +} + +func (a *arrayEncoder) AppendUint64(v uint64) { + a.elems = append(a.elems, assignUintValue(v)) +} + +func (a *arrayEncoder) AppendComplex64(v complex64) { a.AppendComplex128(complex128(v)) } +func (a *arrayEncoder) AppendDuration(v time.Duration) { a.AppendInt64(v.Nanoseconds()) } +func (a *arrayEncoder) AppendInt32(v int32) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendInt16(v int16) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendInt8(v int8) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendTime(v time.Time) { a.AppendInt64(v.UnixNano()) } +func (a *arrayEncoder) AppendUint(v uint) { a.AppendUint64(uint64(v)) } +func (a *arrayEncoder) AppendUint32(v uint32) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUint16(v uint16) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUint8(v uint8) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUintptr(v uintptr) { a.AppendUint64(uint64(v)) } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go new file mode 100644 index 0000000000..5c8b2eea7e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +// Generate convert: +//go:generate gotmpl --body=../../internal/shared/logutil/convert_test.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert_test.go +//go:generate gotmpl --body=../../internal/shared/logutil/convert.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert.go diff --git a/vendor/go.opentelemetry.io/otel/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/log/DESIGN.md new file mode 100644 index 0000000000..47d39d34bf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/DESIGN.md @@ -0,0 
+1,634 @@ +# Logs API + +## Abstract + +`go.opentelemetry.io/otel/log` provides +[Logs API](https://opentelemetry.io/docs/specs/otel/logs/api/). + +The prototype was created in +[#4725](https://github.com/open-telemetry/opentelemetry-go/pull/4725). + +## Background + +The key challenge is to create a performant API compliant with the [specification](https://opentelemetry.io/docs/specs/otel/logs/api/) +with an intuitive and user friendly design. +Performance is seen as one of the most important characteristics of logging libraries in Go. + +## Design + +This proposed design aims to: + +- be specification compliant, +- be similar to Trace and Metrics API, +- take advantage of both OpenTelemetry and `slog` experience to achieve acceptable performance. + +### Module structure + +The API is published as a single `go.opentelemetry.io/otel/log` Go module. + +The package structure is similar to Trace API and Metrics API. +The Go module consists of the following packages: + +- `go.opentelemetry.io/otel/log` +- `go.opentelemetry.io/otel/log/embedded` +- `go.opentelemetry.io/otel/log/logtest` +- `go.opentelemetry.io/otel/log/noop` + +Rejected alternative: + +- [Reuse slog](#reuse-slog) + +### LoggerProvider + +The [`LoggerProvider` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#loggerprovider) +is defined as `LoggerProvider` interface in [provider.go](provider.go). + +The specification may add new operations to `LoggerProvider`. +The interface may have methods added without a package major version bump. +This embeds `embedded.LoggerProvider` to help inform an API implementation +author about this non-standard API evolution. +This approach is already used in Trace API and Metrics API. + +#### LoggerProvider.Logger + +The `Logger` method implements the [`Get a Logger` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#get-a-logger). + +The required `name` parameter is accepted as a `string` method argument. + +The `LoggerOption` options are defined to support optional parameters. + +Implementation requirements: + +- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#concurrency-requirements) + the method to be safe to be called concurrently. + +- The method should use some default name if the passed name is empty + in order to meet the [specification's SDK requirement](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logger-creation) + to return a working logger when an invalid name is passed + as well as to resemble the behavior of getting tracers and meters. + +`Logger` can be extended by adding new `LoggerOption` options +and adding new exported fields to the `LoggerConfig` struct. +This design is already used in Trace API for getting tracers +and in Metrics API for getting meters. + +Rejected alternative: + +- [Passing struct as parameter to LoggerProvider.Logger](#passing-struct-as-parameter-to-loggerproviderlogger). + +### Logger + +The [`Logger` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#logger) +is defined as `Logger` interface in [logger.go](logger.go). + +The specification may add new operations to `Logger`. +The interface may have methods added without a package major version bump. +This embeds `embedded.Logger` to help inform an API implementation +author about this non-standard API evolution. +This approach is already used in Trace API and Metrics API. + +### Logger.Emit + +The `Emit` method implements the [`Emit a LogRecord` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord). 
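As a minimal sketch of acquiring a logger as described in the LoggerProvider.Logger section above (the instrumentation name, version, and schema URL here are placeholder values; the global provider is used only for brevity):

```go
package main

import (
	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/global"
)

func main() {
	// Acquire a Logger from the globally registered LoggerProvider.
	// The name is conventionally the instrumented package's import path.
	logger := global.GetLoggerProvider().Logger(
		"github.com/example/mypkg", // placeholder instrumentation name
		log.WithInstrumentationVersion("v0.1.0"),
		log.WithSchemaURL("https://opentelemetry.io/schemas/1.26.0"),
	)
	_ = logger
}
```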
+ +[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/) +is accepted as a `context.Context` method argument. + +Calls to `Emit` are supposed to be on the hot path. +Therefore, in order to reduce the number of heap allocations, +the [`LogRecord` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord), +is defined as `Record` struct in [record.go](record.go). + +[`Timestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-timestamp) +is accessed using following methods: + +```go +func (r *Record) Timestamp() time.Time +func (r *Record) SetTimestamp(t time.Time) +``` + +[`ObservedTimestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-observedtimestamp) +is accessed using following methods: + +```go +func (r *Record) ObservedTimestamp() time.Time +func (r *Record) SetObservedTimestamp(t time.Time) +``` + +[`EventName`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-eventname) +is accessed using following methods: + +```go +func (r *Record) EventName() string +func (r *Record) SetEventName(s string) +``` + +[`SeverityNumber`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber) +is accessed using following methods: + +```go +func (r *Record) Severity() Severity +func (r *Record) SetSeverity(s Severity) +``` + +`Severity` type is defined in [severity.go](severity.go). +The constants are are based on +[Displaying Severity recommendation](https://opentelemetry.io/docs/specs/otel/logs/data-model/#displaying-severity). +Additionally, `Severity[Level]` constants are defined to make the API more readable and user friendly. + +[`SeverityText`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext) +is accessed using following methods: + +```go +func (r *Record) SeverityText() string +func (r *Record) SetSeverityText(s string) +``` + +[`Body`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body) +is accessed using following methods: + +```go +func (r *Record) Body() Value +func (r *Record) SetBody(v Value) +``` + +[Log record attributes](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes) +are accessed using following methods: + +```go +func (r *Record) WalkAttributes(f func(KeyValue) bool) +func (r *Record) AddAttributes(attrs ...KeyValue) +``` + +`Record` has a `AttributesLen` method that returns +the number of attributes to allow slice preallocation +when converting records to a different representation: + +```go +func (r *Record) AttributesLen() int +``` + +The records attributes design and implementation is based on +[`slog.Record`](https://pkg.go.dev/log/slog#Record). +It allows achieving high-performance access and manipulation of the attributes +while keeping the API user friendly. +It relieves the user from making his own improvements +for reducing the number of allocations when passing attributes. + +The abstractions described in +[the specification](https://opentelemetry.io/docs/specs/otel/logs/#new-first-party-application-logs) +are defined in [keyvalue.go](keyvalue.go). + +`Value` is representing `any`. +`KeyValue` is representing a key(string)-value(`any`) pair. + +`Kind` is an enumeration used for specifying the underlying value type. +`KindEmpty` is used for an empty (zero) value. +`KindBool` is used for boolean value. +`KindFloat64` is used for a double precision floating point (IEEE 754-1985) value. +`KindInt64` is used for a signed integer value. 
+`KindString` is used for a string value.
+`KindBytes` is used for a slice of bytes (in spec: A byte array).
+`KindSlice` is used for a slice of values (in spec: an array (a list) of any values).
+`KindMap` is used for a slice of key-value pairs (in spec: `map`).
+
+These types are defined in the `go.opentelemetry.io/otel/log` package
+as they are tightly coupled with the API and different from common attributes.
+
+The internal implementation of `Value` is based on
+[`slog.Value`](https://pkg.go.dev/log/slog#Value)
+and the API is mostly inspired by
+[`attribute.Value`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute#Value).
+The benchmarks[^1] show that the implementation is more performant than
+[`attribute.Value`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute#Value).
+
+The value accessors (`func (v Value) As[Kind]` methods) must not panic,
+as it would violate the [specification](https://opentelemetry.io/docs/specs/otel/error-handling/):
+
+> API methods MUST NOT throw unhandled exceptions when used incorrectly by end
+> users. The API and SDK SHOULD provide safe defaults for missing or invalid
+> arguments. [...] Whenever the library suppresses an error that would otherwise
+> have been exposed to the user, the library SHOULD log the error using
+> language-specific conventions.
+
+Therefore, the value accessors should return a zero value
+and log an error when a bad accessor is called.
+
+The `Severity`, `Kind`, `Value`, and `KeyValue` types may implement
+the [`fmt.Stringer`](https://pkg.go.dev/fmt#Stringer) interface.
+However, it is not needed for the first stable release
+and the `String` methods can be added later.
+
+The caller must not subsequently mutate the record passed to `Emit`.
+This would allow the implementation to not clone the record,
+but simply retain, modify, or discard it.
+The implementation may still choose to clone the record or copy its attributes
+if it needs to retain or modify it,
+e.g. in case of asynchronous processing to eliminate the possibility of data races,
+because the user can technically reuse the record and add new attributes
+after the call (even when the documentation says that the caller must not do it).
+
+Implementation requirements:
+
+- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#concurrency-requirements)
+  the method to be safe to be called concurrently.
+
+- The method must not interrupt the record processing if the context is canceled
+  per the ["ignoring context cancellation" guideline](../CONTRIBUTING.md#ignoring-context-cancellation).
+
+- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord)
+  using the current time as the observed timestamp if the passed one is empty.
+
+- The method should handle the trace context passed via the `ctx` argument in order to meet the
+  [specification's SDK requirement](https://opentelemetry.io/docs/specs/otel/logs/sdk/#readablelogrecord)
+  to populate the trace context fields from the resolved context.
+
+`Emit` can be extended by adding new exported fields to the `Record` struct.
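To make the accessor-based design above concrete, a minimal sketch of building and emitting a record follows. The no-op provider keeps the snippet self-contained (a real SDK provider would be used in practice), and the attribute keys and values are purely illustrative:

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/noop"
)

func main() {
	// A no-op logger keeps the example runnable without an SDK.
	logger := noop.NewLoggerProvider().Logger("github.com/example/mypkg")

	var r log.Record
	r.SetTimestamp(time.Now())
	r.SetSeverity(log.SeverityInfo)
	r.SetSeverityText("INFO")
	r.SetBody(log.StringValue("request handled"))
	r.AddAttributes(
		log.String("http.method", "GET"), // illustrative attributes
		log.Int("http.status_code", 200),
	)

	logger.Emit(context.Background(), r)
}
```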
+
+Rejected alternatives:
+
+- [Record as interface](#record-as-interface)
+- [Options as parameter to Logger.Emit](#options-as-parameter-to-loggeremit)
+- [Passing record as pointer to Logger.Emit](#passing-record-as-pointer-to-loggeremit)
+- [Logger.WithAttributes](#loggerwithattributes)
+- [Record attributes as slice](#record-attributes-as-slice)
+- [Use any instead of defining Value](#use-any-instead-of-defining-value)
+- [Severity type encapsulating number and text](#severity-type-encapsulating-number-and-text)
+- [Reuse attribute package](#reuse-attribute-package)
+- [Mix receiver types for Record](#mix-receiver-types-for-record)
+- [Add XYZ method to Logger](#add-xyz-method-to-logger)
+- [Rename KeyValue to Attr](#rename-keyvalue-to-attr)
+
+### Logger.Enabled
+
+The `Enabled` method implements the [`Enabled` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#enabled).
+
+[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/)
+is accepted as a `context.Context` method argument.
+
+Calls to `Enabled` are supposed to be on the hot path and the list of arguments
+can be extended in the future. Therefore, in order to reduce the number of heap
+allocations and make it possible to handle new arguments, `Enabled` accepts
+an `EnabledParameters` struct, defined in [logger.go](logger.go), as the second
+method argument.
+
+The `EnabledParameters` uses fields, instead of getters and setters, to allow
+simpler usage, which allows configuring the `EnabledParameters` in the same line
+where `Enabled` is called.
+
+### noop package
+
+The `go.opentelemetry.io/otel/log/noop` package provides
+[Logs API No-Op Implementation](https://opentelemetry.io/docs/specs/otel/logs/noop/).
+
+### Trace context correlation
+
+The bridge implementation should do its best to pass
+the `ctx` containing the trace context from the caller
+so it can later be passed via `Logger.Emit`.
+
+It is not expected that users (caller or bridge implementation) reconstruct
+a `context.Context`. Reconstructing a `context.Context` with
+[`trace.ContextWithSpanContext`](https://pkg.go.dev/go.opentelemetry.io/otel/trace#ContextWithSpanContext)
+and [`trace.NewSpanContext`](https://pkg.go.dev/go.opentelemetry.io/otel/trace#NewSpanContext)
+would usually involve more memory allocations.
+
+The logging libraries which have recording methods that accept `context.Context`,
+such as [`slog`](https://pkg.go.dev/log/slog),
+[`logrus`](https://pkg.go.dev/github.com/sirupsen/logrus),
+[`zerolog`](https://pkg.go.dev/github.com/rs/zerolog),
+make passing the trace context trivial.
+
+However, some libraries do not accept a `context.Context` in their recording methods.
+Structured logging libraries,
+such as [`logr`](https://pkg.go.dev/github.com/go-logr/logr)
+and [`zap`](https://pkg.go.dev/go.uber.org/zap),
+offer passing `any` type as a log attribute/field.
+Therefore, their bridge implementations can define a "special" log attribute/field
+that will be used to capture the trace context.
+
+[The prototype](https://github.com/open-telemetry/opentelemetry-go/pull/4725)
+has bridge implementations that handle trace context correlation efficiently.
+
+## Benchmarking
+
+The benchmarks take inspiration from [`slog`](https://pkg.go.dev/log/slog),
+because for the Go team it was also critical to create an API that would be fast
+and interoperable with existing logging packages.[^2][^3]
+
+The benchmark results can be found in [the prototype](https://github.com/open-telemetry/opentelemetry-go/pull/4725).
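A sketch of the `Enabled` guard described above, populating `EnabledParameters` inline at the call site (the no-op provider is used only so the snippet runs standalone):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/noop"
)

func main() {
	logger := noop.NewLoggerProvider().Logger("github.com/example/mypkg")

	// EnabledParameters is a plain struct, so it can be configured inline.
	if logger.Enabled(context.Background(), log.EnabledParameters{Severity: log.SeverityDebug}) {
		fmt.Println("debug logging is enabled; build and emit the record")
	}
}
```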
+
+## Rejected alternatives
+
+### Reuse slog
+
+The API must not be coupled to [`slog`](https://pkg.go.dev/log/slog),
+nor any other logging library.
+
+The API needs to evolve orthogonally to `slog`.
+
+`slog` is not compliant with the [Logs API](https://opentelemetry.io/docs/specs/otel/logs/api/),
+and we cannot expect the Go team to make `slog` compliant with it.
+
+The interoperability can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge).
+
+You can read more about the OpenTelemetry Logs design on [opentelemetry.io](https://opentelemetry.io/docs/concepts/signals/logs/).
+
+### Record as interface
+
+`Record` is defined as a `struct` for the following reasons.
+
+A log record is a value object without any behavior.
+It is used as data input for Logger methods.
+
+The log record resembles the instrument config structs like [metric.Float64CounterConfig](https://pkg.go.dev/go.opentelemetry.io/otel/metric#Float64CounterConfig).
+
+Using `struct` instead of `interface` improves the performance as, e.g.,
+indirect calls are less optimized and
+usage of interfaces tends to increase heap allocations.[^3]
+
+### Options as parameter to Logger.Emit
+
+One of the initial ideas was to have:
+
+```go
+type Logger interface{
+	embedded.Logger
+	Emit(ctx context.Context, options ...RecordOption)
+}
+```
+
+The main reason was that the design would be similar
+to the [Meter API](https://pkg.go.dev/go.opentelemetry.io/otel/metric#Meter)
+for creating instruments.
+
+However, passing `Record` directly, instead of using options,
+is more performant as it reduces heap allocations.[^4]
+
+Another advantage of passing `Record` is that the API would not have functions like `NewRecord(options...)`,
+which would be used by the SDK and not by the users.
+
+Finally, the definition would be similar to [`slog.Handler.Handle`](https://pkg.go.dev/log/slog#Handler)
+that was designed to provide optimization opportunities.[^2]
+
+### Passing record as pointer to Logger.Emit
+
+So far the benchmarks do not show differences that would
+favor passing the record via pointer (and vice versa).
+
+Passing via value feels safer for the following reasons.
+
+The user would not be able to pass `nil`.
+Therefore, it reduces the possibility of a nil pointer dereference.
+
+It should reduce the possibility of a heap allocation.
+
+It follows the design of [`slog.Handler`](https://pkg.go.dev/log/slog#Handler).
+
+It follows one of Google's Go Style Decisions
+to prefer [passing values](https://google.github.io/styleguide/go/decisions#pass-values).
+
+### Passing struct as parameter to LoggerProvider.Logger
+
+Similarly to `Logger.Emit`, we could have something like:
+
+```go
+type LoggerProvider interface{
+	embedded.LoggerProvider
+	Logger(name string, config LoggerConfig)
+}
+```
+
+The drawback of this idea would be that this would be
+a different design from the Trace and Metrics API.
+
+The performance of acquiring a logger is not as critical
+as the performance of emitting a log record. While a single
+HTTP/RPC handler could write hundreds of logs, it should not
+create a new logger for each log entry.
+The bridge implementation should reuse loggers whenever possible.
+
+### Logger.WithAttributes
+
+We could add `WithAttributes` to the `Logger` interface.
+Then `Record` could be a simple struct with only exported fields.
+The idea was that the SDK would implement the performance improvements
+instead of doing it in the API.
+This would allow having different optimization strategies.
+
+During the analysis[^5], it turned out that the main problem with this proposal
+is that a variadic slice passed to an interface method is always heap allocated.
+
+Moreover, the logger returned by `WithAttributes` was allocated on the heap.
+
+Lastly, the proposal was not specification compliant.
+
+### Record attributes as slice
+
+One of the proposals[^6] was to have `Record` as a simple struct:
+
+```go
+type Record struct {
+	Timestamp         time.Time
+	ObservedTimestamp time.Time
+	EventName         string
+	Severity          Severity
+	SeverityText      string
+	Body              Value
+	Attributes        []KeyValue
+}
+```
+
+The bridge implementations could use [`sync.Pool`](https://pkg.go.dev/sync#Pool)
+to reduce the number of allocations when passing attributes.
+
+The benchmark results were better.
+
+In such a design, most bridges would have a `sync.Pool`
+to reduce the number of heap allocations.
+However, the `sync.Pool` will not work correctly with API implementations
+that would take ownership of the record
+(e.g. implementations that do not copy records for asynchronous processing).
+The current design, even in case of an improper API implementation,
+has lower chances of encountering a bug as most bridges would
+create a record, pass it, and forget about it.
+
+For reference, here is the reason why `slog` does not use a `sync.Pool`[^3]
+either:
+
+> We can use a sync pool for records though we decided not to.
+You can but it's a bad idea for us. Why?
+Because users have control of Records.
+Handler writers can get their hands on a record
+and we'd have to ask them to free it
+or try to free it magically at some point.
+But either way, they could get themselves in trouble by freeing it twice
+or holding on to one after they free it.
+That's a use after free bug and that's why `zerolog` was problematic for us.
+`zerolog`, as part of its speed, exposes a pool allocated value to users.
+If you use `zerolog` the normal way, that you'll see in all the examples,
+you will never encounter a problem.
+But if you do something a little out of the ordinary you can get
+use after free bugs and we just didn't want to put that in the standard library.
+
+Therefore, we decided to not follow the proposal as it is
+less user friendly (users and bridges would need e.g. a `sync.Pool` to reduce
+the number of heap allocations), less safe (more prone to use-after-free bugs
+and race conditions), and the benchmark differences were not significant.
+
+### Use any instead of defining Value
+
+The [Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
+defines Body to be `any`.
+One could propose to define `Body` (and attribute values) as `any`
+instead of defining a new type (`Value`).
+
+First of all, the [`any` type defined in the specification](https://opentelemetry.io/docs/specs/otel/logs/data-model/#type-any)
+is not the same as `any` (`interface{}`) in Go.
+
+Moreover, using `any` as a field would decrease performance.[^7]
+
+Notice that it would still be possible to add the following kind and factories
+in a backward-compatible way:
+
+```go
+const KindMap Kind
+
+func AnyValue(value any) KeyValue
+
+func Any(key string, value any) KeyValue
+```
+
+However, currently, it would not be specification compliant.
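+
+To make the chosen approach concrete, here is a minimal sketch of how a
+structured body is expressed with the dedicated `Value` type and the factories
+from `keyvalue.go`; the field names and values are made up for illustration:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/log"
+)
+
+func main() {
+	// A structured body built with the dedicated Value type instead of `any`.
+	body := log.MapValue(
+		log.String("name", "checkout"),
+		log.Int64("items", 3),
+		log.Map("customer",
+			log.String("id", "c-42"),
+			log.Bool("returning", true),
+		),
+	)
+
+	fmt.Println(body.Kind()) // Map
+	fmt.Println(body)        // debug-oriented string representation
+}
+```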
+
+### Severity type encapsulating number and text
+
+We could combine the severity number and text into a single field by defining a type:
+
+```go
+type Severity struct {
+	Number SeverityNumber
+	Text   string
+}
+```
+
+However, the [Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition)
+defines them as independent fields.
+It should be more user friendly to have them separated.
+Especially with getter and setter methods, setting one value
+when the other is already set would be awkward.
+
+### Reuse attribute package
+
+It was tempting to reuse the existing
+[`attribute`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) package
+for defining log attributes and body.
+
+However, this would be wrong because [the log attribute definition](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes)
+is different from [the common attribute definition](https://opentelemetry.io/docs/specs/otel/common/#attribute).
+
+Moreover, there is nothing indicating that [the body definition](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
+has anything in common with a common attribute value.
+
+Therefore, we define new types representing the abstract types defined
+in the [Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#definitions-used-in-this-document).
+
+### Mix receiver types for Record
+
+Methods of [`slog.Record`](https://pkg.go.dev/log/slog#Record)
+have different receiver types.
+
+In the `log/slog` GitHub issue, the only stated reason is:[^8]
+
+>> some receiver of Record struct is by value
+> Passing Records by value means they incur no heap allocation.
+> That improves performance overall, even though they are copied.
+
+However, the benchmarks do not show any noticeable differences.[^9]
+
+The compiler is smart enough to not make a heap allocation for any of these methods.
+The use of a pointer receiver does not by itself cause a heap allocation.
+From the Go FAQ:[^10]
+
+> In the current compilers, if a variable has its address taken,
+> that variable is a candidate for allocation on the heap.
+> However, a basic escape analysis recognizes some cases
+> when such variables will not live past the return from the function
+> and can reside on the stack.
+
+The [Understanding Allocations: the Stack and the Heap](https://www.youtube.com/watch?v=ZMZpH4yT7M0)
+presentation by Jacob Walker describes escape analysis in detail.
+
+Moreover, also from the Go FAQ:[^10]
+
+> Also, if a local variable is very large,
+> it might make more sense to store it on the heap rather than the stack.
+
+Therefore, even if we use a value receiver, a very large value
+may still be heap allocated.
+
+Both [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments#receiver-type)
+and [Google's Go Style Decisions](https://google.github.io/styleguide/go/decisions#receiver-type)
+highly recommend making the methods for a type either all pointer methods
+or all value methods. Google's Go Style Decisions even goes further and says:
+
+> There is a lot of misinformation about whether passing a value or a pointer
+> to a function can affect performance.
+> The compiler can choose to pass pointers to values on the stack
+> as well as copying values on the stack,
+> but these considerations should not outweigh the readability
+> and correctness of the code in most circumstances.
+> When the performance does matter, it is important to profile both approaches
+> with a realistic benchmark before deciding that one approach outperforms the other.
+
+Because the benchmarks[^9] do not show any performance difference
+and the general recommendation is to not mix receiver types,
+we decided to use pointer receivers for all `Record` methods.
+
+### Add XYZ method to Logger
+
+The `Logger` does not have methods like `SetSeverity`, etc.,
+as the Logs API needs to follow (be compliant with)
+the [specification](https://opentelemetry.io/docs/specs/otel/logs/api/).
+
+### Rename KeyValue to Attr
+
+There was a proposal to rename `KeyValue` to `Attr` (or `Attribute`).[^11]
+New developers may not intuitively know that `log.KeyValue` is an attribute in
+OpenTelemetry parlance.
+
+During the discussion, we agreed to keep the `KeyValue` name.
+
+The type is used with multiple semantics:
+
+- as a log attribute,
+- as a map item,
+- as a log record Body.
+
+For the map item semantics, this type is a key-value pair, not an attribute.
+Naming the type `Attr` would convey a semantic meaning
+that would not be correct for a map.
+
+We expect that most of the Logs API users will be OpenTelemetry contributors.
+We plan to implement bridges for the most popular logging libraries ourselves.
+Given we will all have the context needed to disambiguate these overlapping
+names, developers' confusion should not be an issue.
+
+For bridges not developed by us,
+developers will likely look at our existing bridges for inspiration.
+Our correct use of these types will serve as a reference for them.
+
+Lastly, we provide `ValueFromAttribute` and `KeyValueFromAttribute`
+to offer reuse of `attribute.Value` and `attribute.KeyValue`.
+
+[^1]: [Handle structured body and attributes](https://github.com/pellared/opentelemetry-go/pull/7)
+[^2]: Jonathan Amsterdam, [The Go Blog: Structured Logging with slog](https://go.dev/blog/slog)
+[^3]: Jonathan Amsterdam, [GopherCon Europe 2023: A Fast Structured Logging Package](https://www.youtube.com/watch?v=tC4Jt3i62ns)
+[^4]: [Emit definition discussion with benchmarks](https://github.com/open-telemetry/opentelemetry-go/pull/4725#discussion_r1400869566)
+[^5]: [Logger.WithAttributes analysis](https://github.com/pellared/opentelemetry-go/pull/3)
+[^6]: [Record attributes as field and use sync.Pool for reducing allocations](https://github.com/pellared/opentelemetry-go/pull/4) and [Record attributes based on slog.Record](https://github.com/pellared/opentelemetry-go/pull/6)
+[^7]: [Record.Body as any](https://github.com/pellared/opentelemetry-go/pull/5)
+[^8]: [log/slog: structured, leveled logging](https://github.com/golang/go/issues/56345#issuecomment-1302563756)
+[^9]: [Record with pointer receivers only](https://github.com/pellared/opentelemetry-go/pull/8)
+[^10]: [Go FAQ: Stack or heap](https://go.dev/doc/faq#stack_or_heap)
+[^11]: [Rename KeyValue to Attr discussion](https://github.com/open-telemetry/opentelemetry-go/pull/4809#discussion_r1476080093)
diff --git a/vendor/go.opentelemetry.io/otel/log/LICENSE b/vendor/go.opentelemetry.io/otel/log/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/otel/log/README.md b/vendor/go.opentelemetry.io/otel/log/README.md new file mode 100644 index 0000000000..3f71427119 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/README.md @@ -0,0 +1,3 @@ +# Log API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log)](https://pkg.go.dev/go.opentelemetry.io/otel/log) diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go new file mode 100644 index 0000000000..18cbd1cb2e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/doc.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package log provides the OpenTelemetry Logs API. + +This package is intended to be used by bridges between existing logging +libraries and OpenTelemetry. Users should not directly use this package as a +logging library. Instead, install one of the bridges listed in the +[registry], and use the associated logging library. + +# API Implementations + +This package does not conform to the standard Go versioning policy, all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/log/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/log/embedded] in their implementation. For example, + + import "go.opentelemetry.io/otel/log/embedded" + + type LoggerProvider struct { + embedded.LoggerProvider + // ... + } + +If an author wants the default behavior of their implementations to a panic, +they need to embed the API interface directly. + + import "go.opentelemetry.io/otel/log" + + type LoggerProvider struct { + log.LoggerProvider + // ... + } + +This is not a recommended behavior as it could lead to publishing packages that +contain runtime panics when users update other package that use newer versions +of [go.opentelemetry.io/otel/log]. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/log/noop]: + + import "go.opentelemetry.io/otel/log/noop" + + type LoggerProvider struct { + noop.LoggerProvider + // ... + } + +It is strongly recommended that authors only embed +go.opentelemetry.io/otel/log/noop if they choose this default behavior. That +implementation is the only one OpenTelemetry authors can guarantee will fully +implement all the API interfaces when a user updates their API. 
+ +[registry]: https://opentelemetry.io/ecosystem/registry/?language=go&component=log-bridge +*/ +package log // import "go.opentelemetry.io/otel/log" diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/README.md b/vendor/go.opentelemetry.io/otel/log/embedded/README.md new file mode 100644 index 0000000000..bae4ac68f0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/embedded/README.md @@ -0,0 +1,3 @@ +# Log Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/log/embedded) diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go new file mode 100644 index 0000000000..a3714c4c69 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry Logs +// Bridge API]. +// +// Implementers of the [OpenTelemetry Logs Bridge API] can embed the relevant +// type from this package into their implementation directly. Doing so will +// result in a compilation error for users when the [OpenTelemetry Logs Bridge +// API] is extended (which is something that can happen without a major version +// bump of the API package). +// +// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log +package embedded // import "go.opentelemetry.io/otel/log/embedded" + +// LoggerProvider is embedded in the [Logs Bridge API LoggerProvider]. +// +// Embed this interface in your implementation of the [Logs Bridge API +// LoggerProvider] if you want users to experience a compilation error, +// signaling they need to update to your latest implementation, when the [Logs +// Bridge API LoggerProvider] interface is extended (which is something that +// can happen without a major version bump of the API package). +// +// [Logs Bridge API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider +type LoggerProvider interface{ loggerProvider() } + +// Logger is embedded in [Logs Bridge API Logger]. +// +// Embed this interface in your implementation of the [Logs Bridge API Logger] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the [Logs Bridge API Logger] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +// +// [Logs Bridge API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger +type Logger interface{ logger() } diff --git a/vendor/go.opentelemetry.io/otel/log/global/README.md b/vendor/go.opentelemetry.io/otel/log/global/README.md new file mode 100644 index 0000000000..11e5afefc0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/global/README.md @@ -0,0 +1,3 @@ +# Log Global + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/global)](https://pkg.go.dev/go.opentelemetry.io/otel/log/global) diff --git a/vendor/go.opentelemetry.io/otel/log/global/log.go b/vendor/go.opentelemetry.io/otel/log/global/log.go new file mode 100644 index 0000000000..71ec577986 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/global/log.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package global provides access to a global implementation of the OpenTelemetry +Logs Bridge API. + +This package is experimental. 
It will be deprecated and removed when the [log] +package becomes stable. Its functionality will be migrated to +go.opentelemetry.io/otel. +*/ +package global // import "go.opentelemetry.io/otel/log/global" + +import ( + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/internal/global" +) + +// Logger returns a [log.Logger] configured with the provided name and options +// from the globally configured [log.LoggerProvider]. +// +// If this is called before a global LoggerProvider is configured, the returned +// Logger will be a No-Op implementation of a Logger. When a global +// LoggerProvider is registered for the first time, the returned Logger is +// updated in-place to report to this new LoggerProvider. There is no need to +// call this function again for an updated instance. +// +// This is a convenience function. It is equivalent to: +// +// GetLoggerProvider().Logger(name, options...) +func Logger(name string, options ...log.LoggerOption) log.Logger { + return GetLoggerProvider().Logger(name, options...) +} + +// GetLoggerProvider returns the globally configured [log.LoggerProvider]. +// +// If a global LoggerProvider has not been configured with [SetLoggerProvider], +// the returned Logger will be a No-Op implementation of a LoggerProvider. When +// a global LoggerProvider is registered for the first time, the returned +// LoggerProvider and all of its created Loggers are updated in-place. There is +// no need to call this function again for an updated instance. +func GetLoggerProvider() log.LoggerProvider { + return global.GetLoggerProvider() +} + +// SetLoggerProvider configures provider as the global [log.LoggerProvider]. +func SetLoggerProvider(provider log.LoggerProvider) { + global.SetLoggerProvider(provider) +} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go new file mode 100644 index 0000000000..d97ee96635 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/log/internal/global" + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/embedded" +) + +// instLib defines the instrumentation library a logger is created for. +// +// Do not use sdk/instrumentation (API cannot depend on the SDK). +type instLib struct { + name string + version string + schemaURL string + attrs attribute.Set +} + +type loggerProvider struct { + embedded.LoggerProvider + + mu sync.Mutex + loggers map[instLib]*logger + delegate log.LoggerProvider +} + +// Compile-time guarantee loggerProvider implements LoggerProvider. +var _ log.LoggerProvider = (*loggerProvider)(nil) + +func (p *loggerProvider) Logger(name string, options ...log.LoggerOption) log.Logger { + p.mu.Lock() + defer p.mu.Unlock() + + if p.delegate != nil { + return p.delegate.Logger(name, options...) + } + + cfg := log.NewLoggerConfig(options...) 
+ key := instLib{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + attrs: cfg.InstrumentationAttributes(), + } + + if p.loggers == nil { + l := &logger{name: name, options: options} + p.loggers = map[instLib]*logger{key: l} + return l + } + + if l, ok := p.loggers[key]; ok { + return l + } + + l := &logger{name: name, options: options} + p.loggers[key] = l + return l +} + +func (p *loggerProvider) setDelegate(provider log.LoggerProvider) { + p.mu.Lock() + defer p.mu.Unlock() + + p.delegate = provider + for _, l := range p.loggers { + l.setDelegate(provider) + } + p.loggers = nil // Only set logger delegates once. +} + +type logger struct { + embedded.Logger + + name string + options []log.LoggerOption + + delegate atomic.Value // log.Logger +} + +// Compile-time guarantee logger implements Logger. +var _ log.Logger = (*logger)(nil) + +func (l *logger) Emit(ctx context.Context, r log.Record) { + if del, ok := l.delegate.Load().(log.Logger); ok { + del.Emit(ctx, r) + } +} + +func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { + var enabled bool + if del, ok := l.delegate.Load().(log.Logger); ok { + enabled = del.Enabled(ctx, param) + } + return enabled +} + +func (l *logger) setDelegate(provider log.LoggerProvider) { + l.delegate.Store(provider.Logger(l.name, l.options...)) +} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/state.go b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go new file mode 100644 index 0000000000..dbe1c2fbfb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/log/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/log" +) + +var ( + globalLoggerProvider = defaultLoggerProvider() + + delegateLoggerOnce sync.Once +) + +func defaultLoggerProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(loggerProviderHolder{provider: &loggerProvider{}}) + return v +} + +type loggerProviderHolder struct { + provider log.LoggerProvider +} + +// GetLoggerProvider returns the global LoggerProvider. +func GetLoggerProvider() log.LoggerProvider { + return globalLoggerProvider.Load().(loggerProviderHolder).provider +} + +// SetLoggerProvider sets the global LoggerProvider. 
+func SetLoggerProvider(provider log.LoggerProvider) { + current := GetLoggerProvider() + if _, cOk := current.(*loggerProvider); cOk { + if _, mpOk := provider.(*loggerProvider); mpOk && current == provider { + err := errors.New("invalid delegation: LoggerProvider self-delegation") + global.Error(err, "No delegate will be configured") + return + } + } + + delegateLoggerOnce.Do(func() { + if def, ok := current.(*loggerProvider); ok { + def.setDelegate(provider) + } + }) + globalLoggerProvider.Store(loggerProviderHolder{provider: provider}) +} diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go new file mode 100644 index 0000000000..73e4e7dca1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go @@ -0,0 +1,443 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=Kind -trimprefix=Kind + +package log // import "go.opentelemetry.io/otel/log" + +import ( + "bytes" + "cmp" + "errors" + "fmt" + "math" + "slices" + "strconv" + "unsafe" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/internal/global" +) + +// errKind is logged when a Value is decoded to an incompatible type. +var errKind = errors.New("invalid Kind") + +// Kind is the kind of a [Value]. +type Kind int + +// Kind values. +const ( + KindEmpty Kind = iota + KindBool + KindFloat64 + KindInt64 + KindString + KindBytes + KindSlice + KindMap +) + +// A Value represents a structured log value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. + bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *KeyValue +) + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + // This can be later converted back to int64 (overflow not checked). + return Value{num: uint64(v), any: KindInt64} // nolint:gosec +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: KindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: KindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. 
+func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...KeyValue) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + global.Error(errKind, "AsString", "Kind", v.Kind()) + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != KindInt64 { + global.Error(errKind, "AsInt64", "Kind", v.Kind()) + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != KindBool { + global.Error(errKind, "AsBool", "Kind", v.Kind()) + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != KindFloat64 { + global.Error(errKind, "AsFloat64", "Kind", v.Kind()) + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + global.Error(errKind, "AsBytes", "Kind", v.Kind()) + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + global.Error(errKind, "AsSlice", "Kind", v.Kind()) + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []KeyValue. +func (v Value) AsMap() []KeyValue { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*KeyValue)(sp), v.num) + } + global.Error(errKind, "AsMap", "Kind", v.Kind()) + return nil +} + +// asMap returns the value held by v as a []KeyValue. It will panic if the +// Value is not KindMap. 
+func (v Value) asMap() []KeyValue { + return unsafe.Slice((*KeyValue)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() Kind { + switch x := v.any.(type) { + case Kind: + return x + case stringptr: + return KindString + case bytesptr: + return KindBytes + case sliceptr: + return KindSlice + case mapptr: + return KindMap + default: + return KindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == KindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case KindInt64, KindBool: + return v.num == w.num + case KindString: + return v.asString() == w.asString() + case KindFloat64: + return v.asFloat64() == w.asFloat64() + case KindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case KindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, KeyValue.Equal) + case KindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case KindEmpty: + return true + default: + global.Error(errKind, "Equal", "Kind", k1) + return false + } +} + +func sortMap(m []KeyValue) []KeyValue { + sm := make([]KeyValue, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case KindString: + return v.asString() + case KindInt64: + // Assumes v.num was a valid int64 (overflow not checked). + return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case KindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case KindBool: + return strconv.FormatBool(v.asBool()) + case KindBytes: + return fmt.Sprint(v.asBytes()) + case KindMap: + return fmt.Sprint(v.asMap()) + case KindSlice: + return fmt.Sprint(v.asSlice()) + case KindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is is not handled. It is + // preferable to have user's open issue asking why their attributes + // have a "unhandled: " prefix than say that their code is panicking. + return fmt.Sprintf("", v.Kind()) + } +} + +// A KeyValue is a key-value pair used to represent a log attribute (a +// superset of [go.opentelemetry.io/otel/attribute.KeyValue]) and map item. +type KeyValue struct { + Key string + Value Value +} + +// Equal returns if a is equal to b. +func (a KeyValue) Equal(b KeyValue) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} + +// String returns a KeyValue for a string value. +func String(key, value string) KeyValue { + return KeyValue{key, StringValue(value)} +} + +// Int64 returns a KeyValue for an int64 value. +func Int64(key string, value int64) KeyValue { + return KeyValue{key, Int64Value(value)} +} + +// Int returns a KeyValue for an int value. +func Int(key string, value int) KeyValue { + return KeyValue{key, IntValue(value)} +} + +// Float64 returns a KeyValue for a float64 value. +func Float64(key string, value float64) KeyValue { + return KeyValue{key, Float64Value(value)} +} + +// Bool returns a KeyValue for a bool value. 
+func Bool(key string, value bool) KeyValue { + return KeyValue{key, BoolValue(value)} +} + +// Bytes returns a KeyValue for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) KeyValue { + return KeyValue{key, BytesValue(value)} +} + +// Slice returns a KeyValue for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) KeyValue { + return KeyValue{key, SliceValue(value...)} +} + +// Map returns a KeyValue for a map value. +// The passed slice must not be changed after it is passed. +func Map(key string, value ...KeyValue) KeyValue { + return KeyValue{key, MapValue(value...)} +} + +// Empty returns a KeyValue with an empty value. +func Empty(key string) KeyValue { + return KeyValue{key, Value{}} +} + +// String returns key-value pair as a string, formatted like "key:value". +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (a KeyValue) String() string { + return fmt.Sprintf("%s:%s", a.Key, a.Value) +} + +// ValueFromAttribute converts [attribute.Value] to [Value]. +func ValueFromAttribute(value attribute.Value) Value { + switch value.Type() { + case attribute.INVALID: + return Value{} + case attribute.BOOL: + return BoolValue(value.AsBool()) + case attribute.BOOLSLICE: + val := value.AsBoolSlice() + res := make([]Value, 0, len(val)) + for _, v := range val { + res = append(res, BoolValue(v)) + } + return SliceValue(res...) + case attribute.INT64: + return Int64Value(value.AsInt64()) + case attribute.INT64SLICE: + val := value.AsInt64Slice() + res := make([]Value, 0, len(val)) + for _, v := range val { + res = append(res, Int64Value(v)) + } + return SliceValue(res...) + case attribute.FLOAT64: + return Float64Value(value.AsFloat64()) + case attribute.FLOAT64SLICE: + val := value.AsFloat64Slice() + res := make([]Value, 0, len(val)) + for _, v := range val { + res = append(res, Float64Value(v)) + } + return SliceValue(res...) + case attribute.STRING: + return StringValue(value.AsString()) + case attribute.STRINGSLICE: + val := value.AsStringSlice() + res := make([]Value, 0, len(val)) + for _, v := range val { + res = append(res, StringValue(v)) + } + return SliceValue(res...) + } + // This code should never be reached + // as log attributes are a superset of standard attributes. + panic("unknown attribute type") +} + +// KeyValueFromAttribute converts [attribute.KeyValue] to [KeyValue]. +func KeyValueFromAttribute(kv attribute.KeyValue) KeyValue { + return KeyValue{ + Key: string(kv.Key), + Value: ValueFromAttribute(kv.Value), + } +} diff --git a/vendor/go.opentelemetry.io/otel/log/kind_string.go b/vendor/go.opentelemetry.io/otel/log/kind_string.go new file mode 100644 index 0000000000..bdfaa18665 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/kind_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=Kind -trimprefix=Kind"; DO NOT EDIT. + +package log + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[KindEmpty-0] + _ = x[KindBool-1] + _ = x[KindFloat64-2] + _ = x[KindInt64-3] + _ = x[KindString-4] + _ = x[KindBytes-5] + _ = x[KindSlice-6] + _ = x[KindMap-7] +} + +const _Kind_name = "EmptyBoolFloat64Int64StringBytesSliceMap" + +var _Kind_index = [...]uint8{0, 5, 9, 16, 21, 27, 32, 37, 40} + +func (i Kind) String() string { + if i < 0 || i >= Kind(len(_Kind_index)-1) { + return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go new file mode 100644 index 0000000000..1205f08e2c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/logger.go @@ -0,0 +1,140 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package log // import "go.opentelemetry.io/otel/log" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/log/embedded" +) + +// Logger emits log records. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Logger interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Logger + + // Emit emits a log record. + // + // The record may be held by the implementation. Callers should not mutate + // the record after passed. + // + // Implementations of this method need to be safe for a user to call + // concurrently. + Emit(ctx context.Context, record Record) + + // Enabled returns whether the Logger emits for the given context and + // param. + // + // This is useful for users that want to know if a [Record] + // will be processed or dropped before they perform complex operations to + // construct the [Record]. + // + // The passed param is likely to be a partial record information being + // provided (e.g a param with only the Severity set). + // If a Logger needs more information than is provided, it + // is said to be in an indeterminate state (see below). + // + // The returned value will be true when the Logger will emit for the + // provided context and param, and will be false if the Logger will not + // emit. The returned value may be true or false in an indeterminate state. + // An implementation should default to returning true for an indeterminate + // state, but may return false if valid reasons in particular circumstances + // exist (e.g. performance, correctness). + // + // The param should not be held by the implementation. A copy should be + // made if the param needs to be held after the call returns. + // + // Implementations of this method need to be safe for a user to call + // concurrently. + Enabled(ctx context.Context, param EnabledParameters) bool +} + +// LoggerOption applies configuration options to a [Logger]. +type LoggerOption interface { + // applyLogger is used to set a LoggerOption value of a LoggerConfig. + applyLogger(LoggerConfig) LoggerConfig +} + +// LoggerConfig contains options for a [Logger]. +type LoggerConfig struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. 
+ + version string + schemaURL string + attrs attribute.Set +} + +// NewLoggerConfig returns a new [LoggerConfig] with all the options applied. +func NewLoggerConfig(options ...LoggerOption) LoggerConfig { + var c LoggerConfig + for _, opt := range options { + c = opt.applyLogger(c) + } + return c +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg LoggerConfig) InstrumentationVersion() string { + return cfg.version +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg LoggerConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL returns the schema URL of the library providing instrumentation. +func (cfg LoggerConfig) SchemaURL() string { + return cfg.schemaURL +} + +type loggerOptionFunc func(LoggerConfig) LoggerConfig + +func (fn loggerOptionFunc) applyLogger(cfg LoggerConfig) LoggerConfig { + return fn(cfg) +} + +// WithInstrumentationVersion returns a [LoggerOption] that sets the +// instrumentation version of a [Logger]. +func WithInstrumentationVersion(version string) LoggerOption { + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { + config.version = version + return config + }) +} + +// WithInstrumentationAttributes returns a [LoggerOption] that sets the +// instrumentation attributes of a [Logger]. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption { + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL returns a [LoggerOption] that sets the schema URL for a +// [Logger]. +func WithSchemaURL(schemaURL string) LoggerOption { + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { + config.schemaURL = schemaURL + return config + }) +} + +// EnabledParameters represents payload for [Logger]'s Enabled method. +type EnabledParameters struct { + Severity Severity +} diff --git a/vendor/go.opentelemetry.io/otel/log/provider.go b/vendor/go.opentelemetry.io/otel/log/provider.go new file mode 100644 index 0000000000..5c8ca328f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/provider.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package log // import "go.opentelemetry.io/otel/log" + +import "go.opentelemetry.io/otel/log/embedded" + +// LoggerProvider provides access to [Logger]. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type LoggerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.LoggerProvider + + // Logger returns a new [Logger] with the provided name and configuration. + // + // The name needs to uniquely identify the source of logged code. It is + // recommended that name is the Go package name of the library using a log + // bridge (note: this is not the name of the bridge package). Most + // commonly, this means a bridge will need to accept this value from its + // users. + // + // If name is empty, implementations need to provide a default name. 
+ // + // The version of the packages using a bridge can be critical information + // to include when logging. The bridge should accept this version + // information and use the [WithInstrumentationVersion] option to configure + // the Logger appropriately. + // + // Implementations of this method need to be safe for a user to call + // concurrently. + Logger(name string, options ...LoggerOption) Logger +} diff --git a/vendor/go.opentelemetry.io/otel/log/record.go b/vendor/go.opentelemetry.io/otel/log/record.go new file mode 100644 index 0000000000..4d2f32d0fb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/record.go @@ -0,0 +1,144 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package log // import "go.opentelemetry.io/otel/log" + +import ( + "slices" + "time" +) + +// attributesInlineCount is the number of attributes that are efficiently +// stored in an array within a Record. This value is borrowed from slog which +// performed a quantitative survey of log library use and found this value to +// cover 95% of all use-cases (https://go.dev/blog/slog#performance). +const attributesInlineCount = 5 + +// Record represents a log record. +// A log record with non-empty event name is interpreted as an event record. +type Record struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + eventName string + timestamp time.Time + observedTimestamp time.Time + severity Severity + severityText string + body Value + + // The fields below are for optimizing the implementation of Attributes and + // AddAttributes. This design is borrowed from the slog Record type: + // https://cs.opensource.google/go/go/+/refs/tags/go1.22.0:src/log/slog/record.go;l=20 + + // Allocation optimization: an inline array sized to hold + // the majority of log calls (based on examination of open-source + // code). It holds the start of the list of attributes. + front [attributesInlineCount]KeyValue + + // The number of attributes in front. + nFront int + + // The list of attributes except for those in front. + // Invariants: + // - len(back) > 0 if nFront == len(front) + // - Unused array elements are zero-ed. Used to detect mistakes. + back []KeyValue +} + +// EventName returns the event name. +// A log record with non-empty event name is interpreted as an event record. +func (r *Record) EventName() string { + return r.eventName +} + +// SetEventName sets the event name. +// A log record with non-empty event name is interpreted as an event record. +func (r *Record) SetEventName(s string) { + r.eventName = s +} + +// Timestamp returns the time when the log record occurred. +func (r *Record) Timestamp() time.Time { + return r.timestamp +} + +// SetTimestamp sets the time when the log record occurred. +func (r *Record) SetTimestamp(t time.Time) { + r.timestamp = t +} + +// ObservedTimestamp returns the time when the log record was observed. +func (r *Record) ObservedTimestamp() time.Time { + return r.observedTimestamp +} + +// SetObservedTimestamp sets the time when the log record was observed. +func (r *Record) SetObservedTimestamp(t time.Time) { + r.observedTimestamp = t +} + +// Severity returns the [Severity] of the log record. +func (r *Record) Severity() Severity { + return r.severity +} + +// SetSeverity sets the [Severity] level of the log record. +func (r *Record) SetSeverity(level Severity) { + r.severity = level +} + +// SeverityText returns severity (also known as log level) text. 
This is the +// original string representation of the severity as it is known at the source. +func (r *Record) SeverityText() string { + return r.severityText +} + +// SetSeverityText sets severity (also known as log level) text. This is the +// original string representation of the severity as it is known at the source. +func (r *Record) SetSeverityText(text string) { + r.severityText = text +} + +// Body returns the body of the log record. +func (r *Record) Body() Value { + return r.body +} + +// SetBody sets the body of the log record. +func (r *Record) SetBody(v Value) { + r.body = v +} + +// WalkAttributes walks all attributes the log record holds by calling f for +// each on each [KeyValue] in the [Record]. Iteration stops if f returns false. +func (r *Record) WalkAttributes(f func(KeyValue) bool) { + for i := 0; i < r.nFront; i++ { + if !f(r.front[i]) { + return + } + } + for _, a := range r.back { + if !f(a) { + return + } + } +} + +// AddAttributes adds attributes to the log record. +func (r *Record) AddAttributes(attrs ...KeyValue) { + var i int + for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ { + a := attrs[i] + r.front[r.nFront] = a + r.nFront++ + } + + r.back = slices.Grow(r.back, len(attrs[i:])) + r.back = append(r.back, attrs[i:]...) +} + +// AttributesLen returns the number of attributes in the log record. +func (r *Record) AttributesLen() int { + return r.nFront + len(r.back) +} diff --git a/vendor/go.opentelemetry.io/otel/log/severity.go b/vendor/go.opentelemetry.io/otel/log/severity.go new file mode 100644 index 0000000000..0240fd5acb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/severity.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=Severity -linecomment + +package log // import "go.opentelemetry.io/otel/log" + +// Severity represents a log record severity (also known as log level). Smaller +// numerical values correspond to less severe log records (such as debug +// events), larger numerical values correspond to more severe log records (such +// as errors and critical events). +type Severity int + +// Severity values defined by OpenTelemetry. +const ( + // SeverityUndefined represents an unset Severity. + SeverityUndefined Severity = 0 // UNDEFINED + + // A fine-grained debugging log record. Typically disabled in default + // configurations. + SeverityTrace1 Severity = 1 // TRACE + SeverityTrace2 Severity = 2 // TRACE2 + SeverityTrace3 Severity = 3 // TRACE3 + SeverityTrace4 Severity = 4 // TRACE4 + + // A debugging log record. + SeverityDebug1 Severity = 5 // DEBUG + SeverityDebug2 Severity = 6 // DEBUG2 + SeverityDebug3 Severity = 7 // DEBUG3 + SeverityDebug4 Severity = 8 // DEBUG4 + + // An informational log record. Indicates that an event happened. + SeverityInfo1 Severity = 9 // INFO + SeverityInfo2 Severity = 10 // INFO2 + SeverityInfo3 Severity = 11 // INFO3 + SeverityInfo4 Severity = 12 // INFO4 + + // A warning log record. Not an error but is likely more important than an + // informational event. + SeverityWarn1 Severity = 13 // WARN + SeverityWarn2 Severity = 14 // WARN2 + SeverityWarn3 Severity = 15 // WARN3 + SeverityWarn4 Severity = 16 // WARN4 + + // An error log record. Something went wrong. + SeverityError1 Severity = 17 // ERROR + SeverityError2 Severity = 18 // ERROR2 + SeverityError3 Severity = 19 // ERROR3 + SeverityError4 Severity = 20 // ERROR4 + + // A fatal log record such as application or system crash. 
+ SeverityFatal1 Severity = 21 // FATAL + SeverityFatal2 Severity = 22 // FATAL2 + SeverityFatal3 Severity = 23 // FATAL3 + SeverityFatal4 Severity = 24 // FATAL4 + + // Convenience definitions for the base severity of each level. + SeverityTrace = SeverityTrace1 + SeverityDebug = SeverityDebug1 + SeverityInfo = SeverityInfo1 + SeverityWarn = SeverityWarn1 + SeverityError = SeverityError1 + SeverityFatal = SeverityFatal1 +) diff --git a/vendor/go.opentelemetry.io/otel/log/severity_string.go b/vendor/go.opentelemetry.io/otel/log/severity_string.go new file mode 100644 index 0000000000..4c20fa5e8a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/severity_string.go @@ -0,0 +1,47 @@ +// Code generated by "stringer -type=Severity -linecomment"; DO NOT EDIT. + +package log + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SeverityUndefined-0] + _ = x[SeverityTrace1-1] + _ = x[SeverityTrace2-2] + _ = x[SeverityTrace3-3] + _ = x[SeverityTrace4-4] + _ = x[SeverityDebug1-5] + _ = x[SeverityDebug2-6] + _ = x[SeverityDebug3-7] + _ = x[SeverityDebug4-8] + _ = x[SeverityInfo1-9] + _ = x[SeverityInfo2-10] + _ = x[SeverityInfo3-11] + _ = x[SeverityInfo4-12] + _ = x[SeverityWarn1-13] + _ = x[SeverityWarn2-14] + _ = x[SeverityWarn3-15] + _ = x[SeverityWarn4-16] + _ = x[SeverityError1-17] + _ = x[SeverityError2-18] + _ = x[SeverityError3-19] + _ = x[SeverityError4-20] + _ = x[SeverityFatal1-21] + _ = x[SeverityFatal2-22] + _ = x[SeverityFatal3-23] + _ = x[SeverityFatal4-24] +} + +const _Severity_name = "UNDEFINEDTRACETRACE2TRACE3TRACE4DEBUGDEBUG2DEBUG3DEBUG4INFOINFO2INFO3INFO4WARNWARN2WARN3WARN4ERRORERROR2ERROR3ERROR4FATALFATAL2FATAL3FATAL4" + +var _Severity_index = [...]uint8{0, 9, 14, 20, 26, 32, 37, 43, 49, 55, 59, 64, 69, 74, 78, 83, 88, 93, 98, 104, 110, 116, 121, 127, 133, 139} + +func (i Severity) String() string { + if i < 0 || i >= Severity(len(_Severity_index)-1) { + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Severity_name[_Severity_index[i]:_Severity_index[i+1]] +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 751cc8bcc5..21ce8e09dd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -77,8 +77,8 @@ dario.cat/mergo # github.com/Azure/azure-pipeline-go v0.2.3 ## explicit; go 1.14 github.com/Azure/azure-pipeline-go/pipeline -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 -## explicit; go 1.18 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 +## explicit; go 1.23.0 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -100,12 +100,12 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 -## explicit; go 1.18 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 +## explicit; go 1.23.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 -## explicit; go 1.18 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 +## explicit; go 1.23.0 github.com/Azure/azure-sdk-for-go/sdk/internal/diag 
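The Record and Severity types above form the data model a log bridge populates before handing records to an SDK. Below is a minimal sketch (illustrative only, not part of the vendored code) that builds a record, adds attributes, and walks them. It assumes the package's KeyValue and Value constructors (log.String, log.Int, log.StringValue) defined elsewhere in the vendored package and not shown in this hunk.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/log"
)

func main() {
	// Assemble a record the way a log bridge would. SeverityWarn is the
	// convenience alias for SeverityWarn1 defined above.
	var rec log.Record
	rec.SetTimestamp(time.Now())
	rec.SetObservedTimestamp(time.Now())
	rec.SetSeverity(log.SeverityWarn)
	rec.SetSeverityText("warning") // original level string from the source logger
	rec.SetEventName("cache.evicted")
	rec.SetBody(log.StringValue("evicting entries under memory pressure"))

	// The first five attributes land in the Record's inline front array;
	// anything beyond that spills into the heap-allocated back slice,
	// mirroring slog's allocation optimization.
	rec.AddAttributes(
		log.String("component", "example"),
		log.Int("evicted", 42),
	)

	rec.WalkAttributes(func(kv log.KeyValue) bool {
		fmt.Println(kv.Key, kv.Value)
		return true // returning false would stop the walk early
	})
	fmt.Println(rec.AttributesLen(), rec.Severity()) // 2 WARN
}

An implementation's Enabled method receives an EnabledParameters value carrying the prospective Severity, so a bridge can avoid building records that the backend would drop.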
github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo github.com/Azure/azure-sdk-for-go/sdk/internal/exported @@ -166,13 +166,13 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base -github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time @@ -188,6 +188,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/o github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version +github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity github.com/AzureAD/microsoft-authentication-library-for-go/apps/public # github.com/Code-Hex/go-generics-cache v1.5.1 ## explicit; go 1.18 @@ -642,8 +643,8 @@ github.com/dgryski/go-metro # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous -# github.com/digitalocean/godo v1.132.0 -## explicit; go 1.22 +# github.com/digitalocean/godo v1.144.0 +## explicit; go 1.23 github.com/digitalocean/godo github.com/digitalocean/godo/metrics # github.com/dimchansky/utfbom v1.1.1 @@ -921,6 +922,10 @@ github.com/go-playground/validator/v10 github.com/go-redsync/redsync/v4 github.com/go-redsync/redsync/v4/redis github.com/go-redsync/redsync/v4/redis/goredis/v9 +# github.com/go-viper/mapstructure/v2 v2.2.1 +## explicit; go 1.18 +github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors # github.com/go-zookeeper/zk v1.0.4 ## explicit; go 1.13 github.com/go-zookeeper/zk @@ -1045,7 +1050,7 @@ github.com/googleapis/gax-go/v2/internallog github.com/googleapis/gax-go/v2/internallog/grpclog github.com/googleapis/gax-go/v2/internallog/internal github.com/googleapis/gax-go/v2/iterator -# github.com/gophercloud/gophercloud/v2 v2.4.0 +# github.com/gophercloud/gophercloud/v2 v2.7.0 ## explicit; go 1.22 github.com/gophercloud/gophercloud/v2 github.com/gophercloud/gophercloud/v2/openstack @@ -1207,6 +1212,9 @@ github.com/hashicorp/go-sockaddr # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid +# github.com/hashicorp/go-version v1.7.0 +## explicit +github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v1.0.2 ## explicit; go 1.12 github.com/hashicorp/golang-lru/simplelru @@ -1327,6 +1335,15 @@ github.com/klauspost/cpuid/v2 # github.com/klauspost/pgzip v1.2.6 ## explicit github.com/klauspost/pgzip +# github.com/knadh/koanf/maps v0.1.2 +## explicit; go 1.18 +github.com/knadh/koanf/maps +# github.com/knadh/koanf/providers/confmap v0.1.0 +## 
explicit; go 1.18 +github.com/knadh/koanf/providers/confmap +# github.com/knadh/koanf/v2 v2.1.2 +## explicit; go 1.18 +github.com/knadh/koanf/v2 # github.com/kylelemons/godebug v1.1.0 ## explicit; go 1.11 github.com/kylelemons/godebug/diff @@ -1379,8 +1396,8 @@ github.com/mdlayher/socket # github.com/mdlayher/vsock v1.2.1 ## explicit; go 1.20 github.com/mdlayher/vsock -# github.com/miekg/dns v1.1.63 -## explicit; go 1.19 +# github.com/miekg/dns v1.1.65 +## explicit; go 1.22.0 github.com/miekg/dns # github.com/minio/crc64nvme v1.0.1 ## explicit; go 1.22 @@ -1480,20 +1497,22 @@ github.com/oklog/run # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 -## explicit; go 1.22.0 +# github.com/oklog/ulid/v2 v2.1.0 +## explicit; go 1.15 +github.com/oklog/ulid/v2 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 +## explicit; go 1.23.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 -## explicit; go 1.22.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 +## explicit; go 1.23.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 -## explicit; go 1.22.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 +## explicit; go 1.23.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice @@ -1612,8 +1631,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.62.0 -## explicit; go 1.21 +# github.com/prometheus/common v0.64.0 +## explicit; go 1.23.0 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/helpers/templates @@ -1624,10 +1643,10 @@ github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/exporter-toolkit v0.13.2 +# github.com/prometheus/exporter-toolkit v0.14.0 ## explicit; go 1.22 github.com/prometheus/exporter-toolkit/web -# github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea +# 
github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb ## explicit; go 1.23.0 github.com/prometheus/otlptranslator # github.com/prometheus/procfs v0.15.1 @@ -1635,8 +1654,8 @@ github.com/prometheus/otlptranslator github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.302.1 -## explicit; go 1.22.7 +# github.com/prometheus/prometheus v0.304.1 +## explicit; go 1.23.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery/aws @@ -1676,7 +1695,6 @@ github.com/prometheus/prometheus/storage github.com/prometheus/prometheus/storage/remote github.com/prometheus/prometheus/storage/remote/azuread github.com/prometheus/prometheus/storage/remote/googleiam -github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite github.com/prometheus/prometheus/template github.com/prometheus/prometheus/tsdb @@ -1693,6 +1711,7 @@ github.com/prometheus/prometheus/tsdb/tsdbutil github.com/prometheus/prometheus/tsdb/wlog github.com/prometheus/prometheus/util/almost github.com/prometheus/prometheus/util/annotations +github.com/prometheus/prometheus/util/compression github.com/prometheus/prometheus/util/convertnhcb github.com/prometheus/prometheus/util/gate github.com/prometheus/prometheus/util/httputil @@ -1710,6 +1729,9 @@ github.com/prometheus/prometheus/web/api/v1 # github.com/prometheus/sigv4 v0.1.2 ## explicit; go 1.21 github.com/prometheus/sigv4 +# github.com/puzpuzpuz/xsync/v3 v3.5.1 +## explicit; go 1.18 +github.com/puzpuzpuz/xsync/v3 # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics @@ -1970,16 +1992,27 @@ go.opencensus.io/tag ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/collector/component v0.118.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/collector/component v1.30.0 +## explicit; go 1.23.0 go.opentelemetry.io/collector/component -# go.opentelemetry.io/collector/config/configtelemetry v0.118.0 -## explicit; go 1.22.0 -go.opentelemetry.io/collector/config/configtelemetry -# go.opentelemetry.io/collector/consumer v1.24.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/collector/confmap v1.30.0 +## explicit; go 1.23.0 +go.opentelemetry.io/collector/confmap +go.opentelemetry.io/collector/confmap/internal/mapstructure +# go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 +## explicit; go 1.23.0 +go.opentelemetry.io/collector/confmap/xconfmap +# go.opentelemetry.io/collector/consumer v1.30.0 +## explicit; go 1.23.0 go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/internal +# go.opentelemetry.io/collector/featuregate v1.30.0 +## explicit; go 1.23.0 +go.opentelemetry.io/collector/featuregate +# go.opentelemetry.io/collector/internal/telemetry v0.124.0 +## explicit; go 1.23.0 +go.opentelemetry.io/collector/internal/telemetry +go.opentelemetry.io/collector/internal/telemetry/componentattribute # go.opentelemetry.io/collector/pdata v1.34.0 ## explicit; go 1.23.0 go.opentelemetry.io/collector/pdata/internal @@ -2002,16 +2035,20 @@ go.opentelemetry.io/collector/pdata/plog/plogotlp go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp go.opentelemetry.io/collector/pdata/ptrace -# go.opentelemetry.io/collector/pipeline v0.118.0 
-## explicit; go 1.22.0 +# go.opentelemetry.io/collector/pipeline v0.124.0 +## explicit; go 1.23.0 go.opentelemetry.io/collector/pipeline go.opentelemetry.io/collector/pipeline/internal/globalsignal -# go.opentelemetry.io/collector/processor v0.118.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/collector/processor v1.30.0 +## explicit; go 1.23.0 go.opentelemetry.io/collector/processor -# go.opentelemetry.io/collector/semconv v0.118.0 -## explicit; go 1.22.0 +go.opentelemetry.io/collector/processor/internal +# go.opentelemetry.io/collector/semconv v0.124.0 +## explicit; go 1.23.0 go.opentelemetry.io/collector/semconv/v1.6.1 +# go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 +## explicit; go 1.22.0 +go.opentelemetry.io/contrib/bridges/otelzap # go.opentelemetry.io/contrib/detectors/gcp v1.36.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/detectors/gcp @@ -2060,6 +2097,12 @@ go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift +# go.opentelemetry.io/otel/log v0.11.0 +## explicit; go 1.22.0 +go.opentelemetry.io/otel/log +go.opentelemetry.io/otel/log/embedded +go.opentelemetry.io/otel/log/global +go.opentelemetry.io/otel/log/internal/global # go.opentelemetry.io/otel/metric v1.36.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/metric @@ -2625,7 +2668,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.32.1 +# k8s.io/client-go v0.32.3 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1