diff --git a/clients/pkg/promtail/promtail_test.go b/clients/pkg/promtail/promtail_test.go
index d85781bd05..3a12aa4da7 100644
--- a/clients/pkg/promtail/promtail_test.go
+++ b/clients/pkg/promtail/promtail_test.go
@@ -536,7 +536,7 @@ func parsePromMetrics(t *testing.T, bytes []byte, contentType string, metricName
case textparse.EntrySeries:
var res labels.Labels
_, _, v := pr.Series()
- pr.Metric(&res)
+ pr.Labels(&res)
switch res.Get(labels.MetricName) {
case metricName:
rb[res.Get(label)] = v
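
For reference: with prometheus v0.304.x, the textparse.Parser method that materializes the current sample's label set is Labels (formerly Metric), which is the only change in this hunk. A minimal sketch of the resulting read loop; the function name is illustrative and parser construction is elided, since textparse.New's signature depends on the content types in play:

    package promdemo

    import (
        "errors"
        "io"

        "github.com/prometheus/prometheus/model/labels"
        "github.com/prometheus/prometheus/model/textparse"
    )

    // sumByName sums sample values per metric name, using the renamed
    // Labels method (formerly Metric) to fill in each series' labels.
    func sumByName(p textparse.Parser) (map[string]float64, error) {
        out := map[string]float64{}
        for {
            entry, err := p.Next()
            if errors.Is(err, io.EOF) {
                return out, nil
            }
            if err != nil {
                return nil, err
            }
            if entry != textparse.EntrySeries {
                continue
            }
            var lbls labels.Labels
            _, _, v := p.Series()
            p.Labels(&lbls)
            out[lbls.Get(labels.MetricName)] += v
        }
    }
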
diff --git a/clients/pkg/promtail/wal/wal.go b/clients/pkg/promtail/wal/wal.go
index adf7eeb45e..1dc1f054b9 100644
--- a/clients/pkg/promtail/wal/wal.go
+++ b/clients/pkg/promtail/wal/wal.go
@@ -8,6 +8,7 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/compression"
"github.com/grafana/loki/v3/pkg/ingester/wal"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@@ -38,7 +39,7 @@ type wrapper struct {
func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (WAL, error) {
	// TODO: We should fine-tune the WAL instantiated here to allow some buffering of written entries that are not yet
	// flushed to disk. This will make up for the lack of buffering in the channel the Writer exposes.
- tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(log), registerer, cfg.Dir, wlog.DefaultSegmentSize, wlog.CompressionNone)
+ tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(log), registerer, cfg.Dir, wlog.DefaultSegmentSize, compression.None)
if err != nil {
		return nil, fmt.Errorf("failed to create tsdb WAL: %w", err)
}
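
For reference: wlog.NewSize now takes a compression.Type from the new util/compression package instead of the removed wlog.CompressionNone constant; the same substitution recurs in pkg/ingester/wal.go and pkg/ingester/checkpoint.go below. A standalone sketch, with the directory and logger setup purely illustrative:

    package main

    import (
        "log/slog"
        "os"
        "path/filepath"

        "github.com/prometheus/prometheus/tsdb/wlog"
        "github.com/prometheus/prometheus/util/compression"
    )

    func main() {
        logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
        // compression.None replaces wlog.CompressionNone; the package also
        // defines Snappy and Zstd variants.
        w, err := wlog.NewSize(logger, nil, filepath.Join(os.TempDir(), "wal-demo"),
            wlog.DefaultSegmentSize, compression.None)
        if err != nil {
            panic(err)
        }
        defer w.Close()
    }
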
diff --git a/go.mod b/go.mod
index 0c624eb58d..a511c530ff 100644
--- a/go.mod
+++ b/go.mod
@@ -84,8 +84,8 @@ require (
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.2
- github.com/prometheus/common v0.62.0
- github.com/prometheus/prometheus v0.302.1
+ github.com/prometheus/common v0.64.0
+ github.com/prometheus/prometheus v0.304.1
github.com/redis/go-redis/v9 v9.10.0
github.com/segmentio/fasthash v1.0.3
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
@@ -138,7 +138,7 @@ require (
github.com/parquet-go/parquet-go v0.25.1
github.com/prometheus/alertmanager v0.28.1
github.com/prometheus/common/sigv4 v0.1.0
- github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea
+ github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb
github.com/prometheus/sigv4 v0.1.2
github.com/richardartoul/molecule v1.0.0
github.com/schollz/progressbar/v3 v3.18.0
@@ -195,15 +195,20 @@ require (
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-redsync/redsync/v4 v4.13.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/google/flatbuffers v25.2.10+incompatible // indirect
- github.com/gophercloud/gophercloud/v2 v2.4.0 // indirect
+ github.com/gophercloud/gophercloud/v2 v2.7.0 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
github.com/grafana/otel-profiling-go v0.5.1 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/jaegertracing/jaeger-idl v0.5.0 // indirect
github.com/kamstrup/intmap v0.5.1 // indirect
+ github.com/knadh/koanf/maps v0.1.2 // indirect
+ github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
+ github.com/knadh/koanf/v2 v2.1.2 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
@@ -220,14 +225,16 @@ require (
github.com/muesli/termenv v0.16.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/ncw/swift v1.0.53 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 // indirect
+ github.com/oklog/ulid/v2 v2.1.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/pires/go-proxyproto v0.7.0 // indirect
github.com/pkg/xattr v0.4.10 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
+ github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sahilm/fuzzy v0.1.1 // indirect
@@ -243,15 +250,20 @@ require (
github.com/zeebo/errs v1.4.0 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/collector/component v0.118.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect
- go.opentelemetry.io/collector/consumer v1.24.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.118.0 // indirect
- go.opentelemetry.io/collector/processor v0.118.0 // indirect
+ go.opentelemetry.io/collector/component v1.30.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.30.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect
+ go.opentelemetry.io/collector/consumer v1.30.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.30.0 // indirect
+ go.opentelemetry.io/collector/internal/telemetry v0.124.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.124.0 // indirect
+ go.opentelemetry.io/collector/processor v1.30.0 // indirect
+ go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 // indirect
go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 // indirect
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
+ go.opentelemetry.io/otel/log v0.11.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
@@ -268,9 +280,9 @@ require (
cloud.google.com/go/compute/metadata v0.7.0 // indirect
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/longrunning v0.6.7 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 // indirect
@@ -280,7 +292,7 @@ require (
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.1 // indirect
@@ -309,7 +321,7 @@ require (
github.com/dennwc/varint v1.0.0 // indirect
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
- github.com/digitalocean/godo v1.132.0 // indirect
+ github.com/digitalocean/godo v1.144.0 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
@@ -379,7 +391,7 @@ require (
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/miekg/dns v1.1.63 // indirect
+ github.com/miekg/dns v1.1.65 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -393,7 +405,7 @@ require (
github.com/oschwald/maxminddb-golang v1.13.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/exporter-toolkit v0.13.2 // indirect
+ github.com/prometheus/exporter-toolkit v0.14.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rs/xid v1.6.0 // indirect
@@ -413,7 +425,7 @@ require (
go.etcd.io/etcd/client/v3 v3.5.4 // indirect
go.mongodb.org/mongo-driver v1.17.2 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector/semconv v0.118.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.124.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
go.opentelemetry.io/otel v1.36.0
@@ -431,7 +443,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
k8s.io/api v0.32.3 // indirect
- k8s.io/client-go v0.32.1 // indirect
+ k8s.io/client-go v0.32.3 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
rsc.io/binaryregexp v0.2.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
diff --git a/go.sum b/go.sum
index 3394af3858..cc53f59ef2 100644
--- a/go.sum
+++ b/go.sum
@@ -66,14 +66,14 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0/go.mod h1:kUjrAo8bgEwLeZ/CmHqNl3Z/kPm7y6FKfxxK0izYUg4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@@ -113,8 +113,8 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
@@ -355,8 +355,8 @@ github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsY
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ=
-github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
+github.com/digitalocean/godo v1.144.0 h1:rDCsmpwcDe5egFQ3Ae45HTde685/GzX037mWRMPufW0=
+github.com/digitalocean/godo v1.144.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
@@ -513,8 +513,8 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA=
github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ=
-github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E=
-github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
+github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
+github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@@ -647,8 +647,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
-github.com/gophercloud/gophercloud/v2 v2.4.0 h1:XhP5tVEH3ni66NSNK1+0iSO6kaGPH/6srtx6Cr+8eCg=
-github.com/gophercloud/gophercloud/v2 v2.4.0/go.mod h1:uJWNpTgJPSl2gyzJqcU/pIAhFUWvIkp8eE8M15n9rs4=
+github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
+github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
@@ -695,8 +695,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
@@ -763,8 +763,8 @@ github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc
github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY=
github.com/heroku/x v0.4.3 h1:HF1P4Mu79BKDVk4pt+oRDpcOSTRTpHq28RYAOkuJmds=
github.com/heroku/x v0.4.3/go.mod h1:htQnSDQPP7rNbrOQ8rczL7tbdNtQHXCPoSxYomu+eI8=
-github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOydS7Ju4BERB4Q=
-github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
+github.com/hetznercloud/hcloud-go/v2 v2.21.0 h1:wUpQT+fgAxIcdMtFvuCJ78ziqc/VARubpOQPQyj4Q84=
+github.com/hetznercloud/hcloud-go/v2 v2.21.0/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -781,8 +781,8 @@ github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68Z
github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
github.com/influxdata/telegraf v1.34.1 h1:BWnIm52buIBv1hPRoMFNBE/wuoSZ0Yeny4EP0ngMSbE=
github.com/influxdata/telegraf v1.34.1/go.mod h1:F/4F/nmAKRZlDNhrD5aIQi+AaiHaiNKku0kJFsF6iag=
-github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k=
-github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
+github.com/ionos-cloud/sdk-go/v6 v6.3.3 h1:q33Sw1ZqsvqDkFaKG53dGk7BCOvPCPbGZpYqsF6tdjw=
+github.com/ionos-cloud/sdk-go/v6 v6.3.3/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE=
github.com/jaegertracing/jaeger-idl v0.5.0/go.mod h1:ON90zFo9eoyXrt9F/KN8YeF3zxcnujaisMweFY/rg5k=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
@@ -825,8 +825,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kamstrup/intmap v0.5.1 h1:ENGAowczZA+PJPYYlreoqJvWgQVtAmX1l899WfYFVK0=
github.com/kamstrup/intmap v0.5.1/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
-github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
-github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
+github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
+github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -839,8 +839,8 @@ github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
-github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
+github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
+github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
@@ -870,8 +870,8 @@ github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39
github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU=
-github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
+github.com/linode/linodego v1.49.0 h1:MNd3qwvQzbXB5mCpvdCqlUIu1RPA9oC+50LyB9kK+GQ=
+github.com/linode/linodego v1.49.0/go.mod h1:B+HAM3//4w1wOS0BwdaQBKwBxlfe6kYJ7bSC6jJ/xtc=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI=
@@ -906,8 +906,8 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
-github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
+github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
+github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
@@ -990,6 +990,8 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
+github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -1001,14 +1003,14 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 h1:jOG1ceAx+IATloKXHsE2Cy88XTgqPB/hiXicOrxENx8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1/go.mod h1:mtNCoy09iO1f2zy5bEqkyRfRPaNKea57yK63cfHixts=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1 h1:G2daAIXiQhAwQSz9RK71QsBH9rmH/m/vdkFuGIEPfS4=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1/go.mod h1:/WAA1PKvHNz7E5SrtGg2KfAWl/PrmS0FVYOanoGxk0I=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 h1:mMVzpkpy6rKL1Q/xXNogZVtWebIlxTRzhsgp3b9ioCM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1/go.mod h1:jM8Gsd0fIiwRzWrzd7Gm6PZYi5AgHPRkz0625Rtqyxo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 h1:gmmzhgewk2fU0Md0vmaDEFgfRycfCfjgPvMA4SEdKiU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1/go.mod h1:AsQJBuUUY1/yqK2c87hv4deeteaKwktwLIfQCN2OGk4=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -1035,14 +1037,15 @@ github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII
github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU=
github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o=
-github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
-github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
+github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw=
+github.com/ovh/go-ovh v1.7.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo=
github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@@ -1101,14 +1104,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
+github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
-github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ=
-github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g=
-github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea h1:NacrTIqDsM6iOtfex6OAFvVmtxjbiLC2a34/ba6nM9Q=
-github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea/go.mod h1:v1PzmPjSnNkmZSDvKJ9OmsWcmWMEF5+JdllEcXrRfzM=
+github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
+github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
+github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb h1:wuS7VydG/rDWTbYMp07paPv3R1hiPC9WgingWs+xgi0=
+github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb/go.mod h1:M7gjuJF83qnpgElJIPfhiK+YAHlvot5epcAV+Rie7eo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -1119,10 +1122,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.302.1 h1:xqVdrwrB4WNpdgJqxsz5loqFWNUZitsK8myqLuSZ6Ag=
-github.com/prometheus/prometheus v0.302.1/go.mod h1:YcyCoTbUR/TM8rY3Aoeqr0AWTu/pu1Ehh+trpX3eRzg=
+github.com/prometheus/prometheus v0.304.1 h1:e4kpJMb2Vh/PcR6LInake+ofcvFYHT+bCfmBvOkaZbY=
+github.com/prometheus/prometheus v0.304.1/go.mod h1:ioGx2SGKTY+fLnJSQCdTHqARVldGNS8OlIe3kvp98so=
github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8=
github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
+github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
+github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -1149,8 +1154,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA=
github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA=
github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
@@ -1312,38 +1317,44 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w=
-go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M=
-go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE=
-go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504=
-go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8=
-go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU=
-go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM=
-go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE=
-go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w=
-go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec=
-go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU=
-go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s=
-go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY=
-go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ=
-go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY=
-go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg=
+go.opentelemetry.io/collector/component v1.30.0 h1:HXjqBHaQ47/EEuWdnkjr4Y3kRWvmyWIDvqa1Q262Fls=
+go.opentelemetry.io/collector/component v1.30.0/go.mod h1:vfM9kN+BM6oHBXWibquiprz8CVawxd4/aYy3nbhme3E=
+go.opentelemetry.io/collector/component/componentstatus v0.124.0 h1:0WHaANNktxLIk+lN+CtgPBESI1MJBrfVW/LvNCbnMQ4=
+go.opentelemetry.io/collector/component/componentstatus v0.124.0/go.mod h1:a/wa8nxJGWOGuLwCN8gHCzFHCaUVZ+VyUYuKz9Yaq38=
+go.opentelemetry.io/collector/component/componenttest v0.124.0 h1:Wsc+DmDrWTFs/aEyjDA3slNwV+h/0NOyIR5Aywvr6Zw=
+go.opentelemetry.io/collector/component/componenttest v0.124.0/go.mod h1:NQ4ATOzMFc7QA06B993tq8o27DR0cu/JR/zK7slGJ3E=
+go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7YVnyAEN2dAcM=
+go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA=
+go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8=
+go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0=
+go.opentelemetry.io/collector/consumer v1.30.0 h1:Nn6kFTH+EJbv13E0W+sNvWrTgbiFCRv8f6DaA2F1DQs=
+go.opentelemetry.io/collector/consumer v1.30.0/go.mod h1:edRyfk61ugdhCQ93PBLRZfYMVWjdMPpKP8z5QLyESf0=
+go.opentelemetry.io/collector/consumer/consumertest v0.124.0 h1:2arChG4RPrHW3lfVWlK/KDF7Y7qkUm/YAiBXh8oTue0=
+go.opentelemetry.io/collector/consumer/consumertest v0.124.0/go.mod h1:Hlu+EXbINHxVAyIT1baKO2d0j5odR3fLlLAiaP+JqQg=
+go.opentelemetry.io/collector/consumer/xconsumer v0.124.0 h1:/cut96EWVNoz6lIeGI9+EzS6UClMtnZkx5YIpkD0Xe0=
+go.opentelemetry.io/collector/consumer/xconsumer v0.124.0/go.mod h1:fHH/MpzFCRNk/4foiYE6BoXQCAMf5sJTO35uvzVrrd4=
+go.opentelemetry.io/collector/featuregate v1.30.0 h1:mx7+iP/FQnY7KO8qw/xE3Qd1MQkWcU8VgcqLNrJ8EU8=
+go.opentelemetry.io/collector/featuregate v1.30.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
+go.opentelemetry.io/collector/internal/telemetry v0.124.0 h1:kzd1/ZYhLj4bt2pDB529mL4rIRrRacemXodFNxfhdWk=
+go.opentelemetry.io/collector/internal/telemetry v0.124.0/go.mod h1:ZjXjqV0dJ+6D4XGhTOxg/WHjnhdmXsmwmUSgALea66Y=
go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8=
go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI=
-go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8=
-go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ=
-go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM=
-go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw=
-go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc=
-go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74=
-go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws=
-go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4=
-go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU=
-go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU=
-go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM=
-go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU=
-go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w=
-go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
+go.opentelemetry.io/collector/pdata/pprofile v0.124.0 h1:ZjL9wKqzP4BHj0/F1jfGxs1Va8B7xmYayipZeNVoWJE=
+go.opentelemetry.io/collector/pdata/pprofile v0.124.0/go.mod h1:1EN3Gw5LSI4fSVma/Yfv/6nqeuYgRTm1/kmG5nE5Oyo=
+go.opentelemetry.io/collector/pdata/testdata v0.124.0 h1:vY+pWG7CQfzzGSB5+zGYHQOltRQr59Ek9QiPe+rI+NY=
+go.opentelemetry.io/collector/pdata/testdata v0.124.0/go.mod h1:lNH48lGhGv4CYk27fJecpsR1zYHmZjKgNrAprwjym0o=
+go.opentelemetry.io/collector/pipeline v0.124.0 h1:hKvhDyH2GPnNO8LGL34ugf36sY7EOXPjBvlrvBhsOdw=
+go.opentelemetry.io/collector/pipeline v0.124.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
+go.opentelemetry.io/collector/processor v1.30.0 h1:dxmu+sO6MzQydyrf2CON5Hm1KU7yV4ofH1stmreUtPk=
+go.opentelemetry.io/collector/processor v1.30.0/go.mod h1:DjXAgelT8rfIWCTJP5kiPpxPqz4JLE1mJwsE2kJMTk8=
+go.opentelemetry.io/collector/processor/processortest v0.124.0 h1:qcyo0dSWmgpNFxjObsKk3Rd/wWV8CkMevd+jApkTQWE=
+go.opentelemetry.io/collector/processor/processortest v0.124.0/go.mod h1:1YDTxd4c/uVU3Ui1+AzvYW94mo5DbhNmB1xSof6zvD0=
+go.opentelemetry.io/collector/processor/xprocessor v0.124.0 h1:KAe8gIje8TcB8varZ4PDy0HV5xX5rNdaQ7q46BE915w=
+go.opentelemetry.io/collector/processor/xprocessor v0.124.0/go.mod h1:ItJBBlR6/141vg1v4iRrcsBrGjPCgmXAztxS2x2YkdI=
+go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA=
+go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
+go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU=
+go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0=
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
@@ -1361,12 +1372,14 @@ go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
+go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y=
+go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
@@ -1850,8 +1863,8 @@ k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
-k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
+k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
+k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
diff --git a/pkg/chunkenc/symbols.go b/pkg/chunkenc/symbols.go
index cf1fe25256..28700cca28 100644
--- a/pkg/chunkenc/symbols.go
+++ b/pkg/chunkenc/symbols.go
@@ -45,14 +45,12 @@ type symbolizer struct {
readOnly bool
// Runtime-only map to track which symbols are label names and have been normalized
normalizedNames map[uint32]string
- normalizer *otlptranslator.LabelNamer
}
func newSymbolizer() *symbolizer {
return &symbolizer{
symbolsMap: map[string]uint32{},
normalizedNames: map[uint32]string{},
- normalizer: &otlptranslator.LabelNamer{},
}
}
@@ -125,7 +123,7 @@ func (s *symbolizer) Lookup(syms symbols, buf *log.BufferedLabelsBuilder) labels
} else {
// If we haven't seen this name before, look it up and normalize it
name = s.lookup(symbol.Name)
- normalized := s.normalizer.Build(name)
+ normalized := otlptranslator.NormalizeLabel(name)
s.mtx.Lock()
s.normalizedNames[symbol.Name] = normalized
s.mtx.Unlock()
@@ -340,7 +338,6 @@ func symbolizerFromCheckpoint(b []byte) *symbolizer {
// Labels are key-value pairs, preallocate to half the number to store just the keys,
// likely less memory than the exponential growth Go will do.
normalizedNames: make(map[uint32]string, numLabels/2),
- normalizer: &otlptranslator.LabelNamer{},
}
for i := 0; i < numLabels; i++ {
@@ -371,7 +368,6 @@ func symbolizerFromEnc(b []byte, pool compression.ReaderPool) (*symbolizer, erro
labels: make([]string, 0, numLabels),
// Same as symbolizerFromCheckpoint
normalizedNames: make(map[uint32]string, numLabels/2),
- normalizer: &otlptranslator.LabelNamer{},
compressedSize: len(b),
readOnly: true,
}
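
For reference: here, and in the distributor and OTLP push paths below, the stateful otlptranslator.LabelNamer{}.Build(...) is replaced by the package-level otlptranslator.NormalizeLabel function exposed by the pinned 20250414 revision. A small sketch of the call; the example keys and expected outputs are assumptions based on the translator's underscore rewriting, not verified output:

    package main

    import (
        "fmt"

        "github.com/prometheus/otlptranslator"
    )

    func main() {
        // Characters invalid in Prometheus label names are rewritten to
        // underscores, so OTLP attribute keys become usable label names.
        fmt.Println(otlptranslator.NormalizeLabel("k8s.pod.name"))     // k8s_pod_name
        fmt.Println(otlptranslator.NormalizeLabel("http.status-code")) // http_status_code
    }
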
diff --git a/pkg/compactor/deletion/job_runner_test.go b/pkg/compactor/deletion/job_runner_test.go
index 0ec39714f4..aa352836a4 100644
--- a/pkg/compactor/deletion/job_runner_test.go
+++ b/pkg/compactor/deletion/job_runner_test.go
@@ -11,7 +11,6 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/pkg/push"
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/compactor/jobqueue"
"github.com/grafana/loki/v3/pkg/compactor/retention"
@@ -20,6 +19,8 @@ import (
"github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
+
+ "github.com/grafana/loki/pkg/push"
)
type mockChunkClient struct {
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 6786495887..15f75a9b95 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -15,9 +15,16 @@ import (
"time"
"unicode/utf8"
+ otlptranslate "github.com/prometheus/otlptranslator"
+ "go.opentelemetry.io/otel/trace"
+
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gogo/status"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/twmb/franz-go/pkg/kgo"
+ "google.golang.org/grpc/codes"
+
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/kv"
"github.com/grafana/dskit/limiter"
@@ -29,12 +36,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/prometheus/otlptranslator"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/twmb/franz-go/pkg/kgo"
- "go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
- "google.golang.org/grpc/codes"
"github.com/grafana/loki/v3/pkg/analytics"
"github.com/grafana/loki/v3/pkg/compactor/retention"
@@ -211,9 +213,6 @@ type Distributor struct {
kafkaWriteBytesTotal prometheus.Counter
kafkaWriteLatency prometheus.Histogram
kafkaRecordsPerRequest prometheus.Histogram
-
- // OTLP Label Normalizer
- normalizer *otlptranslator.LabelNamer
}
 // New creates a distributor.
@@ -374,7 +373,6 @@ func New(
partitionRing: partitionRing,
ingestLimits: newIngestLimits(limitsFrontendClient, registerer),
numMetadataPartitions: numMetadataPartitions,
- normalizer: &otlptranslator.LabelNamer{},
}
if overrides.IngestionRateStrategy() == validation.GlobalIngestionRateStrategy {
@@ -648,7 +646,7 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
var normalized string
structuredMetadata := logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)
for i := range entry.StructuredMetadata {
- normalized = d.normalizer.Build(structuredMetadata[i].Name)
+ normalized = otlptranslate.NormalizeLabel(structuredMetadata[i].Name)
if normalized != structuredMetadata[i].Name {
structuredMetadata[i].Name = normalized
d.tenantPushSanitizedStructuredMetadata.WithLabelValues(tenantID).Inc()
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 0df163e4de..0df0a6c344 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -14,6 +14,10 @@ import (
"time"
"unicode/utf8"
+ "github.com/prometheus/client_golang/prometheus/testutil"
+
+ otlptranslate "github.com/prometheus/otlptranslator"
+
"github.com/c2h5oh/datasize"
"github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
@@ -26,9 +30,7 @@ import (
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/user"
"github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
- "github.com/prometheus/otlptranslator"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -2067,8 +2069,6 @@ func (i *mockIngester) Push(_ context.Context, in *logproto.PushRequest, _ ...gr
time.Sleep(i.succeedAfter)
}
- normalizer := &otlptranslator.LabelNamer{}
-
i.mu.Lock()
defer i.mu.Unlock()
for _, s := range in.Streams {
@@ -2077,7 +2077,7 @@ func (i *mockIngester) Push(_ context.Context, in *logproto.PushRequest, _ ...gr
if strings.ContainsRune(sm.Value, utf8.RuneError) {
 			if strings.ContainsRune(sm.Value, utf8.RuneError) {
 				return nil, fmt.Errorf("sm value was not sanitized before being pushed to ingester, invalid UTF-8 rune %d", utf8.RuneError)
 			}
}
- if sm.Name != normalizer.Build(sm.Name) {
+ if sm.Name != otlptranslate.NormalizeLabel(sm.Name) {
return nil, fmt.Errorf("sm name was not sanitized before being sent to ingester, contained characters %s", sm.Name)
}
diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go
index b8c3d39e2f..73b40f0857 100644
--- a/pkg/ingester/checkpoint.go
+++ b/pkg/ingester/checkpoint.go
@@ -18,6 +18,7 @@ import (
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/compression"
prompool "github.com/prometheus/prometheus/util/pool"
"github.com/grafana/loki/v3/pkg/chunkenc"
@@ -348,7 +349,7 @@ func (w *WALCheckpointWriter) Advance() (bool, error) {
return false, fmt.Errorf("create checkpoint dir: %w", err)
}
- checkpoint, err := wlog.NewSize(util_log.SlogFromGoKit(log.With(util_log.Logger, "component", "checkpoint_wal")), nil, checkpointDirTemp, walSegmentSize, wlog.CompressionNone)
+ checkpoint, err := wlog.NewSize(util_log.SlogFromGoKit(log.With(util_log.Logger, "component", "checkpoint_wal")), nil, checkpointDirTemp, walSegmentSize, compression.None)
if err != nil {
return false, fmt.Errorf("open checkpoint: %w", err)
}
diff --git a/pkg/ingester/wal.go b/pkg/ingester/wal.go
index 06bdd2cb21..429f50388f 100644
--- a/pkg/ingester/wal.go
+++ b/pkg/ingester/wal.go
@@ -10,6 +10,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/compression"
"github.com/grafana/loki/v3/pkg/ingester/wal"
"github.com/grafana/loki/v3/pkg/util/flagext"
@@ -82,7 +83,7 @@ func newWAL(cfg WALConfig, registerer prometheus.Registerer, metrics *ingesterMe
return noopWAL{}, nil
}
- tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(util_log.Logger), registerer, cfg.Dir, walSegmentSize, wlog.CompressionNone)
+ tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(util_log.Logger), registerer, cfg.Dir, walSegmentSize, compression.None)
if err != nil {
return nil, err
}
diff --git a/pkg/loghttp/push/otlp.go b/pkg/loghttp/push/otlp.go
index b68d1b93aa..70c8d92cd3 100644
--- a/pkg/loghttp/push/otlp.go
+++ b/pkg/loghttp/push/otlp.go
@@ -522,13 +522,12 @@ func attributesToLabels(attrs pcommon.Map, prefix string) push.LabelsAdapter {
func attributeToLabels(k string, v pcommon.Value, prefix string) push.LabelsAdapter {
var labelsAdapter push.LabelsAdapter
- normalizer := &otlptranslator.LabelNamer{}
keyWithPrefix := k
if prefix != "" {
keyWithPrefix = prefix + "_" + k
}
- keyWithPrefix = normalizer.Build(keyWithPrefix)
+ keyWithPrefix = otlptranslator.NormalizeLabel(keyWithPrefix)
typ := v.Type()
if typ == pcommon.ValueTypeMap {
diff --git a/pkg/querier/queryrange/queryrangebase/results_cache.go b/pkg/querier/queryrange/queryrangebase/results_cache.go
index 214b4124c7..644621d2c5 100644
--- a/pkg/querier/queryrange/queryrangebase/results_cache.go
+++ b/pkg/querier/queryrange/queryrangebase/results_cache.go
@@ -267,7 +267,11 @@ func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool {
}
// This resolves the start() and end() used with the @ modifier.
- expr = promql.PreprocessExpr(expr, r.GetStart(), r.GetEnd())
+ expr, err = promql.PreprocessExpr(expr, r.GetStart(), r.GetEnd())
+ if err != nil {
+ level.Warn(s.logger).Log("msg", "failed to preprocess query, considering @ modifier as not cachable", "query", query, "err", err)
+ return false
+ }
end := r.GetEnd().UnixMilli()
atModCachable := true
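
For reference: promql.PreprocessExpr, which resolves @ start()/end() modifiers into concrete timestamps, now returns (parser.Expr, error) rather than a bare expression, hence the new early-out above. A minimal sketch of the call outside Loki; the query string and timestamps are illustrative:

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/prometheus/promql"
        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        expr, err := parser.ParseExpr(`rate(http_requests_total[5m] @ end())`)
        if err != nil {
            panic(err)
        }
        start, end := time.Unix(0, 0), time.Unix(3600, 0)
        // PreprocessExpr now returns an error as of prometheus v0.304.x.
        expr, err = promql.PreprocessExpr(expr, start, end)
        if err != nil {
            panic(err)
        }
        // Prints the rewritten expression with @ end() resolved.
        fmt.Println(expr.String())
    }
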
diff --git a/pkg/ruler/base/api_test.go b/pkg/ruler/base/api_test.go
index df5a9d74ae..574bd0def8 100644
--- a/pkg/ruler/base/api_test.go
+++ b/pkg/ruler/base/api_test.go
@@ -654,55 +654,19 @@ func TestRuler_GetRulesLabelFilter(t *testing.T) {
"test": {
{
Name: "group1",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{
- Value: "UP_RULE",
- Tag: "!!str",
- Kind: 8,
- Line: 5,
- Column: 19,
- },
- Expr: yaml.Node{
- Value: "up",
- Tag: "!!str",
- Kind: 8,
- Line: 6,
- Column: 17,
- },
+ Record: "UP_RULE",
+ Expr: "up",
},
{
- Alert: yaml.Node{
- Value: "UP_ALERT",
- Tag: "!!str",
- Kind: 8,
- Line: 7,
- Column: 18,
- },
- Expr: yaml.Node{
- Value: "up < 1",
- Tag: "!!str",
- Kind: 8,
- Line: 8,
- Column: 17,
- },
+ Alert: "UP_ALERT",
+ Expr: "up < 1",
Labels: map[string]string{"foo": "bar"},
},
{
- Alert: yaml.Node{
- Value: "DOWN_ALERT",
- Tag: "!!str",
- Kind: 8,
- Line: 11,
- Column: 18,
- },
- Expr: yaml.Node{
- Value: "down < 1",
- Tag: "!!str",
- Kind: 8,
- Line: 12,
- Column: 17,
- },
+ Alert: "DOWN_ALERT",
+ Expr: "down < 1",
Labels: map[string]string{"namespace": "delta"},
},
},
@@ -714,39 +678,15 @@ func TestRuler_GetRulesLabelFilter(t *testing.T) {
"test": {
{
Name: "group1",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{
- Value: "UP_ALERT",
- Tag: "!!str",
- Kind: 8,
- Line: 5,
- Column: 18,
- },
- Expr: yaml.Node{
- Value: "up < 1",
- Tag: "!!str",
- Kind: 8,
- Line: 6,
- Column: 17,
- },
+ Alert: "UP_ALERT",
+ Expr: "up < 1",
Labels: map[string]string{"foo": "bar"},
},
{
- Alert: yaml.Node{
- Value: "DOWN_ALERT",
- Tag: "!!str",
- Kind: 8,
- Line: 9,
- Column: 18,
- },
- Expr: yaml.Node{
- Value: "down < 1",
- Tag: "!!str",
- Kind: 8,
- Line: 10,
- Column: 17,
- },
+ Alert: "DOWN_ALERT",
+ Expr: "down < 1",
Labels: map[string]string{"namespace": "delta"},
},
},
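The mechanical change in these fixtures is that `rulefmt.RuleGroup.Rules` is now `[]rulefmt.Rule`, whose `Record`, `Alert`, and `Expr` fields are plain strings, so all the `yaml.Node` scaffolding (tag, kind, line, column) disappears. A minimal sketch, assuming the v0.304 `rulefmt` API:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/rulefmt"
)

func main() {
	// The same fixture as above, minus the YAML position bookkeeping.
	g := rulefmt.RuleGroup{
		Name: "group1",
		Rules: []rulefmt.Rule{
			{Record: "UP_RULE", Expr: "up"},
			{Alert: "UP_ALERT", Expr: "up < 1", Labels: map[string]string{"foo": "bar"}},
		},
	}
	fmt.Printf("group %q has %d rules\n", g.Name, len(g.Rules))
}
```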
diff --git a/pkg/ruler/base/manager.go b/pkg/ruler/base/manager.go
index 7b3ab42178..df21d55ae3 100644
--- a/pkg/ruler/base/manager.go
+++ b/pkg/ruler/base/manager.go
@@ -20,6 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/net/context/ctxhttp"
+ "gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/ruler/rulespb"
)
@@ -300,12 +301,17 @@ func (*DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []error
}
for i, r := range g.Rules {
- for _, err := range r.Validate() {
+ ruleNode := rulefmt.RuleNode{
+ Record: yaml.Node{Value: r.Record},
+ Alert: yaml.Node{Value: r.Alert},
+ Expr: yaml.Node{Value: r.Expr},
+ }
+ for _, err := range r.Validate(ruleNode) {
var ruleName string
- if r.Alert.Value != "" {
- ruleName = r.Alert.Value
+ if r.Alert != "" {
+ ruleName = r.Alert
} else {
- ruleName = r.Record.Value
+ ruleName = r.Record
}
errs = append(errs, &rulefmt.Error{
Group: g.Name,
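`rulefmt.Rule.Validate` now takes a `rulefmt.RuleNode` whose only job is carrying YAML position information for error reporting; callers that no longer have the original YAML nodes rebuild one from the string fields, trading away precise line/column info. A hedged sketch of the pattern, assuming `Validate` returns a slice of validation errors as the hunk implies:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/rulefmt"
	"gopkg.in/yaml.v3"
)

func main() {
	r := rulefmt.Rule{Record: "up_rule"} // no expr, so validation must fail
	// Synthesize a RuleNode purely for error positioning.
	node := rulefmt.RuleNode{
		Record: yaml.Node{Value: r.Record},
		Alert:  yaml.Node{Value: r.Alert},
		Expr:   yaml.Node{Value: r.Expr},
	}
	fmt.Printf("validation problems: %d\n", len(r.Validate(node)))
}
```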
diff --git a/pkg/ruler/base/mapper_test.go b/pkg/ruler/base/mapper_test.go
index a5519e1448..4eed3cbe94 100644
--- a/pkg/ruler/base/mapper_test.go
+++ b/pkg/ruler/base/mapper_test.go
@@ -10,7 +10,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
)
var (
@@ -36,31 +35,28 @@ var (
)
func setupRuleSets() {
- recordNode := yaml.Node{}
- recordNode.SetString("example_rule")
- exprNode := yaml.Node{}
- exprNode.SetString("example_expr")
- recordNodeUpdated := yaml.Node{}
- recordNodeUpdated.SetString("example_ruleupdated")
- exprNodeUpdated := yaml.Node{}
- exprNodeUpdated.SetString("example_exprupdated")
+
+ record := "example_rule"
+ expr := "example_expr"
+ recordUpdated := "example_ruleupdated"
+ exprUpdated := "example_exprupdated"
initialRuleSet = map[string][]rulefmt.RuleGroup{
"file /one": {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
{
Name: "rulegroup_two",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
@@ -70,19 +66,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_two",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
@@ -92,28 +88,28 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
{
Name: "rulegroup_two",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
{
Name: "rulegroup_three",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
@@ -123,19 +119,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
{
Name: "rulegroup_two",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
@@ -143,10 +139,10 @@ func setupRuleSets() {
"file /two": {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
@@ -156,19 +152,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
{
Name: "rulegroup_two",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
@@ -176,10 +172,10 @@ func setupRuleSets() {
"file /two": {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNodeUpdated,
- Expr: exprNodeUpdated,
+ Record: recordUpdated,
+ Expr: exprUpdated,
},
},
},
@@ -189,19 +185,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
{
Name: "rulegroup_two",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
@@ -211,10 +207,10 @@ func setupRuleSets() {
specialCharFile: {
{
Name: "rulegroup_one",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: recordNode,
- Expr: exprNode,
+ Record: record,
+ Expr: expr,
},
},
},
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index a2bd951b73..d5f7ad4a84 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -236,7 +236,7 @@ func ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) {
set[g.Name] = struct{}{}
for _, r := range g.Rules {
- if err := validateRuleNode(&r, g.Name); err != nil {
+ if err := validateRule(&r, g.Name); err != nil {
errs = append(errs, err)
}
}
@@ -245,38 +245,38 @@ func ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) {
return errs
}
-func validateRuleNode(r *rulefmt.RuleNode, groupName string) error {
- if r.Record.Value != "" && r.Alert.Value != "" {
+func validateRule(r *rulefmt.Rule, groupName string) error {
+ if r.Record != "" && r.Alert != "" {
return errors.Errorf("only one of 'record' and 'alert' must be set")
}
- if r.Record.Value == "" && r.Alert.Value == "" {
+ if r.Record == "" && r.Alert == "" {
return errors.Errorf("one of 'record' or 'alert' must be set")
}
- if r.Expr.Value == "" {
+ if r.Expr == "" {
return errors.Errorf("field 'expr' must be set in rule")
- } else if _, err := syntax.ParseExpr(r.Expr.Value); err != nil {
- if r.Record.Value != "" {
- return errors.Wrapf(err, "could not parse expression for record '%s' in group '%s'", r.Record.Value, groupName)
+ } else if _, err := syntax.ParseExpr(r.Expr); err != nil {
+ if r.Record != "" {
+ return errors.Wrapf(err, "could not parse expression for record '%s' in group '%s'", r.Record, groupName)
}
- return errors.Wrapf(err, "could not parse expression for alert '%s' in group '%s'", r.Alert.Value, groupName)
+ return errors.Wrapf(err, "could not parse expression for alert '%s' in group '%s'", r.Alert, groupName)
}
- if r.Record.Value != "" {
+ if r.Record != "" {
if len(r.Annotations) > 0 {
return errors.Errorf("invalid field 'annotations' in recording rule")
}
if r.For != 0 {
return errors.Errorf("invalid field 'for' in recording rule")
}
- if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
- return errors.Errorf("invalid recording rule name: %s", r.Record.Value)
+ if !model.IsValidLegacyMetricName(r.Record) {
+ return errors.Errorf("invalid recording rule name: %s", r.Record)
}
}
for k, v := range r.Labels {
- if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
+ if !model.LabelName(k).IsValidLegacy() || k == model.MetricNameLabel {
return errors.Errorf("invalid label name: %s", k)
}
@@ -286,7 +286,7 @@ func validateRuleNode(r *rulefmt.RuleNode, groupName string) error {
}
for k := range r.Annotations {
- if !model.LabelName(k).IsValid() {
+ if !model.LabelName(k).IsValidLegacy() {
return errors.Errorf("invalid annotation name: %s", k)
}
}
@@ -300,8 +300,8 @@ func validateRuleNode(r *rulefmt.RuleNode, groupName string) error {
// testTemplateParsing checks if the templates used in labels and annotations
// of the alerting rules are parsed correctly.
-func testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) {
- if rl.Alert.Value == "" {
+func testTemplateParsing(rl *rulefmt.Rule) (errs []error) {
+ if rl.Alert == "" {
// Not an alerting rule.
return errs
}
@@ -317,7 +317,7 @@ func testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) {
tmpl := template.NewTemplateExpander(
context.TODO(),
strings.Join(append(defs, text), ""),
- "__alert_"+rl.Alert.Value,
+ "__alert_"+rl.Alert,
tmplData,
model.Time(timestamp.FromTime(time.Now())),
nil,
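Newer `prometheus/common` releases default to UTF-8 metric and label names, so the ruler switches to the explicit legacy validators to keep enforcing the classic charset. A small sketch of what the legacy checks accept and reject, assuming common v0.64 semantics:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Legacy metric names must match [a-zA-Z_:][a-zA-Z0-9_:]*.
	fmt.Println(model.IsValidLegacyMetricName("job:up:rate5m")) // true
	fmt.Println(model.IsValidLegacyMetricName("job up"))        // false

	// Legacy label names must match [a-zA-Z_][a-zA-Z0-9_]*.
	fmt.Println(model.LabelName("foo_bar").IsValidLegacy()) // true
	fmt.Println(model.LabelName("foo.bar").IsValidLegacy()) // false
}
```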
diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go
index a699da5062..0387731b94 100644
--- a/pkg/ruler/compat_test.go
+++ b/pkg/ruler/compat_test.go
@@ -10,8 +10,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
-
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logql"
rulerbase "github.com/grafana/loki/v3/pkg/ruler/base"
@@ -23,14 +21,14 @@ import (
func TestInvalidRuleGroup(t *testing.T) {
ruleGroupValid := rulefmt.RuleGroup{
Name: "test",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "alert-1-name"},
- Expr: yaml.Node{Value: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)"},
+ Alert: "alert-1-name",
+ Expr: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)",
},
{
- Alert: yaml.Node{Value: "record-1-name"},
- Expr: yaml.Node{Value: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)"},
+ Alert: "record-1-name",
+ Expr: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)",
},
},
}
@@ -38,14 +36,14 @@ func TestInvalidRuleGroup(t *testing.T) {
ruleGroupInValid := rulefmt.RuleGroup{
Name: "test",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "alert-1-name"},
- Expr: yaml.Node{Value: "bad_value"},
+ Alert: "alert-1-name",
+ Expr: "bad_value",
},
{
- Record: yaml.Node{Value: "record-1-name"},
- Expr: yaml.Node{Value: "bad_value"},
+ Record: "record-1-name",
+ Expr: "bad_value",
},
},
}
@@ -56,21 +54,21 @@ func TestInvalidRuleGroup(t *testing.T) {
// TestInvalidRuleExprParsing tests that a validation error is raised when rule expression is invalid
func TestInvalidRuleExprParsing(t *testing.T) {
expectedAlertErrorMsg := "could not parse expression for alert 'alert-1-name' in group 'test': parse error"
- alertRuleExprInvalid := &rulefmt.RuleNode{
- Alert: yaml.Node{Value: "alert-1-name"},
- Expr: yaml.Node{Value: "bad_value"},
+ alertRuleExprInvalid := &rulefmt.Rule{
+ Alert: "alert-1-name",
+ Expr: "bad_value",
}
- alertErr := validateRuleNode(alertRuleExprInvalid, "test")
+ alertErr := validateRule(alertRuleExprInvalid, "test")
assert.Containsf(t, alertErr.Error(), expectedAlertErrorMsg, "expected error containing '%s', got '%s'", expectedAlertErrorMsg, alertErr)
expectedRecordErrorMsg := "could not parse expression for record 'record-1-name' in group 'test': parse error"
- recordRuleExprInvalid := &rulefmt.RuleNode{
- Record: yaml.Node{Value: "record-1-name"},
- Expr: yaml.Node{Value: "bad_value"},
+ recordRuleExprInvalid := &rulefmt.Rule{
+ Record: "record-1-name",
+ Expr: "bad_value",
}
- recordErr := validateRuleNode(recordRuleExprInvalid, "test")
+ recordErr := validateRule(recordRuleExprInvalid, "test")
assert.Containsf(t, recordErr.Error(), expectedRecordErrorMsg, "expected error containing '%s', got '%s'", expectedRecordErrorMsg, recordErr)
}
diff --git a/pkg/ruler/grouploader.go b/pkg/ruler/grouploader.go
index 37dfde3ecc..58383e7c7b 100644
--- a/pkg/ruler/grouploader.go
+++ b/pkg/ruler/grouploader.go
@@ -109,9 +109,9 @@ func (l *CachingGroupLoader) AlertingRules() []rulefmt.Rule {
for _, g := range group.Groups {
for _, rule := range g.Rules {
rules = append(rules, rulefmt.Rule{
- Record: rule.Record.Value,
- Alert: rule.Alert.Value,
- Expr: rule.Expr.Value,
+ Record: rule.Record,
+ Alert: rule.Alert,
+ Expr: rule.Expr,
For: rule.For,
Labels: rule.Labels,
Annotations: rule.Annotations,
diff --git a/pkg/ruler/grouploader_test.go b/pkg/ruler/grouploader_test.go
index 7685677ddf..06634003c3 100644
--- a/pkg/ruler/grouploader_test.go
+++ b/pkg/ruler/grouploader_test.go
@@ -7,17 +7,11 @@ import (
"testing"
"github.com/pkg/errors"
- "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/promql/parser"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
)
-func init() {
- model.NameValidationScheme = model.LegacyValidation
-}
-
func Test_GroupLoader(t *testing.T) {
for _, tc := range []struct {
desc string
@@ -364,8 +358,8 @@ var (
ruleGroup1 = &rulefmt.RuleGroups{
Groups: []rulefmt.RuleGroup{
{
- Rules: []rulefmt.RuleNode{
- {Alert: yaml.Node{Value: "alert-1-name"}},
+ Rules: []rulefmt.Rule{
+ {Alert: "alert-1-name"},
},
},
},
@@ -373,8 +367,8 @@ var (
ruleGroup2 = &rulefmt.RuleGroups{
Groups: []rulefmt.RuleGroup{
{
- Rules: []rulefmt.RuleNode{
- {Alert: yaml.Node{Value: "alert-2-name"}},
+ Rules: []rulefmt.Rule{
+ {Alert: "alert-2-name"},
},
},
},
diff --git a/pkg/ruler/rulespb/compat.go b/pkg/ruler/rulespb/compat.go
index 0c9de4185a..dc1f4dd4a3 100644
--- a/pkg/ruler/rulespb/compat.go
+++ b/pkg/ruler/rulespb/compat.go
@@ -6,7 +6,6 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
- "gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/logproto" //lint:ignore faillint allowed to import other protobuf
)
@@ -24,13 +23,13 @@ func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc
return &rg
}
-func formattedRuleToProto(rls []rulefmt.RuleNode) []*RuleDesc {
+func formattedRuleToProto(rls []rulefmt.Rule) []*RuleDesc {
rules := make([]*RuleDesc, len(rls))
for i := range rls {
rules[i] = &RuleDesc{
- Expr: rls[i].Expr.Value,
- Record: rls[i].Record.Value,
- Alert: rls[i].Alert.Value,
+ Expr: rls[i].Expr,
+ Record: rls[i].Record,
+ Alert: rls[i].Alert,
For: time.Duration(rls[i].For),
Labels: logproto.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Labels)),
Annotations: logproto.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Annotations)),
@@ -45,29 +44,24 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup {
formattedRuleGroup := rulefmt.RuleGroup{
Name: rg.GetName(),
Interval: model.Duration(rg.Interval),
- Rules: make([]rulefmt.RuleNode, len(rg.GetRules())),
+ Rules: make([]rulefmt.Rule, len(rg.GetRules())),
Limit: int(rg.GetLimit()),
}
for i, rl := range rg.GetRules() {
- exprNode := yaml.Node{}
- exprNode.SetString(rl.GetExpr())
+ expr := rl.GetExpr()
- newRule := rulefmt.RuleNode{
- Expr: exprNode,
+ newRule := rulefmt.Rule{
+ Expr: expr,
Labels: logproto.FromLabelAdaptersToLabels(rl.Labels).Map(),
Annotations: logproto.FromLabelAdaptersToLabels(rl.Annotations).Map(),
For: model.Duration(rl.GetFor()),
}
if rl.GetRecord() != "" {
- recordNode := yaml.Node{}
- recordNode.SetString(rl.GetRecord())
- newRule.Record = recordNode
+ newRule.Record = rl.GetRecord()
} else {
- alertNode := yaml.Node{}
- alertNode.SetString(rl.GetAlert())
- newRule.Alert = alertNode
+ newRule.Alert = rl.GetAlert()
}
formattedRuleGroup.Rules[i] = newRule
diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go
index 0644238b21..b70fc9fbb4 100644
--- a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go
+++ b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go
@@ -106,7 +106,7 @@ func TestListRules(t *testing.T) {
func TestLoadRules(t *testing.T) {
runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ interface{}) {
groups := []testGroup{
- {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.RuleNode{{
+ {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.Rule{{
For: model.Duration(5 * time.Minute),
Labels: map[string]string{"label1": "value1"},
}}, Limit: 10}},
diff --git a/pkg/ruler/rulestore/local/local_test.go b/pkg/ruler/rulestore/local/local_test.go
index ee6abc5b8e..6a2e35e287 100644
--- a/pkg/ruler/rulestore/local/local_test.go
+++ b/pkg/ruler/rulestore/local/local_test.go
@@ -30,10 +30,10 @@ func TestClient_LoadAllRuleGroups(t *testing.T) {
{
Name: "rule",
Interval: model.Duration(100 * time.Second),
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Kind: yaml.ScalarNode, Value: "test_rule"},
- Expr: yaml.Node{Kind: yaml.ScalarNode, Value: "up"},
+ Record: "test_rule",
+ Expr: "up",
},
},
},
diff --git a/pkg/ruler/storage/instance/instance.go b/pkg/ruler/storage/instance/instance.go
index 25501f9a51..eeaa2243be 100644
--- a/pkg/ruler/storage/instance/instance.go
+++ b/pkg/ruler/storage/instance/instance.go
@@ -284,6 +284,10 @@ func (n noopScrapeManager) Get() (*scrape.Manager, error) {
return nil, errors.New("No-op Scrape manager not ready")
}
+func (n noopScrapeManager) Ready() bool {
+ return false
+}
+
// initialize sets up the various Prometheus components with their initial
// settings. initialize will be called each time the Instance is run. Prometheus
// components cannot be reused after they are stopped so we need to recreate them
@@ -304,7 +308,7 @@ func (i *Instance) initialize(_ context.Context, reg prometheus.Registerer, cfg
// Setup the remote storage
remoteLogger := log.With(i.logger, "component", "remote")
- i.remoteStore = remote.NewStorage(util_log.SlogFromGoKit(remoteLogger), reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, noopScrapeManager{}, false)
+ i.remoteStore = remote.NewStorage(util_log.SlogFromGoKit(remoteLogger), reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, noopScrapeManager{})
err = i.remoteStore.ApplyConfig(&config.Config{
RemoteWriteConfigs: cfg.RemoteWrite,
})
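`remote.NewStorage` dropped its trailing boolean, and its scrape-manager dependency now seems to require a `Ready() bool` method as well, hence the extra no-op implementation above. A hedged sketch of the shape the no-op type must satisfy; the interface requirement is inferred from the hunk rather than quoted from the remote package:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/scrape"
)

// noopScrapeManager satisfies the ready-scrape-manager dependency of
// remote.NewStorage without ever exposing a real scrape.Manager.
type noopScrapeManager struct{}

func (noopScrapeManager) Get() (*scrape.Manager, error) {
	return nil, errors.New("no-op scrape manager not ready")
}

func (noopScrapeManager) Ready() bool { return false }

func main() {
	fmt.Println(noopScrapeManager{}.Ready())
}
```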
diff --git a/pkg/ruler/storage/wal/wal.go b/pkg/ruler/storage/wal/wal.go
index 38a2a07134..7ed7d0ba39 100644
--- a/pkg/ruler/storage/wal/wal.go
+++ b/pkg/ruler/storage/wal/wal.go
@@ -28,6 +28,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/compression"
"go.uber.org/atomic"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@@ -70,7 +71,7 @@ type Storage struct {
// NewStorage makes a new Storage.
func NewStorage(logger log.Logger, metrics *Metrics, registerer prometheus.Registerer, path string, enableReplay bool) (*Storage, error) {
- w, err := wlog.NewSize(util_log.SlogFromGoKit(logger), registerer, SubDirectory(path), wlog.DefaultSegmentSize, wlog.CompressionSnappy)
+ w, err := wlog.NewSize(util_log.SlogFromGoKit(logger), registerer, SubDirectory(path), wlog.DefaultSegmentSize, compression.Snappy)
if err != nil {
return nil, err
}
@@ -373,7 +374,7 @@ func (w *Storage) Truncate(mint int64) error {
return nil
}
- keep := func(id chunks.HeadSeriesRef) bool {
+ keep := func(id chunks.HeadSeriesRef, _ int) bool {
if w.series.getByID(id) != nil {
return true
}
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go b/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go
index 66c18a5189..3a77f744fe 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go
@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/compression"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
"github.com/grafana/loki/v3/pkg/util/encoding"
@@ -205,7 +206,7 @@ func newHeadWAL(log log.Logger, dir string, t time.Time) (*headWAL, error) {
// NB: if we use a non-nil Prometheus Registerer, ensure
// that the underlying metrics won't conflict with existing WAL metrics in the ingester.
// Likely, this can be done by adding extra label(s)
- wal, err := wlog.NewSize(util_log.SlogFromGoKit(log), nil, dir, walSegmentSize, wlog.CompressionNone)
+ wal, err := wlog.NewSize(util_log.SlogFromGoKit(log), nil, dir, walSegmentSize, compression.None)
if err != nil {
return nil, err
}
diff --git a/pkg/tool/commands/rules.go b/pkg/tool/commands/rules.go
index b91da0f324..57a03a7ba6 100644
--- a/pkg/tool/commands/rules.go
+++ b/pkg/tool/commands/rules.go
@@ -628,7 +628,7 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error {
}
// Do not apply the aggregation label to excluded rule groups.
- applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
+ applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool {
_, excluded := r.aggregationLabelExcludedRuleGroupsList[group.Name]
return !excluded
}
@@ -749,11 +749,11 @@ func checkDuplicates(groups []rwrulefmt.RuleGroup) []compareRuleType {
return duplicates
}
-func ruleMetric(rule rulefmt.RuleNode) string {
- if rule.Alert.Value != "" {
- return rule.Alert.Value
+func ruleMetric(rule rulefmt.Rule) string {
+ if rule.Alert != "" {
+ return rule.Alert
}
- return rule.Record.Value
+ return rule.Record
}
// End taken from https://github.com/prometheus/prometheus/blob/8c8de46003d1800c9d40121b4a5e5de8582ef6e1/cmd/promtool/main.go#L403
diff --git a/pkg/tool/commands/rules_test.go b/pkg/tool/commands/rules_test.go
index fe27da35f9..3abfca6596 100644
--- a/pkg/tool/commands/rules_test.go
+++ b/pkg/tool/commands/rules_test.go
@@ -5,7 +5,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/stretchr/testify/assert"
- "gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
@@ -21,14 +20,14 @@ func TestCheckDuplicates(t *testing.T) {
in: []rwrulefmt.RuleGroup{{
RuleGroup: rulefmt.RuleGroup{
Name: "rulegroup",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "up"},
- Expr: yaml.Node{Value: "up==1"},
+ Record: "up",
+ Expr: "up==1",
},
{
- Record: yaml.Node{Value: "down"},
- Expr: yaml.Node{Value: "up==0"},
+ Record: "down",
+ Expr: "up==0",
},
},
},
@@ -41,14 +40,14 @@ func TestCheckDuplicates(t *testing.T) {
in: []rwrulefmt.RuleGroup{{
RuleGroup: rulefmt.RuleGroup{
Name: "rulegroup",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "up"},
- Expr: yaml.Node{Value: "up==1"},
+ Record: "up",
+ Expr: "up==1",
},
{
- Record: yaml.Node{Value: "up"},
- Expr: yaml.Node{Value: "up==0"},
+ Record: "up",
+ Expr: "up==0",
},
},
},
diff --git a/pkg/tool/rules/compare.go b/pkg/tool/rules/compare.go
index 78e105c347..de9493bf26 100644
--- a/pkg/tool/rules/compare.go
+++ b/pkg/tool/rules/compare.go
@@ -102,10 +102,10 @@ func CompareGroups(groupOne, groupTwo rwrulefmt.RuleGroup) error {
return nil
}
-func rulesEqual(a, b *rulefmt.RuleNode) bool {
- if a.Alert.Value != b.Alert.Value ||
- a.Record.Value != b.Record.Value ||
- a.Expr.Value != b.Expr.Value ||
+func rulesEqual(a, b *rulefmt.Rule) bool {
+ if a.Alert != b.Alert ||
+ a.Record != b.Record ||
+ a.Expr != b.Expr ||
a.For != b.For {
return false
}
diff --git a/pkg/tool/rules/compare_test.go b/pkg/tool/rules/compare_test.go
index 4df1aa2ee6..7e3cae7e4f 100644
--- a/pkg/tool/rules/compare_test.go
+++ b/pkg/tool/rules/compare_test.go
@@ -4,7 +4,6 @@ import (
"testing"
"github.com/prometheus/prometheus/model/rulefmt"
- yaml "gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
@@ -12,21 +11,21 @@ import (
func Test_rulesEqual(t *testing.T) {
tests := []struct {
name string
- a *rulefmt.RuleNode
- b *rulefmt.RuleNode
+ a *rulefmt.Rule
+ b *rulefmt.Rule
want bool
}{
{
name: "rule_node_identical",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"c": "d", "a": "b"},
Labels: nil,
},
@@ -34,53 +33,53 @@ func Test_rulesEqual(t *testing.T) {
},
{
name: "rule_node_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "two"},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "two",
+ Expr: "up",
},
want: false,
},
{
name: "rule_node_annotations_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b"},
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one", Column: 10},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"c": "d"},
},
want: false,
},
{
name: "rule_node_annotations_nil_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b"},
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one", Column: 10},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: nil,
},
want: false,
},
{
name: "rule_node_yaml_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one", Column: 10},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
},
want: true,
},
@@ -106,10 +105,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -119,10 +118,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -136,10 +135,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -149,16 +148,16 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -172,10 +171,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -188,10 +187,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -208,10 +207,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -224,10 +223,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -245,10 +244,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -261,10 +260,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
diff --git a/pkg/tool/rules/parser_test.go b/pkg/tool/rules/parser_test.go
index 35db097486..7348f39197 100644
--- a/pkg/tool/rules/parser_test.go
+++ b/pkg/tool/rules/parser_test.go
@@ -28,7 +28,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "testgrp2",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@@ -51,7 +51,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "testgrp2",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@@ -81,7 +81,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "testgrp2",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@@ -96,7 +96,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "other_testgrp2",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
diff --git a/pkg/tool/rules/rules.go b/pkg/tool/rules/rules.go
index 4ac84f7da9..e05e994a29 100644
--- a/pkg/tool/rules/rules.go
+++ b/pkg/tool/rules/rules.go
@@ -7,6 +7,7 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/promql/parser"
log "github.com/sirupsen/logrus"
+ "gopkg.in/yaml.v3"
logql "github.com/grafana/loki/v3/pkg/logql/syntax"
@@ -40,13 +41,13 @@ func (r RuleNamespace) LintExpressions() (int, int, error) {
for i, group := range r.Groups {
for j, rule := range group.Rules {
log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("linting %s", queryLanguage)
- exp, err := parseFn(rule.Expr.Value)
+ exp, err := parseFn(rule.Expr)
if err != nil {
return count, mod, err
}
count++
- if rule.Expr.Value != exp.String() {
+ if rule.Expr != exp.String() {
log.WithFields(log.Fields{
"rule": getRuleName(rule),
"currentExpr": rule.Expr,
@@ -54,7 +55,7 @@ func (r RuleNamespace) LintExpressions() (int, int, error) {
}).Debugf("expression differs")
mod++
- r.Groups[i].Rules[j].Expr.Value = exp.String()
+ r.Groups[i].Rules[j].Expr = exp.String()
}
}
}
@@ -75,10 +76,10 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int {
for _, group := range r.Groups {
for _, rule := range group.Rules {
// Assume if there is a rule.Record that this is a recording rule.
- if rule.Record.Value == "" {
+ if rule.Record == "" {
continue
}
- name = rule.Record.Value
+ name = rule.Record
log.WithFields(log.Fields{"rule": name}).Debugf("linting recording rule name")
chunks := strings.Split(name, ":")
if len(chunks) < reqChunks {
@@ -98,7 +99,7 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int {
// AggregateBy modifies the aggregation rules in groups to include a given Label.
// If the applyTo function is provided, the aggregation is applied only to rules
// for which the applyTo function returns true.
-func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool) (int, int, error) {
+func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool) (int, int, error) {
// `count` represents the number of rules we evaluated.
// `mod` represents the number of rules we modified - a modification can either be a lint or adding the
// label in the aggregation.
@@ -118,7 +119,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
}
log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("evaluating...")
- exp, err := parser.ParseExpr(rule.Expr.Value)
+ exp, err := parser.ParseExpr(rule.Expr)
if err != nil {
return count, mod, err
}
@@ -130,14 +131,14 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
parser.Inspect(exp, f)
// Only modify the ones that actually changed.
- if rule.Expr.Value != exp.String() {
+ if rule.Expr != exp.String() {
log.WithFields(log.Fields{
"rule": getRuleName(rule),
"currentExpr": rule.Expr,
"afterExpr": exp.String(),
}).Debugf("expression differs")
mod++
- r.Groups[i].Rules[j].Expr.Value = exp.String()
+ r.Groups[i].Rules[j].Expr = exp.String()
}
}
}
@@ -147,7 +148,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
// exprNodeInspectorFunc returns a PromQL inspector.
// It modifies most PromQL expressions to include a given label.
-func exprNodeInspectorFunc(rule rulefmt.RuleNode, label string) func(node parser.Node, path []parser.Node) error {
+func exprNodeInspectorFunc(rule rulefmt.Rule, label string) func(node parser.Node, path []parser.Node) error {
return func(node parser.Node, _ []parser.Node) error {
var err error
switch n := node.(type) {
@@ -239,12 +240,17 @@ func (r RuleNamespace) Validate() []error {
func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error {
var errs []error
for i, r := range g.Rules {
- for _, err := range r.Validate() {
+ ruleNode := rulefmt.RuleNode{
+ Record: yaml.Node{Value: r.Record},
+ Alert: yaml.Node{Value: r.Alert},
+ Expr: yaml.Node{Value: r.Expr},
+ }
+ for _, err := range r.Validate(ruleNode) {
var ruleName string
- if r.Alert.Value != "" {
- ruleName = r.Alert.Value
+ if r.Alert != "" {
+ ruleName = r.Alert
} else {
- ruleName = r.Record.Value
+ ruleName = r.Record
}
errs = append(errs, &rulefmt.Error{
Group: g.Name,
@@ -258,10 +264,10 @@ func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error {
return errs
}
-func getRuleName(r rulefmt.RuleNode) string {
- if r.Record.Value != "" {
- return r.Record.Value
+func getRuleName(r rulefmt.Rule) string {
+ if r.Record != "" {
+ return r.Record
}
- return r.Alert.Value
+ return r.Alert
}
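`getRuleName` here and `ruleMetric` earlier in the patch encode the same convention: a rule is identified by its record name when set, otherwise by its alert name, and the new string fields reduce both helpers to simple comparisons. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/rulefmt"
)

// ruleName mirrors the convention: prefer the record name, fall back to alert.
func ruleName(r rulefmt.Rule) string {
	if r.Record != "" {
		return r.Record
	}
	return r.Alert
}

func main() {
	fmt.Println(ruleName(rulefmt.Rule{Record: "job:up:rate5m", Expr: "avg(up)"}))
	fmt.Println(ruleName(rulefmt.Rule{Alert: "InstanceDown", Expr: "up == 0"}))
}
```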
diff --git a/pkg/tool/rules/rules_test.go b/pkg/tool/rules/rules_test.go
index 8c24a7d8ab..b06463b786 100644
--- a/pkg/tool/rules/rules_test.go
+++ b/pkg/tool/rules/rules_test.go
@@ -5,7 +5,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
"gotest.tools/assert"
"github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
@@ -15,7 +14,7 @@ func TestAggregateBy(t *testing.T) {
tt := []struct {
name string
rn RuleNamespace
- applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool
+ applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool
expectedExpr []string
count, modified int
expect error
@@ -31,8 +30,8 @@ func TestAggregateBy(t *testing.T) {
Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
- Name: "WithoutAggregation", Rules: []rulefmt.RuleNode{
- {Alert: yaml.Node{Value: "WithoutAggregation"}, Expr: yaml.Node{Value: "up != 1"}},
+ Name: "WithoutAggregation", Rules: []rulefmt.Rule{
+ {Alert: "WithoutAggregation", Expr: "up != 1"},
},
},
},
@@ -48,11 +47,10 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "SkipWithout",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "SkipWithout"},
- Expr: yaml.Node{
- Value: `
+ Alert: "SkipWithout",
+ Expr: `
min without (alertmanager) (
rate(prometheus_notifications_errors_total{job="default/prometheus"}[5m])
/
@@ -60,7 +58,6 @@ func TestAggregateBy(t *testing.T) {
)
* 100
> 3`,
- },
},
},
},
@@ -77,16 +74,14 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "WithAggregation",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "WithAggregation"},
- Expr: yaml.Node{
- Value: `
+ Alert: "WithAggregation",
+ Expr: `
sum(rate(cortex_prometheus_rule_evaluation_failures_total[1m])) by (namespace, job)
/
sum(rate(cortex_prometheus_rule_evaluations_total[1m])) by (namespace, job)
> 0.01`,
- },
},
},
},
@@ -103,15 +98,11 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "CountAggregation",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{
- Value: "CountAggregation",
- },
- Expr: yaml.Node{
- Value: `
+ Alert: "CountAggregation",
+ Expr: `
count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1`,
- },
},
},
},
@@ -128,10 +119,10 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "BinaryExpressions",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "VectorMatching"},
- Expr: yaml.Node{Value: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`},
+ Alert: "VectorMatching",
+ Expr: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`,
},
},
},
@@ -148,35 +139,27 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "CountAggregation",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{
- Value: "CountAggregation",
- },
- Expr: yaml.Node{
- Value: `count by (namespace) (test_series) > 1`,
- },
+ Alert: "CountAggregation",
+ Expr: `count by (namespace) (test_series) > 1`,
},
},
},
}, {
RuleGroup: rulefmt.RuleGroup{
Name: "CountSkipped",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{
- Value: "CountSkipped",
- },
- Expr: yaml.Node{
- Value: `count by (namespace) (test_series) > 1`,
- },
+ Alert: "CountSkipped",
+ Expr: `count by (namespace) (test_series) > 1`,
},
},
},
},
},
},
- applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
+ applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool {
return group.Name != "CountSkipped"
},
expectedExpr: []string{`count by (namespace, cluster) (test_series) > 1`, `count by (namespace) (test_series) > 1`},
@@ -196,7 +179,7 @@ func TestAggregateBy(t *testing.T) {
expectedIdx := 0
for _, g := range tc.rn.Groups {
for _, r := range g.Rules {
- require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr.Value)
+ require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr)
expectedIdx++
}
}
@@ -255,10 +238,10 @@ func TestLintExpressions(t *testing.T) {
r := RuleNamespace{Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "AName"},
- Expr: yaml.Node{Value: tc.expr},
+ Alert: "AName",
+ Expr: tc.expr,
},
},
},
@@ -267,7 +250,7 @@ func TestLintExpressions(t *testing.T) {
}
c, m, err := r.LintExpressions()
- rexpr := r.Groups[0].Rules[0].Expr.Value
+ rexpr := r.Groups[0].Rules[0].Expr
require.Equal(t, tc.count, c)
require.Equal(t, tc.modified, m)
@@ -325,10 +308,11 @@ func TestCheckRecordingRules(t *testing.T) {
Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: tc.ruleName},
- Expr: yaml.Node{Value: "rate(some_metric_total)[5m]"}},
+ Record: tc.ruleName,
+ Expr: "rate(some_metric_total)[5m]",
+ },
},
},
},
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
index 988de1695d..e5a9937c06 100644
--- a/tools/lambda-promtail/go.mod
+++ b/tools/lambda-promtail/go.mod
@@ -106,7 +106,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.21.1 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/exporter-toolkit v0.13.2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/redis/go-redis/v9 v9.7.3 // indirect
@@ -131,14 +131,14 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect
- golang.org/x/crypto v0.36.0 // indirect
+ golang.org/x/crypto v0.38.0 // indirect
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
golang.org/x/mod v0.22.0 // indirect
- golang.org/x/net v0.38.0 // indirect
- golang.org/x/oauth2 v0.28.0 // indirect
- golang.org/x/sync v0.12.0 // indirect
- golang.org/x/sys v0.31.0 // indirect
- golang.org/x/text v0.23.0 // indirect
+ golang.org/x/net v0.40.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sync v0.14.0 // indirect
+ golang.org/x/sys v0.33.0 // indirect
+ golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.29.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
index bc303f1bd8..492eb7238e 100644
--- a/tools/lambda-promtail/go.sum
+++ b/tools/lambda-promtail/go.sum
@@ -416,8 +416,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
@@ -531,8 +531,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
+golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -573,13 +573,13 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
-golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -588,8 +588,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -622,16 +622,16 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
+golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index cf422304e7..926ed3882c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,19 @@
# Release History
+## 1.18.0 (2025-04-03)
+
+### Features Added
+
+* Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token
+
+
+## 1.17.1 (2025-03-20)
+
+### Other Changes
+
+* Upgraded to Go 1.23
+* Upgraded dependencies
+
## 1.17.0 (2025-01-07)
### Features Added
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
index f2b296b6dc..460170034a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
@@ -47,8 +47,13 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
// AccessToken represents an Azure service bearer access token with expiry information.
// Exported as azcore.AccessToken.
type AccessToken struct {
- Token string
+ // Token is the access token
+ Token string
+ // ExpiresOn indicates when the token expires
ExpiresOn time.Time
+ // RefreshOn is a suggested time to refresh the token.
+ // Clients should ignore this value when it's zero.
+ RefreshOn time.Time
}
// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 44ab00d400..85514db3b8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.17.0"
+ Version = "v1.18.0"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
index b26db920b0..1950a2e5b3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
@@ -51,6 +51,15 @@ func acquire(state acquiringResourceState) (newResource exported.AccessToken, ne
return tk, tk.ExpiresOn, nil
}
+// shouldRefresh determines whether the token should be refreshed. It's a variable so tests can replace it.
+var shouldRefresh = func(tk exported.AccessToken, _ acquiringResourceState) bool {
+ if tk.RefreshOn.IsZero() {
+ return tk.ExpiresOn.Add(-5 * time.Minute).Before(time.Now())
+ }
+ // no offset in this case because the authority suggested a refresh window--between RefreshOn and ExpiresOn
+ return tk.RefreshOn.Before(time.Now())
+}
+
// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
@@ -69,11 +78,14 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
return authNZ(policy.TokenRequestOptions{Scopes: scopes})
}
}
+ mr := temporal.NewResourceWithOptions(acquire, temporal.ResourceOptions[exported.AccessToken, acquiringResourceState]{
+ ShouldRefresh: shouldRefresh,
+ })
return &BearerTokenPolicy{
authzHandler: ah,
cred: cred,
scopes: scopes,
- mainResource: temporal.NewResource(acquire),
+ mainResource: mr,
allowHTTP: opts.InsecureAllowCredentialWithHTTP,
}
}
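The new `shouldRefresh` hook is the behavioral core of azcore 1.18.0: a nonzero `RefreshOn` from the authority takes precedence, otherwise the policy keeps the old five-minutes-before-expiry heuristic. A self-contained sketch of that decision using a stand-in token type (the real one is `azcore.AccessToken`):

```go
package main

import (
	"fmt"
	"time"
)

// accessToken stands in for azcore's AccessToken in this sketch.
type accessToken struct {
	ExpiresOn time.Time
	RefreshOn time.Time
}

func shouldRefresh(tk accessToken) bool {
	if tk.RefreshOn.IsZero() {
		// No hint from the authority: refresh 5 minutes before expiry.
		return tk.ExpiresOn.Add(-5 * time.Minute).Before(time.Now())
	}
	// The authority suggested a refresh window starting at RefreshOn.
	return tk.RefreshOn.Before(time.Now())
}

func main() {
	soon := accessToken{ExpiresOn: time.Now().Add(2 * time.Minute)}
	fmt.Println(shouldRefresh(soon)) // true: inside the 5-minute window

	hinted := accessToken{
		ExpiresOn: time.Now().Add(time.Hour),
		RefreshOn: time.Now().Add(30 * time.Minute),
	}
	fmt.Println(shouldRefresh(hinted)) // false: RefreshOn not reached yet
}
```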
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index 11c64eb294..485224197e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,5 +1,17 @@
# Release History
+## 1.9.0 (2025-04-08)
+
+### Features Added
+* `GetToken()` sets `AccessToken.RefreshOn` when the token provider specifies a value
+
+### Other Changes
+* `NewManagedIdentityCredential` logs the configured user-assigned identity, if any
+* Deprecated `UsernamePasswordCredential` because it can't support multifactor
+ authentication (MFA), which Microsoft Entra ID requires for most tenants. See
+ https://aka.ms/azsdk/identity/mfa for migration guidance.
+* Updated dependencies
+
## 1.8.2 (2025-02-12)
### Other Changes
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
index 5cc64c08f2..069bc688d5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -21,7 +21,7 @@ go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity
## Prerequisites
- an [Azure subscription](https://azure.microsoft.com/free/)
-- Go 1.18
+- [Supported](https://aka.ms/azsdk/go/supported-versions) version of Go
### Authenticating during local development
@@ -146,7 +146,6 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|-|-
|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser
|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI
-|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password
### Authenticating via Development Tools
@@ -159,7 +158,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables:
-#### Service principal with secret
+### Service principal with secret
|variable name|value
|-|-
@@ -167,7 +166,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_SECRET`|one of the application's client secrets
-#### Service principal with certificate
+### Service principal with certificate
|variable name|value
|-|-
@@ -176,16 +175,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key
|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any
-#### Username and password
-
-|variable name|value
-|-|-
-|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
-|`AZURE_USERNAME`|a username (usually an email address)
-|`AZURE_PASSWORD`|that user's password
-
-Configuration is attempted in the above order. For example, if values for a
-client secret and certificate are both present, the client secret will be used.
+Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used.
## Token caching
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
index 8fc7c64aa3..dd3f8e5b21 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
@@ -22,12 +22,11 @@ Some credential types support opt-in persistent token caching (see [the below ta
Persistent caches are encrypted at rest using a mechanism that depends on the operating system:
-| Operating system | Encryption facility |
-| ---------------- | ---------------------------------------------- |
-| Linux | kernel key retention service (keyctl) |
-| macOS | Keychain (requires cgo and native build tools) |
-| Windows | Data Protection API (DPAPI) |
-
+| Operating system | Encryption facility | Limitations |
+| ---------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. |
+| macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). |
+| Windows | Data Protection API (DPAPI) | No specific limitations. |
Persistent caching requires encryption. When the required encryption facility is unusable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example].
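As context for the constructor-error behavior described above, here is a minimal sketch of opting in, assuming the `azidentity/cache` subpackage and the credential options' `Cache` field (neither appears in this diff). When the OS encryption facility is unusable, `cache.New` returns an error and the application can fall back to in-memory caching:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func main() {
	// cache.New fails when the OS encryption facility is unusable; the
	// credential still authenticates, it just can't persist tokens.
	var opts azidentity.InteractiveBrowserCredentialOptions
	if c, err := cache.New(nil); err == nil {
		opts.Cache = c
	} else {
		fmt.Println("persistent cache unavailable, using in-memory caching:", err)
	}
	cred, err := azidentity.NewInteractiveBrowserCredential(&opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```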
### Credentials supporting token caching
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index 045f87acd5..4118f99ef2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/azidentity",
- "Tag": "go/azidentity_c55452bbf6"
+ "Tag": "go/azidentity_191110b0dd"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
index 40a94154c6..bd196ddd32 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)
@@ -208,6 +209,10 @@ type msalConfidentialClient interface {
AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error)
}
+type msalManagedIdentityClient interface {
+ AcquireToken(context.Context, string, ...managedidentity.AcquireTokenOption) (managedidentity.AuthResult, error)
+}
+
// enables fakes for test scenarios
type msalPublicClient interface {
AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
index 92f508094d..58c4b585c1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
@@ -118,7 +118,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", "))
log.Write(EventAuthentication, msg)
}
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
index b30f5474f5..ec1eab05c5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -60,7 +60,10 @@ type EnvironmentCredentialOptions struct {
// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this
// function isn't able to parse your certificate, use [ClientCertificateCredential] instead.
//
-// # User with username and password
+// # Deprecated: User with username and password
+//
+// User password authentication is deprecated because it can't support multifactor authentication. See
+// [Entra ID documentation] for migration guidance.
//
// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
//
@@ -75,6 +78,8 @@ type EnvironmentCredentialOptions struct {
// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants
// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set
// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type EnvironmentCredential struct {
cred azcore.TokenCredential
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
index 04ea962b42..6dd5b3d64d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
@@ -1,4 +1,4 @@
-go 1.18
+go 1.23.0
use (
.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
index 1c3791777a..edd56f9d57 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
@@ -9,7 +9,7 @@
}
},
"GoVersion": [
- "1.22.1"
+ "env:GO_VERSION_PREVIOUS"
],
"IDENTITY_IMDS_AVAILABLE": "1"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
index cc07fd7015..b3a0f85883 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
@@ -8,24 +8,18 @@ package azidentity
import (
"context"
- "encoding/json"
"errors"
"fmt"
"net/http"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
)
const (
@@ -41,59 +35,20 @@ const (
msiResID = "msi_res_id"
msiSecret = "MSI_SECRET"
imdsAPIVersion = "2018-02-01"
- azureArcAPIVersion = "2019-08-15"
+ azureArcAPIVersion = "2020-06-01"
qpClientID = "client_id"
serviceFabricAPIVersion = "2019-07-01-preview"
)
var imdsProbeTimeout = time.Second
-type msiType int
-
-const (
- msiTypeAppService msiType = iota
- msiTypeAzureArc
- msiTypeAzureML
- msiTypeCloudShell
- msiTypeIMDS
- msiTypeServiceFabric
-)
-
type managedIdentityClient struct {
- azClient *azcore.Client
- endpoint string
- id ManagedIDKind
- msiType msiType
- probeIMDS bool
+ azClient *azcore.Client
+ imds, probeIMDS, userAssigned bool
// chained indicates whether the client is part of a credential chain. If true, the client will return
// a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response.
- chained bool
-}
-
-// arcKeyDirectory returns the directory expected to contain Azure Arc keys
-var arcKeyDirectory = func() (string, error) {
- switch runtime.GOOS {
- case "linux":
- return "/var/opt/azcmagent/tokens", nil
- case "windows":
- pd := os.Getenv("ProgramData")
- if pd == "" {
- return "", errors.New("environment variable ProgramData has no value")
- }
- return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil
- default:
- return "", fmt.Errorf("unsupported OS %q", runtime.GOOS)
- }
-}
-
-type wrappedNumber json.Number
-
-func (n *wrappedNumber) UnmarshalJSON(b []byte) error {
- c := string(b)
- if c == "\"\"" {
- return nil
- }
- return json.Unmarshal(b, (*json.Number)(n))
+ chained bool
+ msalClient msalManagedIdentityClient
}
// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
@@ -141,51 +96,20 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
options = &ManagedIdentityCredentialOptions{}
}
cp := options.ClientOptions
- c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS}
- env := "IMDS"
- if endpoint, ok := os.LookupEnv(identityEndpoint); ok {
- if _, ok := os.LookupEnv(identityHeader); ok {
- if _, ok := os.LookupEnv(identityServerThumbprint); ok {
- if options.ID != nil {
- return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
- }
- env = "Service Fabric"
- c.endpoint = endpoint
- c.msiType = msiTypeServiceFabric
- } else {
- env = "App Service"
- c.endpoint = endpoint
- c.msiType = msiTypeAppService
- }
- } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok {
- if options.ID != nil {
- return nil, errors.New("the Azure Arc API doesn't support specifying a user-assigned managed identity at runtime")
- }
- env = "Azure Arc"
- c.endpoint = endpoint
- c.msiType = msiTypeAzureArc
- }
- } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
- c.endpoint = endpoint
- if _, ok := os.LookupEnv(msiSecret); ok {
- if options.ID != nil && options.ID.idKind() != miClientID {
- return nil, errors.New("the Azure ML API supports specifying a user-assigned managed identity by client ID only")
- }
- env = "Azure ML"
- c.msiType = msiTypeAzureML
- } else {
- if options.ID != nil {
- return nil, errors.New("the Cloud Shell API doesn't support user-assigned managed identities")
- }
- env = "Cloud Shell"
- c.msiType = msiTypeCloudShell
- }
- } else {
+ c := managedIdentityClient{}
+ source, err := managedidentity.GetSource()
+ if err != nil {
+ return nil, err
+ }
+ env := string(source)
+ if source == managedidentity.DefaultToIMDS {
+ env = "IMDS"
+ c.imds = true
c.probeIMDS = options.dac
setIMDSRetryOptionDefaults(&cp.Retry)
}
- client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{
+ c.azClient, err = azcore.NewClient(module, version, azruntime.PipelineOptions{
Tracing: azruntime.TracingOptions{
Namespace: traceNamespace,
},
@@ -193,28 +117,53 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
if err != nil {
return nil, err
}
- c.azClient = client
+
+ id := managedidentity.SystemAssigned()
+ if options.ID != nil {
+ c.userAssigned = true
+ switch s := options.ID.String(); options.ID.idKind() {
+ case miClientID:
+ id = managedidentity.UserAssignedClientID(s)
+ case miObjectID:
+ id = managedidentity.UserAssignedObjectID(s)
+ case miResourceID:
+ id = managedidentity.UserAssignedResourceID(s)
+ }
+ }
+ msalClient, err := managedidentity.New(id, managedidentity.WithHTTPClient(&c), managedidentity.WithRetryPolicyDisabled())
+ if err != nil {
+ return nil, err
+ }
+ c.msalClient = &msalClient
if log.Should(EventAuthentication) {
- log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env)
+ msg := fmt.Sprintf("%s will use %s managed identity", credNameManagedIdentity, env)
+ if options.ID != nil {
+ kind := "client"
+ switch options.ID.(type) {
+ case ObjectID:
+ kind = "object"
+ case ResourceID:
+ kind = "resource"
+ }
+ msg += fmt.Sprintf(" with %s ID %q", kind, options.ID.String())
+ }
+ log.Write(EventAuthentication, msg)
}
return &c, nil
}
-// provideToken acquires a token for MSAL's confidential.Client, which caches the token
-func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) {
- result := confidential.TokenProviderResult{}
- tk, err := c.authenticate(ctx, c.id, params.Scopes)
- if err == nil {
- result.AccessToken = tk.Token
- result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds())
- }
- return result, err
+func (*managedIdentityClient) CloseIdleConnections() {
+ // do nothing
+}
+
+func (c *managedIdentityClient) Do(r *http.Request) (*http.Response, error) {
+ return doForClient(c.azClient, r)
}
// authenticate acquires an access token
-func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) {
+func (c *managedIdentityClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
// no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client,
// and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block
if c.probeIMDS {
@@ -222,7 +171,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout)
defer cancel()
cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})
- req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint)
+ req, err := azruntime.NewRequest(cx, http.MethodGet, imdsEndpoint)
if err != nil {
return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err)
}
@@ -237,32 +186,26 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
c.probeIMDS = false
}
- msg, err := c.createAuthRequest(ctx, id, scopes)
- if err != nil {
- return azcore.AccessToken{}, err
- }
-
- resp, err := c.azClient.Pipeline().Do(msg)
- if err != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil)
- }
-
- if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
- tk, err := c.createAccessToken(resp)
- if err != nil && c.chained && c.msiType == msiTypeIMDS {
- // failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at
+ ar, err := c.msalClient.AcquireToken(ctx, tro.Scopes[0], managedidentity.WithClaims(tro.Claims))
+ if err == nil {
+ msg := fmt.Sprintf(scopeLogFmt, credNameManagedIdentity, strings.Join(ar.GrantedScopes, ", "))
+ log.Write(EventAuthentication, msg)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
+ }
+ if c.imds {
+ var ije msalerrors.InvalidJsonErr
+ if c.chained && errors.As(err, &ije) {
+ // an unmarshaling error implies the response is from something other than IMDS such as a proxy listening at
// the same address. Return a credentialUnavailableError so credential chains continue to their next credential
- err = newCredentialUnavailableError(credNameManagedIdentity, err.Error())
+ return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
+ }
+ resp := getResponseFromError(err)
+ if resp == nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err)
}
- return tk, err
- }
-
- if c.msiType == msiTypeIMDS {
switch resp.StatusCode {
case http.StatusBadRequest:
- if id != nil {
- // return authenticationFailedError, halting any encompassing credential chain,
- // because the explicit user-assigned identity implies the developer expected this to work
+ if c.userAssigned {
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
}
msg := "failed to authenticate a system assigned identity"
@@ -278,237 +221,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body)))
}
}
- if c.chained {
- // the response may be from something other than IMDS, for example a proxy returning
- // 404. Return credentialUnavailableError so credential chains continue to their
- // next credential, include the response in the error message to help debugging
- err = newAuthenticationFailedError(credNameManagedIdentity, "", resp)
- return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
- }
- }
-
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp)
-}
-
-func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) {
- value := struct {
- // these are the only fields that we use
- Token string `json:"access_token,omitempty"`
- RefreshToken string `json:"refresh_token,omitempty"`
- ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid
- ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
- }{}
- if err := azruntime.UnmarshalAsJSON(res, &value); err != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res)
- }
- if value.ExpiresIn != "" {
- expiresIn, err := json.Number(value.ExpiresIn).Int64()
- if err != nil {
- return azcore.AccessToken{}, err
- }
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil
- }
- switch v := value.ExpiresOn.(type) {
- case float64:
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil
- case string:
- if expiresOn, err := strconv.Atoi(v); err == nil {
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
- }
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res)
- default:
- msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res)
- }
-}
-
-func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- switch c.msiType {
- case msiTypeIMDS:
- return c.createIMDSAuthRequest(ctx, id, scopes)
- case msiTypeAppService:
- return c.createAppServiceAuthRequest(ctx, id, scopes)
- case msiTypeAzureArc:
- // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service
- key, err := c.getAzureArcSecretKey(ctx, scopes)
- if err != nil {
- msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err)
- return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil)
- }
- return c.createAzureArcAuthRequest(ctx, scopes, key)
- case msiTypeAzureML:
- return c.createAzureMLAuthRequest(ctx, id, scopes)
- case msiTypeServiceFabric:
- return c.createServiceFabricAuthRequest(ctx, scopes)
- case msiTypeCloudShell:
- return c.createCloudShellAuthRequest(ctx, scopes)
- default:
- return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
- }
-}
-
-func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Set("api-version", imdsAPIVersion)
- q.Set("resource", strings.Join(scopes, " "))
- if id != nil {
- switch id.idKind() {
- case miClientID:
- q.Set(qpClientID, id.String())
- case miObjectID:
- q.Set("object_id", id.String())
- case miResourceID:
- q.Set(msiResID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
- q := request.Raw().URL.Query()
- q.Set("api-version", "2019-08-01")
- q.Set("resource", scopes[0])
- if id != nil {
- switch id.idKind() {
- case miClientID:
- q.Set(qpClientID, id.String())
- case miObjectID:
- q.Set("principal_id", id.String())
- case miResourceID:
- q.Set(miResID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("secret", os.Getenv(msiSecret))
- q := request.Raw().URL.Query()
- q.Set("api-version", "2017-09-01")
- q.Set("resource", strings.Join(scopes, " "))
- q.Set("clientid", os.Getenv(defaultIdentityClientID))
- if id != nil {
- switch id.idKind() {
- case miClientID:
- q.Set("clientid", id.String())
- case miObjectID:
- return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by object ID", nil)
- case miResourceID:
- return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by resource ID", nil)
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- q := request.Raw().URL.Query()
- request.Raw().Header.Set("Accept", "application/json")
- request.Raw().Header.Set("Secret", os.Getenv(identityHeader))
- q.Set("api-version", serviceFabricAPIVersion)
- q.Set("resource", strings.Join(scopes, " "))
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) {
- // create the request to retreive the secret key challenge provided by the HIMDS service
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return "", err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Set("api-version", azureArcAPIVersion)
- q.Set("resource", strings.Join(resources, " "))
- request.Raw().URL.RawQuery = q.Encode()
- // send the initial request to get the short-lived secret key
- response, err := c.azClient.Pipeline().Do(request)
- if err != nil {
- return "", err
- }
- // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location
- // of the secret key file. Any other status code indicates an error in the request.
- if response.StatusCode != 401 {
- msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
- return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response)
- }
- header := response.Header.Get("WWW-Authenticate")
- if len(header) == 0 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil)
- }
- // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
- _, p, found := strings.Cut(header, "=")
- if !found {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil)
- }
- expected, err := arcKeyDirectory()
- if err != nil {
- return "", err
- }
- if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil)
- }
- f, err := os.Stat(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil)
- }
- if s := f.Size(); s > 4096 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil)
- }
- key, err := os.ReadFile(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil)
- }
- return string(key), nil
-}
-
-func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, resources []string, key string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
- q := request.Raw().URL.Query()
- q.Set("api-version", azureArcAPIVersion)
- q.Set("resource", strings.Join(resources, " "))
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- data := url.Values{}
- data.Set("resource", strings.Join(scopes, " "))
- dataEncoded := data.Encode()
- body := streaming.NopCloser(strings.NewReader(dataEncoded))
- if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil {
- return nil, err
}
- return request, nil
+ err = newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err)
+ return azcore.AccessToken{}, err
}
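The new IMDS error handling above distinguishes a proxy's malformed 2xx response from a genuine IMDS failure via `errors.As` on MSAL's `InvalidJsonErr` (the type added in the `apps/errors` change later in this diff). A standalone sketch of that classification, not the vendored code itself:

```go
package main

import (
	"errors"
	"fmt"

	msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
)

func classify(err error) string {
	var ije msalerrors.InvalidJsonErr
	if errors.As(err, &ije) {
		// A 2xx response that fails to unmarshal implies something other than
		// IMDS (e.g. a proxy) answered, so a credential chain should move on
		// to its next credential rather than halt.
		return "credential unavailable"
	}
	return "authentication failed"
}

func main() {
	err := fmt.Errorf("wrapped: %w", msalerrors.InvalidJsonErr{Err: errors.New("invalid character '<'")})
	fmt.Println(classify(err)) // credential unavailable
}
```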
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
index 1d53579cf3..11b686ccda 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
@@ -14,7 +14,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
const credNameManagedIdentity = "ManagedIdentityCredential"
@@ -110,8 +109,7 @@ type ManagedIdentityCredentialOptions struct {
//
// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
type ManagedIdentityCredential struct {
- client *confidentialClient
- mic *managedIdentityClient
+ mic *managedIdentityClient
}
// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options.
@@ -123,38 +121,22 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M
if err != nil {
return nil, err
}
- cred := confidential.NewCredFromTokenProvider(mic.provideToken)
-
- // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key.
- // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL.
- clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY"
- if options.ID != nil {
- clientID = options.ID.String()
- }
- // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value
- c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{
- ClientOptions: options.ClientOptions,
- })
- if err != nil {
- return nil, err
- }
- return &ManagedIdentityCredential{client: c, mic: mic}, nil
+ return &ManagedIdentityCredential{mic: mic}, nil
}
// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients.
func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.mic.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
if len(opts.Scopes) != 1 {
err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity)
return azcore.AccessToken{}, err
}
- // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
+ // managed identity endpoints require a v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
+ return c.mic.GetToken(ctx, opts)
}
var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil)
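A usage sketch of the slimmed-down credential, illustrating the exactly-one-scope requirement and the `/.default` trimming documented above (the storage scope is only an example):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Nil options selects the system-assigned identity; pass an ID such as
	// azidentity.ClientID("...") for a user-assigned identity.
	cred, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// GetToken requires exactly one scope; the credential trims "/.default"
	// because managed identity endpoints expect a v1 resource URI.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires on", tk.ExpiresOn)
}
```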
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
index ef5e4d7212..053d1785f8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
@@ -243,7 +243,7 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke
} else {
err = newAuthenticationFailedErrorFromMSAL(p.name, err)
}
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
// resolveTenant returns the correct WithTenantID() argument for a token request given the client's
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
index efa8c6d3eb..67f97fbb2b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
@@ -72,6 +72,7 @@ az container create -g $rg -n $aciName --image $image `
--acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
--assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
--cpu 1 `
+ --ip-address Public `
--memory 1.0 `
--os-type Linux `
--role "Storage Blob Data Reader" `
@@ -82,7 +83,8 @@ az container create -g $rg -n $aciName --image $image `
AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) `
AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) `
FUNCTIONS_CUSTOMHANDLER_PORT=80
-Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName"
+$aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv
+Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP"
# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip
Write-Host "Deploying to Azure Functions"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
index 740abd4709..5791e7d224 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
@@ -17,6 +17,11 @@ import (
const credNameUserPassword = "UsernamePasswordCredential"
// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential.
+//
+// Deprecated: UsernamePasswordCredential is deprecated because it can't support multifactor
+// authentication. See [Entra ID documentation] for migration guidance.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredentialOptions struct {
azcore.ClientOptions
@@ -43,8 +48,13 @@ type UsernamePasswordCredentialOptions struct {
// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible
-// with any form of multi-factor authentication, and the application must already have user or admin consent.
+// with any form of multifactor authentication, and the application must already have user or admin consent.
// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts.
+//
+// Deprecated: this credential is deprecated because it can't support multifactor authentication. See [Entra ID documentation]
+// for migration guidance.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredential struct {
client *publicClient
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
index fec0419ca7..584aabe1cb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -14,5 +14,5 @@ const (
module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
// Version is the semantic version (see http://semver.org) of this module.
- version = "v1.8.2"
+ version = "v1.9.0"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
index 4f1dcf1b78..76dadf7d35 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
@@ -44,7 +44,7 @@ func Should(cls Event) bool {
if log.lst == nil {
return false
}
- if log.cls == nil || len(log.cls) == 0 {
+ if len(log.cls) == 0 {
return true
}
for _, c := range log.cls {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
index 238ef42ed0..02aa1fb3bc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
@@ -11,9 +11,17 @@ import (
"time"
)
+// backoff sets a minimum wait time between eager update attempts. It's a variable so tests can manipulate it.
+var backoff = func(now, lastAttempt time.Time) bool {
+ return lastAttempt.Add(30 * time.Second).After(now)
+}
+
// AcquireResource abstracts a method for refreshing a temporal resource.
type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
+// ShouldRefresh abstracts a method for indicating whether a resource should be refreshed before expiration.
+type ShouldRefresh[TResource, TState any] func(TResource, TState) bool
+
// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
type Resource[TResource, TState any] struct {
// cond is used to synchronize access to the shared resource embodied by the remaining fields
@@ -31,24 +39,43 @@ type Resource[TResource, TState any] struct {
// lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource
lastAttempt time.Time
+ // shouldRefresh indicates whether the resource should be refreshed before expiration
+ shouldRefresh ShouldRefresh[TResource, TState]
+
// acquireResource is the callback function that actually acquires the resource
acquireResource AcquireResource[TResource, TState]
}
// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
- return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
+ r := &Resource[TResource, TState]{acquireResource: ar, cond: sync.NewCond(&sync.Mutex{})}
+ r.shouldRefresh = r.expiringSoon
+ return r
+}
+
+// ResourceOptions contains optional configuration for Resource
+type ResourceOptions[TResource, TState any] struct {
+ // ShouldRefresh indicates whether [Resource.Get] should acquire an updated resource despite
+ // the currently held resource not having expired. [Resource.Get] ignores all errors from
+ // refresh attempts triggered by ShouldRefresh returning true, and doesn't call ShouldRefresh
+ // when the resource has expired (it unconditionally updates expired resources). When
+ // ShouldRefresh is nil, [Resource.Get] refreshes the resource if it will expire within 5
+ // minutes.
+ ShouldRefresh ShouldRefresh[TResource, TState]
+}
+
+// NewResourceWithOptions creates a new Resource that uses the specified AcquireResource for refreshing.
+func NewResourceWithOptions[TResource, TState any](ar AcquireResource[TResource, TState], opts ResourceOptions[TResource, TState]) *Resource[TResource, TState] {
+ r := NewResource(ar)
+ if opts.ShouldRefresh != nil {
+ r.shouldRefresh = opts.ShouldRefresh
+ }
+ return r
}
// Get returns the underlying resource.
// If the resource is fresh, no refresh is performed.
func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
- // If the resource is expiring within this time window, update it eagerly.
- // This allows other threads/goroutines to keep running by using the not-yet-expired
- // resource value while one thread/goroutine updates the resource.
- const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration
- const backoff = 30 * time.Second // Minimum wait time between eager update attempts
-
now, acquire, expired := time.Now(), false, false
// acquire exclusive lock
@@ -65,9 +92,8 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
break
}
// Getting here means that this thread/goroutine will wait for the updated resource
- } else if er.expiration.Add(-window).Before(now) {
- // The resource is valid but is expiring within the time window
- if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) {
+ } else if er.shouldRefresh(resource, state) {
+ if !(er.acquiring || backoff(now, er.lastAttempt)) {
// If another thread/goroutine is not acquiring/renewing the resource, and none has attempted
// to do so within the last 30 seconds, this thread/goroutine will do it
er.acquiring, acquire = true, true
@@ -121,3 +147,8 @@ func (er *Resource[TResource, TState]) Expire() {
// Reset the expiration as if we never got this resource to begin with
er.expiration = time.Time{}
}
+
+func (er *Resource[TResource, TState]) expiringSoon(TResource, TState) bool {
+ // call time.Now() instead of using Get's value so ShouldRefresh doesn't need a time.Time parameter
+ return er.expiration.Add(-5 * time.Minute).Before(time.Now())
+}
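A sketch of the new `ShouldRefresh` hook, written as an in-module test since `temporal` is internal to the Azure SDK module and can't be imported from outside it; the token type and refresh rule here are illustrative assumptions:

```go
package temporal_test // illustrative; temporal is internal to the Azure SDK module

import (
	"testing"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)

type fakeToken struct {
	value     string
	refreshOn time.Time
}

func TestProactiveRefresh(t *testing.T) {
	acquire := func(state string) (fakeToken, time.Time, error) {
		return fakeToken{value: state, refreshOn: time.Now().Add(30 * time.Minute)},
			time.Now().Add(time.Hour), nil
	}
	// Refresh once the token's own refresh-on hint has passed, replacing the
	// default "expiring within 5 minutes" rule described in ResourceOptions.
	res := temporal.NewResourceWithOptions(acquire, temporal.ResourceOptions[fakeToken, string]{
		ShouldRefresh: func(tk fakeToken, _ string) bool {
			return time.Now().After(tk.refreshOn)
		},
	})
	if _, err := res.Get("A"); err != nil {
		t.Fatal(err)
	}
}
```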
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
index 22c17d2012..549d68ab99 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -65,6 +65,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
// must contain the public certificate and the private key. If a PEM block is encrypted and
// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
@@ -639,7 +646,7 @@ func (cca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
if err != nil {
return AuthResult{}, err
}
- return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return cca.base.AuthResultFromToken(ctx, authParams, token)
}
// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
@@ -733,7 +740,7 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string,
if err != nil {
return AuthResult{}, err
}
- return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return cca.base.AuthResultFromToken(ctx, authParams, token)
}
// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
index c9b8dbed08..b5cbb57217 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
@@ -64,11 +64,20 @@ type CallErr struct {
Err error
}
+type InvalidJsonErr struct {
+ Err error
+}
+
// Errors implements error.Error().
func (e CallErr) Error() string {
return e.Err.Error()
}
+// Error implements error.Error().
+func (e InvalidJsonErr) Error() string {
+ return e.Err.Error()
+}
+
// Verbose prints a verbose error message with the request or response.
func (e CallErr) Verbose() string {
e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index e473d1267d..61c1c4cec1 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -5,16 +5,17 @@ package base
import (
"context"
- "errors"
"fmt"
"net/url"
"reflect"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
@@ -94,6 +95,7 @@ type AuthResult struct {
// AuthResultMetadata which contains meta data for the AuthResult
type AuthResultMetadata struct {
+ RefreshOn time.Time
TokenSource TokenSource
}
@@ -101,9 +103,8 @@ type TokenSource int
// These are all the types of token flows.
const (
- SourceUnknown TokenSource = 0
- IdentityProvider TokenSource = 1
- Cache TokenSource = 2
+ TokenSourceIdentityProvider TokenSource = 0
+ TokenSourceCache TokenSource = 1
)
// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
@@ -111,7 +112,6 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
if err := storageTokenResponse.AccessToken.Validate(); err != nil {
return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
}
-
account := storageTokenResponse.Account
accessToken := storageTokenResponse.AccessToken.Secret
grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
@@ -132,7 +132,8 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
GrantedScopes: grantedScopes,
DeclinedScopes: nil,
Metadata: AuthResultMetadata{
- TokenSource: Cache,
+ TokenSource: TokenSourceCache,
+ RefreshOn: storageTokenResponse.AccessToken.RefreshOn.T,
},
}, nil
}
@@ -146,10 +147,11 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco
Account: account,
IDToken: tokenResponse.IDToken,
AccessToken: tokenResponse.AccessToken,
- ExpiresOn: tokenResponse.ExpiresOn.T,
+ ExpiresOn: tokenResponse.ExpiresOn,
GrantedScopes: tokenResponse.GrantedScopes.Slice,
Metadata: AuthResultMetadata{
- TokenSource: IdentityProvider,
+ TokenSource: TokenSourceIdentityProvider,
+ RefreshOn: tokenResponse.RefreshOn.T,
},
}, nil
}
@@ -165,6 +167,8 @@ type Client struct {
AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
cacheAccessor cache.ExportReplace
cacheAccessorMu *sync.RWMutex
+ canRefresh map[string]*atomic.Value
+ canRefreshMu *sync.Mutex
}
// Option is an optional argument to the New constructor.
@@ -241,6 +245,8 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O
cacheAccessorMu: &sync.RWMutex{},
manager: storage.New(token),
pmanager: storage.NewPartitionedManager(token),
+ canRefresh: make(map[string]*atomic.Value),
+ canRefreshMu: &sync.Mutex{},
}
for _, o := range options {
if err = o(&client); err != nil {
@@ -345,6 +351,28 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if silent.Claims == "" {
ar, err = AuthResultFromStorage(storageTokenResponse)
if err == nil {
+ if rt := storageTokenResponse.AccessToken.RefreshOn.T; !rt.IsZero() && Now().After(rt) {
+ b.canRefreshMu.Lock()
+ refreshValue, ok := b.canRefresh[tenant]
+ if !ok {
+ refreshValue = &atomic.Value{}
+ refreshValue.Store(false)
+ b.canRefresh[tenant] = refreshValue
+ }
+ b.canRefreshMu.Unlock()
+ if refreshValue.CompareAndSwap(false, true) {
+ defer refreshValue.Store(false)
+ // Check whether the cached token is unchanged, because another goroutine
+ // may have refreshed it already. If the cached token differs from the one
+ // we read, it was already refreshed and no further refresh is needed.
+ if str, err := m.Read(ctx, authParams); err == nil && str.AccessToken.Secret == ar.AccessToken {
+ if tr, er := b.Token.Credential(ctx, authParams, silent.Credential); er == nil {
+ return b.AuthResultFromToken(ctx, authParams, tr)
+ }
+ }
+ }
+ }
ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
return ar, err
}
@@ -362,7 +390,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if err != nil {
return ar, err
}
- return b.AuthResultFromToken(ctx, authParams, token, true)
+ return b.AuthResultFromToken(ctx, authParams, token)
}
func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
@@ -391,7 +419,7 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui
return AuthResult{}, err
}
- return b.AuthResultFromToken(ctx, authParams, token, true)
+ return b.AuthResultFromToken(ctx, authParams, token)
}
// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
@@ -420,15 +448,12 @@ func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams Acq
authParams.UserAssertion = onBehalfOfParams.UserAssertion
token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
if err == nil {
- ar, err = b.AuthResultFromToken(ctx, authParams, token, true)
+ ar, err = b.AuthResultFromToken(ctx, authParams, token)
}
return ar, err
}
-func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
- if !cacheWrite {
- return NewAuthResult(token, shared.Account{})
- }
+func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
var m manager = b.manager
if authParams.AuthorizationType == authority.ATOnBehalfOf {
m = b.pmanager
@@ -458,6 +483,10 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au
return ar, err
}
+// Now wraps time.Now. It's a variable so tests can control the clock when
+// exercising proactive token refresh.
+var Now = time.Now
+
func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) {
if b.cacheAccessor != nil {
b.cacheAccessorMu.RLock()
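The `AcquireTokenSilent` change above uses a per-tenant `atomic.Value` as a single-flight gate: the goroutine that flips the flag refreshes proactively while the rest keep serving the still-valid cached token. A self-contained sketch of that gate pattern (names are illustrative, not the vendored code):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type refreshGate struct {
	mu    sync.Mutex
	flags map[string]*atomic.Value
}

func (g *refreshGate) tryAcquire(key string) (release func(), ok bool) {
	g.mu.Lock()
	v, found := g.flags[key]
	if !found {
		v = &atomic.Value{}
		v.Store(false)
		g.flags[key] = v
	}
	g.mu.Unlock()
	// Only the goroutine that flips false->true performs the refresh;
	// everyone else keeps using the still-valid cached value.
	if v.CompareAndSwap(false, true) {
		return func() { v.Store(false) }, true
	}
	return nil, false
}

func main() {
	g := &refreshGate{flags: make(map[string]*atomic.Value)}
	if release, ok := g.tryAcquire("tenant-a"); ok {
		defer release()
		fmt.Println("refreshing token for tenant-a")
	}
	if _, ok := g.tryAcquire("tenant-a"); !ok {
		fmt.Println("refresh already in flight; using cached token")
	}
}
```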
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
similarity index 95%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
index f9be90276d..7379e2233c 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
@@ -72,6 +72,7 @@ type AccessToken struct {
ClientID string `json:"client_id,omitempty"`
Secret string `json:"secret,omitempty"`
Scopes string `json:"target,omitempty"`
+ RefreshOn internalTime.Unix `json:"refresh_on,omitempty"`
ExpiresOn internalTime.Unix `json:"expires_on,omitempty"`
ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
CachedAt internalTime.Unix `json:"cached_at,omitempty"`
@@ -83,7 +84,7 @@ type AccessToken struct {
}
// NewAccessToken is the constructor for AccessToken.
-func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
+func NewAccessToken(homeID, env, realm, clientID string, cachedAt, refreshOn, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
return AccessToken{
HomeAccountID: homeID,
Environment: env,
@@ -93,6 +94,7 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
Secret: token,
Scopes: scopes,
CachedAt: internalTime.Unix{T: cachedAt.UTC()},
+ RefreshOn: internalTime.Unix{T: refreshOn.UTC()},
ExpiresOn: internalTime.Unix{T: expiresOn.UTC()},
ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
TokenType: tokenType,
@@ -102,8 +104,9 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
// Key outputs the key that can be used to uniquely look up this entry in a map.
func (a AccessToken) Key() string {
+ ks := []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}
key := strings.Join(
- []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes},
+ ks,
shared.CacheKeySeparator,
)
// add token type to key for new access tokens types. skip for bearer token type to
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
similarity index 99%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
index c093183306..ff07d4b5a4 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
@@ -114,7 +114,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes
realm,
clientID,
cachedAt,
- tokenResponse.ExpiresOn.T,
+ tokenResponse.RefreshOn.T,
+ tokenResponse.ExpiresOn,
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
similarity index 98%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
index 2221e60c43..84a234967f 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
@@ -173,6 +173,7 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces
environment := authParameters.AuthorityInfo.Host
realm := authParameters.AuthorityInfo.Tenant
clientID := authParameters.ClientID
+
target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
cachedAt := time.Now()
authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
@@ -193,7 +194,8 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces
realm,
clientID,
cachedAt,
- tokenResponse.ExpiresOn.T,
+ tokenResponse.RefreshOn.T,
+ tokenResponse.ExpiresOn,
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,
@@ -265,6 +267,9 @@ func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo author
}
func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+ if m.requests == nil {
+ return authority.InstanceDiscoveryMetadata{}, fmt.Errorf("the HTTP client used to fetch instance discovery metadata is nil")
+ }
m.aadCacheMu.Lock()
defer m.aadCacheMu.Unlock()
discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
@@ -459,6 +464,7 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s
func (m *Manager) writeAccount(account shared.Account) error {
key := account.Key()
+
m.contractMu.Lock()
defer m.contractMu.Unlock()
m.contract.Accounts[key] = account
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
index 7b673e3fe1..de1bf381f4 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
@@ -31,4 +31,6 @@ type TokenProviderResult struct {
AccessToken string
// ExpiresInSeconds is the lifetime of the token in seconds
ExpiresInSeconds int
+ // RefreshInSeconds indicates the suggested time to refresh the token, if any
+ RefreshInSeconds int
}
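
With the new RefreshInSeconds field, custom token providers can suggest a proactive refresh time in addition to the lifetime. A minimal sketch, assuming the public confidential package re-exports these types and the NewCredFromTokenProvider hook as in current releases (the token values are placeholders):

    package providersketch

    import (
        "context"

        "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
    )

    // newCred builds a credential from an external token provider that reports
    // both a lifetime and the new refresh hint.
    func newCred() confidential.Credential {
        return confidential.NewCredFromTokenProvider(func(ctx context.Context, p confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) {
            return confidential.TokenProviderResult{
                AccessToken:      "token-from-external-provider", // placeholder
                ExpiresInSeconds: 3600,
                RefreshInSeconds: 1800, // new field: suggest a proactive refresh after 30 minutes
            }, nil
        })
    }
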
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
index fda5d7dd33..cda678e334 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -146,7 +146,8 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
// change this to s.error() and make s.error() write the failPage instead of an error code.
_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
- s.putResult(Result{Err: fmt.Errorf(desc)})
+ s.putResult(Result{Err: fmt.Errorf("%s", desc)})
+
return
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
index e065313444..738a29eb9d 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
@@ -111,7 +111,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
Scopes: scopes,
TenantID: authParams.AuthorityInfo.Tenant,
}
- tr, err := cred.TokenProvider(ctx, params)
+ pr, err := cred.TokenProvider(ctx, params)
if err != nil {
if len(scopes) == 0 {
err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
@@ -119,14 +119,18 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
}
return accesstokens.TokenResponse{}, err
}
- return accesstokens.TokenResponse{
- TokenType: authParams.AuthnScheme.AccessTokenType(),
- AccessToken: tr.AccessToken,
- ExpiresOn: internalTime.DurationTime{
- T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
- },
+ tr := accesstokens.TokenResponse{
+ TokenType: authParams.AuthnScheme.AccessTokenType(),
+ AccessToken: pr.AccessToken,
+ ExpiresOn: now.Add(time.Duration(pr.ExpiresInSeconds) * time.Second),
GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
- }, nil
+ }
+ if pr.RefreshInSeconds > 0 {
+ tr.RefreshOn = internalTime.DurationTime{
+ T: now.Add(time.Duration(pr.RefreshInSeconds) * time.Second),
+ }
+ }
+ return tr, nil
}
if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
index a7b7b0742d..d738c7591e 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
@@ -17,6 +17,7 @@ import (
/* #nosec */
"crypto/sha1"
+ "crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
@@ -68,7 +69,7 @@ type DeviceCodeResponse struct {
UserCode string `json:"user_code"`
DeviceCode string `json:"device_code"`
- VerificationURL string `json:"verification_url"`
+ VerificationURL string `json:"verification_uri"`
ExpiresIn int `json:"expires_in"`
Interval int `json:"interval"`
Message string `json:"message"`
@@ -112,19 +113,31 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
}
return c.AssertionCallback(ctx, options)
}
-
- token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
+ claims := jwt.MapClaims{
"aud": authParams.Endpoints.TokenEndpoint,
"exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)),
"iss": authParams.ClientID,
"jti": uuid.New().String(),
"nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),
"sub": authParams.ClientID,
- })
+ }
+
+ isADFSorDSTS := authParams.AuthorityInfo.AuthorityType == authority.ADFS ||
+ authParams.AuthorityInfo.AuthorityType == authority.DSTS
+
+ var signingMethod jwt.SigningMethod = jwt.SigningMethodPS256
+ thumbprintKey := "x5t#S256"
+
+ if isADFSorDSTS {
+ signingMethod = jwt.SigningMethodRS256
+ thumbprintKey = "x5t"
+ }
+
+ token := jwt.NewWithClaims(signingMethod, claims)
token.Header = map[string]interface{}{
- "alg": "RS256",
- "typ": "JWT",
- "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)),
+ "alg": signingMethod.Alg(),
+ "typ": "JWT",
+ thumbprintKey: base64.StdEncoding.EncodeToString(thumbprint(c.Cert, signingMethod.Alg())),
}
if authParams.SendX5C {
@@ -133,17 +146,23 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
assertion, err := token.SignedString(c.Key)
if err != nil {
- return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err)
+ return "", fmt.Errorf("unable to sign JWT token: %w", err)
}
+
return assertion, nil
}
-// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT.
+// thumbprint hashes the certificate's DER bytes for the JWT x5t (SHA-1) or
+// x5t#S256 (SHA-256) header parameter, depending on the signing algorithm.
// https://tools.ietf.org/html/rfc7517#section-4.8
-func thumbprint(cert *x509.Certificate) []byte {
- /* #nosec */
- a := sha1.Sum(cert.Raw)
- return a[:]
+func thumbprint(cert *x509.Certificate, alg string) []byte {
+ switch alg {
+ case jwt.SigningMethodRS256.Name: // identity providers like ADFS don't support SHA-256 assertions, so SHA-1 must remain supported
+ hash := sha1.Sum(cert.Raw) /* #nosec */
+ return hash[:]
+ default:
+ hash := sha256.Sum256(cert.Raw)
+ return hash[:]
+ }
}
// Client represents the REST calls to get tokens from token generator backends.
@@ -262,11 +281,7 @@ func (c Client) FromClientSecret(ctx context.Context, authParameters authority.A
qv.Set(clientID, authParameters.ClientID)
addScopeQueryParam(qv, authParameters)
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromClientSecret(): %w", err)
- }
- return token, nil
+ return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) {
@@ -281,11 +296,7 @@ func (c Client) FromAssertion(ctx context.Context, authParameters authority.Auth
qv.Set(clientInfo, clientInfoVal)
addScopeQueryParam(qv, authParameters)
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromAssertion(): %w", err)
- }
- return token, nil
+ return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) {
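
For reference, a self-contained sketch of the thumbprint selection introduced above: SHA-1 stays available only for the RS256 path used by ADFS and dSTS, and everything else moves to SHA-256 (the package and helper names here are illustrative):

    package thumbprintsketch

    import (
        "crypto/sha1"
        "crypto/sha256"
        "crypto/x509"
        "encoding/base64"
    )

    // certThumbprint mirrors the vendored logic: SHA-1 for RS256 (legacy
    // providers such as ADFS), SHA-256 for everything else.
    func certThumbprint(cert *x509.Certificate, alg string) string {
        if alg == "RS256" {
            h := sha1.Sum(cert.Raw) // #nosec -- required where only x5t is accepted
            return base64.StdEncoding.EncodeToString(h[:])
        }
        h := sha256.Sum256(cert.Raw)
        return base64.StdEncoding.EncodeToString(h[:])
    }
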
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
index 3107b45c11..32dde7b76b 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"reflect"
+ "strconv"
"strings"
"time"
@@ -173,14 +174,75 @@ type TokenResponse struct {
FamilyID string `json:"foci"`
IDToken IDToken `json:"id_token"`
ClientInfo ClientInfo `json:"client_info"`
- ExpiresOn internalTime.DurationTime `json:"expires_in"`
+ RefreshOn internalTime.DurationTime `json:"refresh_in,omitempty"`
+ ExpiresOn time.Time `json:"-"`
ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"`
GrantedScopes Scopes `json:"scope"`
DeclinedScopes []string // This is derived
AdditionalFields map[string]interface{}
+ scopesComputed bool
+}
+
+func (tr *TokenResponse) UnmarshalJSON(data []byte) error {
+ type Alias TokenResponse
+ aux := &struct {
+ ExpiresIn internalTime.DurationTime `json:"expires_in,omitempty"`
+ ExpiresOn any `json:"expires_on,omitempty"`
+ *Alias
+ }{
+ Alias: (*Alias)(tr),
+ }
+
+ // Unmarshal the JSON data into the aux struct
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ // Function to parse different date formats
+ // This is a workaround for the issue described here:
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/4963
+ parseExpiresOn := func(expiresOn string) (time.Time, error) {
+ var formats = []string{
+ "01/02/2006 15:04:05", // MM/dd/yyyy HH:mm:ss
+ "2006-01-02 15:04:05", // yyyy-MM-dd HH:mm:ss
+ time.RFC3339Nano, // ISO 8601 (with nanosecond precision)
+ }
+
+ for _, format := range formats {
+ if t, err := time.Parse(format, expiresOn); err == nil {
+ return t, nil
+ }
+ }
+ return time.Time{}, fmt.Errorf("invalid ExpiresOn format: %s", expiresOn)
+ }
- scopesComputed bool
+ if expiresOnStr, ok := aux.ExpiresOn.(string); ok {
+ if ts, err := strconv.ParseInt(expiresOnStr, 10, 64); err == nil {
+ tr.ExpiresOn = time.Unix(ts, 0)
+ return nil
+ }
+ if expiresOnStr != "" {
+ if t, err := parseExpiresOn(expiresOnStr); err != nil {
+ return err
+ } else {
+ tr.ExpiresOn = t
+ return nil
+ }
+ }
+ }
+
+ // Check if ExpiresOn is a JSON number (a Unix timestamp)
+ if expiresOnNum, ok := aux.ExpiresOn.(float64); ok {
+ tr.ExpiresOn = time.Unix(int64(expiresOnNum), 0)
+ return nil
+ }
+
+ if !aux.ExpiresIn.T.IsZero() {
+ tr.ExpiresOn = aux.ExpiresIn.T
+ return nil
+ }
+ return errors.New("expires_in and expires_on are both missing or invalid")
}
// ComputeScope computes the final scopes based on what was granted by the server and
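
The custom UnmarshalJSON above accepts expires_on as a Unix timestamp (string or number) or one of a few absolute date layouts, and falls back to the relative expires_in field. Since the package is internal, here is a standalone sketch that mirrors (rather than calls) the same fallback chain:

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    // parseExpiresOn mirrors the vendored fallback chain: Unix seconds first,
    // then a small set of absolute date layouts.
    func parseExpiresOn(v string) (time.Time, error) {
        if ts, err := strconv.ParseInt(v, 10, 64); err == nil {
            return time.Unix(ts, 0), nil
        }
        for _, layout := range []string{
            "01/02/2006 15:04:05", // MM/dd/yyyy HH:mm:ss
            "2006-01-02 15:04:05", // yyyy-MM-dd HH:mm:ss
            time.RFC3339Nano,      // ISO 8601
        } {
            if t, err := time.Parse(layout, v); err == nil {
                return t, nil
            }
        }
        return time.Time{}, fmt.Errorf("invalid expires_on format: %s", v)
    }

    func main() {
        for _, v := range []string{"1735689600", "04/15/2025 10:00:00", "2025-04-15T10:00:00Z"} {
            t, err := parseExpiresOn(v)
            fmt.Println(t, err)
        }
    }
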
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
index d62aac74eb..7906803669 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
@@ -98,7 +98,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
if resp != nil {
if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
+ return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))}
}
}
return nil
@@ -221,7 +221,7 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values
}
if resp != nil {
if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))
+ return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))}
}
}
return nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
index eb16b405c4..5e551abc83 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
@@ -5,4 +5,4 @@
package version
// Version is the version of this client package that is communicated to the server.
-const Version = "1.2.0"
+const Version = "1.4.2"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go
new file mode 100644
index 0000000000..d7cffc295e
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go
@@ -0,0 +1,28 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "net/http"
+ "os"
+)
+
+func createAzureMLAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(msiEndpointEnvVar), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("secret", os.Getenv(msiSecretEnvVar))
+ q := req.URL.Query()
+ q.Set(apiVersionQueryParameterName, azureMLAPIVersion)
+ q.Set(resourceQueryParameterName, resource)
+ q.Set("clientid", os.Getenv("DEFAULT_IDENTITY_CLIENT_ID"))
+ if cid, ok := id.(UserAssignedClientID); ok {
+ q.Set("clientid", string(cid))
+ }
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go
new file mode 100644
index 0000000000..be9a0bca38
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go
@@ -0,0 +1,37 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+func createCloudShellAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
+ msiEndpoint := os.Getenv(msiEndpointEnvVar)
+ msiEndpointParsed, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", msiEndpoint, err)
+ }
+
+ data := url.Values{}
+ data.Set(resourceQueryParameterName, resource)
+ msiDataEncoded := data.Encode()
+ body := io.NopCloser(strings.NewReader(msiDataEncoded))
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, msiEndpointParsed.String(), body)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+
+ req.Header.Set(metaHTTPHeaderName, "true")
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ return req, nil
+}
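
For reference, the helper above issues a POST to the endpoint from MSI_ENDPOINT with a Metadata header and a form-encoded body; a standalone sketch of the request it builds (the endpoint URL here is a hypothetical stand-in):

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "net/url"
        "strings"
    )

    func main() {
        body := url.Values{"resource": []string{"https://management.azure.com"}}.Encode()
        req, err := http.NewRequestWithContext(context.Background(), http.MethodPost,
            "http://localhost:50342/oauth2/token", strings.NewReader(body)) // hypothetical Cloud Shell endpoint
        if err != nil {
            panic(err)
        }
        req.Header.Set("Metadata", "true")
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        fmt.Println(req.Method, req.URL, body)
    }
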
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go
new file mode 100644
index 0000000000..ca3de4325f
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go
@@ -0,0 +1,717 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package managedidentity provides a client for retrieval of Managed Identity applications.
+The Managed Identity Client is used to acquire a token for a managed identity assigned to
+an Azure resource, such as an Azure Function, App Service, or virtual machine, without
+using credentials.
+*/
+package managedidentity
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
+const (
+ // DefaultToIMDS indicates that the source is defaulted to IMDS when no environment variables are set.
+ DefaultToIMDS Source = "DefaultToIMDS"
+ AzureArc Source = "AzureArc"
+ ServiceFabric Source = "ServiceFabric"
+ CloudShell Source = "CloudShell"
+ AzureML Source = "AzureML"
+ AppService Source = "AppService"
+
+ // General request query parameter names
+ metaHTTPHeaderName = "Metadata"
+ apiVersionQueryParameterName = "api-version"
+ resourceQueryParameterName = "resource"
+ wwwAuthenticateHeaderName = "www-authenticate"
+
+ // UAMI query parameter name
+ miQueryParameterClientId = "client_id"
+ miQueryParameterObjectId = "object_id"
+ miQueryParameterPrincipalId = "principal_id"
+ miQueryParameterResourceIdIMDS = "msi_res_id"
+ miQueryParameterResourceId = "mi_res_id"
+
+ // IMDS
+ imdsDefaultEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+ imdsAPIVersion = "2018-02-01"
+ systemAssignedManagedIdentity = "system_assigned_managed_identity"
+
+ // Azure Arc
+ azureArcEndpoint = "http://127.0.0.1:40342/metadata/identity/oauth2/token"
+ azureArcAPIVersion = "2020-06-01"
+ azureArcFileExtension = ".key"
+ azureArcMaxFileSizeBytes int64 = 4096
+ linuxTokenPath = "/var/opt/azcmagent/tokens" // #nosec G101
+ linuxHimdsPath = "/opt/azcmagent/bin/himds"
+ azureConnectedMachine = "AzureConnectedMachineAgent"
+ himdsExecutableName = "himds.exe"
+ tokenName = "Tokens"
+
+ // App Service
+ appServiceAPIVersion = "2019-08-01"
+
+ // AzureML
+ azureMLAPIVersion = "2017-09-01"
+ // Service Fabric
+ serviceFabricAPIVersion = "2019-07-01-preview"
+
+ // Environment Variables
+ identityEndpointEnvVar = "IDENTITY_ENDPOINT"
+ identityHeaderEnvVar = "IDENTITY_HEADER"
+ azurePodIdentityAuthorityHostEnvVar = "AZURE_POD_IDENTITY_AUTHORITY_HOST"
+ imdsEndVar = "IMDS_ENDPOINT"
+ msiEndpointEnvVar = "MSI_ENDPOINT"
+ msiSecretEnvVar = "MSI_SECRET"
+ identityServerThumbprintEnvVar = "IDENTITY_SERVER_THUMBPRINT"
+
+ defaultRetryCount = 3
+)
+
+var retryCodesForIMDS = []int{
+ http.StatusNotFound, // 404
+ http.StatusGone, // 410
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusNotImplemented, // 501
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ http.StatusHTTPVersionNotSupported, // 505
+ http.StatusVariantAlsoNegotiates, // 506
+ http.StatusInsufficientStorage, // 507
+ http.StatusLoopDetected, // 508
+ http.StatusNotExtended, // 510
+ http.StatusNetworkAuthenticationRequired, // 511
+}
+
+var retryStatusCodes = []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+}
+
+var getAzureArcPlatformPath = func(platform string) string {
+ switch platform {
+ case "windows":
+ return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, tokenName)
+ case "linux":
+ return linuxTokenPath
+ default:
+ return ""
+ }
+}
+
+var getAzureArcHimdsFilePath = func(platform string) string {
+ switch platform {
+ case "windows":
+ return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, himdsExecutableName)
+ case "linux":
+ return linuxHimdsPath
+ default:
+ return ""
+ }
+}
+
+type Source string
+
+type ID interface {
+ value() string
+}
+
+type systemAssignedValue string // it's private so the input stays consistent; construct it via SystemAssigned().
+type UserAssignedClientID string
+type UserAssignedObjectID string
+type UserAssignedResourceID string
+
+func (s systemAssignedValue) value() string { return string(s) }
+func (c UserAssignedClientID) value() string { return string(c) }
+func (o UserAssignedObjectID) value() string { return string(o) }
+func (r UserAssignedResourceID) value() string { return string(r) }
+func SystemAssigned() ID {
+ return systemAssignedValue(systemAssignedManagedIdentity)
+}
+
+// cache never uses the client because instance discovery is always disabled.
+var cacheManager *storage.Manager = storage.New(nil)
+
+type Client struct {
+ httpClient ops.HTTPClient
+ miType ID
+ source Source
+ authParams authority.AuthParams
+ retryPolicyEnabled bool
+ canRefresh *atomic.Value
+}
+
+type AcquireTokenOptions struct {
+ claims string
+}
+
+type ClientOption func(*Client)
+
+type AcquireTokenOption func(o *AcquireTokenOptions)
+
+// WithClaims sets additional claims to request for the token, such as those required by token revocation or conditional access policies.
+// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded.
+func WithClaims(claims string) AcquireTokenOption {
+ return func(o *AcquireTokenOptions) {
+ o.claims = claims
+ }
+}
+
+// WithHTTPClient allows for a custom HTTP client to be set.
+func WithHTTPClient(httpClient ops.HTTPClient) ClientOption {
+ return func(c *Client) {
+ c.httpClient = httpClient
+ }
+}
+
+func WithRetryPolicyDisabled() ClientOption {
+ return func(c *Client) {
+ c.retryPolicyEnabled = false
+ }
+}
+
+// New creates a Client used to acquire tokens for the configured managed identity.
+// ID: [SystemAssigned], [UserAssignedClientID], [UserAssignedResourceID], [UserAssignedObjectID]
+//
+// Options: [WithHTTPClient]
+func New(id ID, options ...ClientOption) (Client, error) {
+ source, err := GetSource()
+ if err != nil {
+ return Client{}, err
+ }
+
+ // Check for user-assigned restrictions based on the source
+ switch source {
+ case AzureArc:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Azure Arc doesn't support user-assigned managed identities")
+ }
+ case AzureML:
+ switch id.(type) {
+ case UserAssignedObjectID, UserAssignedResourceID:
+ return Client{}, errors.New("Azure ML supports specifying a user-assigned managed identity by client ID only")
+ }
+ case CloudShell:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Cloud Shell doesn't support user-assigned managed identities")
+ }
+ case ServiceFabric:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Service Fabric API doesn't support specifying a user-assigned identity. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
+ }
+ }
+
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case UserAssignedResourceID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case UserAssignedObjectID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case systemAssignedValue:
+ default:
+ return Client{}, fmt.Errorf("unsupported type %T", id)
+ }
+ zero := atomic.Value{}
+ zero.Store(false)
+ client := Client{
+ miType: id,
+ httpClient: shared.DefaultClient,
+ retryPolicyEnabled: true,
+ source: source,
+ canRefresh: &zero,
+ }
+ for _, option := range options {
+ option(&client)
+ }
+ fakeAuthInfo, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/managed_identity", false, true)
+ if err != nil {
+ return Client{}, err
+ }
+ client.authParams = authority.NewAuthParams(client.miType.value(), fakeAuthInfo)
+ return client, nil
+}
+
+// GetSource detects and returns the managed identity source available on the environment.
+func GetSource() (Source, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ identityHeader := os.Getenv(identityHeaderEnvVar)
+ identityServerThumbprint := os.Getenv(identityServerThumbprintEnvVar)
+ msiEndpoint := os.Getenv(msiEndpointEnvVar)
+ msiSecret := os.Getenv(msiSecretEnvVar)
+ imdsEndpoint := os.Getenv(imdsEndVar)
+
+ if identityEndpoint != "" && identityHeader != "" {
+ if identityServerThumbprint != "" {
+ return ServiceFabric, nil
+ }
+ return AppService, nil
+ } else if msiEndpoint != "" {
+ if msiSecret != "" {
+ return AzureML, nil
+ } else {
+ return CloudShell, nil
+ }
+ } else if isAzureArcEnvironment(identityEndpoint, imdsEndpoint) {
+ return AzureArc, nil
+ }
+
+ return DefaultToIMDS, nil
+}
+
+// now wraps time.Now() so that token-refresh logic can be tested against a
+// controlled clock.
+var now = time.Now
+
+// AcquireToken acquires a token from the configured managed identity on an Azure resource.
+//
+// Resource: the scope the application is requesting access to
+// Options: [WithClaims]
+func (c Client) AcquireToken(ctx context.Context, resource string, options ...AcquireTokenOption) (AuthResult, error) {
+ resource = strings.TrimSuffix(resource, "/.default")
+ o := AcquireTokenOptions{}
+ for _, option := range options {
+ option(&o)
+ }
+ c.authParams.Scopes = []string{resource}
+
+ // ignore cached access tokens when given claims
+ if o.claims == "" {
+ stResp, err := cacheManager.Read(ctx, c.authParams)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ ar, err := base.AuthResultFromStorage(stResp)
+ if err == nil {
+ if !stResp.AccessToken.RefreshOn.T.IsZero() && !stResp.AccessToken.RefreshOn.T.After(now()) && c.canRefresh.CompareAndSwap(false, true) {
+ defer c.canRefresh.Store(false)
+ if tr, er := c.getToken(ctx, resource); er == nil {
+ return tr, nil
+ }
+ }
+ ar.AccessToken, err = c.authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
+ return ar, err
+ }
+ }
+ return c.getToken(ctx, resource)
+}
+
+func (c Client) getToken(ctx context.Context, resource string) (AuthResult, error) {
+ switch c.source {
+ case AzureArc:
+ return c.acquireTokenForAzureArc(ctx, resource)
+ case AzureML:
+ return c.acquireTokenForAzureML(ctx, resource)
+ case CloudShell:
+ return c.acquireTokenForCloudShell(ctx, resource)
+ case DefaultToIMDS:
+ return c.acquireTokenForIMDS(ctx, resource)
+ case AppService:
+ return c.acquireTokenForAppService(ctx, resource)
+ case ServiceFabric:
+ return c.acquireTokenForServiceFabric(ctx, resource)
+ default:
+ return AuthResult{}, fmt.Errorf("unsupported source %q", c.source)
+ }
+}
+
+func (c Client) acquireTokenForAppService(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAppServiceAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForIMDS(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createIMDSAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForCloudShell(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createCloudShellAuthRequest(ctx, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForAzureML(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAzureMLAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForServiceFabric(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createServiceFabricAuthRequest(ctx, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForAzureArc(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAzureArcAuthRequest(ctx, resource, "")
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ response, err := c.httpClient.Do(req)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ defer response.Body.Close()
+
+ if response.StatusCode != http.StatusUnauthorized {
+ return AuthResult{}, fmt.Errorf("expected a 401 response, received %d", response.StatusCode)
+ }
+
+ secret, err := c.getAzureArcSecretKey(response, runtime.GOOS)
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ secondRequest, err := createAzureArcAuthRequest(ctx, resource, string(secret))
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ tokenResponse, err := c.getTokenForRequest(secondRequest, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func authResultFromToken(authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
+ if cacheManager == nil {
+ return AuthResult{}, errors.New("cache instance is nil")
+ }
+ account, err := cacheManager.Write(authParams, token)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ // if refreshOn is not set, set it to half of the time until expiry if expiry is more than 2 hours away
+ if token.RefreshOn.T.IsZero() {
+ if lifetime := time.Until(token.ExpiresOn); lifetime > 2*time.Hour {
+ token.RefreshOn.T = time.Now().Add(lifetime / 2)
+ }
+ }
+ ar, err := base.NewAuthResult(token, account)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
+ return ar, err
+}
+
+// contains checks if the element is present in the list.
+func contains[T comparable](list []T, element T) bool {
+ for _, v := range list {
+ if v == element {
+ return true
+ }
+ }
+ return false
+}
+
+// retry performs an HTTP request with retries based on the provided options.
+func (c Client) retry(maxRetries int, req *http.Request) (*http.Response, error) {
+ var resp *http.Response
+ var err error
+ for attempt := 0; attempt < maxRetries; attempt++ {
+ tryCtx, tryCancel := context.WithTimeout(req.Context(), time.Minute)
+ defer tryCancel()
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }
+ cloneReq := req.Clone(tryCtx)
+ resp, err = c.httpClient.Do(cloneReq)
+ retrylist := retryStatusCodes
+ if c.source == DefaultToIMDS {
+ retrylist = retryCodesForIMDS
+ }
+ if err == nil && !contains(retrylist, resp.StatusCode) {
+ return resp, nil
+ }
+ select {
+ case <-time.After(time.Second):
+ case <-req.Context().Done():
+ err = req.Context().Err()
+ return resp, err
+ }
+ }
+ return resp, err
+}
+
+func (c Client) getTokenForRequest(req *http.Request, resource string) (accesstokens.TokenResponse, error) {
+ r := accesstokens.TokenResponse{}
+ var resp *http.Response
+ var err error
+
+ if c.retryPolicyEnabled {
+ resp, err = c.retry(defaultRetryCount, req)
+ } else {
+ resp, err = c.httpClient.Do(req)
+ }
+ if err != nil {
+ return r, err
+ }
+ responseBytes, err := io.ReadAll(resp.Body)
+ defer resp.Body.Close()
+ if err != nil {
+ return r, err
+ }
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusAccepted:
+ default:
+ sd := strings.TrimSpace(string(responseBytes))
+ if sd != "" {
+ return r, errors.CallErr{
+ Req: req,
+ Resp: resp,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s",
+ req.URL.String(),
+ req.Method,
+ resp.StatusCode,
+ sd),
+ }
+ }
+ return r, errors.CallErr{
+ Req: req,
+ Resp: resp,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, resp.StatusCode),
+ }
+ }
+
+ err = json.Unmarshal(responseBytes, &r)
+ if err != nil {
+ return r, errors.InvalidJsonErr{
+ Err: fmt.Errorf("error parsing the json response: %s", err),
+ }
+ }
+ r.GrantedScopes.Slice = append(r.GrantedScopes.Slice, resource)
+
+ return r, err
+}
+
+func createAppServiceAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeaderEnvVar))
+ q := req.URL.Query()
+ q.Set("api-version", appServiceAPIVersion)
+ q.Set("resource", resource)
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ q.Set(miQueryParameterClientId, string(t))
+ case UserAssignedResourceID:
+ q.Set(miQueryParameterResourceId, string(t))
+ case UserAssignedObjectID:
+ q.Set(miQueryParameterObjectId, string(t))
+ case systemAssignedValue:
+ default:
+ return nil, fmt.Errorf("unsupported type %T", id)
+ }
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
+
+func createIMDSAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ msiEndpoint, err := url.Parse(imdsDefaultEndpoint)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", imdsDefaultEndpoint, err)
+ }
+ msiParameters := msiEndpoint.Query()
+ msiParameters.Set(apiVersionQueryParameterName, imdsAPIVersion)
+ msiParameters.Set(resourceQueryParameterName, resource)
+
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ msiParameters.Set(miQueryParameterClientId, string(t))
+ case UserAssignedResourceID:
+ msiParameters.Set(miQueryParameterResourceIdIMDS, string(t))
+ case UserAssignedObjectID:
+ msiParameters.Set(miQueryParameterObjectId, string(t))
+ case systemAssignedValue: // not adding anything
+ default:
+ return nil, fmt.Errorf("unsupported type %T", id)
+ }
+
+ msiEndpoint.RawQuery = msiParameters.Encode()
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+ req.Header.Set(metaHTTPHeaderName, "true")
+ return req, nil
+}
+
+func createAzureArcAuthRequest(ctx context.Context, resource string, key string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ if identityEndpoint == "" {
+ identityEndpoint = azureArcEndpoint
+ }
+ msiEndpoint, parseErr := url.Parse(identityEndpoint)
+
+ if parseErr != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", identityEndpoint, parseErr)
+ }
+
+ msiParameters := msiEndpoint.Query()
+ msiParameters.Set(apiVersionQueryParameterName, azureArcAPIVersion)
+ msiParameters.Set(resourceQueryParameterName, resource)
+
+ msiEndpoint.RawQuery = msiParameters.Encode()
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+ req.Header.Set(metaHTTPHeaderName, "true")
+
+ if key != "" {
+ req.Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
+ }
+
+ return req, nil
+}
+
+func isAzureArcEnvironment(identityEndpoint, imdsEndpoint string) bool {
+ if identityEndpoint != "" && imdsEndpoint != "" {
+ return true
+ }
+ himdsFilePath := getAzureArcHimdsFilePath(runtime.GOOS)
+ if himdsFilePath != "" {
+ if _, err := os.Stat(himdsFilePath); err == nil {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *Client) getAzureArcSecretKey(response *http.Response, platform string) (string, error) {
+ wwwAuthenticateHeader := response.Header.Get(wwwAuthenticateHeaderName)
+
+ if len(wwwAuthenticateHeader) == 0 {
+ return "", errors.New("response has no www-authenticate header")
+ }
+
+ // check if the platform is supported
+ expectedSecretFilePath := getAzureArcPlatformPath(platform)
+ if expectedSecretFilePath == "" {
+ return "", errors.New("platform not supported, expected linux or windows")
+ }
+
+ parts := strings.Split(wwwAuthenticateHeader, "Basic realm=")
+ if len(parts) < 2 {
+ return "", fmt.Errorf("basic realm= not found in the string, instead found: %s", wwwAuthenticateHeader)
+ }
+
+ secretFilePath := parts[1]
+
+ // check that the file in the file path is a .key file
+ fileName := filepath.Base(secretFilePath)
+ if !strings.HasSuffix(fileName, azureArcFileExtension) {
+ return "", fmt.Errorf("invalid file extension, expected %s, got %s", azureArcFileExtension, filepath.Ext(fileName))
+ }
+
+ // check that file path from header matches the expected file path for the platform
+ if expectedSecretFilePath != filepath.Dir(secretFilePath) {
+ return "", fmt.Errorf("invalid file path, expected %s, got %s", expectedSecretFilePath, filepath.Dir(secretFilePath))
+ }
+
+ fileInfo, err := os.Stat(secretFilePath)
+ if err != nil {
+ return "", fmt.Errorf("failed to get metadata for %s due to error: %s", secretFilePath, err)
+ }
+
+ // Throw an error if the secret file's size is greater than 4096 bytes
+ if s := fileInfo.Size(); s > azureArcMaxFileSizeBytes {
+ return "", fmt.Errorf("invalid secret file size, expected %d, file size was %d", azureArcMaxFileSizeBytes, s)
+ }
+
+ // Attempt to read the contents of the secret file
+ secret, err := os.ReadFile(secretFilePath)
+ if err != nil {
+ return "", fmt.Errorf("failed to read %q due to error: %s", secretFilePath, err)
+ }
+
+ return string(secret), nil
+}
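
Putting the new package together, a minimal usage sketch (the resource is an example value, error handling is trimmed, and the AuthResult fields come from the shared base package):

    package main

    import (
        "context"
        "fmt"

        "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
    )

    func main() {
        // System-assigned identity; user-assigned variants pass a client, object,
        // or resource ID instead, subject to the per-source restrictions above.
        client, err := managedidentity.New(managedidentity.SystemAssigned())
        if err != nil {
            panic(err)
        }
        // A trailing "/.default" is stripped; cached tokens are reused, and a
        // proactive refresh kicks in once RefreshOn has passed.
        result, err := client.AcquireToken(context.Background(), "https://management.azure.com")
        if err != nil {
            panic(err)
        }
        fmt.Println(result.ExpiresOn)
    }
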
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go
new file mode 100644
index 0000000000..535065e9d9
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go
@@ -0,0 +1,25 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "net/http"
+ "os"
+)
+
+func createServiceFabricAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("Secret", os.Getenv(identityHeaderEnvVar))
+ q := req.URL.Query()
+ q.Set("api-version", serviceFabricAPIVersion)
+ q.Set("resource", resource)
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
index 392e5e43f7..7beed26174 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -51,6 +51,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
// clientOptions configures the Client's behavior.
@@ -387,7 +394,7 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
if err != nil {
return AuthResult{}, err
}
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type DeviceCodeResult = accesstokens.DeviceCodeResult
@@ -412,7 +419,7 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error
if err != nil {
return AuthResult{}, err
}
- return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
+ return d.client.base.AuthResultFromToken(ctx, d.authParams, token)
}
// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode
@@ -687,7 +694,7 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string,
return AuthResult{}, err
}
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type interactiveAuthResult struct {
diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md
index 64ae168931..70b50206e6 100644
--- a/vendor/github.com/digitalocean/godo/CHANGELOG.md
+++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md
@@ -1,5 +1,72 @@
# Change Log
+## [v1.144.0] - 2025-04-24
+
+- #818 - @dweinshenker - Support Valkey in DatabaseOptions
+
+## [v1.143.0] - 2025-04-22
+
+- #815 - @StephenVarela - Support Load Balancers tls-cipher-policy
+
+## [v1.142.0] - 2025-03-27
+
+- #813 - @lfundaro-do - partner-network-connect: fix typo
+- #811 - @lfundaro-do - fix partner attachment rename
+- #810 - @apinonformoso - VPC-4359: remove custom unmarshaler for PNCs
+- #809 - @apinonformoso - hotfix: json field name
+- #808 - @apinonformoso - fix partner network connect json tags
+- #807 - @bentranter - Bump Go version to v1.23
+
+## [v1.141.0] - 2025-03-20
+
+- #805 - @singhsaubhikdo - BLOCK-4316: Adds region param in ListSnapshot for resource type volume
+- #802 - @apinonformoso - VPC-4312: rename partner interconnect attachment to partner network connect
+- #774 - @blesswinsamuel - APPS-10284 Remove "closed beta" note in archive feature to prep for GA release
+- #797 - @kperath - add support for cluster status messages
+
+## [v1.140.0] - 2025-03-14
+
+- #800 - @lee-aaron - support Spaces Keys GET by Access Key ID
+
+## [v1.139.0] - 2025-03-12
+
+- #798 - @dylanrhysscott - Fix: Update godo to use simplified template response and provide consistent struct naming
+- #796 - @apinonformoso - fix partner interconnect attachment json request response
+- #795 - @dylanrhysscott - CON-11904 Ensure taints are correctly returned via node template endpoint
+- #794 - @brunograsselli - Update partner interconnect attachment comments
+- #793 - @apinonformoso - add auth_key field
+- #789 - @guptado - [VPC-3917] Update get service key response model
+
+## [v1.138.0] - 2025-02-18
+
+- #785 - @guptado - Support partner interconnect GetBgpAuthKey and RegenerateServiceKey operations
+- #787 - @andrewsomething - ci: upgrade to actions/cache@v4
+- #786 - @m3co-code - add flags for doks routing-agent plugin
+- #784 - @asaha2 - Support name and id filters for list op
+
+## [v1.137.0] - 2025-02-12
+
+- #782 - @apinonformoso - fix partner interconnect json tag
+- #781 - @dylanrhysscott - CON-11810 Implement GetNodePoolTemplate endpoint for DOKS godo client
+
+## [v1.136.0] - 2025-01-28
+
+- #776 - @danaelhe - Databases: Support online-migrations
+- #777 - @apinonformoso - update bgp to be a pointer
+
+## [v1.135.0] - 2025-01-27
+- #766 - @dhij - kubernetes: add cluster autoscaler config
+- #775 - @jvasilevsky - LBASA-3620: add network_stack field to load balancers model
+- #773 - @blesswinsamuel - Add field to customize the offline page during app maintenance
+
+## [v1.134.0] - 2025-01-15
+- #771 - @d-honeybadger - add ID field to KubernetesClusterUser response
+- #768 - @lee-aaron - support Spaces Keys API
+
+## [v1.133.0] - 2025-01-10
+- #769 - @guptado - support partner interconnect attachment operations
+- #767 - @loosla - [kubernetes]: make kubernetes maintenance_policy day case insensitive
+
## [v1.132.0] - 2024-12-17
- #764 - @greeshmapill - APPS-9365: Add bitbucket source to App Spec
diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go
index 63457cda44..99fc2e90ce 100644
--- a/vendor/github.com/digitalocean/godo/apps.gen.go
+++ b/vendor/github.com/digitalocean/godo/apps.gen.go
@@ -468,8 +468,10 @@ type AppLogDestinationSpecPapertrail struct {
type AppMaintenanceSpec struct {
// Indicates whether maintenance mode should be enabled for the app.
Enabled bool `json:"enabled,omitempty"`
- // Indicates whether the app should be archived. Setting this to true implies that enabled is set to true. Note that this feature is currently in closed beta.
+ // Indicates whether the app should be archived. Setting this to true implies that enabled is set to true.
Archive bool `json:"archive,omitempty"`
+ // A custom offline page to display when maintenance mode is enabled or the app is archived.
+ OfflinePageURL string `json:"offline_page_url,omitempty"`
}
// AppRouteSpec struct for AppRouteSpec
diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go
index 4d9a214ae3..9a5bf60122 100644
--- a/vendor/github.com/digitalocean/godo/apps_accessors.go
+++ b/vendor/github.com/digitalocean/godo/apps_accessors.go
@@ -1453,6 +1453,14 @@ func (a *AppMaintenanceSpec) GetEnabled() bool {
return a.Enabled
}
+// GetOfflinePageURL returns the OfflinePageURL field.
+func (a *AppMaintenanceSpec) GetOfflinePageURL() string {
+ if a == nil {
+ return ""
+ }
+ return a.OfflinePageURL
+}
+
// GetAppID returns the AppID field.
func (a *AppProposeRequest) GetAppID() string {
if a == nil {
diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go
index 1217ef05e8..3a09fd72ec 100644
--- a/vendor/github.com/digitalocean/godo/databases.go
+++ b/vendor/github.com/digitalocean/godo/databases.go
@@ -42,6 +42,8 @@ const (
databaseIndexPath = databaseBasePath + "/%s/indexes/%s"
databaseLogsinkPath = databaseBasePath + "/%s/logsink/%s"
databaseLogsinksPath = databaseBasePath + "/%s/logsink"
+ databaseOnlineMigrationsPath = databaseBasePath + "/%s/online-migration"
+ databaseOnlineMigrationPath = databaseBasePath + "/%s/online-migration/%s"
)
// SQL Mode constants allow for MySQL-specific SQL flavor configuration.
@@ -179,6 +181,9 @@ type DatabasesService interface {
ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error)
UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error)
DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error)
+ StartOnlineMigration(ctx context.Context, databaseID string, onlineMigrationRequest *DatabaseStartOnlineMigrationRequest) (*DatabaseOnlineMigrationStatus, *Response, error)
+ StopOnlineMigration(ctx context.Context, databaseID, migrationID string) (*Response, error)
+ GetOnlineMigrationStatus(ctx context.Context, databaseID string) (*DatabaseOnlineMigrationStatus, *Response, error)
}
// DatabasesServiceOp handles communication with the Databases related methods
@@ -366,6 +371,13 @@ type DatabaseLogsink struct {
Config *DatabaseLogsinkConfig `json:"config,omitempty"`
}
+// DatabaseOnlineMigrationStatus represents an online migration status
+type DatabaseOnlineMigrationStatus struct {
+ ID string `json:"id"`
+ Status string `json:"status"`
+ CreatedAt string `json:"created_at"`
+}
+
// TopicPartition represents the state of a Kafka topic partition
type TopicPartition struct {
EarliestOffset uint64 `json:"earliest_offset,omitempty"`
@@ -515,6 +527,13 @@ type DatabaseFirewallRule struct {
CreatedAt time.Time `json:"created_at"`
}
+// DatabaseStartOnlineMigrationRequest is used to start an online migration for a database cluster
+type DatabaseStartOnlineMigrationRequest struct {
+ Source *DatabaseOnlineMigrationConfig `json:"source"`
+ DisableSSL bool `json:"disable_ssl,omitempty"`
+ IgnoreDBs []string `json:"ignore_dbs,omitempty"`
+}
+
// DatabaseCreateLogsinkRequest is used to create logsink for a database cluster
type DatabaseCreateLogsinkRequest struct {
Name string `json:"sink_name"`
@@ -544,6 +563,15 @@ type DatabaseLogsinkConfig struct {
Cert string `json:"cert,omitempty"`
}
+// DatabaseOnlineMigrationConfig represents the configuration options for database online migrations.
+type DatabaseOnlineMigrationConfig struct {
+ Host string `json:"host,omitempty"`
+ Port int `json:"port,omitempty"`
+ DatabaseName string `json:"dbname,omitempty"`
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+}
+
// PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters.
type PostgreSQLConfig struct {
AutovacuumFreezeMaxAge *int `json:"autovacuum_freeze_max_age,omitempty"`
@@ -871,6 +899,7 @@ type DatabaseOptions struct {
RedisOptions DatabaseEngineOptions `json:"redis"`
KafkaOptions DatabaseEngineOptions `json:"kafka"`
OpensearchOptions DatabaseEngineOptions `json:"opensearch"`
+ ValkeyOptions DatabaseEngineOptions `json:"valkey"`
}
// DatabaseEngineOptions represents the configuration options that are available for a given database engine
@@ -1975,3 +2004,50 @@ func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, lo
}
return resp, nil
}
+
+// StartOnlineMigration starts an online migration for a database. Migrating a cluster establishes a connection with an existing cluster
+// and replicates its contents to the target cluster. Online migration is only available for MySQL, PostgreSQL, and Redis clusters.
+func (svc *DatabasesServiceOp) StartOnlineMigration(ctx context.Context, databaseID string, onlineMigration *DatabaseStartOnlineMigrationRequest) (*DatabaseOnlineMigrationStatus, *Response, error) {
+ path := fmt.Sprintf(databaseOnlineMigrationsPath, databaseID)
+ req, err := svc.client.NewRequest(ctx, http.MethodPut, path, onlineMigration)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(DatabaseOnlineMigrationStatus)
+ resp, err := svc.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ return root, resp, nil
+}
+
+// GetOnlineMigrationStatus retrieves the status of the most recent online migration
+func (svc *DatabasesServiceOp) GetOnlineMigrationStatus(ctx context.Context, databaseID string) (*DatabaseOnlineMigrationStatus, *Response, error) {
+ path := fmt.Sprintf(databaseOnlineMigrationsPath, databaseID)
+ req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(DatabaseOnlineMigrationStatus)
+ resp, err := svc.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ return root, resp, nil
+}
+
+// StopOnlineMigration stops an online migration
+func (svc *DatabasesServiceOp) StopOnlineMigration(ctx context.Context, databaseID, migrationID string) (*Response, error) {
+ path := fmt.Sprintf(databaseOnlineMigrationPath, databaseID, migrationID)
+ req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := svc.client.Do(ctx, req, nil)
+ if err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
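
The new online-migration methods follow the existing godo request pattern; a hedged usage sketch (the token, cluster ID, and source credentials are placeholders):

    package main

    import (
        "context"
        "fmt"

        "github.com/digitalocean/godo"
    )

    func main() {
        ctx := context.Background()
        client := godo.NewFromToken("dop_v1_example") // placeholder token

        status, _, err := client.Databases.StartOnlineMigration(ctx, "db-cluster-id", &godo.DatabaseStartOnlineMigrationRequest{
            Source: &godo.DatabaseOnlineMigrationConfig{
                Host:         "source.example.com",
                Port:         25060,
                DatabaseName: "defaultdb",
                Username:     "doadmin",
                Password:     "secret",
            },
            IgnoreDBs: []string{"scratch"},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(status.ID, status.Status)

        // Poll the most recent migration, or stop it by ID.
        if st, _, err := client.Databases.GetOnlineMigrationStatus(ctx, "db-cluster-id"); err == nil {
            fmt.Println(st.Status)
        }
        _, _ = client.Databases.StopOnlineMigration(ctx, "db-cluster-id", status.ID)
    }
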
diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go
index 2469c14a08..2aedbcdb62 100644
--- a/vendor/github.com/digitalocean/godo/godo.go
+++ b/vendor/github.com/digitalocean/godo/godo.go
@@ -21,7 +21,7 @@ import (
)
const (
- libraryVersion = "1.132.0"
+ libraryVersion = "1.144.0"
defaultBaseURL = "https://api.digitalocean.com/"
userAgent = "godo/" + libraryVersion
mediaType = "application/json"
@@ -88,11 +88,13 @@ type Client struct {
ReservedIPV6Actions ReservedIPV6ActionsService
Sizes SizesService
Snapshots SnapshotsService
+ SpacesKeys SpacesKeysService
Storage StorageService
StorageActions StorageActionsService
Tags TagsService
UptimeChecks UptimeChecksService
VPCs VPCsService
+ PartnerAttachment PartnerAttachmentService
// Optional function called after every successful request made to the DO APIs
onRequestCompleted RequestCompletionCallback
@@ -302,11 +304,13 @@ func NewClient(httpClient *http.Client) *Client {
c.ReservedIPV6Actions = &ReservedIPV6ActionsServiceOp{client: c}
c.Sizes = &SizesServiceOp{client: c}
c.Snapshots = &SnapshotsServiceOp{client: c}
+ c.SpacesKeys = &SpacesKeysServiceOp{client: c}
c.Storage = &StorageServiceOp{client: c}
c.StorageActions = &StorageActionsServiceOp{client: c}
c.Tags = &TagsServiceOp{client: c}
c.UptimeChecks = &UptimeChecksServiceOp{client: c}
c.VPCs = &VPCsServiceOp{client: c}
+ c.PartnerAttachment = &PartnerAttachmentServiceOp{client: c}
c.headers = make(map[string]string)
diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go
index 9b3bcfa1a6..9d97432172 100644
--- a/vendor/github.com/digitalocean/godo/kubernetes.go
+++ b/vendor/github.com/digitalocean/godo/kubernetes.go
@@ -40,6 +40,7 @@ type KubernetesService interface {
CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error)
GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error)
+ GetNodePoolTemplate(ctx context.Context, clusterID string, nodePoolName string) (*KubernetesNodePoolTemplate, *Response, error)
ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error)
UpdateNodePool(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error)
// RecycleNodePoolNodes is DEPRECATED please use DeleteNode
@@ -54,6 +55,8 @@ type KubernetesService interface {
RunClusterlint(ctx context.Context, clusterID string, req *KubernetesRunClusterlintRequest) (string, *Response, error)
GetClusterlintResults(ctx context.Context, clusterID string, req *KubernetesGetClusterlintRequest) ([]*ClusterlintDiagnostic, *Response, error)
+
+ GetClusterStatusMessages(ctx context.Context, clusterID string, req *KubernetesGetClusterStatusMessagesRequest) ([]*KubernetesClusterStatusMessage, *Response, error)
}
var _ KubernetesService = &KubernetesServiceOp{}
@@ -78,20 +81,24 @@ type KubernetesClusterCreateRequest struct {
NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"`
- MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"`
- AutoUpgrade bool `json:"auto_upgrade"`
- SurgeUpgrade bool `json:"surge_upgrade"`
- ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
+ MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"`
+ AutoUpgrade bool `json:"auto_upgrade"`
+ SurgeUpgrade bool `json:"surge_upgrade"`
+ ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
+ ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"`
+ RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"`
}
// KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster.
type KubernetesClusterUpdateRequest struct {
- Name string `json:"name,omitempty"`
- Tags []string `json:"tags,omitempty"`
- MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
- AutoUpgrade *bool `json:"auto_upgrade,omitempty"`
- SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
- ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
+ Name string `json:"name,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
+ AutoUpgrade *bool `json:"auto_upgrade,omitempty"`
+ SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
+ ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
+ ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"`
+ RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"`
// Convert cluster to run highly available control plane
HA *bool `json:"ha,omitempty"`
@@ -187,6 +194,19 @@ type KubernetesGetClusterlintRequest struct {
RunId string `json:"run_id"`
}
+type clusterStatusMessagesRoot struct {
+ Messages []*KubernetesClusterStatusMessage `json:"messages"`
+}
+
+type KubernetesClusterStatusMessage struct {
+ Message string `json:"message"`
+ Timestamp time.Time `json:"timestamp"`
+}
+
+type KubernetesGetClusterStatusMessagesRequest struct {
+ Since *time.Time `json:"since"`
+}
+
// KubernetesCluster represents a Kubernetes cluster.
type KubernetesCluster struct {
ID string `json:"id,omitempty"`
@@ -205,11 +225,13 @@ type KubernetesCluster struct {
NodePools []*KubernetesNodePool `json:"node_pools,omitempty"`
- MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
- AutoUpgrade bool `json:"auto_upgrade,omitempty"`
- SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
- RegistryEnabled bool `json:"registry_enabled,omitempty"`
- ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
+ MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
+ AutoUpgrade bool `json:"auto_upgrade,omitempty"`
+ SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
+ RegistryEnabled bool `json:"registry_enabled,omitempty"`
+ ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
+ ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"`
+ RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"`
Status *KubernetesClusterStatus `json:"status,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
@@ -223,6 +245,7 @@ func (kc KubernetesCluster) URN() string {
// KubernetesClusterUser represents a Kubernetes cluster user.
type KubernetesClusterUser struct {
+ ID string `json:"id,omitempty"`
Username string `json:"username,omitempty"`
Groups []string `json:"groups,omitempty"`
}
@@ -251,6 +274,17 @@ type KubernetesControlPlaneFirewall struct {
AllowedAddresses []string `json:"allowed_addresses"`
}
+// KubernetesRoutingAgent represents information about the routing-agent cluster plugin.
+type KubernetesRoutingAgent struct {
+ Enabled *bool `json:"enabled"`
+}
+
+// KubernetesClusterAutoscalerConfiguration represents Kubernetes cluster autoscaler configuration.
+type KubernetesClusterAutoscalerConfiguration struct {
+ ScaleDownUtilizationThreshold *float64 `json:"scale_down_utilization_threshold"`
+ ScaleDownUnneededTime *string `json:"scale_down_unneeded_time"`
+}
+
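As a usage sketch, here is how a caller might opt into the two new create-request fields (assuming an initialized `client *godo.Client` and `ctx context.Context`; the name, slugs, and threshold values are placeholders, and `godo.PtrTo` is assumed to be the library's generic pointer helper):

```go
req := &godo.KubernetesClusterCreateRequest{
	Name:        "example-cluster", // placeholder
	RegionSlug:  "nyc1",
	VersionSlug: "latest",
	ClusterAutoscalerConfiguration: &godo.KubernetesClusterAutoscalerConfiguration{
		ScaleDownUtilizationThreshold: godo.PtrTo(0.5),     // scale down nodes below 50% utilization
		ScaleDownUnneededTime:         godo.PtrTo("1m30s"), // after 90s of being unneeded
	},
	RoutingAgent: &godo.KubernetesRoutingAgent{Enabled: godo.PtrTo(true)},
}
cluster, _, err := client.Kubernetes.Create(ctx, req)
if err != nil {
	return err
}
fmt.Println(cluster.ID)
```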
// KubernetesMaintenancePolicyDay represents the possible days of a maintenance
// window
type KubernetesMaintenancePolicyDay int
@@ -315,7 +349,7 @@ var (
// KubernetesMaintenanceToDay returns the appropriate KubernetesMaintenancePolicyDay for the given string.
func KubernetesMaintenanceToDay(day string) (KubernetesMaintenancePolicyDay, error) {
- d, ok := toDay[day]
+ d, ok := toDay[strings.ToLower(day)]
if !ok {
return 0, fmt.Errorf("unknown day: %q", day)
}
@@ -416,6 +450,20 @@ type KubernetesNodePool struct {
Nodes []*KubernetesNode `json:"nodes,omitempty"`
}
+// KubernetesNodePoolTemplate represents the node pool template data for a given pool.
+type KubernetesNodePoolTemplate struct {
+ Template *KubernetesNodeTemplate
+}
+
+// KubernetesNodePoolResources represents the resources within a given template for a node pool.
+// It follows https://pkg.go.dev/k8s.io/kubernetes@v1.32.1/pkg/scheduler/framework#Resource to represent
+// node resources within the node object.
+type KubernetesNodePoolResources struct {
+ CPU int64 `json:"cpu,omitempty"`
+ Memory string `json:"memory,omitempty"`
+ Pods int64 `json:"pods,omitempty"`
+}
+
// KubernetesNode represents a Node in a node pool in a Kubernetes cluster.
type KubernetesNode struct {
ID string `json:"id,omitempty"`
@@ -427,6 +475,17 @@ type KubernetesNode struct {
UpdatedAt time.Time `json:"updated_at,omitempty"`
}
+// KubernetesNodeTemplate represents a template in a node pool in a Kubernetes cluster.
+type KubernetesNodeTemplate struct {
+ ClusterUUID string `json:"cluster_uuid,omitempty"`
+ Name string `json:"name,omitempty"`
+ Slug string `json:"slug,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ Taints []string `json:"taints,omitempty"`
+ Capacity *KubernetesNodePoolResources `json:"capacity,omitempty"`
+ Allocatable *KubernetesNodePoolResources `json:"allocatable,omitempty"`
+}
+
// KubernetesNodeStatus represents the status of a particular Node in a Kubernetes cluster.
type KubernetesNodeStatus struct {
State string `json:"state,omitempty"`
@@ -794,6 +853,24 @@ func (svc *KubernetesServiceOp) GetNodePool(ctx context.Context, clusterID, pool
return root.NodePool, resp, nil
}
+// GetNodePoolTemplate retrieves the template used for a given node pool to scale up from zero.
+func (svc *KubernetesServiceOp) GetNodePoolTemplate(ctx context.Context, clusterID string, nodePoolName string) (*KubernetesNodePoolTemplate, *Response, error) {
+ path, err := url.JoinPath(kubernetesClustersPath, clusterID, "node_pools_template", nodePoolName)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ root := new(KubernetesNodePoolTemplate)
+ resp, err := svc.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ return root, resp, nil
+}
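A minimal sketch of calling the new endpoint (cluster ID and pool name are placeholders; `GetNodePoolTemplate` is assumed to also be exposed on the `KubernetesService` interface):

```go
tmpl, _, err := client.Kubernetes.GetNodePoolTemplate(ctx, "cluster-uuid", "pool-1")
if err != nil {
	return err
}
if tmpl.Template != nil && tmpl.Template.Allocatable != nil {
	fmt.Printf("allocatable: %d CPU, %s memory, %d pods\n",
		tmpl.Template.Allocatable.CPU,
		tmpl.Template.Allocatable.Memory,
		tmpl.Template.Allocatable.Pods)
}
```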
+
// ListNodePools lists all the node pools found in a Kubernetes cluster.
func (svc *KubernetesServiceOp) ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) {
path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID)
@@ -980,3 +1057,28 @@ func (svc *KubernetesServiceOp) GetClusterlintResults(ctx context.Context, clust
}
return root.Diagnostics, resp, nil
}
+
+// GetClusterStatusMessages retrieves the status messages for a Kubernetes cluster.
+func (svc *KubernetesServiceOp) GetClusterStatusMessages(ctx context.Context, clusterID string, req *KubernetesGetClusterStatusMessagesRequest) ([]*KubernetesClusterStatusMessage, *Response, error) {
+ path := fmt.Sprintf("%s/%s/status_messages", kubernetesClustersPath, clusterID)
+
+ if req != nil {
+ v := make(url.Values)
+ if req.Since != nil {
+ v.Set("since", req.Since.Format(time.RFC3339))
+ }
+ if query := v.Encode(); query != "" {
+ path = path + "?" + query
+ }
+ }
+
+ request, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ root := new(clusterStatusMessagesRoot)
+ resp, err := svc.client.Do(ctx, request, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ return root.Messages, resp, nil
+}
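And a sketch of fetching only recent status messages via the `since` query parameter (placeholder cluster ID):

```go
since := time.Now().Add(-24 * time.Hour)
msgs, _, err := client.Kubernetes.GetClusterStatusMessages(ctx, "cluster-uuid",
	&godo.KubernetesGetClusterStatusMessagesRequest{Since: &since})
if err != nil {
	return err
}
for _, m := range msgs {
	fmt.Printf("%s %s\n", m.Timestamp.Format(time.RFC3339), m.Message)
}
```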
diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go
index a12729dd63..11d8d35bc3 100644
--- a/vendor/github.com/digitalocean/godo/load_balancers.go
+++ b/vendor/github.com/digitalocean/godo/load_balancers.go
@@ -22,6 +22,14 @@ const (
// Load Balancer network types
LoadBalancerNetworkTypeExternal = "EXTERNAL"
LoadBalancerNetworkTypeInternal = "INTERNAL"
+
+ // Load Balancer network_stack types
+ LoadBalancerNetworkStackIPv4 = "IPV4"
+ LoadBalancerNetworkStackDualstack = "DUALSTACK"
+
+ // Supported TLS Cipher policies
+ LoadBalancerTLSCipherPolicyDefault = "DEFAULT"
+ LoadBalancerTLSCipherPolicyStrong = "STRONG"
)
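These constants are the intended values for the new `NetworkStack` and `TLSCipherPolicy` request fields added below; a hedged sketch of a create request using them (other required fields elided for brevity):

```go
lbr := &godo.LoadBalancerRequest{
	Name:            "example-lb", // placeholder
	Region:          "nyc3",
	NetworkStack:    godo.LoadBalancerNetworkStackDualstack,
	TLSCipherPolicy: godo.LoadBalancerTLSCipherPolicyStrong,
}
lb, _, err := client.LoadBalancers.Create(ctx, lbr)
if err != nil {
	return err
}
fmt.Println(lb.ID)
```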
// LoadBalancersService is an interface for managing load balancers with the DigitalOcean API.
@@ -29,6 +37,8 @@ const (
type LoadBalancersService interface {
Get(context.Context, string) (*LoadBalancer, *Response, error)
List(context.Context, *ListOptions) ([]LoadBalancer, *Response, error)
+ ListByNames(context.Context, []string, *ListOptions) ([]LoadBalancer, *Response, error)
+ ListByUUIDs(context.Context, []string, *ListOptions) ([]LoadBalancer, *Response, error)
Create(context.Context, *LoadBalancerRequest) (*LoadBalancer, *Response, error)
Update(ctx context.Context, lbID string, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error)
Delete(ctx context.Context, lbID string) (*Response, error)
@@ -74,6 +84,8 @@ type LoadBalancer struct {
GLBSettings *GLBSettings `json:"glb_settings,omitempty"`
TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"`
Network string `json:"network,omitempty"`
+ NetworkStack string `json:"network_stack,omitempty"`
+ TLSCipherPolicy string `json:"tls_cipher_policy,omitempty"`
}
// String creates a human-readable description of a LoadBalancer.
@@ -108,6 +120,8 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest {
HTTPIdleTimeoutSeconds: l.HTTPIdleTimeoutSeconds,
TargetLoadBalancerIDs: append([]string(nil), l.TargetLoadBalancerIDs...),
Network: l.Network,
+ NetworkStack: l.NetworkStack,
+ TLSCipherPolicy: l.TLSCipherPolicy,
}
if l.DisableLetsEncryptDNSRecords != nil {
@@ -247,6 +261,8 @@ type LoadBalancerRequest struct {
GLBSettings *GLBSettings `json:"glb_settings,omitempty"`
TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"`
Network string `json:"network,omitempty"`
+ NetworkStack string `json:"network_stack,omitempty"`
+ TLSCipherPolicy string `json:"tls_cipher_policy,omitempty"`
}
// String creates a human-readable description of a LoadBalancerRequest.
@@ -396,6 +412,72 @@ func (l *LoadBalancersServiceOp) List(ctx context.Context, opt *ListOptions) ([]
return root.LoadBalancers, resp, err
}
+// ListByNames lists load balancers filtered by resource names, with optional pagination.
+func (l *LoadBalancersServiceOp) ListByNames(ctx context.Context, names []string, opt *ListOptions) ([]LoadBalancer, *Response, error) {
+ path, err := addOptions(loadBalancersBasePath, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ q := req.URL.Query()
+ for _, name := range names {
+ q.Add("names", name)
+ }
+ req.URL.RawQuery = q.Encode()
+
+ root := new(loadBalancersRoot)
+ resp, err := l.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ if l := root.Links; l != nil {
+ resp.Links = l
+ }
+ if m := root.Meta; m != nil {
+ resp.Meta = m
+ }
+
+ return root.LoadBalancers, resp, err
+}
+
+// ListByUUIDs lists load balancers filtered by resource UUIDs, with optional pagination.
+func (l *LoadBalancersServiceOp) ListByUUIDs(ctx context.Context, uuids []string, opt *ListOptions) ([]LoadBalancer, *Response, error) {
+ path, err := addOptions(loadBalancersBasePath, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ q := req.URL.Query()
+ for _, uuid := range uuids {
+ q.Add("uuids", uuid)
+ }
+ req.URL.RawQuery = q.Encode()
+
+ root := new(loadBalancersRoot)
+ resp, err := l.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ if l := root.Links; l != nil {
+ resp.Links = l
+ }
+ if m := root.Meta; m != nil {
+ resp.Meta = m
+ }
+
+ return root.LoadBalancers, resp, err
+}
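Both list variants send the filter as repeated query parameters (`names=...` / `uuids=...`). A usage sketch for the name-filtered variant; `ListByUUIDs` is symmetric:

```go
lbs, _, err := client.LoadBalancers.ListByNames(ctx,
	[]string{"lb-prod", "lb-staging"}, &godo.ListOptions{PerPage: 50})
if err != nil {
	return err
}
for _, lb := range lbs {
	fmt.Println(lb.Name, lb.IP)
}
```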
+
// Create a new load balancer with a given configuration.
func (l *LoadBalancersServiceOp) Create(ctx context.Context, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) {
req, err := l.client.NewRequest(ctx, http.MethodPost, loadBalancersBasePath, lbr)
diff --git a/vendor/github.com/digitalocean/godo/partner_network_connect.go b/vendor/github.com/digitalocean/godo/partner_network_connect.go
new file mode 100644
index 0000000000..37f508cc87
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/partner_network_connect.go
@@ -0,0 +1,415 @@
+package godo
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+)
+
+const partnerNetworkConnectBasePath = "/v2/partner_network_connect/attachments"
+
+// PartnerAttachmentService is an interface for managing Partner Attachments with the
+// DigitalOcean API.
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/PartnerNetworkConnect
+type PartnerAttachmentService interface {
+ List(context.Context, *ListOptions) ([]*PartnerAttachment, *Response, error)
+ Create(context.Context, *PartnerAttachmentCreateRequest) (*PartnerAttachment, *Response, error)
+ Get(context.Context, string) (*PartnerAttachment, *Response, error)
+ Update(context.Context, string, *PartnerAttachmentUpdateRequest) (*PartnerAttachment, *Response, error)
+ Delete(context.Context, string) (*Response, error)
+ GetServiceKey(context.Context, string) (*ServiceKey, *Response, error)
+ SetRoutes(context.Context, string, *PartnerAttachmentSetRoutesRequest) (*PartnerAttachment, *Response, error)
+ ListRoutes(context.Context, string, *ListOptions) ([]*RemoteRoute, *Response, error)
+ GetBGPAuthKey(ctx context.Context, iaID string) (*BgpAuthKey, *Response, error)
+ RegenerateServiceKey(ctx context.Context, iaID string) (*RegenerateServiceKey, *Response, error)
+}
+
+var _ PartnerAttachmentService = &PartnerAttachmentServiceOp{}
+
+// PartnerAttachmentServiceOp interfaces with the Partner Attachment endpoints in the DigitalOcean API.
+type PartnerAttachmentServiceOp struct {
+ client *Client
+}
+
+// PartnerAttachmentCreateRequest represents a request to create a Partner Attachment.
+type PartnerAttachmentCreateRequest struct {
+ // Name is the name of the Partner Attachment
+ Name string `json:"name,omitempty"`
+ // ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps
+ ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"`
+ // Region is the region where the Partner Attachment is created
+ Region string `json:"region,omitempty"`
+ // NaaSProvider is the name of the Network as a Service provider
+ NaaSProvider string `json:"naas_provider,omitempty"`
+ // VPCIDs are the IDs of the VPCs to which the Partner Attachment is connected
+ VPCIDs []string `json:"vpc_ids,omitempty"`
+ // BGP is the BGP configuration of the Partner Attachment
+ BGP BGP `json:"bgp,omitempty"`
+}
+
+type partnerAttachmentRequestBody struct {
+ // Name is the name of the Partner Attachment
+ Name string `json:"name,omitempty"`
+ // ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps
+ ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"`
+ // Region is the region where the Partner Attachment is created
+ Region string `json:"region,omitempty"`
+ // NaaSProvider is the name of the Network as a Service provider
+ NaaSProvider string `json:"naas_provider,omitempty"`
+ // VPCIDs are the IDs of the VPCs to which the Partner Attachment is connected
+ VPCIDs []string `json:"vpc_ids,omitempty"`
+ // BGP is the BGP configuration of the Partner Attachment
+ BGP *BGPInput `json:"bgp,omitempty"`
+}
+
+func (req *PartnerAttachmentCreateRequest) buildReq() *partnerAttachmentRequestBody {
+ request := &partnerAttachmentRequestBody{
+ Name: req.Name,
+ ConnectionBandwidthInMbps: req.ConnectionBandwidthInMbps,
+ Region: req.Region,
+ NaaSProvider: req.NaaSProvider,
+ VPCIDs: req.VPCIDs,
+ }
+
+ if req.BGP != (BGP{}) {
+ request.BGP = &BGPInput{
+ LocalASN: req.BGP.LocalASN,
+ LocalRouterIP: req.BGP.LocalRouterIP,
+ PeerASN: req.BGP.PeerASN,
+ PeerRouterIP: req.BGP.PeerRouterIP,
+ AuthKey: req.BGP.AuthKey,
+ }
+ }
+
+ return request
+}
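Worth noting: `buildReq` exists because the create endpoint expects the ASNs under different JSON keys (`local_router_asn`/`peer_router_asn`) than the ones `BGP` serializes to. A small sketch of the difference, using plain encoding/json:

```go
b, _ := json.Marshal(godo.BGP{LocalASN: 64512})      // {"local_asn":64512}
i, _ := json.Marshal(godo.BGPInput{LocalASN: 64512}) // {"local_router_asn":64512}
fmt.Println(string(b), string(i))
```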
+
+// PartnerAttachmentUpdateRequest represents a request to update a Partner Attachment.
+type PartnerAttachmentUpdateRequest struct {
+ // Name is the name of the Partner Attachment
+ Name string `json:"name,omitempty"`
+ // VPCIDs are the IDs of the VPCs to which the Partner Attachment is connected
+ VPCIDs []string `json:"vpc_ids,omitempty"`
+}
+
+// PartnerAttachmentSetRoutesRequest represents a request to set routes on a Partner Attachment.
+type PartnerAttachmentSetRoutesRequest struct {
+ // Routes is the list of routes to be used for the Partner Attachment
+ Routes []string `json:"routes,omitempty"`
+}
+
+// BGP represents the BGP configuration of a Partner Attachment.
+type BGP struct {
+ // LocalASN is the local ASN
+ LocalASN int `json:"local_asn,omitempty"`
+ // LocalRouterIP is the local router IP
+ LocalRouterIP string `json:"local_router_ip,omitempty"`
+ // PeerASN is the peer ASN
+ PeerASN int `json:"peer_asn,omitempty"`
+ // PeerRouterIP is the peer router IP
+ PeerRouterIP string `json:"peer_router_ip,omitempty"`
+ // AuthKey is the authentication key
+ AuthKey string `json:"auth_key,omitempty"`
+}
+
+func (b *BGP) UnmarshalJSON(data []byte) error {
+ type Alias BGP
+ aux := &struct {
+ LocalASN *int `json:"local_asn,omitempty"`
+ LocalRouterASN *int `json:"local_router_asn,omitempty"`
+ PeerASN *int `json:"peer_asn,omitempty"`
+ PeerRouterASN *int `json:"peer_router_asn,omitempty"`
+ *Alias
+ }{
+ Alias: (*Alias)(b),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ if aux.LocalASN != nil {
+ b.LocalASN = *aux.LocalASN
+ } else if aux.LocalRouterASN != nil {
+ b.LocalASN = *aux.LocalRouterASN
+ }
+
+ if aux.PeerASN != nil {
+ b.PeerASN = *aux.PeerASN
+ } else if aux.PeerRouterASN != nil {
+ b.PeerASN = *aux.PeerRouterASN
+ }
+ return nil
+}
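The custom unmarshaler above accepts either key spelling for the ASNs, so responses using both the old and new wire formats decode into the same struct. A quick sketch:

```go
var bgp godo.BGP
_ = json.Unmarshal([]byte(`{"local_router_asn":64512,"peer_asn":64513}`), &bgp)
// bgp.LocalASN == 64512 (taken from local_router_asn), bgp.PeerASN == 64513
```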
+
+// BGPInput represents the BGP configuration of a Partner Attachment.
+type BGPInput struct {
+ // LocalASN is the local ASN
+ LocalASN int `json:"local_router_asn,omitempty"`
+ // LocalRouterIP is the local router IP
+ LocalRouterIP string `json:"local_router_ip,omitempty"`
+ // PeerASN is the peer ASN
+ PeerASN int `json:"peer_router_asn,omitempty"`
+ // PeerRouterIP is the peer router IP
+ PeerRouterIP string `json:"peer_router_ip,omitempty"`
+ // AuthKey is the authentication key
+ AuthKey string `json:"auth_key,omitempty"`
+}
+
+// ServiceKey represents the service key of a Partner Attachment.
+type ServiceKey struct {
+ Value string `json:"value,omitempty"`
+ State string `json:"state,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+}
+
+// RemoteRoute represents a route for a Partner Attachment.
+type RemoteRoute struct {
+ // ID is the generated ID of the Route
+ ID string `json:"id,omitempty"`
+ // Cidr is the CIDR of the route
+ Cidr string `json:"cidr,omitempty"`
+}
+
+// PartnerAttachment represents a DigitalOcean Partner Attachment.
+type PartnerAttachment struct {
+ // ID is the generated ID of the Partner Attachment
+ ID string `json:"id,omitempty"`
+ // Name is the name of the Partner Attachment
+ Name string `json:"name,omitempty"`
+ // State is the state of the Partner Attachment
+ State string `json:"state,omitempty"`
+ // ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps
+ ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"`
+ // Region is the region where the Partner Attachment is created
+ Region string `json:"region,omitempty"`
+ // NaaSProvider is the name of the Network as a Service provider
+ NaaSProvider string `json:"naas_provider,omitempty"`
+ // VPCIDs are the IDs of the VPCs to which the Partner Attachment is connected
+ VPCIDs []string `json:"vpc_ids,omitempty"`
+ // BGP is the BGP configuration of the Partner Attachment
+ BGP BGP `json:"bgp,omitempty"`
+ // CreatedAt is time when this Partner Attachment was first created
+ CreatedAt time.Time `json:"created_at,omitempty"`
+}
+
+type partnerNetworkConnectAttachmentRoot struct {
+ PartnerAttachment *PartnerAttachment `json:"partner_attachment"`
+}
+
+type partnerNetworkConnectAttachmentsRoot struct {
+ PartnerAttachments []*PartnerAttachment `json:"partner_attachments"`
+ Links *Links `json:"links"`
+ Meta *Meta `json:"meta"`
+}
+
+type serviceKeyRoot struct {
+ ServiceKey *ServiceKey `json:"service_key"`
+}
+
+type remoteRoutesRoot struct {
+ RemoteRoutes []*RemoteRoute `json:"remote_routes"`
+ Links *Links `json:"links"`
+ Meta *Meta `json:"meta"`
+}
+
+// BgpAuthKey represents the BGP auth key of a Partner Attachment.
+type BgpAuthKey struct {
+ Value string `json:"value"`
+}
+
+type bgpAuthKeyRoot struct {
+ BgpAuthKey *BgpAuthKey `json:"bgp_auth_key"`
+}
+
+// RegenerateServiceKey represents the (empty) response returned when regenerating a service key.
+type RegenerateServiceKey struct{}
+
+type regenerateServiceKeyRoot struct {
+ RegenerateServiceKey *RegenerateServiceKey `json:"-"`
+}
+
+// List returns a list of all Partner Attachments, with optional pagination.
+func (s *PartnerAttachmentServiceOp) List(ctx context.Context, opt *ListOptions) ([]*PartnerAttachment, *Response, error) {
+ path, err := addOptions(partnerNetworkConnectBasePath, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(partnerNetworkConnectAttachmentsRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ if l := root.Links; l != nil {
+ resp.Links = l
+ }
+ if m := root.Meta; m != nil {
+ resp.Meta = m
+ }
+ return root.PartnerAttachments, resp, nil
+}
+
+// Create creates a new Partner Attachment.
+func (s *PartnerAttachmentServiceOp) Create(ctx context.Context, create *PartnerAttachmentCreateRequest) (*PartnerAttachment, *Response, error) {
+ path := partnerNetworkConnectBasePath
+
+ req, err := s.client.NewRequest(ctx, http.MethodPost, path, create.buildReq())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(partnerNetworkConnectAttachmentRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.PartnerAttachment, resp, nil
+}
+
+// Get returns the details of a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) Get(ctx context.Context, id string) (*PartnerAttachment, *Response, error) {
+ path := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id)
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(partnerNetworkConnectAttachmentRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.PartnerAttachment, resp, nil
+}
+
+// Update updates the properties of a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) Update(ctx context.Context, id string, update *PartnerAttachmentUpdateRequest) (*PartnerAttachment, *Response, error) {
+ path := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id)
+ req, err := s.client.NewRequest(ctx, http.MethodPatch, path, update)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(partnerNetworkConnectAttachmentRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.PartnerAttachment, resp, nil
+}
+
+// Delete deletes a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) Delete(ctx context.Context, id string) (*Response, error) {
+ path := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id)
+ req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := s.client.Do(ctx, req, nil)
+ if err != nil {
+ return resp, err
+ }
+
+ return resp, nil
+}
+
+// GetServiceKey returns the service key of a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) GetServiceKey(ctx context.Context, id string) (*ServiceKey, *Response, error) {
+ path := fmt.Sprintf("%s/%s/service_key", partnerNetworkConnectBasePath, id)
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(serviceKeyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.ServiceKey, resp, nil
+}
+
+// ListRoutes lists all remote routes for a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) ListRoutes(ctx context.Context, id string, opt *ListOptions) ([]*RemoteRoute, *Response, error) {
+ path, err := addOptions(fmt.Sprintf("%s/%s/remote_routes", partnerNetworkConnectBasePath, id), opt)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(remoteRoutesRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+ if l := root.Links; l != nil {
+ resp.Links = l
+ }
+ if m := root.Meta; m != nil {
+ resp.Meta = m
+ }
+
+ return root.RemoteRoutes, resp, nil
+}
+
+// SetRoutes sets the remote routes for a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) SetRoutes(ctx context.Context, id string, set *PartnerAttachmentSetRoutesRequest) (*PartnerAttachment, *Response, error) {
+ path := fmt.Sprintf("%s/%s/remote_routes", partnerNetworkConnectBasePath, id)
+ req, err := s.client.NewRequest(ctx, http.MethodPut, path, set)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(partnerNetworkConnectAttachmentRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.PartnerAttachment, resp, nil
+}
+
+// GetBGPAuthKey returns the BGP auth key of a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) GetBGPAuthKey(ctx context.Context, iaID string) (*BgpAuthKey, *Response, error) {
+ path := fmt.Sprintf("%s/%s/bgp_auth_key", partnerNetworkConnectBasePath, iaID)
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(bgpAuthKeyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.BgpAuthKey, resp, nil
+}
+
+// RegenerateServiceKey regenerates the service key of a Partner Attachment.
+func (s *PartnerAttachmentServiceOp) RegenerateServiceKey(ctx context.Context, iaID string) (*RegenerateServiceKey, *Response, error) {
+ path := fmt.Sprintf("%s/%s/service_key", partnerNetworkConnectBasePath, iaID)
+ req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(regenerateServiceKeyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.RegenerateServiceKey, resp, nil
+}
diff --git a/vendor/github.com/digitalocean/godo/snapshots.go b/vendor/github.com/digitalocean/godo/snapshots.go
index 13a06ca316..31fd494c4c 100644
--- a/vendor/github.com/digitalocean/godo/snapshots.go
+++ b/vendor/github.com/digitalocean/godo/snapshots.go
@@ -14,6 +14,7 @@ const snapshotBasePath = "v2/snapshots"
type SnapshotsService interface {
List(context.Context, *ListOptions) ([]Snapshot, *Response, error)
ListVolume(context.Context, *ListOptions) ([]Snapshot, *Response, error)
+ ListVolumeSnapshotByRegion(context.Context, string, *ListOptions) ([]Snapshot, *Response, error)
ListDroplet(context.Context, *ListOptions) ([]Snapshot, *Response, error)
Get(context.Context, string) (*Snapshot, *Response, error)
Delete(context.Context, string) (*Response, error)
@@ -52,6 +53,7 @@ type snapshotsRoot struct {
type listSnapshotOptions struct {
ResourceType string `url:"resource_type,omitempty"`
+ Region string `url:"region,omitempty"`
}
func (s Snapshot) String() string {
@@ -75,6 +77,12 @@ func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) (
return s.list(ctx, opt, &listOpt)
}
+// ListVolumeSnapshotByRegion lists all the volume snapshots for a given region.
+func (s *SnapshotsServiceOp) ListVolumeSnapshotByRegion(ctx context.Context, region string, opt *ListOptions) ([]Snapshot, *Response, error) {
+ listOpt := listSnapshotOptions{ResourceType: "volume", Region: region}
+ return s.list(ctx, opt, &listOpt)
+}
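Usage sketch (region slug is a placeholder):

```go
snaps, _, err := client.Snapshots.ListVolumeSnapshotByRegion(ctx, "nyc3", &godo.ListOptions{PerPage: 100})
if err != nil {
	return err
}
for _, s := range snaps {
	fmt.Println(s.ID, s.Name)
}
```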
+
// Get retrieves a snapshot by id.
func (s *SnapshotsServiceOp) Get(ctx context.Context, snapshotID string) (*Snapshot, *Response, error) {
return s.get(ctx, snapshotID)
diff --git a/vendor/github.com/digitalocean/godo/spaces_keys.go b/vendor/github.com/digitalocean/godo/spaces_keys.go
new file mode 100644
index 0000000000..8aee31dbba
--- /dev/null
+++ b/vendor/github.com/digitalocean/godo/spaces_keys.go
@@ -0,0 +1,186 @@
+package godo
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+)
+
+const spacesKeysBasePath = "v2/spaces/keys"
+
+// SpacesKeysService is an interface for managing Spaces keys with the DigitalOcean API.
+type SpacesKeysService interface {
+ List(context.Context, *ListOptions) ([]*SpacesKey, *Response, error)
+ Update(context.Context, string, *SpacesKeyUpdateRequest) (*SpacesKey, *Response, error)
+ Create(context.Context, *SpacesKeyCreateRequest) (*SpacesKey, *Response, error)
+ Delete(context.Context, string) (*Response, error)
+ Get(context.Context, string) (*SpacesKey, *Response, error)
+}
+
+// SpacesKeysServiceOp handles communication with the Spaces key related methods of the
+// DigitalOcean API.
+type SpacesKeysServiceOp struct {
+ client *Client
+}
+
+var _ SpacesKeysService = &SpacesKeysServiceOp{}
+
+// SpacesKeyPermission represents a permission for a Spaces grant
+type SpacesKeyPermission string
+
+const (
+ // SpacesKeyRead grants read-only access to the Spaces bucket
+ SpacesKeyRead SpacesKeyPermission = "read"
+ // SpacesKeyReadWrite grants read and write access to the Spaces bucket
+ SpacesKeyReadWrite SpacesKeyPermission = "readwrite"
+ // SpacesKeyFullAccess grants full access to the Spaces bucket
+ SpacesKeyFullAccess SpacesKeyPermission = "fullaccess"
+)
+
+// Grant represents a Grant for a Spaces key
+type Grant struct {
+ Bucket string `json:"bucket"`
+ Permission SpacesKeyPermission `json:"permission"`
+}
+
+// SpacesKey represents a DigitalOcean Spaces key
+type SpacesKey struct {
+ Name string `json:"name"`
+ AccessKey string `json:"access_key"`
+ SecretKey string `json:"secret_key"`
+ Grants []*Grant `json:"grants"`
+ CreatedAt string `json:"created_at"`
+}
+
+// spacesKeyRoot represents a response from the DigitalOcean API
+type spacesKeyRoot struct {
+ Key *SpacesKey `json:"key"`
+}
+
+// SpacesKeyCreateRequest represents a request to create a Spaces key.
+type SpacesKeyCreateRequest struct {
+ Name string `json:"name"`
+ Grants []*Grant `json:"grants"`
+}
+
+// SpacesKeyUpdateRequest represents a request to update a Spaces key.
+type SpacesKeyUpdateRequest struct {
+ Name string `json:"name"`
+ Grants []*Grant `json:"grants"`
+}
+
+// spacesListKeysRoot represents a response from the DigitalOcean API
+type spacesListKeysRoot struct {
+ Keys []*SpacesKey `json:"keys,omitempty"`
+ Links *Links `json:"links,omitempty"`
+ Meta *Meta `json:"meta"`
+}
+
+// Create creates a new Spaces key.
+func (s *SpacesKeysServiceOp) Create(ctx context.Context, createRequest *SpacesKeyCreateRequest) (*SpacesKey, *Response, error) {
+ if createRequest == nil {
+ return nil, nil, NewArgError("createRequest", "cannot be nil")
+ }
+
+ req, err := s.client.NewRequest(ctx, http.MethodPost, spacesKeysBasePath, createRequest)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(spacesKeyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.Key, resp, nil
+}
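A sketch of creating a read-only key with the grant types defined above (bucket and key names are placeholders; judging by the response type, `SecretKey` is presumably only populated on creation):

```go
key, _, err := client.SpacesKeys.Create(ctx, &godo.SpacesKeyCreateRequest{
	Name:   "ci-reader", // placeholder
	Grants: []*godo.Grant{{Bucket: "my-bucket", Permission: godo.SpacesKeyRead}},
})
if err != nil {
	return err
}
fmt.Println(key.AccessKey)
```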
+
+// Delete deletes a Spaces key.
+func (s *SpacesKeysServiceOp) Delete(ctx context.Context, accessKey string) (*Response, error) {
+ if accessKey == "" {
+ return nil, NewArgError("accessKey", "cannot be empty")
+ }
+
+ path := fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey)
+ req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := s.client.Do(ctx, req, nil)
+ if err != nil {
+ return resp, err
+ }
+
+ return resp, nil
+}
+
+// Update updates a Spaces key.
+func (s *SpacesKeysServiceOp) Update(ctx context.Context, accessKey string, updateRequest *SpacesKeyUpdateRequest) (*SpacesKey, *Response, error) {
+ if accessKey == "" {
+ return nil, nil, NewArgError("accessKey", "cannot be empty")
+ }
+ if updateRequest == nil {
+ return nil, nil, NewArgError("updateRequest", "cannot be nil")
+ }
+
+ path := fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey)
+ req, err := s.client.NewRequest(ctx, http.MethodPut, path, updateRequest)
+ if err != nil {
+ return nil, nil, err
+ }
+ root := new(spacesKeyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.Key, resp, nil
+}
+
+// List returns a list of Spaces keys.
+func (s *SpacesKeysServiceOp) List(ctx context.Context, opts *ListOptions) ([]*SpacesKey, *Response, error) {
+ path, err := addOptions(spacesKeysBasePath, opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ root := new(spacesListKeysRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ if root.Links != nil {
+ resp.Links = root.Links
+ }
+ if root.Meta != nil {
+ resp.Meta = root.Meta
+ }
+
+ return root.Keys, resp, nil
+}
+
+// Get retrieves a Spaces key.
+func (s *SpacesKeysServiceOp) Get(ctx context.Context, accessKey string) (*SpacesKey, *Response, error) {
+ if accessKey == "" {
+ return nil, nil, NewArgError("accessKey", "cannot be empty")
+ }
+
+ path := fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey)
+ req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ root := new(spacesKeyRoot)
+ resp, err := s.client.Do(ctx, req, root)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return root.Key, resp, nil
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig
new file mode 100644
index 0000000000..1f664d13a5
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig
@@ -0,0 +1,18 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
+
+[{Makefile,*.mk}]
+indent_style = tab
+
+[*.nix]
+indent_size = 2
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc
new file mode 100644
index 0000000000..2e0f9f5f71
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc
@@ -0,0 +1,4 @@
+if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
+ source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
+fi
+use flake . --impure
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore
new file mode 100644
index 0000000000..470e7ca2bd
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore
@@ -0,0 +1,6 @@
+/.devenv/
+/.direnv/
+/.pre-commit-config.yaml
+/bin/
+/build/
+/var/
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
new file mode 100644
index 0000000000..763143aa77
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
@@ -0,0 +1,23 @@
+run:
+ timeout: 5m
+
+linters-settings:
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/go-viper/mapstructure)
+ golint:
+ min-confidence: 0
+ goimports:
+ local-prefixes: github.com/go-viper/mapstructure
+
+linters:
+ disable-all: true
+ enable:
+ - gci
+ - gofmt
+ - gofumpt
+ - goimports
+ - staticcheck
+ # - stylecheck
diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
new file mode 100644
index 0000000000..afd44e5f5f
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
@@ -0,0 +1,104 @@
+> [!WARNING]
+> As of v2 of this library, the changelog can be found in GitHub releases.
+
+## 1.5.1
+
+* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
+* Fix map of slices not decoding properly in certain cases. [GH-266]
+
+## 1.5.0
+
+* New option `IgnoreUntaggedFields` to ignore decoding to any fields
+ without `mapstructure` (or the configured tag name) set [GH-277]
+* New option `ErrorUnset` which makes it an error if any fields
+ in a target struct are not set by the decoding process. [GH-225]
+* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
+* Decoding to slice from array no longer crashes [GH-265]
+* Decode nested struct pointers to map [GH-271]
+* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
+* Fix issue where fields with `,omitempty` would sometimes decode
+ into a map with an empty string key [GH-281]
+
+## 1.4.3
+
+* Fix cases where `json.Number` didn't decode properly [GH-261]
+
+## 1.4.2
+
+* Custom name matchers to support any sort of casing, formatting, etc. for
+ field names. [GH-250]
+* Fix possible panic in ComposeDecodeHookFunc [GH-251]
+
+## 1.4.1
+
+* Fix regression where `*time.Time` value would be set to empty and not be sent
+ to decode hooks properly [GH-232]
+
+## 1.4.0
+
+* A new decode hook type `DecodeHookFuncValue` has been added that has
+ access to the full values. [GH-183]
+* Squash is now supported with embedded fields that are struct pointers [GH-205]
+* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
+
+## 1.3.3
+
+* Decoding maps from maps creates a settable value for decode hooks [GH-203]
+
+## 1.3.2
+
+* Decode into interface type with a struct value is supported [GH-187]
+
+## 1.3.1
+
+* Squash should only squash embedded structs. [GH-194]
+
+## 1.3.0
+
+* Added `",omitempty"` support. This will ignore zero values in the source
+ structure when encoding. [GH-145]
+
+## 1.2.3
+
+* Fix duplicate entries in Keys list with pointer values. [GH-185]
+
+## 1.2.2
+
+* Do not add unsettable (unexported) values to the unused metadata key
+ or "remain" value. [GH-150]
+
+## 1.2.1
+
+* Go modules checksum mismatch fix
+
+## 1.2.0
+
+* Added support to capture unused values in a field using the `",remain"` value
+ in the mapstructure tag. There is an example to showcase usage.
+* Added `DecoderConfig` option to always squash embedded structs
+* `json.Number` can decode into `uint` types
+* Empty slices are preserved and not replaced with nil slices
+* Fix panic that can occur when decoding a map into a nil slice of structs
+* Improved package documentation for godoc
+
+## 1.1.2
+
+* Fix error when decode hook decodes interface implementation into interface
+ type. [GH-140]
+
+## 1.1.1
+
+* Fix panic that can happen in `decodePtr`
+
+## 1.1.0
+
+* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
+* Support struct to struct decoding [GH-137]
+* If source map value is nil, then destination map value is nil (instead of empty)
+* If source slice value is nil, then destination slice value is nil (instead of empty)
+* If source pointer is nil, then destination pointer is set to nil (instead of
+ allocated zero value of type)
+
+## 1.0.0
+
+* Initial tagged stable release.
diff --git a/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE
new file mode 100644
index 0000000000..f9c841a51e
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md
new file mode 100644
index 0000000000..dd5ec69ddf
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/README.md
@@ -0,0 +1,80 @@
+# mapstructure
+
+[](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI)
+[](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
+
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+```shell
+go get github.com/go-viper/mapstructure/v2
+```
+
+## Migrating from `github.com/mitchellh/mapstructure`
+
+[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository has achieved the "blessed fork" status.
+
+You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
+The API is the same, so you don't need to change anything else.
+
+Here is a script that can help you with the migration:
+
+```shell
+sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
+```
+
+If you need more time to migrate your code, that is absolutely fine.
+
+Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:
+
+```
+replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
+```
+
+## Usage & Example
+
+For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+ "type": "person",
+ "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
+
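To make the two-pass idea concrete, a minimal sketch (the `Person` type and the `"type"` switch are illustrative):

```go
type Person struct {
	Name string `mapstructure:"name"`
}

func decode(input []byte) (interface{}, error) {
	var raw map[string]interface{}
	if err := json.Unmarshal(input, &raw); err != nil {
		return nil, err
	}
	switch raw["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			return nil, err
		}
		return p, nil
	}
	return nil, fmt.Errorf("unknown type %v", raw["type"])
}
```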
+## Credits
+
+Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
+This is a maintained fork of the original library.
+
+Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
+
+## License
+
+The project is licensed under the [MIT License](LICENSE).
diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
new file mode 100644
index 0000000000..1f3c69d4b8
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
@@ -0,0 +1,630 @@
+package mapstructure
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+ // Create variables here so we can reference them with the reflect pkg
+ var f1 DecodeHookFuncType
+ var f2 DecodeHookFuncKind
+ var f3 DecodeHookFuncValue
+
+ // Fill in the variables into this interface and the rest is done
+ // automatically using the reflect package.
+ potential := []interface{}{f1, f2, f3}
+
+ v := reflect.ValueOf(h)
+ vt := v.Type()
+ for _, raw := range potential {
+ pt := reflect.ValueOf(raw).Type()
+ if vt.ConvertibleTo(pt) {
+ return v.Convert(pt).Interface()
+ }
+ }
+
+ return nil
+}
+
+// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into a closure to be used directly.
+// If the type fails to convert, we return a closure that always errors, to keep the previous behaviour.
+func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return f(from.Type(), to.Type(), from.Interface())
+ }
+ case DecodeHookFuncKind:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return f(from.Kind(), to.Kind(), from.Interface())
+ }
+ case DecodeHookFuncValue:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return f(from, to)
+ }
+ default:
+ return func(from reflect.Value, to reflect.Value) (interface{}, error) {
+ return nil, errors.New("invalid decode hook signature")
+ }
+ }
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+// that took reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+ raw DecodeHookFunc,
+ from reflect.Value, to reflect.Value,
+) (interface{}, error) {
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return f(from.Type(), to.Type(), from.Interface())
+ case DecodeHookFuncKind:
+ return f(from.Kind(), to.Kind(), from.Interface())
+ case DecodeHookFuncValue:
+ return f(from, to)
+ default:
+ return nil, errors.New("invalid decode hook signature")
+ }
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+ cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs))
+ for _, f := range fs {
+ cached = append(cached, cachedDecodeHook(f))
+ }
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ var err error
+ data := f.Interface()
+
+ newFrom := f
+ for _, c := range cached {
+ data, err = c(newFrom, t)
+ if err != nil {
+ return nil, err
+ }
+ newFrom = reflect.ValueOf(data)
+ }
+
+ return data, nil
+ }
+}
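A usage sketch of composing hooks through `DecoderConfig` (field names are illustrative):

```go
var cfg struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Hosts   []string      `mapstructure:"hosts"`
}

dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
	Result: &cfg,
	DecodeHook: mapstructure.ComposeDecodeHookFunc(
		mapstructure.StringToTimeDurationHookFunc(),
		mapstructure.StringToSliceHookFunc(","),
	),
})
if err != nil {
	return err
}
err = dec.Decode(map[string]interface{}{"timeout": "5s", "hosts": "a,b"})
```

Each hook sees the output of the previous one, so ordering matters when several hooks can match the same source type.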
+
+// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
+// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
+func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
+ cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff))
+ for _, f := range ff {
+ cached = append(cached, cachedDecodeHook(f))
+ }
+ return func(a, b reflect.Value) (interface{}, error) {
+ var allErrs string
+ var out interface{}
+ var err error
+
+ for _, c := range cached {
+ out, err = c(a, b)
+ if err != nil {
+ allErrs += err.Error() + "\n"
+ continue
+ }
+
+ return out, nil
+ }
+
+ return nil, errors.New(allErrs)
+ }
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.SliceOf(f) {
+ return data, nil
+ }
+
+ raw := data.(string)
+ if raw == "" {
+ return []string{}, nil
+ }
+
+ return strings.Split(raw, sep), nil
+ }
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Duration(5)) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.ParseDuration(data.(string))
+ }
+}
+
+// StringToURLHookFunc returns a DecodeHookFunc that converts
+// strings to *url.URL.
+func StringToURLHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(&url.URL{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return url.Parse(data.(string))
+ }
+}
+
+// StringToIPHookFunc returns a DecodeHookFunc that converts
+// strings to net.IP
+func StringToIPHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IP{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ ip := net.ParseIP(data.(string))
+ if ip == nil {
+ return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+ }
+
+ return ip, nil
+ }
+}
+
+// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+// strings to net.IPNet
+func StringToIPNetHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IPNet{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ _, net, err := net.ParseCIDR(data.(string))
+ return net, err
+ }
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Time{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.Parse(layout, data.(string))
+ }
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{},
+) (interface{}, error) {
+ dataVal := reflect.ValueOf(data)
+ switch t {
+ case reflect.String:
+ switch f {
+ case reflect.Bool:
+ if dataVal.Bool() {
+ return "1", nil
+ }
+ return "0", nil
+ case reflect.Float32:
+ return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+ case reflect.Int:
+ return strconv.FormatInt(dataVal.Int(), 10), nil
+ case reflect.Slice:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ if elemKind == reflect.Uint8 {
+ return string(dataVal.Interface().([]uint8)), nil
+ }
+ case reflect.Uint:
+ return strconv.FormatUint(dataVal.Uint(), 10), nil
+ }
+ }
+
+ return data, nil
+}
+
+// RecursiveStructToMapHookFunc returns a DecodeHookFunc that, when decoding a
+// struct into an interface{} target, sets the target to an empty map so the
+// struct is decoded recursively into map form.
+func RecursiveStructToMapHookFunc() DecodeHookFunc {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ if f.Kind() != reflect.Struct {
+ return f.Interface(), nil
+ }
+
+ var i interface{} = struct{}{}
+ if t.Type() != reflect.TypeOf(&i).Elem() {
+ return f.Interface(), nil
+ }
+
+ m := make(map[string]interface{})
+ t.Set(reflect.ValueOf(m))
+
+ return f.Interface(), nil
+ }
+}
+
+// TextUnmarshallerHookFunc returns a DecodeHookFunc that decodes strings
+// via the UnmarshalText method when the target type implements the
+// encoding.TextUnmarshaler interface.
+func TextUnmarshallerHookFunc() DecodeHookFuncType {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ result := reflect.New(t).Interface()
+ unmarshaller, ok := result.(encoding.TextUnmarshaler)
+ if !ok {
+ return data, nil
+ }
+ str, ok := data.(string)
+ if !ok {
+ str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
+ }
+ if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+}
+
+// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
+// strings to netip.Addr.
+func StringToNetIPAddrHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(netip.Addr{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return netip.ParseAddr(data.(string))
+ }
+}
+
+// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
+// strings to netip.AddrPort.
+func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(netip.AddrPort{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return netip.ParseAddrPort(data.(string))
+ }
+}
+
+// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
+// strings to basic types.
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
+func StringToBasicTypeHookFunc() DecodeHookFunc {
+ return ComposeDecodeHookFunc(
+ StringToInt8HookFunc(),
+ StringToUint8HookFunc(),
+ StringToInt16HookFunc(),
+ StringToUint16HookFunc(),
+ StringToInt32HookFunc(),
+ StringToUint32HookFunc(),
+ StringToInt64HookFunc(),
+ StringToUint64HookFunc(),
+ StringToIntHookFunc(),
+ StringToUintHookFunc(),
+ StringToFloat32HookFunc(),
+ StringToFloat64HookFunc(),
+ StringToBoolHookFunc(),
+ // byte and rune are aliases for uint8 and int32 respectively
+ // StringToByteHookFunc(),
+ // StringToRuneHookFunc(),
+ StringToComplex64HookFunc(),
+ StringToComplex128HookFunc(),
+ )
+}
+
+// StringToInt8HookFunc returns a DecodeHookFunc that converts
+// strings to int8.
+func StringToInt8HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 8)
+ return int8(i64), err
+ }
+}
+
+// StringToUint8HookFunc returns a DecodeHookFunc that converts
+// strings to uint8.
+func StringToUint8HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 8)
+ return uint8(u64), err
+ }
+}
+
+// StringToInt16HookFunc returns a DecodeHookFunc that converts
+// strings to int16.
+func StringToInt16HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 16)
+ return int16(i64), err
+ }
+}
+
+// StringToUint16HookFunc returns a DecodeHookFunc that converts
+// strings to uint16.
+func StringToUint16HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 16)
+ return uint16(u64), err
+ }
+}
+
+// StringToInt32HookFunc returns a DecodeHookFunc that converts
+// strings to int32.
+func StringToInt32HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 32)
+ return int32(i64), err
+ }
+}
+
+// StringToUint32HookFunc returns a DecodeHookFunc that converts
+// strings to uint32.
+func StringToUint32HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 32)
+ return uint32(u64), err
+ }
+}
+
+// StringToInt64HookFunc returns a DecodeHookFunc that converts
+// strings to int64.
+func StringToInt64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseInt(data.(string), 0, 64)
+ }
+}
+
+// StringToUint64HookFunc returns a DecodeHookFunc that converts
+// strings to uint64.
+func StringToUint64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseUint(data.(string), 0, 64)
+ }
+}
+
+// StringToIntHookFunc returns a DecodeHookFunc that converts
+// strings to int.
+func StringToIntHookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Int {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ i64, err := strconv.ParseInt(data.(string), 0, 0)
+ return int(i64), err
+ }
+}
+
+// StringToUintHookFunc returns a DecodeHookFunc that converts
+// strings to uint.
+func StringToUintHookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ u64, err := strconv.ParseUint(data.(string), 0, 0)
+ return uint(u64), err
+ }
+}
+
+// StringToFloat32HookFunc returns a DecodeHookFunc that converts
+// strings to float32.
+func StringToFloat32HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ f64, err := strconv.ParseFloat(data.(string), 32)
+ return float32(f64), err
+ }
+}
+
+// StringToFloat64HookFunc returns a DecodeHookFunc that converts
+// strings to float64.
+func StringToFloat64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseFloat(data.(string), 64)
+ }
+}
+
+// StringToBoolHookFunc returns a DecodeHookFunc that converts
+// strings to bool.
+func StringToBoolHookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseBool(data.(string))
+ }
+}
+
+// StringToByteHookFunc returns a DecodeHookFunc that converts
+// strings to byte.
+func StringToByteHookFunc() DecodeHookFunc {
+ return StringToUint8HookFunc()
+}
+
+// StringToRuneHookFunc returns a DecodeHookFunc that converts
+// strings to rune.
+func StringToRuneHookFunc() DecodeHookFunc {
+ return StringToInt32HookFunc()
+}
+
+// StringToComplex64HookFunc returns a DecodeHookFunc that converts
+// strings to complex64.
+func StringToComplex64HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ c128, err := strconv.ParseComplex(data.(string), 64)
+ return complex64(c128), err
+ }
+}
+
+// StringToComplex128HookFunc returns a DecodeHookFunc that converts
+// strings to complex128.
+func StringToComplex128HookFunc() DecodeHookFunc {
+ return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return strconv.ParseComplex(data.(string), 128)
+ }
+}
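+
+// Illustrative usage (a sketch, not part of the upstream API surface): the
+// string-conversion hooks above are typically composed and attached to a
+// DecoderConfig, for example:
+//
+//	var target struct {
+//		Port int  `mapstructure:"port"`
+//		TLS  bool `mapstructure:"tls"`
+//	}
+//	cfg := &DecoderConfig{
+//		Result: &target,
+//		DecodeHook: ComposeDecodeHookFunc(
+//			StringToIntHookFunc(),
+//			StringToBoolHookFunc(),
+//		),
+//	}
+//	// NewDecoder(cfg) then decodes {"port": "8080", "tls": "true"}
+//	// into the numeric and boolean fields.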
diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock
new file mode 100644
index 0000000000..4bea8154e0
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock
@@ -0,0 +1,472 @@
+{
+ "nodes": {
+ "cachix": {
+ "inputs": {
+ "devenv": "devenv_2",
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "pre-commit-hooks": [
+ "devenv",
+ "pre-commit-hooks"
+ ]
+ },
+ "locked": {
+ "lastModified": 1712055811,
+ "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
+ "owner": "cachix",
+ "repo": "cachix",
+ "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "cachix",
+ "type": "github"
+ }
+ },
+ "devenv": {
+ "inputs": {
+ "cachix": "cachix",
+ "flake-compat": "flake-compat_2",
+ "nix": "nix_2",
+ "nixpkgs": "nixpkgs_2",
+ "pre-commit-hooks": "pre-commit-hooks"
+ },
+ "locked": {
+ "lastModified": 1717245169,
+ "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "devenv_2": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "cachix",
+ "flake-compat"
+ ],
+ "nix": "nix",
+ "nixpkgs": "nixpkgs",
+ "poetry2nix": "poetry2nix",
+ "pre-commit-hooks": [
+ "devenv",
+ "cachix",
+ "pre-commit-hooks"
+ ]
+ },
+ "locked": {
+ "lastModified": 1708704632,
+ "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "ref": "python-rewrite",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-compat_2": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1696426674,
+ "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-parts": {
+ "inputs": {
+ "nixpkgs-lib": "nixpkgs-lib"
+ },
+ "locked": {
+ "lastModified": 1717285511,
+ "narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1689068808,
+ "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "flake-utils_2": {
+ "inputs": {
+ "systems": "systems_2"
+ },
+ "locked": {
+ "lastModified": 1710146030,
+ "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "pre-commit-hooks",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1709087332,
+ "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "nixpkgs": [
+ "devenv",
+ "cachix",
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1712911606,
+ "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "devenv-2.21",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nix-github-actions": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "cachix",
+ "devenv",
+ "poetry2nix",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1688870561,
+ "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
+ "owner": "nix-community",
+ "repo": "nix-github-actions",
+ "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-community",
+ "repo": "nix-github-actions",
+ "type": "github"
+ }
+ },
+ "nix_2": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression_2"
+ },
+ "locked": {
+ "lastModified": 1712911606,
+ "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "devenv-2.21",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1692808169,
+ "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-lib": {
+ "locked": {
+ "lastModified": 1717284937,
+ "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
+ "type": "tarball",
+ "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
+ },
+ "original": {
+ "type": "tarball",
+ "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
+ }
+ },
+ "nixpkgs-regression": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-regression_2": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-stable": {
+ "locked": {
+ "lastModified": 1710695816,
+ "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "614b4613980a522ba49f0d194531beddbb7220d3",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-23.11",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+ "lastModified": 1713361204,
+ "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
+ "owner": "cachix",
+ "repo": "devenv-nixpkgs",
+ "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "ref": "rolling",
+ "repo": "devenv-nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_3": {
+ "locked": {
+ "lastModified": 1717112898,
+ "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "poetry2nix": {
+ "inputs": {
+ "flake-utils": "flake-utils",
+ "nix-github-actions": "nix-github-actions",
+ "nixpkgs": [
+ "devenv",
+ "cachix",
+ "devenv",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1692876271,
+ "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
+ "owner": "nix-community",
+ "repo": "poetry2nix",
+ "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-community",
+ "repo": "poetry2nix",
+ "type": "github"
+ }
+ },
+ "pre-commit-hooks": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "flake-utils": "flake-utils_2",
+ "gitignore": "gitignore",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+ "lastModified": 1713775815,
+ "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "devenv": "devenv",
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs_3"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ },
+ "systems_2": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix
new file mode 100644
index 0000000000..4ed0f53311
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix
@@ -0,0 +1,39 @@
+{
+ inputs = {
+ nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+ flake-parts.url = "github:hercules-ci/flake-parts";
+ devenv.url = "github:cachix/devenv";
+ };
+
+ outputs = inputs@{ flake-parts, ... }:
+ flake-parts.lib.mkFlake { inherit inputs; } {
+ imports = [
+ inputs.devenv.flakeModule
+ ];
+
+ systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
+
+ perSystem = { config, self', inputs', pkgs, system, ... }: rec {
+ devenv.shells = {
+ default = {
+ languages = {
+ go.enable = true;
+ };
+
+ pre-commit.hooks = {
+ nixpkgs-fmt.enable = true;
+ };
+
+ packages = with pkgs; [
+ golangci-lint
+ ];
+
+ # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
+ containers = pkgs.lib.mkForce { };
+ };
+
+ ci = devenv.shells.default;
+ };
+ };
+ };
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
new file mode 100644
index 0000000000..d1c15e474f
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
@@ -0,0 +1,11 @@
+package errors
+
+import "errors"
+
+func New(text string) error {
+ return errors.New(text)
+}
+
+func As(err error, target interface{}) bool {
+ return errors.As(err, target)
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
new file mode 100644
index 0000000000..d74e3a0b5a
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
@@ -0,0 +1,9 @@
+//go:build go1.20
+
+package errors
+
+import "errors"
+
+func Join(errs ...error) error {
+ return errors.Join(errs...)
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
new file mode 100644
index 0000000000..700b40229c
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
@@ -0,0 +1,61 @@
+//go:build !go1.20
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errors
+
+// Join returns an error that wraps the given errors.
+// Any nil error values are discarded.
+// Join returns nil if every value in errs is nil.
+// The error formats as the concatenation of the strings obtained
+// by calling the Error method of each element of errs, with a newline
+// between each string.
+//
+// A non-nil error returned by Join implements the Unwrap() []error method.
+func Join(errs ...error) error {
+ n := 0
+ for _, err := range errs {
+ if err != nil {
+ n++
+ }
+ }
+ if n == 0 {
+ return nil
+ }
+ e := &joinError{
+ errs: make([]error, 0, n),
+ }
+ for _, err := range errs {
+ if err != nil {
+ e.errs = append(e.errs, err)
+ }
+ }
+ return e
+}
+
+type joinError struct {
+ errs []error
+}
+
+func (e *joinError) Error() string {
+ // Since Join returns nil if every value in errs is nil,
+ // e.errs cannot be empty.
+ if len(e.errs) == 1 {
+ return e.errs[0].Error()
+ }
+
+ b := []byte(e.errs[0].Error())
+ for _, err := range e.errs[1:] {
+ b = append(b, '\n')
+ b = append(b, err.Error()...)
+ }
+ // At this point, b has at least one byte '\n'.
+ // return unsafe.String(&b[0], len(b))
+ return string(b)
+}
+
+func (e *joinError) Unwrap() []error {
+ return e.errs
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
new file mode 100644
index 0000000000..e77e63ba38
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
@@ -0,0 +1,1620 @@
+// Package mapstructure exposes functionality to convert one arbitrary
+// Go type into another, typically to convert a map[string]interface{}
+// into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+//
+// The simplest function to start with is Decode.
+//
+// # Field Tags
+//
+// When decoding to a struct, mapstructure will use the field name by
+// default to perform the mapping. For example, if a struct has a field
+// "Username" then mapstructure will look for a key in the source value
+// of "username" (case insensitive).
+//
+// type User struct {
+// Username string
+// }
+//
+// You can change the behavior of mapstructure by using struct tags.
+// The default struct tag that mapstructure looks for is "mapstructure"
+// but you can customize it using DecoderConfig.
+//
+// # Renaming Fields
+//
+// To rename the key that mapstructure looks for, use the "mapstructure"
+// tag and set a value directly. For example, to change the "username" example
+// above to "user":
+//
+// type User struct {
+// Username string `mapstructure:"user"`
+// }
+//
+// # Embedded Structs and Squashing
+//
+// Embedded structs are treated as if they're another field with that name.
+// By default, the two structs below are equivalent when decoding with
+// mapstructure:
+//
+// type Person struct {
+// Name string
+// }
+//
+// type Friend struct {
+// Person
+// }
+//
+// type Friend struct {
+// Person Person
+// }
+//
+// This would require an input that looks like below:
+//
+// map[string]interface{}{
+// "person": map[string]interface{}{"name": "alice"},
+// }
+//
+// If your "person" value is NOT nested, then you can append ",squash" to
+// your tag value and mapstructure will treat it as if the embedded struct
+// were part of the struct directly. Example:
+//
+// type Friend struct {
+// Person `mapstructure:",squash"`
+// }
+//
+// Now the following input would be accepted:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
+// When decoding from a struct to a map, the squash tag squashes the struct
+// fields into a single map. Using the example structs from above:
+//
+// Friend{Person: Person{Name: "alice"}}
+//
+// Will be decoded into a map:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
+// DecoderConfig has a field that changes the behavior of mapstructure
+// to always squash embedded structs.
+//
+// # Remainder Values
+//
+// If there are any unmapped keys in the source value, mapstructure by
+// default will silently ignore them. You can make that an error by setting
+// ErrorUnused in DecoderConfig. If you're using Metadata you can also
+// maintain a slice of the unused keys.
+//
+// You can also use the ",remain" suffix on your tag to collect all unused
+// values in a map. The field with this tag MUST be a map type and should
+// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
+// See example below:
+//
+// type Friend struct {
+// Name string
+// Other map[string]interface{} `mapstructure:",remain"`
+// }
+//
+// Given the input below, Other would be populated with the other
+// values that weren't used (everything but "name"):
+//
+// map[string]interface{}{
+// "name": "bob",
+// "address": "123 Maple St.",
+// }
+//
+// # Omit Empty Values
+//
+// When decoding from a struct to any other value, you may use the
+// ",omitempty" suffix on your tag to omit that value if it equates to
+// the zero value. The zero value of all types is specified in the Go
+// specification.
+//
+// For example, the zero value of a numeric type is zero ("0"). If the struct
+// field value is zero and a numeric type, the field is empty, and it won't
+// be encoded into the destination type.
+//
+// type Source struct {
+// Age int `mapstructure:",omitempty"`
+// }
+//
+// # Unexported fields
+//
+// Since unexported (private) struct fields cannot be set outside the package
+// where they are defined, the decoder will simply skip them.
+//
+// For this output type definition:
+//
+// type Exported struct {
+// private string // this unexported field will be skipped
+// Public string
+// }
+//
+// Using this map as input:
+//
+// map[string]interface{}{
+// "private": "I will be ignored",
+// "Public": "I made it through!",
+// }
+//
+// The following struct will be decoded:
+//
+// type Exported struct {
+// private: "" // field is left with an empty string (zero value)
+// Public: "I made it through!"
+// }
+//
+// # Other Configuration
+//
+// mapstructure is highly configurable. See the DecoderConfig struct
+// for other features and options that are supported.
+package mapstructure
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/go-viper/mapstructure/v2/internal/errors"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
+// DecodeHookFuncValue.
+// Values are a superset of Types (Values can return types), and Types are a
+// superset of Kinds (Types can return Kinds) and are generally a richer thing
+// to use, but Kinds are simpler if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
+// values.
+type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
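+
+// For illustration only (a sketch, not upstream code), a DecodeHookFuncType
+// that parses strings into time.Duration values could look like:
+//
+//	func stringToDuration(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+//		if f.Kind() != reflect.String || t != reflect.TypeOf(time.Duration(0)) {
+//			return data, nil
+//		}
+//		return time.ParseDuration(data.(string))
+//	}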
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+ // DecodeHook, if set, will be called before any decoding and any
+ // type conversion (if WeaklyTypedInput is on). This lets you modify
+ // the values before they're set down onto the resulting struct. The
+ // DecodeHook is called for every map and value in the input. This means
+ // that if a struct has embedded fields with squash tags the decode hook
+ // is called only once with all of the input data, not once for each
+ // embedded struct.
+ //
+ // If an error is returned, the entire decode will fail with that error.
+ DecodeHook DecodeHookFunc
+
+ // If ErrorUnused is true, then it is an error for there to exist
+ // keys in the original map that were unused in the decoding process
+ // (extra keys).
+ ErrorUnused bool
+
+ // If ErrorUnset is true, then it is an error for there to exist
+ // fields in the result that were not set in the decoding process
+ // (extra fields). This only applies to decoding to a struct. This
+ // will affect all nested structs as well.
+ ErrorUnset bool
+
+ // ZeroFields, if set to true, will zero fields before writing them.
+ // For example, a map will be emptied before decoded values are put in
+ // it. If this is false, a map will be merged.
+ ZeroFields bool
+
+ // If WeaklyTypedInput is true, the decoder will make the following
+ // "weak" conversions:
+ //
+ // - bools to string (true = "1", false = "0")
+ // - numbers to string (base 10)
+ // - bools to int/uint (true = 1, false = 0)
+ // - strings to int/uint (base implied by prefix)
+ // - int to bool (true if value != 0)
+ // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+ // FALSE, false, False. Anything else is an error)
+ // - empty array = empty map and vice versa
+ // - negative numbers to overflowed uint values (base 10)
+ // - slice of maps to a merged map
+ // - single values are converted to slices if required. Each
+ // element is weakly decoded. For example: "4" can become []int{4}
+ // if the target type is an int slice.
+ //
+ WeaklyTypedInput bool
+
+ // Squash will squash embedded structs. A squash tag may also be
+ // added to an individual struct field using a tag. For example:
+ //
+ // type Parent struct {
+ // Child `mapstructure:",squash"`
+ // }
+ Squash bool
+
+ // Metadata is the struct that will contain extra metadata about
+ // the decoding. If this is nil, then no metadata will be tracked.
+ Metadata *Metadata
+
+ // Result is a pointer to the struct that will contain the decoded
+ // value.
+ Result interface{}
+
+ // The tag name that mapstructure reads for field names. This
+ // defaults to "mapstructure"
+ TagName string
+
+ // The option of the value in the tag that indicates a field should
+ // be squashed. This defaults to "squash".
+ SquashTagOption string
+
+ // IgnoreUntaggedFields ignores all struct fields without explicit
+	// TagName, as if they were tagged `mapstructure:"-"` by default.
+ IgnoreUntaggedFields bool
+
+ // MatchName is the function used to match the map key to the struct
+ // field name or tag. Defaults to `strings.EqualFold`. This can be used
+ // to implement case-sensitive tag values, support snake casing, etc.
+ MatchName func(mapKey, fieldName string) bool
+
+ // DecodeNil, if set to true, will cause the DecodeHook (if present) to run
+ // even if the input is nil. This can be used to provide default values.
+ DecodeNil bool
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+ config *DecoderConfig
+ cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error)
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+ // Keys are the keys of the structure which were successfully decoded
+ Keys []string
+
+ // Unused is a slice of keys that were found in the raw value but
+ // weren't decoded since there was no matching field in the result interface
+ Unused []string
+
+ // Unset is a slice of field names that were found in the result interface
+ // but weren't set in the decoding process since there was no matching value
+ // in the input
+ Unset []string
+}
+
+// Decode takes an input structure and uses reflection to translate it to
+// the output structure. output must be a pointer to a map or struct.
+func Decode(input interface{}, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
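+
+// A minimal usage sketch for Decode (the types and values are illustrative):
+//
+//	type Person struct {
+//		Name string
+//		Age  int
+//	}
+//
+//	var p Person
+//	err := Decode(map[string]interface{}{
+//		"name": "alice",
+//		"age":  30,
+//	}, &p)
+//	// p == Person{Name: "alice", Age: 30} on success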
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
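+
+// Illustrative sketch: with WeakDecode, string inputs are coerced per the
+// WeaklyTypedInput rules in DecoderConfig, e.g.
+//
+//	var n struct{ Count int }
+//	_ = WeakDecode(map[string]interface{}{"count": "42"}, &n)
+//	// n.Count == 42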
+
+// DecodeMetadata is the same as Decode, but is shorthand to
+// enable metadata collection. See DecoderConfig for more info.
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+ config := &DecoderConfig{
+ Metadata: metadata,
+ Result: output,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// WeakDecodeMetadata is the same as Decode, but is shorthand to
+// enable both WeaklyTypedInput and metadata collection. See
+// DecoderConfig for more info.
+func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+ config := &DecoderConfig{
+ Metadata: metadata,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+ val := reflect.ValueOf(config.Result)
+ if val.Kind() != reflect.Ptr {
+ return nil, errors.New("result must be a pointer")
+ }
+
+ val = val.Elem()
+ if !val.CanAddr() {
+ return nil, errors.New("result must be addressable (a pointer)")
+ }
+
+ if config.Metadata != nil {
+ if config.Metadata.Keys == nil {
+ config.Metadata.Keys = make([]string, 0)
+ }
+
+ if config.Metadata.Unused == nil {
+ config.Metadata.Unused = make([]string, 0)
+ }
+
+ if config.Metadata.Unset == nil {
+ config.Metadata.Unset = make([]string, 0)
+ }
+ }
+
+ if config.TagName == "" {
+ config.TagName = "mapstructure"
+ }
+
+ if config.SquashTagOption == "" {
+ config.SquashTagOption = "squash"
+ }
+
+ if config.MatchName == nil {
+ config.MatchName = strings.EqualFold
+ }
+
+ result := &Decoder{
+ config: config,
+ }
+ if config.DecodeHook != nil {
+ result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook)
+ }
+
+ return result, nil
+}
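+
+// Sketch of decoding with metadata collection (illustrative only):
+//
+//	var md Metadata
+//	dec, err := NewDecoder(&DecoderConfig{Result: &out, Metadata: &md})
+//	if err == nil {
+//		err = dec.Decode(input)
+//	}
+//	// md.Keys, md.Unused and md.Unset now describe what was decoded.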
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(input interface{}) error {
+ err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+
+	// Retain some of the original behavior when multiple errors occur
+ var joinedErr interface{ Unwrap() []error }
+ if errors.As(err, &joinedErr) {
+ return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err)
+ }
+
+ return err
+}
+
+// isNil returns true if the input is nil or a typed nil pointer.
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ val := reflect.ValueOf(input)
+ return val.Kind() == reflect.Ptr && val.IsNil()
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
+ var (
+ inputVal = reflect.ValueOf(input)
+ outputKind = getKind(outVal)
+ decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil
+ )
+ if isNil(input) {
+ // Typed nils won't match the "input == nil" below, so reset input.
+ input = nil
+ }
+ if input == nil {
+ // If the data is nil, then we don't set anything, unless ZeroFields is set
+ // to true.
+ if d.config.ZeroFields {
+ outVal.Set(reflect.Zero(outVal.Type()))
+
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+ }
+ if !decodeNil {
+ return nil
+ }
+ }
+ if !inputVal.IsValid() {
+ if !decodeNil {
+ // If the input value is invalid, then we just set the value
+ // to be the zero value.
+ outVal.Set(reflect.Zero(outVal.Type()))
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+ return nil
+ }
+ // Hooks need a valid inputVal, so reset it to zero value of outVal type.
+ switch outputKind {
+ case reflect.Struct, reflect.Map:
+ var mapVal map[string]interface{}
+			inputVal = reflect.ValueOf(mapVal) // create a typed nil map value
+ case reflect.Slice, reflect.Array:
+ var sliceVal []interface{}
+			inputVal = reflect.ValueOf(sliceVal) // create a typed nil slice value
+ default:
+ inputVal = reflect.Zero(outVal.Type())
+ }
+ }
+
+ if d.cachedDecodeHook != nil {
+ // We have a DecodeHook, so let's pre-process the input.
+ var err error
+ input, err = d.cachedDecodeHook(inputVal, outVal)
+ if err != nil {
+ return fmt.Errorf("error decoding '%s': %w", name, err)
+ }
+ }
+ if isNil(input) {
+ return nil
+ }
+
+ var err error
+ addMetaKey := true
+ switch outputKind {
+ case reflect.Bool:
+ err = d.decodeBool(name, input, outVal)
+ case reflect.Interface:
+ err = d.decodeBasic(name, input, outVal)
+ case reflect.String:
+ err = d.decodeString(name, input, outVal)
+ case reflect.Int:
+ err = d.decodeInt(name, input, outVal)
+ case reflect.Uint:
+ err = d.decodeUint(name, input, outVal)
+ case reflect.Float32:
+ err = d.decodeFloat(name, input, outVal)
+ case reflect.Complex64:
+ err = d.decodeComplex(name, input, outVal)
+ case reflect.Struct:
+ err = d.decodeStruct(name, input, outVal)
+ case reflect.Map:
+ err = d.decodeMap(name, input, outVal)
+ case reflect.Ptr:
+ addMetaKey, err = d.decodePtr(name, input, outVal)
+ case reflect.Slice:
+ err = d.decodeSlice(name, input, outVal)
+ case reflect.Array:
+ err = d.decodeArray(name, input, outVal)
+ case reflect.Func:
+ err = d.decodeFunc(name, input, outVal)
+ default:
+ // If we reached this point then we weren't able to decode it
+ return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
+ }
+
+ // If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+ if addMetaKey && d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+
+ return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+ if val.IsValid() && val.Elem().IsValid() {
+ elem := val.Elem()
+
+		// If we can't address this element, then it's not writable. Instead,
+ // we make a copy of the value (which is a pointer and therefore
+ // writable), decode into that, and replace the whole value.
+ copied := false
+ if !elem.CanAddr() {
+ copied = true
+
+ // Make *T
+ copy := reflect.New(elem.Type())
+
+ // *T = elem
+ copy.Elem().Set(elem)
+
+ // Set elem so we decode into it
+ elem = copy
+ }
+
+ // Decode. If we have an error then return. We also return right
+ // away if we're not a copy because that means we decoded directly.
+ if err := d.decode(name, data, elem); err != nil || !copied {
+ return err
+ }
+
+		// If we're a copy, we need to set the final result
+ val.Set(elem.Elem())
+ return nil
+ }
+
+ dataVal := reflect.ValueOf(data)
+
+ // If the input data is a pointer, and the assigned type is the dereference
+ // of that exact pointer, then indirect it so that we can assign it.
+ // Example: *string to string
+ if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
+ dataVal = reflect.Indirect(dataVal)
+ }
+
+ if !dataVal.IsValid() {
+ dataVal = reflect.Zero(val.Type())
+ }
+
+ dataValType := dataVal.Type()
+ if !dataValType.AssignableTo(val.Type()) {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got '%s'",
+ name, val.Type(), dataValType)
+ }
+
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ converted := true
+ switch {
+ case dataKind == reflect.String:
+ val.SetString(dataVal.String())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetString("1")
+ } else {
+ val.SetString("0")
+ }
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+ case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+ dataKind == reflect.Array && d.config.WeaklyTypedInput:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ switch elemKind {
+ case reflect.Uint8:
+ var uints []uint8
+ if dataKind == reflect.Array {
+ uints = make([]uint8, dataVal.Len(), dataVal.Len())
+ for i := range uints {
+ uints[i] = dataVal.Index(i).Interface().(uint8)
+ }
+ } else {
+ uints = dataVal.Interface().([]uint8)
+ }
+ val.SetString(string(uints))
+ default:
+ converted = false
+ }
+ default:
+ converted = false
+ }
+
+ if !converted {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetInt(dataVal.Int())
+ case dataKind == reflect.Uint:
+ val.SetInt(int64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetInt(int64(dataVal.Float()))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetInt(1)
+ } else {
+ val.SetInt(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseInt(str, 0, val.Type().Bits())
+ if err == nil {
+ val.SetInt(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Int64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetInt(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ i := dataVal.Int()
+ if i < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %d overflows uint",
+ name, i)
+ }
+ val.SetUint(uint64(i))
+ case dataKind == reflect.Uint:
+ val.SetUint(dataVal.Uint())
+ case dataKind == reflect.Float32:
+ f := dataVal.Float()
+ if f < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %f overflows uint",
+ name, f)
+ }
+ val.SetUint(uint64(f))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetUint(1)
+ } else {
+ val.SetUint(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseUint(str, 0, val.Type().Bits())
+ if err == nil {
+ val.SetUint(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := strconv.ParseUint(string(jn), 0, 64)
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetUint(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Bool:
+ val.SetBool(dataVal.Bool())
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Int() != 0)
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Uint() != 0)
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Float() != 0)
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ b, err := strconv.ParseBool(dataVal.String())
+ if err == nil {
+ val.SetBool(b)
+ } else if dataVal.String() == "" {
+ val.SetBool(false)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'",
+ name, val, dataVal, data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetFloat(float64(dataVal.Int()))
+ case dataKind == reflect.Uint:
+ val.SetFloat(float64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetFloat(dataVal.Float())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetFloat(1)
+ } else {
+ val.SetFloat(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ f, err := strconv.ParseFloat(str, val.Type().Bits())
+ if err == nil {
+ val.SetFloat(f)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Float64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetFloat(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Complex64:
+ val.SetComplex(dataVal.Complex())
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // By default we overwrite keys in the current map
+ valMap := val
+
+ // If the map is nil or we're purposely zeroing fields, make a new map
+ if valMap.IsNil() || d.config.ZeroFields {
+ // Make a new map to hold our result
+ mapType := reflect.MapOf(valKeyType, valElemType)
+ valMap = reflect.MakeMap(mapType)
+ }
+
+ dataVal := reflect.ValueOf(data)
+
+ // Resolve any levels of indirection
+ for dataVal.Kind() == reflect.Pointer {
+ dataVal = reflect.Indirect(dataVal)
+ }
+
+ // Check input type and based on the input type jump to the proper func
+ switch dataVal.Kind() {
+ case reflect.Map:
+ return d.decodeMapFromMap(name, dataVal, val, valMap)
+
+ case reflect.Struct:
+ return d.decodeMapFromStruct(name, dataVal, val, valMap)
+
+ case reflect.Array, reflect.Slice:
+ if d.config.WeaklyTypedInput {
+ return d.decodeMapFromSlice(name, dataVal, val, valMap)
+ }
+
+ fallthrough
+
+ default:
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+}
+
+func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ // Special case for BC reasons (covered by tests)
+ if dataVal.Len() == 0 {
+ val.Set(valMap)
+ return nil
+ }
+
+ for i := 0; i < dataVal.Len(); i++ {
+ err := d.decode(
+ name+"["+strconv.Itoa(i)+"]",
+ dataVal.Index(i).Interface(), val)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // Accumulate errors
+ var errs []error
+
+ // If the input data is empty, then we just match what the input data is.
+ if dataVal.Len() == 0 {
+ if dataVal.IsNil() {
+ if !val.IsNil() {
+ val.Set(dataVal)
+ }
+ } else {
+ // Set to empty allocated value
+ val.Set(valMap)
+ }
+
+ return nil
+ }
+
+ for _, k := range dataVal.MapKeys() {
+ fieldName := name + "[" + k.String() + "]"
+
+ // First decode the key into the proper type
+ currentKey := reflect.Indirect(reflect.New(valKeyType))
+ if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+
+ // Next decode the data into the proper type
+ v := dataVal.MapIndex(k).Interface()
+ currentVal := reflect.Indirect(reflect.New(valElemType))
+ if err := d.decode(fieldName, v, currentVal); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+
+ valMap.SetMapIndex(currentKey, currentVal)
+ }
+
+ // Set the built up map to the value
+ val.Set(valMap)
+
+ return errors.Join(errs...)
+}
+
+func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ typ := dataVal.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ // Get the StructField first since this is a cheap operation. If the
+ // field is unexported, then ignore it.
+ f := typ.Field(i)
+ if f.PkgPath != "" {
+ continue
+ }
+
+ // Next get the actual value of this field and verify it is assignable
+ // to the map value.
+ v := dataVal.Field(i)
+ if !v.Type().AssignableTo(valMap.Type().Elem()) {
+ return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+ }
+
+ tagValue := f.Tag.Get(d.config.TagName)
+ keyName := f.Name
+
+ if tagValue == "" && d.config.IgnoreUntaggedFields {
+ continue
+ }
+
+ // If Squash is set in the config, we squash the field down.
+ squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
+
+ v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
+
+ // Determine the name of the key in the map
+ if index := strings.Index(tagValue, ","); index != -1 {
+ if tagValue[:index] == "-" {
+ continue
+ }
+ // If "omitempty" is specified in the tag, it ignores empty values.
+ if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
+ continue
+ }
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption)
+ if squash {
+ // When squashing, the embedded type can be a pointer to a struct.
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+ v = v.Elem()
+ }
+
+ // The final type must be a struct
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+ }
+ } else {
+ if strings.Index(tagValue[index+1:], "remain") != -1 {
+ if v.Kind() != reflect.Map {
+ return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type())
+ }
+
+ ptr := v.MapRange()
+ for ptr.Next() {
+ valMap.SetMapIndex(ptr.Key(), ptr.Value())
+ }
+ continue
+ }
+ }
+ if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
+ keyName = keyNameTagValue
+ }
+ } else if len(tagValue) > 0 {
+ if tagValue == "-" {
+ continue
+ }
+ keyName = tagValue
+ }
+
+ switch v.Kind() {
+ // this is an embedded struct, so handle it differently
+ case reflect.Struct:
+ x := reflect.New(v.Type())
+ x.Elem().Set(v)
+
+ vType := valMap.Type()
+ vKeyType := vType.Key()
+ vElemType := vType.Elem()
+ mType := reflect.MapOf(vKeyType, vElemType)
+ vMap := reflect.MakeMap(mType)
+
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+			// whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(vMap.Type())
+ reflect.Indirect(addrVal).Set(vMap)
+
+ err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
+ if err != nil {
+ return err
+ }
+
+ // the underlying map may have been completely overwritten so pull
+ // it indirectly out of the enclosing value.
+ vMap = reflect.Indirect(addrVal)
+
+ if squash {
+ for _, k := range vMap.MapKeys() {
+ valMap.SetMapIndex(k, vMap.MapIndex(k))
+ }
+ } else {
+ valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+ }
+
+ default:
+ valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+ }
+ }
+
+ if val.CanAddr() {
+ val.Set(valMap)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
+ // If the input data is nil, then we want to just set the output
+ // pointer to be nil as well.
+ isNil := data == nil
+ if !isNil {
+ switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
+ case reflect.Chan,
+ reflect.Func,
+ reflect.Interface,
+ reflect.Map,
+ reflect.Ptr,
+ reflect.Slice:
+ isNil = v.IsNil()
+ }
+ }
+ if isNil {
+ if !val.IsNil() && val.CanSet() {
+ nilValue := reflect.New(val.Type()).Elem()
+ val.Set(nilValue)
+ }
+
+ return true, nil
+ }
+
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ valType := val.Type()
+ valElemType := valType.Elem()
+ if val.CanSet() {
+ realVal := val
+ if realVal.IsNil() || d.config.ZeroFields {
+ realVal = reflect.New(valElemType)
+ }
+
+ if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+ return false, err
+ }
+
+ val.Set(realVal)
+ } else {
+ if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+ return false, err
+ }
+ }
+ return false, nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ if val.Type() != dataVal.Type() {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ sliceType := reflect.SliceOf(valElemType)
+
+ // If we have a non array/slice type then we first attempt to convert.
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Slice and array we use the normal logic
+ case dataValKind == reflect.Slice, dataValKind == reflect.Array:
+ break
+
+ // Empty maps turn into empty slices
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.MakeSlice(sliceType, 0, 0))
+ return nil
+ }
+ // Create slice of maps of other sizes
+ return d.decodeSlice(name, []interface{}{data}, val)
+
+ case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
+ return d.decodeSlice(name, []byte(dataVal.String()), val)
+
+ // All other types we try to convert to the slice type
+ // and "lift" it into it. i.e. a string becomes a string slice.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeSlice(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ }
+
+ // If the input value is nil, then don't allocate since empty != nil
+ if dataValKind != reflect.Array && dataVal.IsNil() {
+ return nil
+ }
+
+ valSlice := val
+ if valSlice.IsNil() || d.config.ZeroFields {
+ // Make a new slice to hold our result, same size as the original data.
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+ } else if valSlice.Len() > dataVal.Len() {
+ valSlice = valSlice.Slice(0, dataVal.Len())
+ }
+
+ // Accumulate any errors
+ var errs []error
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ for valSlice.Len() <= i {
+ valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+ }
+ currentField := valSlice.Index(i)
+
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Finally, set the value to the slice we built up
+ val.Set(valSlice)
+
+ return errors.Join(errs...)
+}
+
+func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ arrayType := reflect.ArrayOf(valType.Len(), valElemType)
+
+ valArray := val
+
+ if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+ // Check input type
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Empty maps turn into empty arrays
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.Zero(arrayType))
+ return nil
+ }
+
+ // All other types we try to convert to the array type
+ // and "lift" it into it. i.e. a string becomes a string array.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeArray(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+ }
+ if dataVal.Len() > arrayType.Len() {
+ return fmt.Errorf(
+ "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
+ }
+
+ // Make a new array to hold our result, same size as the original data.
+ valArray = reflect.New(arrayType).Elem()
+ }
+
+ // Accumulate any errors
+ var errs []error
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ currentField := valArray.Index(i)
+
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Finally, set the value to the array we built up
+ val.Set(valArray)
+
+ return errors.Join(errs...)
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+ // If the type of the value to write to and the data match directly,
+ // then we just set it directly instead of recursing into the structure.
+ if dataVal.Type() == val.Type() {
+ val.Set(dataVal)
+ return nil
+ }
+
+ dataValKind := dataVal.Kind()
+ switch dataValKind {
+ case reflect.Map:
+ return d.decodeStructFromMap(name, dataVal, val)
+
+ case reflect.Struct:
+ // Not the most efficient way to do this but we can optimize later if
+ // we want to. To convert from struct to struct we go to map first
+ // as an intermediary.
+
+ // Make a new map to hold our result
+ mapType := reflect.TypeOf((map[string]interface{})(nil))
+ mval := reflect.MakeMap(mapType)
+
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+		// whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(mval.Type())
+
+ reflect.Indirect(addrVal).Set(mval)
+ if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
+ return err
+ }
+
+ result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
+ return result
+
+ default:
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+}
+
+func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
+ dataValType := dataVal.Type()
+ if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+ return fmt.Errorf(
+ "'%s' needs a map with string keys, has '%s' keys",
+ name, dataValType.Key().Kind())
+ }
+
+ dataValKeys := make(map[reflect.Value]struct{})
+ dataValKeysUnused := make(map[interface{}]struct{})
+ for _, dataValKey := range dataVal.MapKeys() {
+ dataValKeys[dataValKey] = struct{}{}
+ dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+ }
+
+ targetValKeysUnused := make(map[interface{}]struct{})
+
+ var errs []error
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = val
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+
+ // remainField is set to a valid field set with the "remain" tag if
+ // we are keeping track of remaining values.
+ var remainField *field
+
+ fields := []field{}
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ fieldVal := structVal.Field(i)
+ if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
+ // Handle embedded struct pointers as embedded structs.
+ fieldVal = fieldVal.Elem()
+ }
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
+ remain := false
+
+ // We always parse the tags because we're looking for other tags too
+ tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+ for _, tag := range tagParts[1:] {
+ if tag == d.config.SquashTagOption {
+ squash = true
+ break
+ }
+
+ if tag == "remain" {
+ remain = true
+ break
+ }
+ }
+
+ if squash {
+ switch fieldVal.Kind() {
+ case reflect.Struct:
+ structs = append(structs, fieldVal)
+ case reflect.Interface:
+ if !fieldVal.IsNil() {
+ structs = append(structs, fieldVal.Elem().Elem())
+ }
+ default:
+ errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
+ }
+ continue
+ }
+
+ // Build our field
+ if remain {
+ remainField = &field{fieldType, fieldVal}
+ } else {
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, fieldVal})
+ }
+ }
+ }
+
+ // for fieldType, field := range fields {
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ fieldName := field.Name
+
+ tagValue := field.Tag.Get(d.config.TagName)
+ if tagValue == "" && d.config.IgnoreUntaggedFields {
+ continue
+ }
+ tagValue = strings.SplitN(tagValue, ",", 2)[0]
+ if tagValue != "" {
+ fieldName = tagValue
+ }
+
+ rawMapKey := reflect.ValueOf(fieldName)
+ rawMapVal := dataVal.MapIndex(rawMapKey)
+ if !rawMapVal.IsValid() {
+ // Do a slower search by iterating over each key and
+ // doing case-insensitive search.
+ for dataValKey := range dataValKeys {
+ mK, ok := dataValKey.Interface().(string)
+ if !ok {
+ // Not a string key
+ continue
+ }
+
+ if d.config.MatchName(mK, fieldName) {
+ rawMapKey = dataValKey
+ rawMapVal = dataVal.MapIndex(dataValKey)
+ break
+ }
+ }
+
+ if !rawMapVal.IsValid() {
+ // There was no matching key in the map for the value in
+ // the struct. Remember it for potential errors and metadata.
+ targetValKeysUnused[fieldName] = struct{}{}
+ continue
+ }
+ }
+
+ if !fieldValue.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !fieldValue.CanSet() {
+ continue
+ }
+
+ // Delete the key we're using from the unused map so we stop tracking
+ delete(dataValKeysUnused, rawMapKey.Interface())
+
+ // If the name is empty string, then we're at the root, and we
+ // don't dot-join the fields.
+ if name != "" {
+ fieldName = name + "." + fieldName
+ }
+
+ if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have a "remain"-tagged field and we have unused keys then
+ // we put the unused keys directly into the remain field.
+ if remainField != nil && len(dataValKeysUnused) > 0 {
+ // Build a map of only the unused values
+ remain := map[interface{}]interface{}{}
+ for key := range dataValKeysUnused {
+ remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
+ }
+
+ // Decode it as-if we were just decoding this map onto our map.
+ if err := d.decodeMap(name, remain, remainField.val); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Clear the unused-keys map so the next check (ErrorUnused) does not
+ // report keys that the remain field has already absorbed.
+ dataValKeysUnused = nil
+ }
+
+ if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+ keys := make([]string, 0, len(dataValKeysUnused))
+ for rawKey := range dataValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+ errs = append(errs, err)
+ }
+
+ if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
+ keys := make([]string, 0, len(targetValKeysUnused))
+ for rawKey := range targetValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
+ errs = append(errs, err)
+ }
+
+ if err := errors.Join(errs...); err != nil {
+ return err
+ }
+
+ // If we're tracking metadata, record unused source keys and unset target fields
+ if d.config.Metadata != nil {
+ for rawKey := range dataValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = name + "." + key
+ }
+
+ d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+ }
+ for rawKey := range targetValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = name + "." + key
+ }
+
+ d.config.Metadata.Unset = append(d.config.Metadata.Unset, key)
+ }
+ }
+
+ return nil
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch getKind(v) {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+ kind := val.Kind()
+
+ switch {
+ case kind >= reflect.Int && kind <= reflect.Int64:
+ return reflect.Int
+ case kind >= reflect.Uint && kind <= reflect.Uint64:
+ return reflect.Uint
+ case kind >= reflect.Float32 && kind <= reflect.Float64:
+ return reflect.Float32
+ case kind >= reflect.Complex64 && kind <= reflect.Complex128:
+ return reflect.Complex64
+ default:
+ return kind
+ }
+}
+
+func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool {
+ for i := 0; i < typ.NumField(); i++ {
+ f := typ.Field(i)
+ if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
+ return true
+ }
+ if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
+ return true
+ }
+ }
+ return false
+}
+
+func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return v
+ }
+ deref := v.Elem()
+ derefT := deref.Type()
+ if isStructTypeConvertibleToMap(derefT, true, tagName) {
+ return deref
+ }
+ return v
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
new file mode 100644
index 0000000000..d0913fff6c
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
@@ -0,0 +1,42 @@
+//go:build !go1.20
+
+package mapstructure
+
+import "reflect"
+
+func isComparable(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Invalid:
+ return false
+
+ case reflect.Array:
+ switch v.Type().Elem().Kind() {
+ case reflect.Interface, reflect.Array, reflect.Struct:
+ for i := 0; i < v.Type().Len(); i++ {
+ // if !v.Index(i).Comparable() {
+ if !isComparable(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return v.Type().Comparable()
+
+ case reflect.Interface:
+ // return v.Elem().Comparable()
+ return isComparable(v.Elem())
+
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ // if !v.Field(i).Comparable() {
+ if !isComparable(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+
+ default:
+ return v.Type().Comparable()
+ }
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
new file mode 100644
index 0000000000..f8255a1b17
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
@@ -0,0 +1,10 @@
+//go:build go1.20
+
+package mapstructure
+
+import "reflect"
+
+// TODO: remove once we drop support for Go <1.20
+func isComparable(v reflect.Value) bool {
+ return v.Comparable()
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md
index 859a950867..73fe513468 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md
+++ b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md
@@ -1,3 +1,29 @@
+## v2.7.0 (2025-04-03)
+
+* [GH-3306](https://github.com/gophercloud/gophercloud/pull/3306) [v2] identity: Add Get endpoint by ID
+* [GH-3325](https://github.com/gophercloud/gophercloud/pull/3325) [v2] Switch to a version of gocovmerge compatible with go 1.22
+* [GH-3327](https://github.com/gophercloud/gophercloud/pull/3327) Merge pull request #3209 from shiftstack/proper-service-discovery
+* [GH-3328](https://github.com/gophercloud/gophercloud/pull/3328) [v2] Improve support for `network standard-attr-*` extensions
+* [GH-3330](https://github.com/gophercloud/gophercloud/pull/3330) [v2] Enhance Snapshot struct and add ListDetail function in V3 blockstorage
+* [GH-3333](https://github.com/gophercloud/gophercloud/pull/3333) [v2] vpnaas: add support for more ciphers (auth, encryption, pfs modes)
+* [GH-3334](https://github.com/gophercloud/gophercloud/pull/3334) [v2] Added support for VIF's in Baremetal
+* [GH-3335](https://github.com/gophercloud/gophercloud/pull/3335) [v2] Baremetal virtual media Get API
+
+## v2.6.0 (2025-03-03)
+
+* [GH-3309](https://github.com/gophercloud/gophercloud/pull/3309) Backport: Added support for hypervisor_hostname to v2
+
+## v2.5.0 (2025-02-11)
+
+* [GH-3278](https://github.com/gophercloud/gophercloud/pull/3278) [v2] test: Ensure that randomly created secgroup rules don't conflict
+* [GH-3287](https://github.com/gophercloud/gophercloud/pull/3287) [v2] Fix panic in ExtractIntoStructPtr
+* [GH-3288](https://github.com/gophercloud/gophercloud/pull/3288) [v2] Fix JSON field name hints in APIVersion structs
+* [GH-3292](https://github.com/gophercloud/gophercloud/pull/3292) [v2] Add permissions to the label-issue workflow
+* [GH-3294](https://github.com/gophercloud/gophercloud/pull/3294) [v2] Add support for zone sharing in DNS v2
+* [GH-3296](https://github.com/gophercloud/gophercloud/pull/3296) build(deps): bump golang.org/x/crypto from 0.30.0 to 0.31.0
+* [GH-3297](https://github.com/gophercloud/gophercloud/pull/3297) [v2] build(deps): bump golang.org/x/crypto from 0.31.0 to 0.32.0
+* [GH-3298](https://github.com/gophercloud/gophercloud/pull/3298) [v2] build(deps): bump golang.org/x/crypto from 0.32.0 to 0.33.0
+
## v2.4.0 (2024-12-18)
* [GH-3270](https://github.com/gophercloud/gophercloud/pull/3270) [v2] SG rules: implement bulk create
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
index 2fbc3c97f1..8818e769b8 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/endpoint_search.go
@@ -1,5 +1,7 @@
package gophercloud
+import "slices"
+
// Availability indicates to whom a specific service endpoint is accessible:
// the internet at large, internal networks only, or only to administrators.
// Different identity services use different terminology for these. Identity v2
@@ -22,6 +24,31 @@ const (
AvailabilityInternal Availability = "internal"
)
+// ServiceTypeAliases contains a mapping of service types to any aliases, as
+// defined by the OpenStack Service Types Authority. Only service types that
+// we support are included.
+var ServiceTypeAliases = map[string][]string{
+ "application-container": {"container"},
+ "baremetal": {"bare-metal"},
+ "baremetal-introspection": {},
+ "block-storage": {"block-store", "volume", "volumev2", "volumev3"},
+ "compute": {},
+ "container-infrastructure-management": {"container-infrastructure", "container-infra"},
+ "database": {},
+ "dns": {},
+ "identity": {},
+ "image": {},
+ "key-manager": {},
+ "load-balancer": {},
+ "message": {"messaging"},
+ "networking": {},
+ "object-store": {},
+ "orchestration": {},
+ "placement": {},
+ "shared-file-system": {"sharev2", "share"},
+ "workflow": {"workflowv2"},
+}
+
// EndpointOpts specifies search criteria used by queries against an
// OpenStack service catalog. The options must contain enough information to
// unambiguously identify one, and only one, endpoint within the catalog.
@@ -30,8 +57,9 @@ const (
// package, like "openstack.NewComputeV2()".
type EndpointOpts struct {
// Type [required] is the service type for the client (e.g., "compute",
- // "object-store"). Generally, this will be supplied by the service client
- // function, but a user-given value will be honored if provided.
+ // "object-store"), as defined by the OpenStack Service Types Authority.
+ // This will generally be supplied by the service client function, but a
+ // user-given value will be honored if provided.
Type string
// Name [optional] is the service name for the client (e.g., "nova") as it
@@ -39,6 +67,13 @@ type EndpointOpts struct {
// different Name, which is why both Type and Name are sometimes needed.
Name string
+ // Aliases [optional] is the set of aliases of the service type (e.g.
+ // "volumev2"/"volumev3", "volume" and "block-store" for the
+ // "block-storage" service type), as defined by the OpenStack Service Types
+ // Authority. As with Type, this will generally be supplied by the service
+ // client function, but a user-given value will be honored if provided.
+ Aliases []string
+
// Region [required] is the geographic region in which the endpoint resides,
// generally specifying which datacenter should house your resources.
// Required only for services that span multiple regions.
@@ -73,4 +108,26 @@ func (eo *EndpointOpts) ApplyDefaults(t string) {
if eo.Availability == "" {
eo.Availability = AvailabilityPublic
}
+ if len(eo.Aliases) == 0 {
+ if aliases, ok := ServiceTypeAliases[eo.Type]; ok {
+ // happy path: user requested a service type by its official name
+ eo.Aliases = aliases
+ } else {
+ // unhappy path: user requested a service type by its alias or an
+ // invalid/unsupported service type
+ // TODO(stephenfin): This should probably be an error in v3
+ for t, aliases := range ServiceTypeAliases {
+ if slices.Contains(aliases, eo.Type) {
+ // we intentionally override the service type, even if it
+ // was explicitly requested by the user
+ eo.Type = t
+ eo.Aliases = aliases
+ }
+ }
+ }
+ }
+}
+
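+// Types returns the service type followed by any aliases, in priority order.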
+func (eo *EndpointOpts) Types() []string {
+ return append([]string{eo.Type}, eo.Aliases...)
}
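
For context, this is roughly how the alias machinery added above behaves from a consumer's point of view (a sketch; the New*V* service client constructors call ApplyDefaults internally):

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2"
)

func main() {
	// Request a service by a legacy alias; ApplyDefaults canonicalizes it.
	eo := gophercloud.EndpointOpts{Region: "RegionOne"}
	eo.ApplyDefaults("volumev3")

	fmt.Println(eo.Type)    // "block-storage"
	fmt.Println(eo.Types()) // [block-storage block-store volume volumev2 volumev3]
}
```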
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
index 43b569d3b4..122a3ee699 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go
@@ -344,6 +344,7 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp
}, nil
}
+// TODO(stephenfin): Allow passing aliases to all New${SERVICE}V${VERSION} methods in v3
func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) {
sc := new(gophercloud.ServiceClient)
eo.ApplyDefaults(clientType)
@@ -393,6 +394,7 @@ func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpt
return sc, err
}
+// TODO(stephenfin): Remove this in v3. We no longer support the V1 Block Storage service.
// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1
// block storage service.
func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
@@ -402,17 +404,17 @@ func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.Endpoi
// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2
// block storage service.
func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
- return initClientOpts(client, eo, "volumev2")
+ return initClientOpts(client, eo, "block-storage")
}
// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service.
func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
- return initClientOpts(client, eo, "volumev3")
+ return initClientOpts(client, eo, "block-storage")
}
// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service.
func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
- return initClientOpts(client, eo, "sharev2")
+ return initClientOpts(client, eo, "shared-file-system")
}
// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1
@@ -457,14 +459,14 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi
// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging
// service.
func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
- sc, err := initClientOpts(client, eo, "messaging")
+ sc, err := initClientOpts(client, eo, "message")
sc.MoreHeaders = map[string]string{"Client-ID": clientID}
return sc, err
}
// NewContainerV1 creates a ServiceClient that may be used with v1 container package
func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
- return initClientOpts(client, eo, "container")
+ return initClientOpts(client, eo, "application-container")
}
// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key
@@ -478,12 +480,12 @@ func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.Endpoint
// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management
// package.
func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
- return initClientOpts(client, eo, "container-infra")
+ return initClientOpts(client, eo, "container-infrastructure-management")
}
// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package.
func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
- return initClientOpts(client, eo, "workflowv2")
+ return initClientOpts(client, eo, "workflow")
}
// NewPlacementV1 creates a ServiceClient that may be used with the placement package.
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
index dd3b132d1d..44e8cccaeb 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/requests.go
@@ -508,6 +508,9 @@ type CreateOpts struct {
// DiskConfig [optional] controls how the created server's disk is partitioned.
DiskConfig DiskConfig `json:"OS-DCF:diskConfig,omitempty"`
+
+ // HypervisorHostname is the name of the hypervisor to which the server is scheduled.
+ HypervisorHostname string `json:"hypervisor_hostname,omitempty"`
}
// ToServerCreateMap assembles a request body based on the contents of a
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go
index 2cdbd3e7f7..14cff0d755 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/endpoint_location.go
@@ -1,6 +1,8 @@
package openstack
import (
+ "slices"
+
"github.com/gophercloud/gophercloud/v2"
tokens2 "github.com/gophercloud/gophercloud/v2/openstack/identity/v2/tokens"
tokens3 "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/tokens"
@@ -20,7 +22,7 @@ func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpt
// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
var endpoints = make([]tokens2.Endpoint, 0, 1)
for _, entry := range catalog.Entries {
- if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) {
+ if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) {
for _, endpoint := range entry.Endpoints {
if opts.Region == "" || endpoint.Region == opts.Region {
endpoints = append(endpoints, endpoint)
@@ -74,7 +76,7 @@ func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpt
// Name if provided, and Region if provided.
var endpoints = make([]tokens3.Endpoint, 0, 1)
for _, entry := range catalog.Entries {
- if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) {
+ if (slices.Contains(opts.Types(), entry.Type)) && (opts.Name == "" || entry.Name == opts.Name) {
for _, endpoint := range entry.Endpoints {
if opts.Availability != gophercloud.AvailabilityAdmin &&
opts.Availability != gophercloud.AvailabilityPublic &&
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go
index a3afb0403c..be8949d693 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/requests.go
@@ -2,6 +2,7 @@ package floatingips
import (
"context"
+ "fmt"
"github.com/gophercloud/gophercloud/v2"
"github.com/gophercloud/gophercloud/v2/pagination"
@@ -37,6 +38,7 @@ type ListOpts struct {
TagsAny string `q:"tags-any"`
NotTags string `q:"not-tags"`
NotTagsAny string `q:"not-tags-any"`
+ RevisionNumber *int `q:"revision_number"`
}
// ToNetworkListQuery formats a ListOpts into a query string.
@@ -144,6 +146,11 @@ type UpdateOpts struct {
Description *string `json:"description,omitempty"`
PortID *string `json:"port_id,omitempty"`
FixedIP string `json:"fixed_ip_address,omitempty"`
+
+ // RevisionNumber implements extension:standard-attr-revisions. If != "" it
+ // will set revision_number=%s. If the revision number does not match, the
+ // update will fail.
+ RevisionNumber *int `json:"-" h:"If-Match"`
}
// ToFloatingIPUpdateMap allows UpdateOpts to satisfy the UpdateOptsBuilder
@@ -171,8 +178,19 @@ func Update(ctx context.Context, c *gophercloud.ServiceClient, id string, opts U
r.Err = err
return
}
+ h, err := gophercloud.BuildHeaders(opts)
+ if err != nil {
+ r.Err = err
+ return
+ }
+ for k := range h {
+ if k == "If-Match" {
+ h[k] = fmt.Sprintf("revision_number=%s", h[k])
+ }
+ }
resp, err := c.Put(ctx, resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
- OkCodes: []int{200},
+ MoreHeaders: h,
+ OkCodes: []int{200},
})
_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
return
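
A hedged sketch of how the new RevisionNumber field drives optimistic locking; the helper name updateDescription, the revision value, and fipID are invented for illustration:

```go
package main

import (
	"context"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips"
)

// updateDescription is an invented helper showing the optimistic-locking flow.
func updateDescription(ctx context.Context, client *gophercloud.ServiceClient, fipID string) error {
	rev := 7 // typically read back from a prior Get
	desc := "updated by automation"
	opts := floatingips.UpdateOpts{
		Description:    &desc,
		RevisionNumber: &rev, // serialized as "If-Match: revision_number=7"
	}
	// If the server-side revision no longer matches, the update fails with
	// 412 Precondition Failed instead of clobbering a concurrent change.
	_, err := floatingips.Update(ctx, client, fipID, opts).Extract()
	return err
}

func main() {} // wiring of ctx/client omitted in this sketch
```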
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go
index 50740ebf30..7ea6160032 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go
@@ -56,6 +56,9 @@ type FloatingIP struct {
// Tags optionally set via extensions/attributestags
Tags []string `json:"tags"`
+
+ // RevisionNumber optionally set via extensions/standard-attr-revisions
+ RevisionNumber int `json:"revision_number"`
}
func (r *FloatingIP) UnmarshalJSON(b []byte) error {
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go
index 218c2897f7..bfff2dffb2 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/requests.go
@@ -41,6 +41,7 @@ type ListOpts struct {
TagsAny string `q:"tags-any"`
NotTags string `q:"not-tags"`
NotTagsAny string `q:"not-tags-any"`
+ RevisionNumber *int `q:"revision_number"`
SecurityGroups []string `q:"security_groups"`
FixedIPs []FixedIPOpts
}
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go
index ad3edc92d6..52fcd38ab3 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go
@@ -13,7 +13,7 @@ import (
// DefaultUserAgent is the default User-Agent string set in the request header.
const (
- DefaultUserAgent = "gophercloud/v2.4.0"
+ DefaultUserAgent = "gophercloud/v2.7.0"
DefaultMaxBackoffRetries = 60
)
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/results.go b/vendor/github.com/gophercloud/gophercloud/v2/results.go
index 9e6f630abb..b12c15a026 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/results.go
@@ -184,10 +184,19 @@ func (r Result) ExtractIntoStructPtr(to any, label string) error {
return r.Err
}
+ if to == nil {
+ return fmt.Errorf("Expected pointer, got %T", to)
+ }
+
t := reflect.TypeOf(to)
if k := t.Kind(); k != reflect.Ptr {
return fmt.Errorf("Expected pointer, got %v", k)
}
+
+ if reflect.ValueOf(to).IsNil() {
+ return fmt.Errorf("Expected pointer, got %T", to)
+ }
+
switch t.Elem().Kind() {
case reflect.Struct:
return r.extractIntoPtr(to, label)
@@ -210,10 +219,19 @@ func (r Result) ExtractIntoSlicePtr(to any, label string) error {
return r.Err
}
+ if to == nil {
+ return fmt.Errorf("Expected pointer, got %T", to)
+ }
+
t := reflect.TypeOf(to)
if k := t.Kind(); k != reflect.Ptr {
return fmt.Errorf("Expected pointer, got %v", k)
}
+
+ if reflect.ValueOf(to).IsNil() {
+ return fmt.Errorf("Expected pointer, got %T", to)
+ }
+
switch t.Elem().Kind() {
case reflect.Slice:
return r.extractIntoPtr(to, label)
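
The two new guards in results.go are needed because an untyped nil and a typed-nil pointer fail different checks; a small self-contained demonstration:

```go
package main

import (
	"fmt"
	"reflect"
)

type server struct{}

func main() {
	var p *server // typed nil: a non-nil interface wrapping a nil pointer
	var to any = p

	fmt.Println(to == nil)                   // false: the interface carries a type
	fmt.Println(reflect.ValueOf(to).IsNil()) // true: the pointer inside is nil
	// Dereferencing `to` without both checks is what caused the panic fixed
	// in GH-3287.
}
```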
diff --git a/vendor/github.com/gophercloud/gophercloud/v2/service_client.go b/vendor/github.com/gophercloud/gophercloud/v2/service_client.go
index 11b80108c3..c1f9f41d4d 100644
--- a/vendor/github.com/gophercloud/gophercloud/v2/service_client.go
+++ b/vendor/github.com/gophercloud/gophercloud/v2/service_client.go
@@ -115,13 +115,17 @@ func (client *ServiceClient) Head(ctx context.Context, url string, opts *Request
}
func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) {
+ serviceType := client.Type
+
switch client.Type {
case "compute":
opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion
- case "sharev2":
+ case "shared-file-system", "sharev2", "share":
opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion
- case "volume":
+ case "block-storage", "block-store", "volume", "volumev3":
opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion
+ // cinder should accept block-storage but (as of Dalmatian) does not
+ serviceType = "volume"
case "baremetal":
opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion
case "baremetal-introspection":
@@ -129,7 +133,7 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) {
}
if client.Type != "" {
- opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion
+ opts.MoreHeaders["OpenStack-API-Version"] = serviceType + " " + client.Microversion
}
}
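
A sketch of the observable effect for a block-storage client (header values shown in comments; the client wiring is elided):

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/v2"
)

func main() {
	sc := &gophercloud.ServiceClient{Type: "block-storage", Microversion: "3.50"}
	fmt.Println(sc.Type, sc.Microversion)
	// Requests through sc now carry:
	//   X-OpenStack-Volume-API-Version: 3.50
	//   OpenStack-API-Version: volume 3.50
	// Note "volume" rather than "block-storage": cinder (as of Dalmatian)
	// does not yet accept the canonical type in this header.
}
```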
diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md
new file mode 100644
index 0000000000..6d48174bfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md
@@ -0,0 +1,64 @@
+# 1.7.0 (May 24, 2024)
+
+ENHANCEMENTS:
+
+- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91))
+- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133))
+
+INTERNAL:
+
+- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115))
+- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105))
+- Bump actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116))
+- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111))
+- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112))
+- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103))
+- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107))
+- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124))
+- update readme ([#104](https://github.com/hashicorp/go-version/pull/104))
+
+# 1.6.0 (June 28, 2022)
+
+FEATURES:
+
+- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100))
+
+# 1.5.0 (May 18, 2022)
+
+FEATURES:
+
+- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95))
+- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93))
+
+# 1.4.0 (January 5, 2022)
+
+FEATURES:
+
+ - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87))
+ - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88))
+
+# 1.3.0 (March 31, 2021)
+
+Please note that CHANGELOG.md does not exist in the source code prior to this release.
+
+FEATURES:
+ - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85))
+
+# 1.2.1 (June 17, 2020)
+
+BUG FIXES:
+ - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73))
+
+# 1.2.0 (April 23, 2019)
+
+FEATURES:
+ - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53))
+
+# 1.1.0 (Jan 07, 2019)
+
+FEATURES:
+ - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45))
+
+# 1.0.0 (August 24, 2018)
+
+Initial release.
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
new file mode 100644
index 0000000000..1409d6ab92
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -0,0 +1,356 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
new file mode 100644
index 0000000000..4b7806cd96
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -0,0 +1,66 @@
+# Versioning Library for Go
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version)
+
+go-version is a library for parsing versions and version constraints,
+and verifying versions against a set of constraints. go-version
+can sort a collection of versions properly, handles prerelease/beta
+versions, can increment versions, etc.
+
+Versions used with go-version must follow [SemVer](http://semver.org/).
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-version
+```
+
+#### Version Parsing and Comparison
+
+```go
+v1, err := version.NewVersion("1.2")
+v2, err := version.NewVersion("1.5+metadata")
+
+// Comparison example. There is also GreaterThan, Equal, and just
+// a simple Compare that returns an int allowing easy >=, <=, etc.
+if v1.LessThan(v2) {
+ fmt.Printf("%s is less than %s", v1, v2)
+}
+```
+
+#### Version Constraints
+
+```go
+v1, err := version.NewVersion("1.2")
+
+// Constraints example.
+constraints, err := version.NewConstraint(">= 1.0, < 1.4")
+if constraints.Check(v1) {
+ fmt.Printf("%s satisfies constraints %s", v1, constraints)
+}
+```
+
+#### Version Sorting
+
+```go
+versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
+versions := make([]*version.Version, len(versionsRaw))
+for i, raw := range versionsRaw {
+ v, _ := version.NewVersion(raw)
+ versions[i] = v
+}
+
+// After this, the versions are properly sorted
+sort.Sort(version.Collection(versions))
+```
+
+## Issues and Contributing
+
+If you find an issue with this library, please report an issue. If you'd
+like, we welcome any contributions. Fork this library and submit a pull
+request.
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
new file mode 100644
index 0000000000..29bdc4d2b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -0,0 +1,298 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package version
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// Constraint represents a single constraint for a version, such as
+// ">= 1.0".
+type Constraint struct {
+ f constraintFunc
+ op operator
+ check *Version
+ original string
+}
+
+func (c *Constraint) Equals(con *Constraint) bool {
+ return c.op == con.op && c.check.Equal(con.check)
+}
+
+// Constraints is a slice of constraints. We make a custom type so that
+// we can add methods to it.
+type Constraints []*Constraint
+
+type constraintFunc func(v, c *Version) bool
+
+var constraintOperators map[string]constraintOperation
+
+type constraintOperation struct {
+ op operator
+ f constraintFunc
+}
+
+var constraintRegexp *regexp.Regexp
+
+func init() {
+ constraintOperators = map[string]constraintOperation{
+ "": {op: equal, f: constraintEqual},
+ "=": {op: equal, f: constraintEqual},
+ "!=": {op: notEqual, f: constraintNotEqual},
+ ">": {op: greaterThan, f: constraintGreaterThan},
+ "<": {op: lessThan, f: constraintLessThan},
+ ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual},
+ "<=": {op: lessThanEqual, f: constraintLessThanEqual},
+ "~>": {op: pessimistic, f: constraintPessimistic},
+ }
+
+ ops := make([]string, 0, len(constraintOperators))
+ for k := range constraintOperators {
+ ops = append(ops, regexp.QuoteMeta(k))
+ }
+
+ constraintRegexp = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ strings.Join(ops, "|"),
+ VersionRegexpRaw))
+}
+
+// NewConstraint will parse one or more constraints from the given
+// constraint string. The string must be a comma-separated list of
+// constraints.
+func NewConstraint(v string) (Constraints, error) {
+ vs := strings.Split(v, ",")
+ result := make([]*Constraint, len(vs))
+ for i, single := range vs {
+ c, err := parseSingle(single)
+ if err != nil {
+ return nil, err
+ }
+
+ result[i] = c
+ }
+
+ return Constraints(result), nil
+}
+
+// MustConstraints is a helper that wraps a call to a function
+// returning (Constraints, error) and panics if error is non-nil.
+func MustConstraints(c Constraints, err error) Constraints {
+ if err != nil {
+ panic(err)
+ }
+
+ return c
+}
+
+// Check tests if a version satisfies all the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ for _, c := range cs {
+ if !c.Check(v) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Equals compares Constraints with other Constraints
+// for equality. This may not represent logical equivalence
+// of compared constraints.
+// e.g. even though '>0.1,>0.2' is logically equivalent
+// to '>0.2' it is *NOT* treated as equal.
+//
+// Missing operator is treated as equal to '=', whitespace is
+// ignored, and constraints are sorted before comparison.
+func (cs Constraints) Equals(c Constraints) bool {
+ if len(cs) != len(c) {
+ return false
+ }
+
+ // make copies to retain order of the original slices
+ left := make(Constraints, len(cs))
+ copy(left, cs)
+ sort.Stable(left)
+ right := make(Constraints, len(c))
+ copy(right, c)
+ sort.Stable(right)
+
+ // compare sorted slices
+ for i, con := range left {
+ if !con.Equals(right[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (cs Constraints) Len() int {
+ return len(cs)
+}
+
+func (cs Constraints) Less(i, j int) bool {
+ if cs[i].op < cs[j].op {
+ return true
+ }
+ if cs[i].op > cs[j].op {
+ return false
+ }
+
+ return cs[i].check.LessThan(cs[j].check)
+}
+
+func (cs Constraints) Swap(i, j int) {
+ cs[i], cs[j] = cs[j], cs[i]
+}
+
+// String returns the string format of the constraints.
+func (cs Constraints) String() string {
+ csStr := make([]string, len(cs))
+ for i, c := range cs {
+ csStr[i] = c.String()
+ }
+
+ return strings.Join(csStr, ",")
+}
+
+// Check tests if a constraint is validated by the given version.
+func (c *Constraint) Check(v *Version) bool {
+ return c.f(v, c.check)
+}
+
+// Prerelease returns true if the version underlying this constraint
+// contains a prerelease field.
+func (c *Constraint) Prerelease() bool {
+ return len(c.check.Prerelease()) > 0
+}
+
+func (c *Constraint) String() string {
+ return c.original
+}
+
+func parseSingle(v string) (*Constraint, error) {
+ matches := constraintRegexp.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed constraint: %s", v)
+ }
+
+ check, err := NewVersion(matches[2])
+ if err != nil {
+ return nil, err
+ }
+
+ cop := constraintOperators[matches[1]]
+
+ return &Constraint{
+ f: cop.f,
+ op: cop.op,
+ check: check,
+ original: v,
+ }, nil
+}
+
+func prereleaseCheck(v, c *Version) bool {
+ switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
+ case cPre && vPre:
+ // A constraint with a pre-release can only match a pre-release version
+ // with the same base segments.
+ return v.equalSegments(c)
+
+ case !cPre && vPre:
+ // A constraint without a pre-release can only match a version without a
+ // pre-release.
+ return false
+
+ case cPre && !vPre:
+ // OK, except with the pessimistic operator
+ case !cPre && !vPre:
+ // OK
+ }
+ return true
+}
+
+//-------------------------------------------------------------------
+// Constraint functions
+//-------------------------------------------------------------------
+
+type operator rune
+
+const (
+ equal operator = '='
+ notEqual operator = '≠'
+ greaterThan operator = '>'
+ lessThan operator = '<'
+ greaterThanEqual operator = '≥'
+ lessThanEqual operator = '≤'
+ pessimistic operator = '~'
+)
+
+func constraintEqual(v, c *Version) bool {
+ return v.Equal(c)
+}
+
+func constraintNotEqual(v, c *Version) bool {
+ return !v.Equal(c)
+}
+
+func constraintGreaterThan(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) == 1
+}
+
+func constraintLessThan(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) == -1
+}
+
+func constraintGreaterThanEqual(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) >= 0
+}
+
+func constraintLessThanEqual(v, c *Version) bool {
+ return prereleaseCheck(v, c) && v.Compare(c) <= 0
+}
+
+func constraintPessimistic(v, c *Version) bool {
+ // Using a pessimistic constraint with a pre-release restricts versions to pre-releases
+ if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
+ return false
+ }
+
+ // If the version being checked is naturally less than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if v.LessThan(c) {
+ return false
+ }
+ // We'll use this more than once, so grab the length now so it's a little cleaner
+ // to write the later checks
+ cs := len(c.segments)
+
+ // If the version being checked has less specificity than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if cs > len(v.segments) {
+ return false
+ }
+
+	// Check the segments in the constraint against those in the version. If the version
+	// being checked, at any point, does not have the same values in each index of the
+	// constraint's segments, then it cannot be valid against the constraint.
+ for i := 0; i < c.si-1; i++ {
+ if v.segments[i] != c.segments[i] {
+ return false
+ }
+ }
+
+	// Check the last part of the segment in the constraint. If the version segment at
+	// this index is less than the constraint's segment at this index, then it cannot
+	// be valid against the constraint.
+ if c.segments[cs-1] > v.segments[cs-1] {
+ return false
+ }
+
+ // If nothing has rejected the version by now, it's valid
+ return true
+}
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644
index 0000000000..7c683c2813
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -0,0 +1,441 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package version
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled regular expression used to test the validity of a version.
+var (
+ versionRegexp *regexp.Regexp
+ semverRegexp *regexp.Regexp
+)
+
+// The raw regular expression string used for testing the validity
+// of a version.
+const (
+ VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+ `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
+ `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
+ `?`
+
+ // SemverRegexpRaw requires a separator between version and prerelease
+ SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+ `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
+ `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
+ `?`
+)
+
+// Version represents a single version.
+type Version struct {
+ metadata string
+ pre string
+ segments []int64
+ si int
+ original string
+}
+
+func init() {
+ versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+ semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
+}
+
+// NewVersion parses the given version and returns a new
+// Version.
+func NewVersion(v string) (*Version, error) {
+ return newVersion(v, versionRegexp)
+}
+
+// NewSemver parses the given version and returns a new
+// Version that adheres strictly to SemVer specs
+// https://semver.org/
+func NewSemver(v string) (*Version, error) {
+ return newVersion(v, semverRegexp)
+}
+
+func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
+ matches := pattern.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed version: %s", v)
+ }
+ segmentsStr := strings.Split(matches[1], ".")
+ segments := make([]int64, len(segmentsStr))
+ for i, str := range segmentsStr {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing version: %s", err)
+ }
+
+ segments[i] = val
+ }
+
+	// Even though we could support more than three segments, if we
+	// got fewer than three, pad it with 0s. This is to cover the basic
+	// default use case of semver, which is MAJOR.MINOR.PATCH at the minimum.
+ for i := len(segments); i < 3; i++ {
+ segments = append(segments, 0)
+ }
+
+ pre := matches[7]
+ if pre == "" {
+ pre = matches[4]
+ }
+
+ return &Version{
+ metadata: matches[10],
+ pre: pre,
+ segments: segments,
+ si: len(segmentsStr),
+ original: v,
+ }, nil
+}
+
+// Must is a helper that wraps a call to a function returning (*Version, error)
+// and panics if error is non-nil.
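+//
+// Typical usage (illustrative):
+//
+//	v := version.Must(version.NewVersion("1.2.3"))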
+func Must(v *Version, err error) *Version {
+ if err != nil {
+ panic(err)
+ }
+
+ return v
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+//
+// If you want boolean results, use the LessThan, Equal,
+// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
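+//
+// For example (illustrative):
+//
+//	a := version.Must(version.NewVersion("1.2.0-beta"))
+//	b := version.Must(version.NewVersion("1.2.0"))
+//	a.Compare(b) // -1: a pre-release sorts before its release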
+func (v *Version) Compare(other *Version) int {
+ // A quick, efficient equality check
+ if v.String() == other.String() {
+ return 0
+ }
+
+ // If the segments are the same, we must compare on prerelease info
+ if v.equalSegments(other) {
+ preSelf := v.Prerelease()
+ preOther := other.Prerelease()
+ if preSelf == "" && preOther == "" {
+ return 0
+ }
+ if preSelf == "" {
+ return 1
+ }
+ if preOther == "" {
+ return -1
+ }
+
+ return comparePrereleases(preSelf, preOther)
+ }
+
+ segmentsSelf := v.Segments64()
+ segmentsOther := other.Segments64()
+	// Get the highest specificity (hS), or if they're equal, just use segmentsSelf length
+ lenSelf := len(segmentsSelf)
+ lenOther := len(segmentsOther)
+ hS := lenSelf
+ if lenSelf < lenOther {
+ hS = lenOther
+ }
+ // Compare the segments
+ // Because a constraint could have more/less specificity than the version it's
+ // checking, we need to account for a lopsided or jagged comparison
+ for i := 0; i < hS; i++ {
+ if i > lenSelf-1 {
+ // This means Self had the lower specificity
+ // Check to see if the remaining segments in Other are all zeros
+ if !allZero(segmentsOther[i:]) {
+ // if not, it means that Other has to be greater than Self
+ return -1
+ }
+ break
+ } else if i > lenOther-1 {
+ // this means Other had the lower specificity
+			// Check to see if the remaining segments in Self are all zeros.
+ if !allZero(segmentsSelf[i:]) {
+ // if not, it means that Self has to be greater than Other
+ return 1
+ }
+ break
+ }
+ lhs := segmentsSelf[i]
+ rhs := segmentsOther[i]
+ if lhs == rhs {
+ continue
+ } else if lhs < rhs {
+ return -1
+ }
+		// Otherwise, lhs was > rhs; they're not equal.
+ return 1
+ }
+
+ // if we got this far, they're equal
+ return 0
+}
+
+func (v *Version) equalSegments(other *Version) bool {
+ segmentsSelf := v.Segments64()
+ segmentsOther := other.Segments64()
+
+ if len(segmentsSelf) != len(segmentsOther) {
+ return false
+ }
+ for i, v := range segmentsSelf {
+ if v != segmentsOther[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func allZero(segs []int64) bool {
+ for _, s := range segs {
+ if s != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+ if preSelf == preOther {
+ return 0
+ }
+
+ var selfInt int64
+ selfNumeric := true
+ selfInt, err := strconv.ParseInt(preSelf, 10, 64)
+ if err != nil {
+ selfNumeric = false
+ }
+
+ var otherInt int64
+ otherNumeric := true
+ otherInt, err = strconv.ParseInt(preOther, 10, 64)
+ if err != nil {
+ otherNumeric = false
+ }
+
+ // if a part is empty, we use the other to decide
+ if preSelf == "" {
+ if otherNumeric {
+ return -1
+ }
+ return 1
+ }
+
+ if preOther == "" {
+ if selfNumeric {
+ return 1
+ }
+ return -1
+ }
+
+ if selfNumeric && !otherNumeric {
+ return -1
+ } else if !selfNumeric && otherNumeric {
+ return 1
+ } else if !selfNumeric && !otherNumeric && preSelf > preOther {
+ return 1
+ } else if selfInt > otherInt {
+ return 1
+ }
+
+ return -1
+}
+
+func comparePrereleases(v string, other string) int {
+ // the same pre release!
+ if v == other {
+ return 0
+ }
+
+	// split both pre-releases to analyze their parts
+ selfPreReleaseMeta := strings.Split(v, ".")
+ otherPreReleaseMeta := strings.Split(other, ".")
+
+ selfPreReleaseLen := len(selfPreReleaseMeta)
+ otherPreReleaseLen := len(otherPreReleaseMeta)
+
+ biggestLen := otherPreReleaseLen
+ if selfPreReleaseLen > otherPreReleaseLen {
+ biggestLen = selfPreReleaseLen
+ }
+
+	// loop over the parts to find the first difference
+ for i := 0; i < biggestLen; i = i + 1 {
+ partSelfPre := ""
+ if i < selfPreReleaseLen {
+ partSelfPre = selfPreReleaseMeta[i]
+ }
+
+ partOtherPre := ""
+ if i < otherPreReleaseLen {
+ partOtherPre = otherPreReleaseMeta[i]
+ }
+
+ compare := comparePart(partSelfPre, partOtherPre)
+		// if parts are equal, continue the loop
+ if compare != 0 {
+ return compare
+ }
+ }
+
+ return 0
+}
+
+// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
+// segments of the version, without prerelease or metadata.
+func (v *Version) Core() *Version {
+ segments := v.Segments64()
+ segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
+ return Must(NewVersion(segmentsOnly))
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+ if v == nil || o == nil {
+ return v == o
+ }
+
+ return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// GreaterThanOrEqual tests if this version is greater than or equal to another version.
+func (v *Version) GreaterThanOrEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// LessThanOrEqual tests if this version is less than or equal to another version.
+func (v *Version) LessThanOrEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+ return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+ return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+ segmentSlice := make([]int, len(v.segments))
+ for i, v := range v.segments {
+ segmentSlice[i] = int(v)
+ }
+ return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+ result := make([]int64, len(v.segments))
+ copy(result, v.segments)
+ return result
+}
+
+// String returns the full version string including pre-release
+// and metadata information.
+//
+// This value is rebuilt according to the parsed segments and other
+// information. Therefore, ambiguities in the version string such as
+// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
+// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
+// as shown in the parenthesized examples.
+func (v *Version) String() string {
+ var buf bytes.Buffer
+ fmtParts := make([]string, len(v.segments))
+ for i, s := range v.segments {
+		// strconv.FormatInt can't fail; the segments were pre-parsed as integers.
+ str := strconv.FormatInt(s, 10)
+ fmtParts[i] = str
+ }
+	buf.WriteString(strings.Join(fmtParts, "."))
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
+
+// Original returns the original parsed version as-is, including any
+// potential whitespace, `v` prefix, etc.
+func (v *Version) Original() string {
+ return v.original
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(b []byte) error {
+ temp, err := NewVersion(string(b))
+ if err != nil {
+ return err
+ }
+
+ *v = *temp
+
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler interface.
+func (v *Version) MarshalText() ([]byte, error) {
+ return []byte(v.String()), nil
+}
+
+// Scan implements the sql.Scanner interface.
+func (v *Version) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case string:
+ return v.UnmarshalText([]byte(src))
+ case nil:
+ return nil
+ default:
+ return fmt.Errorf("cannot scan %T as Version", src)
+ }
+}
+
+// Value implements the driver.Valuer interface.
+func (v *Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644
index 0000000000..83547fe13d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -0,0 +1,20 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package version
+
+// Collection is a type that implements the sort.Interface interface
+// so that versions can be sorted.
+type Collection []*Version
+
+func (v Collection) Len() int {
+ return len(v)
+}
+
+func (v Collection) Less(i, j int) bool {
+ return v[i].LessThan(v[j])
+}
+
+func (v Collection) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
diff --git a/vendor/github.com/knadh/koanf/maps/LICENSE b/vendor/github.com/knadh/koanf/maps/LICENSE
new file mode 100644
index 0000000000..c78ef52fb1
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/maps/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2019, Kailash Nadh. https://github.com/knadh
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/knadh/koanf/maps/maps.go b/vendor/github.com/knadh/koanf/maps/maps.go
new file mode 100644
index 0000000000..bdb9d62b80
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/maps/maps.go
@@ -0,0 +1,303 @@
+// Package maps provides reusable functions for manipulating nested
+// map[string]interface{} maps, which are common unmarshal products from
+// various serializers such as JSON, YAML, etc.
+package maps
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/mitchellh/copystructure"
+)
+
+// Flatten takes a map[string]interface{} and traverses it and flattens
+// nested children into keys delimited by delim.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
+//
+// eg: `{ "parent": { "child": 123 }}` becomes `{ "parent.child": 123 }`
+// In addition, it keeps track of and returns a map of the delimited keypaths with
+// a slice of key parts, e.g. { "parent.child": ["parent", "child"] }. This
+// parts list is used to remember the key path's original structure to
+// unflatten later.
+func Flatten(m map[string]interface{}, keys []string, delim string) (map[string]interface{}, map[string][]string) {
+ var (
+ out = make(map[string]interface{})
+ keyMap = make(map[string][]string)
+ )
+
+ flatten(m, keys, delim, out, keyMap)
+ return out, keyMap
+}
+
+func flatten(m map[string]interface{}, keys []string, delim string, out map[string]interface{}, keyMap map[string][]string) {
+ for key, val := range m {
+ // Copy the incoming key paths into a fresh list
+ // and append the current key in the iteration.
+ kp := make([]string, 0, len(keys)+1)
+ kp = append(kp, keys...)
+ kp = append(kp, key)
+
+ switch cur := val.(type) {
+ case map[string]interface{}:
+ // Empty map.
+ if len(cur) == 0 {
+ newKey := strings.Join(kp, delim)
+ out[newKey] = val
+ keyMap[newKey] = kp
+ continue
+ }
+
+ // It's a nested map. Flatten it recursively.
+ flatten(cur, kp, delim, out, keyMap)
+ default:
+ newKey := strings.Join(kp, delim)
+ out[newKey] = val
+ keyMap[newKey] = kp
+ }
+ }
+}
+
+// Unflatten takes a flattened key:value map (non-nested with delimited keys)
+// and returns a nested map where the keys are split into hierarchies by the given
+// delimiter. For instance, `parent.child.key: 1` becomes `{parent: {child: {key: 1}}}`.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
+func Unflatten(m map[string]interface{}, delim string) map[string]interface{} {
+ out := make(map[string]interface{})
+
+ // Iterate through the flat conf map.
+ for k, v := range m {
+ var (
+ keys []string
+ next = out
+ )
+
+ if delim != "" {
+ keys = strings.Split(k, delim)
+ } else {
+ keys = []string{k}
+ }
+
+		// Iterate through the key parts, e.g. parent.child.key
+		// becomes ["parent", "child", "key"].
+ for _, k := range keys[:len(keys)-1] {
+ sub, ok := next[k]
+ if !ok {
+ // If the key does not exist in the map, create it.
+ sub = make(map[string]interface{})
+ next[k] = sub
+ }
+ if n, ok := sub.(map[string]interface{}); ok {
+ next = n
+ }
+ }
+
+ // Assign the value.
+ next[keys[len(keys)-1]] = v
+ }
+ return out
+}
+
+// Merge recursively merges map a into b (left to right), mutating
+// and expanding map b. Note that there's no copying involved, so
+// map b will retain references to map a.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
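+//
+// For example (illustrative), merging a = {"a": 1} into b = {"a": 2, "b": 3}
+// leaves b as {"a": 1, "b": 3}.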
+func Merge(a, b map[string]interface{}) {
+ for key, val := range a {
+ // Does the key exist in the target map?
+ // If no, add it and move on.
+ bVal, ok := b[key]
+ if !ok {
+ b[key] = val
+ continue
+ }
+
+ // If the incoming val is not a map, do a direct merge.
+ if _, ok := val.(map[string]interface{}); !ok {
+ b[key] = val
+ continue
+ }
+
+ // The source key and target keys are both maps. Merge them.
+ switch v := bVal.(type) {
+ case map[string]interface{}:
+ Merge(val.(map[string]interface{}), v)
+ default:
+ b[key] = val
+ }
+ }
+}
+
+// MergeStrict recursively merges map a into b (left to right), mutating
+// and expanding map b. Note that there's no copying involved, so
+// map b will retain references to map a.
+// If the same key in the two maps holds values of different types, the first such mismatch is returned as an error.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
+func MergeStrict(a, b map[string]interface{}) error {
+ return mergeStrict(a, b, "")
+}
+
+func mergeStrict(a, b map[string]interface{}, fullKey string) error {
+ for key, val := range a {
+ // Does the key exist in the target map?
+ // If no, add it and move on.
+ bVal, ok := b[key]
+ if !ok {
+ b[key] = val
+ continue
+ }
+
+ newFullKey := key
+ if fullKey != "" {
+ newFullKey = fmt.Sprintf("%v.%v", fullKey, key)
+ }
+
+ // If the incoming val is not a map, do a direct merge between the same types.
+ if _, ok := val.(map[string]interface{}); !ok {
+ if reflect.TypeOf(b[key]) == reflect.TypeOf(val) {
+ b[key] = val
+ } else {
+				return fmt.Errorf("incorrect types at key %v, type %T != %T", newFullKey, b[key], val)
+ }
+ continue
+ }
+
+ // The source key and target keys are both maps. Merge them.
+ switch v := bVal.(type) {
+ case map[string]interface{}:
+ if err := mergeStrict(val.(map[string]interface{}), v, newFullKey); err != nil {
+ return err
+ }
+ default:
+ b[key] = val
+ }
+ }
+ return nil
+}
+
+// Delete removes the entry present at a given path from the map. The path
+// is the key map slice, e.g. parent.child.key -> [parent child key].
+// Any empty nested map on the path is recursively deleted.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
+func Delete(mp map[string]interface{}, path []string) {
+ next, ok := mp[path[0]]
+ if ok {
+ if len(path) == 1 {
+ delete(mp, path[0])
+ return
+ }
+ switch nval := next.(type) {
+ case map[string]interface{}:
+ Delete(nval, path[1:])
+ // Delete map if it has no keys.
+ if len(nval) == 0 {
+ delete(mp, path[0])
+ }
+ }
+ }
+}
+
+// Search recursively searches a map for a given path. The path is
+// the key map slice, e.g. parent.child.key -> [parent child key].
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
+func Search(mp map[string]interface{}, path []string) interface{} {
+ next, ok := mp[path[0]]
+ if ok {
+ if len(path) == 1 {
+ return next
+ }
+ switch m := next.(type) {
+ case map[string]interface{}:
+ return Search(m, path[1:])
+ default:
+ return nil
+		}
+ }
+ return nil
+}
+
+// Copy returns a deep copy of a conf map.
+//
+// It's important to note that all nested maps should be
+// map[string]interface{} and not map[interface{}]interface{}.
+// Use IntfaceKeysToStrings() to convert if necessary.
+func Copy(mp map[string]interface{}) map[string]interface{} {
+ out, _ := copystructure.Copy(&mp)
+ if res, ok := out.(*map[string]interface{}); ok {
+ return *res
+ }
+ return map[string]interface{}{}
+}
+
+// IntfaceKeysToStrings recursively converts map[interface{}]interface{} to
+// map[string]interface{}. Some parsers, such as YAML's unmarshaler, return this.
+func IntfaceKeysToStrings(mp map[string]interface{}) {
+ for key, val := range mp {
+ switch cur := val.(type) {
+ case map[interface{}]interface{}:
+ x := make(map[string]interface{})
+ for k, v := range cur {
+ x[fmt.Sprintf("%v", k)] = v
+ }
+ mp[key] = x
+ IntfaceKeysToStrings(x)
+ case []interface{}:
+ for i, v := range cur {
+ switch sub := v.(type) {
+ case map[interface{}]interface{}:
+ x := make(map[string]interface{})
+ for k, v := range sub {
+ x[fmt.Sprintf("%v", k)] = v
+ }
+ cur[i] = x
+ IntfaceKeysToStrings(x)
+ case map[string]interface{}:
+ IntfaceKeysToStrings(sub)
+ }
+ }
+ case map[string]interface{}:
+ IntfaceKeysToStrings(cur)
+ }
+ }
+}
+
+// StringSliceToLookupMap takes a slice of strings and returns a lookup map
+// with the slice values as keys with true values.
+func StringSliceToLookupMap(s []string) map[string]bool {
+ mp := make(map[string]bool, len(s))
+ for _, v := range s {
+ mp[v] = true
+ }
+ return mp
+}
+
+// Int64SliceToLookupMap takes a slice of int64s and returns a lookup map
+// with the slice values as keys with true values.
+func Int64SliceToLookupMap(s []int64) map[int64]bool {
+ mp := make(map[int64]bool, len(s))
+ for _, v := range s {
+ mp[v] = true
+ }
+ return mp
+}
diff --git a/vendor/github.com/knadh/koanf/providers/confmap/LICENSE b/vendor/github.com/knadh/koanf/providers/confmap/LICENSE
new file mode 100644
index 0000000000..c78ef52fb1
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/providers/confmap/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2019, Kailash Nadh. https://github.com/knadh
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/knadh/koanf/providers/confmap/confmap.go b/vendor/github.com/knadh/koanf/providers/confmap/confmap.go
new file mode 100644
index 0000000000..b6415fc2c5
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/providers/confmap/confmap.go
@@ -0,0 +1,37 @@
+// Package confmap implements a koanf.Provider that takes nested
+// and flat map[string]interface{} config maps and provides them
+// to koanf.
+package confmap
+
+import (
+ "errors"
+
+ "github.com/knadh/koanf/maps"
+)
+
+// Confmap implements a raw map[string]interface{} provider.
+type Confmap struct {
+ mp map[string]interface{}
+}
+
+// Provider returns a confmap Provider that takes a flat or nested
+// map[string]interface{}. If a delim is provided, it indicates that the
+// keys are flat and the map needs to be unflattened by delim.
+func Provider(mp map[string]interface{}, delim string) *Confmap {
+ cp := maps.Copy(mp)
+ maps.IntfaceKeysToStrings(cp)
+ if delim != "" {
+ cp = maps.Unflatten(cp, delim)
+ }
+ return &Confmap{mp: cp}
+}
+
+// ReadBytes is not supported by the confmap provider.
+func (e *Confmap) ReadBytes() ([]byte, error) {
+ return nil, errors.New("confmap provider does not support this method")
+}
+
+// Read returns the loaded map[string]interface{}.
+func (e *Confmap) Read() (map[string]interface{}, error) {
+ return e.mp, nil
+}
diff --git a/vendor/github.com/knadh/koanf/v2/.gitignore b/vendor/github.com/knadh/koanf/v2/.gitignore
new file mode 100644
index 0000000000..3777c0be01
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/.gitignore
@@ -0,0 +1,4 @@
+.env
+
+# IDE
+.idea
diff --git a/vendor/github.com/knadh/koanf/v2/LICENSE b/vendor/github.com/knadh/koanf/v2/LICENSE
new file mode 100644
index 0000000000..c78ef52fb1
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2019, Kailash Nadh. https://github.com/knadh
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/knadh/koanf/v2/README.md b/vendor/github.com/knadh/koanf/v2/README.md
new file mode 100644
index 0000000000..b4947e556f
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/README.md
@@ -0,0 +1,714 @@
+
+**koanf** is a library for reading configuration from different sources in different formats in Go applications. It is a cleaner, lighter [alternative to spf13/viper](#alternative-to-viper) with better abstractions and extensibility and far fewer dependencies.
+
+koanf v2 has modules (Providers) for reading configuration from a variety of sources such as files, command line flags, environment variables, Vault, and S3, and for parsing (Parsers) formats such as JSON, YAML, TOML, and Hashicorp HCL. It is easy to plug in custom parsers and providers.
+
+All external dependencies in providers and parsers are detached from the core and can be installed separately as necessary.
+
+[Tests](https://github.com/knadh/koanf/actions/workflows/test.yml) [Go Reference](https://pkg.go.dev/github.com/knadh/koanf/v2)
+
+### Installation
+
+```shell
+# Install the core.
+go get -u github.com/knadh/koanf/v2
+
+# Install the necessary Provider(s).
+# Available: file, env, posflag, basicflag, confmap, rawbytes,
+# structs, fs, s3, appconfig/v2, consul/v2, etcd/v2, vault/v2, parameterstore/v2
+# eg: go get -u github.com/knadh/koanf/providers/s3
+# eg: go get -u github.com/knadh/koanf/providers/consul/v2
+
+go get -u github.com/knadh/koanf/providers/file
+
+
+# Install the necessary Parser(s).
+# Available: toml, toml/v2, json, yaml, dotenv, hcl, hjson, nestedtext
+# go get -u github.com/knadh/koanf/parsers/$parser
+
+go get -u github.com/knadh/koanf/parsers/toml
+```
+
+[See the list](#api) of all bundled Providers and Parsers.
+
+### Contents
+
+- [Concepts](#concepts)
+- [Reading config from files](#reading-config-from-files)
+- [Watching file for changes](#watching-file-for-changes)
+- [Reading from command line](#reading-from-command-line)
+- [Reading environment variables](#reading-environment-variables)
+- [Reading raw bytes](#reading-raw-bytes)
+- [Reading from maps and structs](#reading-from-nested-maps)
+- [Unmarshalling and marshalling](#unmarshalling-and-marshalling)
+- [Order of merge and key case sensitivity](#order-of-merge-and-key-case-sensitivity)
+- [Custom Providers and Parsers](#custom-providers-and-parsers)
+- [Custom merge strategies](#custom-merge-strategies)
+- [List of installable Providers and Parsers](#api)
+
+### Concepts
+
+- `koanf.Provider` is a generic interface that provides configuration, for example, from files, environment variables, HTTP sources, or anywhere. The configuration can either be raw bytes that a parser can parse, or it can be a nested `map[string]interface{}` that can be directly loaded.
+- `koanf.Parser` is a generic interface that takes raw bytes, parses, and returns a nested `map[string]interface{}`. For example, JSON and YAML parsers.
+- Once loaded into koanf, configuration values are queried by a delimited key path syntax. eg: `app.server.port`. Any delimiter can be chosen.
+- Configuration from multiple sources can be loaded and merged into a koanf instance, for example, load from a file first and override certain values with flags from the command line.
+
+With these two interface implementations, koanf can obtain configuration in any format from any source, parse it, and make it available to an application.
+
+### Reading config from files
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/parsers/yaml"
+ "github.com/knadh/koanf/providers/file"
+)
+
+// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character.
+var k = koanf.New(".")
+
+func main() {
+ // Load JSON config.
+ if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ // Load YAML config and merge into the previously loaded config (because we can).
+ k.Load(file.Provider("mock/mock.yml"), yaml.Parser())
+
+ fmt.Println("parent's name is = ", k.String("parent1.name"))
+ fmt.Println("parent's ID is = ", k.Int("parent1.id"))
+}
+
+```
+
+### Watching file for changes
+Some providers expose a `Watch()` method that makes the provider watch for changes
+in configuration and trigger a callback to reload the configuration.
+This is not goroutine safe if there are concurrent `*Get()` calls happening on the
+koanf object while it is doing a `Load()`. Such scenarios will need mutex locking.
+
+`file, appconfig, vault, consul` providers have a `Watch()` method.
+
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/parsers/yaml"
+ "github.com/knadh/koanf/providers/file"
+)
+
+// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character.
+var k = koanf.New(".")
+
+func main() {
+ // Load JSON config.
+ f := file.Provider("mock/mock.json")
+ if err := k.Load(f, json.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ // Load YAML config and merge into the previously loaded config (because we can).
+ k.Load(file.Provider("mock/mock.yml"), yaml.Parser())
+
+ fmt.Println("parent's name is = ", k.String("parent1.name"))
+ fmt.Println("parent's ID is = ", k.Int("parent1.id"))
+
+ // Watch the file and get a callback on change. The callback can do whatever,
+ // like re-load the configuration.
+ // File provider always returns a nil `event`.
+ f.Watch(func(event interface{}, err error) {
+ if err != nil {
+ log.Printf("watch error: %v", err)
+ return
+ }
+
+ // Throw away the old config and load a fresh copy.
+ log.Println("config changed. Reloading ...")
+ k = koanf.New(".")
+ k.Load(f, json.Parser())
+ k.Print()
+ })
+
+ // To stop a file watcher, call:
+ // f.Unwatch()
+
+ // Block forever (and manually make a change to mock/mock.json) to
+ // reload the config.
+ log.Println("waiting forever. Try making a change to mock/mock.json to live reload")
+ <-make(chan bool)
+}
+```
+
+
+### Reading from command line
+
+The following example shows the use of `posflag.Provider`, a wrapper over the [spf13/pflag](https://github.com/spf13/pflag) library, an advanced command line library. For Go's built-in `flag` package, use `basicflag.Provider`.
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/parsers/toml"
+
+ // TOML version 2 is available at:
+ // "github.com/knadh/koanf/parsers/toml/v2"
+
+ "github.com/knadh/koanf/providers/file"
+ "github.com/knadh/koanf/providers/posflag"
+ flag "github.com/spf13/pflag"
+)
+
+// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character.
+var k = koanf.New(".")
+
+func main() {
+ // Use the POSIX compliant pflag lib instead of Go's flag lib.
+ f := flag.NewFlagSet("config", flag.ContinueOnError)
+ f.Usage = func() {
+ fmt.Println(f.FlagUsages())
+ os.Exit(0)
+ }
+ // Path to one or more config files to load into koanf along with some config params.
+ f.StringSlice("conf", []string{"mock/mock.toml"}, "path to one or more .toml config files")
+ f.String("time", "2020-01-01", "a time string")
+ f.String("type", "xxx", "type of the app")
+ f.Parse(os.Args[1:])
+
+ // Load the config files provided in the commandline.
+ cFiles, _ := f.GetStringSlice("conf")
+ for _, c := range cFiles {
+ if err := k.Load(file.Provider(c), toml.Parser()); err != nil {
+ log.Fatalf("error loading file: %v", err)
+ }
+ }
+
+ // "time" and "type" may have been loaded from the config file, but
+ // they can still be overridden with the values from the command line.
+ // The bundled posflag.Provider takes a flagset from the spf13/pflag lib.
+ // Passing the Koanf instance to posflag helps it deal with default command
+ // line flag values that are not present in conf maps from previously loaded
+ // providers.
+ if err := k.Load(posflag.Provider(f, ".", k), nil); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ fmt.Println("time is = ", k.String("time"))
+}
+```
+
+### Reading environment variables
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/providers/env"
+ "github.com/knadh/koanf/providers/file"
+)
+
+// Global koanf instance. Use . as the key path delimiter. This can be / or anything.
+var k = koanf.New(".")
+
+func main() {
+ // Load JSON config.
+ if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ // Load environment variables and merge into the loaded config.
+ // "MYVAR" is the prefix to filter the env vars by.
+ // "." is the delimiter used to represent the key hierarchy in env vars.
+ // The (optional, or can be nil) function can be used to transform
+ // the env var names, for instance, to lowercase them.
+ //
+ // For example, env vars: MYVAR_TYPE and MYVAR_PARENT1_CHILD1_NAME
+ // will be merged into the "type" and the nested "parent1.child1.name"
+ // keys in the config file here as we lowercase the key,
+ // replace `_` with `.` and strip the MYVAR_ prefix so that
+ // only "parent1.child1.name" remains.
+ k.Load(env.Provider("MYVAR_", ".", func(s string) string {
+ return strings.Replace(strings.ToLower(
+ strings.TrimPrefix(s, "MYVAR_")), "_", ".", -1)
+ }), nil)
+
+ fmt.Println("name is = ", k.String("parent1.child1.name"))
+}
+```
+
+You can also use the `env.ProviderWithValue` with a callback that supports mutating both the key and value
+to return types other than a string. For example, here, env values separated by spaces are
+returned as string slices or arrays. eg: `MYVAR_slice=a b c` becomes `slice: [a, b, c]`.
+
+```go
+ k.Load(env.ProviderWithValue("MYVAR_", ".", func(s string, v string) (string, interface{}) {
+ // Strip out the MYVAR_ prefix and lowercase and get the key while also replacing
+	// the _ character with . in the key (koanf delimiter).
+ key := strings.Replace(strings.ToLower(strings.TrimPrefix(s, "MYVAR_")), "_", ".", -1)
+
+ // If there is a space in the value, split the value into a slice by the space.
+ if strings.Contains(v, " ") {
+ return key, strings.Split(v, " ")
+ }
+
+ // Otherwise, return the plain string.
+ return key, v
+ }), nil)
+```
+
+### Reading from an S3 bucket
+
+```go
+// Load JSON config from s3.
+if err := k.Load(s3.Provider(s3.Config{
+ AccessKey: os.Getenv("AWS_S3_ACCESS_KEY"),
+ SecretKey: os.Getenv("AWS_S3_SECRET_KEY"),
+ Region: os.Getenv("AWS_S3_REGION"),
+ Bucket: os.Getenv("AWS_S3_BUCKET"),
+ ObjectKey: "dir/config.json",
+}), json.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+}
+```
+
+### Reading raw bytes
+
+The bundled `rawbytes` Provider can be used to read arbitrary bytes from a source, like a database or an HTTP call.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/providers/rawbytes"
+)
+
+// Global koanf instance. Use . as the key path delimiter. This can be / or anything.
+var k = koanf.New(".")
+
+func main() {
+ b := []byte(`{"type": "rawbytes", "parent1": {"child1": {"type": "rawbytes"}}}`)
+ k.Load(rawbytes.Provider(b), json.Parser())
+ fmt.Println("type is = ", k.String("parent1.child1.type"))
+}
+```
+
+### Unmarshalling and marshalling
+`Parser`s can be used to unmarshal and scan the values in a Koanf instance into a struct based on the field tags, and to marshal a Koanf instance back into serialized bytes, for example to JSON or YAML files.
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/providers/file"
+)
+
+// Global koanf instance. Use . as the key path delimiter. This can be / or anything.
+var (
+ k = koanf.New(".")
+ parser = json.Parser()
+)
+
+func main() {
+ // Load JSON config.
+ if err := k.Load(file.Provider("mock/mock.json"), parser); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ // Structure to unmarshal nested conf to.
+ type childStruct struct {
+ Name string `koanf:"name"`
+ Type string `koanf:"type"`
+ Empty map[string]string `koanf:"empty"`
+ GrandChild struct {
+ Ids []int `koanf:"ids"`
+ On bool `koanf:"on"`
+ } `koanf:"grandchild1"`
+ }
+
+ var out childStruct
+
+ // Quick unmarshal.
+ k.Unmarshal("parent1.child1", &out)
+ fmt.Println(out)
+
+ // Unmarshal with advanced config.
+ out = childStruct{}
+ k.UnmarshalWithConf("parent1.child1", &out, koanf.UnmarshalConf{Tag: "koanf"})
+ fmt.Println(out)
+
+ // Marshal the instance back to JSON.
+ // The parser instance can be anything, eg: json.Parser(), yaml.Parser() etc.
+ b, _ := k.Marshal(parser)
+ fmt.Println(string(b))
+}
+```
+
+### Unmarshalling with flat paths
+
+Sometimes it is necessary to unmarshal an assortment of keys from various nested structures into a flat target structure. This is possible with the `UnmarshalConf.FlatPaths` flag.
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/providers/file"
+)
+
+// Global koanf instance. Use . as the key path delimiter. This can be / or anything.
+var k = koanf.New(".")
+
+func main() {
+ // Load JSON config.
+ if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ type rootFlat struct {
+ Type string `koanf:"type"`
+ Empty map[string]string `koanf:"empty"`
+ Parent1Name string `koanf:"parent1.name"`
+ Parent1ID int `koanf:"parent1.id"`
+ Parent1Child1Name string `koanf:"parent1.child1.name"`
+ Parent1Child1Type string `koanf:"parent1.child1.type"`
+ Parent1Child1Empty map[string]string `koanf:"parent1.child1.empty"`
+ Parent1Child1Grandchild1IDs []int `koanf:"parent1.child1.grandchild1.ids"`
+ Parent1Child1Grandchild1On bool `koanf:"parent1.child1.grandchild1.on"`
+ }
+
+ // Unmarshal the whole root with FlatPaths: True.
+ var o1 rootFlat
+ k.UnmarshalWithConf("", &o1, koanf.UnmarshalConf{Tag: "koanf", FlatPaths: true})
+ fmt.Println(o1)
+
+ // Unmarshal a child structure of "parent1".
+ type subFlat struct {
+ Name string `koanf:"name"`
+ ID int `koanf:"id"`
+ Child1Name string `koanf:"child1.name"`
+ Child1Type string `koanf:"child1.type"`
+ Child1Empty map[string]string `koanf:"child1.empty"`
+ Child1Grandchild1IDs []int `koanf:"child1.grandchild1.ids"`
+ Child1Grandchild1On bool `koanf:"child1.grandchild1.on"`
+ }
+
+ var o2 subFlat
+ k.UnmarshalWithConf("parent1", &o2, koanf.UnmarshalConf{Tag: "koanf", FlatPaths: true})
+ fmt.Println(o2)
+}
+```
+
+#### Reading from nested maps
+
+The bundled `confmap` provider takes a `map[string]interface{}` that can be loaded into a koanf instance.
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/providers/confmap"
+ "github.com/knadh/koanf/providers/file"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/parsers/yaml"
+)
+
+// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character.
+var k = koanf.New(".")
+
+func main() {
+ // Load default values using the confmap provider.
+ // We provide a flat map with the "." delimiter.
+ // A nested map can be loaded by setting the delimiter to an empty string "".
+ k.Load(confmap.Provider(map[string]interface{}{
+ "parent1.name": "Default Name",
+ "parent3.name": "New name here",
+ }, "."), nil)
+
+ // Load JSON config on top of the default values.
+ if err := k.Load(file.Provider("mock/mock.json"), json.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ // Load YAML config and merge into the previously loaded config (because we can).
+ k.Load(file.Provider("mock/mock.yml"), yaml.Parser())
+
+ fmt.Println("parent's name is = ", k.String("parent1.name"))
+ fmt.Println("parent's ID is = ", k.Int("parent1.id"))
+}
+```
+
+#### Reading from struct
+
+The bundled `structs` provider can be used to read data from a struct to load into a koanf instance.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/providers/structs"
+)
+
+// Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character.
+var k = koanf.New(".")
+
+type parentStruct struct {
+ Name string `koanf:"name"`
+ ID int `koanf:"id"`
+ Child1 childStruct `koanf:"child1"`
+}
+type childStruct struct {
+ Name string `koanf:"name"`
+ Type string `koanf:"type"`
+ Empty map[string]string `koanf:"empty"`
+ Grandchild1 grandchildStruct `koanf:"grandchild1"`
+}
+type grandchildStruct struct {
+ Ids []int `koanf:"ids"`
+ On bool `koanf:"on"`
+}
+type sampleStruct struct {
+ Type string `koanf:"type"`
+ Empty map[string]string `koanf:"empty"`
+ Parent1 parentStruct `koanf:"parent1"`
+}
+
+func main() {
+ // Load default values using the structs provider.
+ // We provide a struct along with the struct tag `koanf` to the
+ // provider.
+ k.Load(structs.Provider(sampleStruct{
+ Type: "json",
+ Empty: make(map[string]string),
+ Parent1: parentStruct{
+ Name: "parent1",
+ ID: 1234,
+ Child1: childStruct{
+ Name: "child1",
+ Type: "json",
+ Empty: make(map[string]string),
+ Grandchild1: grandchildStruct{
+ Ids: []int{1, 2, 3},
+ On: true,
+ },
+ },
+ },
+ }, "koanf"), nil)
+
+ fmt.Printf("name is = `%s`\n", k.String("parent1.child1.name"))
+}
+```
+### Merge behavior
+#### Default behavior
+The default behavior when you create a Koanf instance with `koanf.New(delim)` is that the most
+recently loaded configuration is merged into the previously loaded one.
+
+For example:
+`first.yml`
+```yaml
+key: [1,2,3]
+```
+`second.yml`
+```yaml
+key: 'string'
+```
+When `second.yml` is loaded, it overrides both the value and the type of `key` from `first.yml`.
+
+If this behavior is not desired, you can merge 'strictly'. In the same scenario, `Load` will return an error.
+
+```go
+package main
+
+import (
+ "errors"
+ "log"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/maps"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/parsers/yaml"
+ "github.com/knadh/koanf/providers/file"
+)
+
+var conf = koanf.Conf{
+ Delim: ".",
+ StrictMerge: true,
+}
+var k = koanf.NewWithConf(conf)
+
+func main() {
+ yamlPath := "mock/mock.yml"
+ if err := k.Load(file.Provider(yamlPath), yaml.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ jsonPath := "mock/mock.json"
+ if err := k.Load(file.Provider(jsonPath), json.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+}
+```
+**Note:** When merging different formats, each parser can treat its types differently,
+meaning that even if you load the same logical values, the merge may fail with `StrictMerge: true`.
+
+For example: merging JSON and YAML will most likely fail because JSON treats integers as float64 and YAML treats them as integers.
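+
+A minimal sketch of that failure mode using the bundled `rawbytes` provider (the inline config strings are invented for illustration; imports are elided as in the S3 example above):
+
+```go
+k := koanf.NewWithConf(koanf.Conf{Delim: ".", StrictMerge: true})
+
+// YAML unmarshals 1 as an int.
+k.Load(rawbytes.Provider([]byte("key: 1")), yaml.Parser())
+
+// JSON unmarshals 1 as a float64, so this strict merge should fail.
+if err := k.Load(rawbytes.Provider([]byte(`{"key": 1}`)), json.Parser()); err != nil {
+	log.Println("strict merge failed:", err)
+}
+```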
+
+### Order of merge and key case sensitivity
+
+- Config keys are case-sensitive in koanf. For example, `app.server.port` and `APP.SERVER.port` are not the same.
+- koanf does not impose any ordering on loading config from various providers. Every successive `Load()` or `Merge()` merges new config into the existing config. That is, it is possible to load environment variables first, then files on top of it, and then command line variables on top of it, or any such order.
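+
+As an illustration of this free-form ordering, the sketch below layers env vars first, a file on top, and command line flags last (the file path, env prefix, and flagset `f` are assumed from the earlier examples):
+
+```go
+// Lowest precedence first: env vars, then a config file, then flags.
+k.Load(env.Provider("MYVAR_", ".", nil), nil)
+k.Load(file.Provider("mock/mock.json"), json.Parser())
+k.Load(posflag.Provider(f, ".", k), nil)
+```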
+
+### Custom Providers and Parsers
+
+A Provider returns a nested `map[string]interface{}` config that can be loaded directly into koanf with `koanf.Load()`, or it can return raw bytes that can be parsed with a Parser (again, loaded using `koanf.Load()`). Writing Providers and Parsers is easy. See the bundled implementations in the [providers](https://github.com/knadh/koanf/tree/master/providers) and [parsers](https://github.com/knadh/koanf/tree/master/parsers) directories.
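+
+To get a feel for the Provider side, here is a minimal sketch of a custom provider that serves a fixed nested map; the `staticProvider` type is invented for this example and mirrors the two methods the bundled `confmap` provider implements:
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/knadh/koanf/v2"
+)
+
+// staticProvider serves a fixed, nested conf map.
+type staticProvider struct{}
+
+// ReadBytes is unsupported; this provider returns a ready-made map instead.
+func (staticProvider) ReadBytes() ([]byte, error) {
+	return nil, errors.New("staticProvider does not support raw bytes")
+}
+
+// Read returns the nested conf map to be loaded into koanf.
+func (staticProvider) Read() (map[string]interface{}, error) {
+	return map[string]interface{}{
+		"app": map[string]interface{}{"port": 8080},
+	}, nil
+}
+
+func main() {
+	k := koanf.New(".")
+	// No Parser is needed since Read() already returns a nested map.
+	if err := k.Load(staticProvider{}, nil); err != nil {
+		log.Fatalf("error loading config: %v", err)
+	}
+	fmt.Println("port is = ", k.Int("app.port"))
+}
+```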
+
+### Custom merge strategies
+
+By default, when merging two config sources using `Load()`, koanf recursively merges keys of nested maps (`map[string]interface{}`),
+while static values are overwritten (slices, strings, etc). This behaviour can be changed by providing a custom merge function with the `WithMergeFunc` option.
+
+```go
+package main
+
+import (
+ "errors"
+ "log"
+
+ "github.com/knadh/koanf/v2"
+ "github.com/knadh/koanf/maps"
+ "github.com/knadh/koanf/parsers/json"
+ "github.com/knadh/koanf/parsers/yaml"
+ "github.com/knadh/koanf/providers/file"
+)
+
+var conf = koanf.Conf{
+ Delim: ".",
+ StrictMerge: true,
+}
+var k = koanf.NewWithConf(conf)
+
+func main() {
+ yamlPath := "mock/mock.yml"
+ if err := k.Load(file.Provider(yamlPath), yaml.Parser()); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+
+ jsonPath := "mock/mock.json"
+ if err := k.Load(file.Provider(jsonPath), json.Parser(), koanf.WithMergeFunc(func(src, dest map[string]interface{}) error {
+		// Your custom logic, copying values from src into dest.
+ return nil
+ })); err != nil {
+ log.Fatalf("error loading config: %v", err)
+ }
+}
+```
+
+## API
+
+See the full API documentation of all available methods at https://pkg.go.dev/github.com/knadh/koanf/v2#section-documentation
+
+### Bundled Providers
+
+Install with `go get -u github.com/knadh/koanf/providers/$provider`
+
+| Package | Provider | Description |
+| ------------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| file | `file.Provider(filepath string)` | Reads a file and returns the raw bytes to be parsed. |
+| fs | `fs.Provider(f fs.FS, filepath string)` | (**Experimental**) Reads a file from fs.FS and returns the raw bytes to be parsed. The provider requires `go v1.16` or higher. |
+| basicflag           | `basicflag.Provider(f *flag.FlagSet, delim string)`            | Takes a stdlib `flag.FlagSet`. |
+| posflag | `posflag.Provider(f *pflag.FlagSet, delim string)` | Takes an `spf13/pflag.FlagSet` (advanced POSIX compatible flags with multiple types) and provides a nested config map based on delim. |
+| env | `env.Provider(prefix, delim string, f func(s string) string)` | Takes an optional prefix to filter env variables by, an optional function that takes and returns a string to transform env variables, and returns a nested config map based on delim. |
+| confmap | `confmap.Provider(mp map[string]interface{}, delim string)` | Takes a premade `map[string]interface{}` conf map. If delim is provided, the keys are assumed to be flattened, thus unflattened using delim. |
+| structs | `structs.Provider(s interface{}, tag string)` | Takes a struct and struct tag. |
+| s3                  | `s3.Provider(s3.S3Config{})`                                   | Takes an S3 config struct. |
+| rawbytes | `rawbytes.Provider(b []byte)` | Takes a raw `[]byte` slice to be parsed with a koanf.Parser |
+| vault/v2 | `vault.Provider(vault.Config{})` | Hashicorp Vault provider |
+| appconfig/v2 | `vault.AppConfig(appconfig.Config{})` | AWS AppConfig provider |
+| etcd/v2 | `etcd.Provider(etcd.Config{})` | CNCF etcd provider |
+| consul/v2 | `consul.Provider(consul.Config{})` | Hashicorp Consul provider |
+| parameterstore/v2 | `parameterstore.Provider(parameterstore.Config{})` | AWS Systems Manager Parameter Store provider |
+
+
+### Bundled Parsers
+
+Install with `go get -u github.com/knadh/koanf/parsers/$parser`
+
+| Package | Parser | Description |
+| ------------ | -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| json | `json.Parser()` | Parses JSON bytes into a nested map |
+| yaml | `yaml.Parser()` | Parses YAML bytes into a nested map |
+| toml | `toml.Parser()` | Parses TOML bytes into a nested map |
+| toml/v2 | `toml.Parser()` | Parses TOML bytes into a nested map (using go-toml v2) |
+| dotenv | `dotenv.Parser()` | Parses DotEnv bytes into a flat map |
+| hcl | `hcl.Parser(flattenSlices bool)` | Parses Hashicorp HCL bytes into a nested map. `flattenSlices` is recommended to be set to true. [Read more](https://github.com/hashicorp/hcl/issues/162). |
+| nestedtext | `nestedtext.Parser()` | Parses NestedText bytes into a flat map |
+| hjson        | `hjson.Parser()`                 | Parses HJSON bytes into a nested map |
+
+
+### Third-party Providers
+| Package | Provider | Description |
+| ------------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| github.com/defensestation/koanf/providers/secretsmanager | `vault.SecretsMananger(secretsmanager.Config{}, f func(s string) string)` | AWS Secrets Manager provider, takes map or string as a value from store |
+| github.com/defensestation/koanf/providers/parameterstore | `vault.ParameterStore(parameterstore.Config{}, f func(s string) string)` | AWS ParameterStore provider, an optional function that takes and returns a string to transform env variables |
+
+
+### Alternative to viper
+
+koanf is a [lightweight](https://github.com/knadh/koanf/blob/master/go.mod) alternative to the popular [spf13/viper](https://github.com/spf13/viper). It was written as a result of multiple stumbling blocks encountered with some of viper's fundamental flaws.
+
+- viper breaks JSON, YAML, TOML, HCL language specs by [forcibly lowercasing keys](https://github.com/spf13/viper/pull/635).
+- Significantly bloats [build sizes](https://github.com/knadh/koanf/wiki/Comparison-with-spf13-viper).
+- Tightly couples config parsing with file extensions.
+- Has poor semantics and abstractions. Commandline, env, file etc. and various parsers are hardcoded in the core. There are no primitives that can be extended.
+- Pulls a large number of [third party dependencies](https://github.com/spf13/viper/issues/707) into the core package. For instance, even if you do not use YAML or flags, the dependencies are still pulled as a result of the coupling.
+- Imposes arbitrary ordering conventions (eg: flag -> env -> config etc.)
+- `Get()` returns references to slices and maps. Mutations made outside change the underlying values inside the conf map.
+- Does non-idiomatic things such as [throwing away O(1) on flat maps](https://github.com/spf13/viper/blob/3b4aca75714a37276c4b1883630bd98c02498b73/viper.go#L1524).
+- Viper treats keys that contain an empty map (eg: `my_key: {}`) as if they were not set (ie: `IsSet("my_key") == false`).
+- There are a large number of [open issues](https://github.com/spf13/viper/issues).
diff --git a/vendor/github.com/knadh/koanf/v2/getters.go b/vendor/github.com/knadh/koanf/v2/getters.go
new file mode 100644
index 0000000000..266230f747
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/getters.go
@@ -0,0 +1,649 @@
+package koanf
+
+import (
+ "fmt"
+ "time"
+)
+
+// Int64 returns the int64 value of a given key path or 0 if the path
+// does not exist or if the value is not a valid int64.
+func (ko *Koanf) Int64(path string) int64 {
+ if v := ko.Get(path); v != nil {
+ i, _ := toInt64(v)
+ return i
+ }
+ return 0
+}
+
+// MustInt64 returns the int64 value of a given key path or panics
+// if the value is not set or is set to the default value of 0.
+func (ko *Koanf) MustInt64(path string) int64 {
+ val := ko.Int64(path)
+ if val == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Int64s returns the []int64 slice value of a given key path or an
+// empty []int64 slice if the path does not exist or if the value
+// is not a valid int slice.
+func (ko *Koanf) Int64s(path string) []int64 {
+ o := ko.Get(path)
+ if o == nil {
+ return []int64{}
+ }
+
+ var out []int64
+ switch v := o.(type) {
+ case []int64:
+ return v
+ case []int:
+ out = make([]int64, 0, len(v))
+ for _, vi := range v {
+ i, err := toInt64(vi)
+
+ // On error, return as it's not a valid
+ // int slice.
+ if err != nil {
+ return []int64{}
+ }
+ out = append(out, i)
+ }
+ return out
+ case []interface{}:
+ out = make([]int64, 0, len(v))
+ for _, vi := range v {
+ i, err := toInt64(vi)
+
+ // On error, return as it's not a valid
+ // int slice.
+ if err != nil {
+ return []int64{}
+ }
+ out = append(out, i)
+ }
+ return out
+ }
+
+ return []int64{}
+}
+
+// MustInt64s returns the []int64 slice value of a given key path or panics
+// if the value is not set or is set to its default value.
+func (ko *Koanf) MustInt64s(path string) []int64 {
+ val := ko.Int64s(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Int64Map returns the map[string]int64 value of a given key path
+// or an empty map[string]int64 if the path does not exist or if the
+// value is not a valid int64 map.
+func (ko *Koanf) Int64Map(path string) map[string]int64 {
+ var (
+ out = map[string]int64{}
+ o = ko.Get(path)
+ )
+ if o == nil {
+ return out
+ }
+
+ mp, ok := o.(map[string]interface{})
+ if !ok {
+ return out
+ }
+
+ out = make(map[string]int64, len(mp))
+ for k, v := range mp {
+ switch i := v.(type) {
+ case int64:
+ out[k] = i
+ default:
+ // Attempt a conversion.
+ iv, err := toInt64(i)
+ if err != nil {
+ return map[string]int64{}
+ }
+ out[k] = iv
+ }
+ }
+ return out
+}
+
+// MustInt64Map returns the map[string]int64 value of a given key path
+// or panics if it isn't set or set to default value.
+func (ko *Koanf) MustInt64Map(path string) map[string]int64 {
+ val := ko.Int64Map(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Int returns the int value of a given key path or 0 if the path
+// does not exist or if the value is not a valid int.
+func (ko *Koanf) Int(path string) int {
+ return int(ko.Int64(path))
+}
+
+// MustInt returns the int value of a given key path or panics
+// if it isn't set or set to default value of 0.
+func (ko *Koanf) MustInt(path string) int {
+ val := ko.Int(path)
+ if val == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Ints returns the []int slice value of a given key path or an
+// empty []int slice if the path does not exist or if the value
+// is not a valid int slice.
+func (ko *Koanf) Ints(path string) []int {
+ o := ko.Get(path)
+ if o == nil {
+ return []int{}
+ }
+
+ var out []int
+ switch v := o.(type) {
+ case []int:
+ return v
+ case []int64:
+ out = make([]int, 0, len(v))
+ for _, vi := range v {
+ out = append(out, int(vi))
+ }
+ return out
+ case []interface{}:
+ out = make([]int, 0, len(v))
+ for _, vi := range v {
+ i, err := toInt64(vi)
+
+ // On error, return as it's not a valid
+ // int slice.
+ if err != nil {
+ return []int{}
+ }
+ out = append(out, int(i))
+ }
+ return out
+ }
+
+ return []int{}
+}
+
+// MustInts returns the []int slice value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustInts(path string) []int {
+ val := ko.Ints(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// IntMap returns the map[string]int value of a given key path
+// or an empty map[string]int if the path does not exist or if the
+// value is not a valid int map.
+func (ko *Koanf) IntMap(path string) map[string]int {
+ var (
+ mp = ko.Int64Map(path)
+ out = make(map[string]int, len(mp))
+ )
+ for k, v := range mp {
+ out[k] = int(v)
+ }
+ return out
+}
+
+// MustIntMap returns the map[string]int value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustIntMap(path string) map[string]int {
+ val := ko.IntMap(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Float64 returns the float64 value of a given key path or 0 if the path
+// does not exist or if the value is not a valid float64.
+func (ko *Koanf) Float64(path string) float64 {
+ if v := ko.Get(path); v != nil {
+ f, _ := toFloat64(v)
+ return f
+ }
+ return 0
+}
+
+// MustFloat64 returns the float64 value of a given key path or panics
+// if it isn't set or set to default value 0.
+func (ko *Koanf) MustFloat64(path string) float64 {
+ val := ko.Float64(path)
+ if val == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Float64s returns the []float64 slice value of a given key path or an
+// empty []float64 slice if the path does not exist or if the value
+// is not a valid float64 slice.
+func (ko *Koanf) Float64s(path string) []float64 {
+ o := ko.Get(path)
+ if o == nil {
+ return []float64{}
+ }
+
+ var out []float64
+ switch v := o.(type) {
+ case []float64:
+ return v
+ case []interface{}:
+ out = make([]float64, 0, len(v))
+ for _, vi := range v {
+ i, err := toFloat64(vi)
+
+ // On error, return as it's not a valid
+ // float64 slice.
+ if err != nil {
+ return []float64{}
+ }
+ out = append(out, i)
+ }
+ return out
+ }
+
+ return []float64{}
+}
+
+// MustFloat64s returns the []float64 slice value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustFloat64s(path string) []float64 {
+ val := ko.Float64s(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Float64Map returns the map[string]float64 value of a given key path
+// or an empty map[string]float64 if the path does not exist or if the
+// value is not a valid float64 map.
+func (ko *Koanf) Float64Map(path string) map[string]float64 {
+ var (
+ out = map[string]float64{}
+ o = ko.Get(path)
+ )
+ if o == nil {
+ return out
+ }
+
+ mp, ok := o.(map[string]interface{})
+ if !ok {
+ return out
+ }
+
+ out = make(map[string]float64, len(mp))
+ for k, v := range mp {
+ switch i := v.(type) {
+ case float64:
+ out[k] = i
+ default:
+ // Attempt a conversion.
+ iv, err := toFloat64(i)
+ if err != nil {
+ return map[string]float64{}
+ }
+ out[k] = iv
+ }
+ }
+ return out
+}
+
+// MustFloat64Map returns the map[string]float64 value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustFloat64Map(path string) map[string]float64 {
+ val := ko.Float64Map(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Duration returns the time.Duration value of a given key path, assuming
+// the key holds a numeric value (nanoseconds) or a string that
+// time.ParseDuration can parse.
+func (ko *Koanf) Duration(path string) time.Duration {
+ // Look for a numeric (nanosecond) value first.
+ if v := ko.Int64(path); v != 0 {
+ return time.Duration(v)
+ }
+
+ v, _ := time.ParseDuration(ko.String(path))
+ return v
+}
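+
+// For example (a sketch; the key name is illustrative), a string duration and
+// a raw nanosecond count yield the same result:
+//
+//	_ = k.Set("timeout", "1m30s")
+//	d := k.Duration("timeout") // 90s
+//	_ = k.Set("timeout", int64(90000000000))
+//	d = k.Duration("timeout") // 90s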
+
+// MustDuration returns the time.Duration value of a given key path or panics
+// if it isn't set or set to default value 0.
+func (ko *Koanf) MustDuration(path string) time.Duration {
+ val := ko.Duration(path)
+ if val == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Time attempts to parse the value of a given key path and return time.Time
+// representation. If the value is numeric, it is treated as a UNIX timestamp
+// and if it's string, a parse is attempted with the given layout.
+func (ko *Koanf) Time(path, layout string) time.Time {
+ // Unix timestamp?
+ v := ko.Int64(path)
+ if v != 0 {
+ return time.Unix(v, 0)
+ }
+
+ // String representation.
+ s := ko.String(path)
+ if s != "" {
+ t, _ := time.Parse(layout, s)
+ return t
+ }
+
+ return time.Time{}
+}
+
+// MustTime attempts to parse the value of a given key path and return time.Time
+// representation. If the value is numeric, it is treated as a UNIX timestamp
+// and if it's string, a parse is attempted with the given layout. It panics if
+// the parsed time is zero.
+func (ko *Koanf) MustTime(path, layout string) time.Time {
+ val := ko.Time(path, layout)
+ if val.IsZero() {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// String returns the string value of a given key path or "" if the path
+// does not exist or if the value is not a valid string.
+func (ko *Koanf) String(path string) string {
+ if v := ko.Get(path); v != nil {
+ if i, ok := v.(string); ok {
+ return i
+ }
+ return fmt.Sprintf("%v", v)
+ }
+ return ""
+}
+
+// MustString returns the string value of a given key path
+// or panics if it isn't set or set to default value "".
+func (ko *Koanf) MustString(path string) string {
+ val := ko.String(path)
+ if val == "" {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Strings returns the []string slice value of a given key path or an
+// empty []string slice if the path does not exist or if the value
+// is not a valid string slice.
+func (ko *Koanf) Strings(path string) []string {
+ o := ko.Get(path)
+ if o == nil {
+ return []string{}
+ }
+
+ var out []string
+ switch v := o.(type) {
+ case []interface{}:
+ out = make([]string, 0, len(v))
+ for _, u := range v {
+ if s, ok := u.(string); ok {
+ out = append(out, s)
+ } else {
+ out = append(out, fmt.Sprintf("%v", u))
+ }
+ }
+ return out
+ case []string:
+ out := make([]string, len(v))
+ copy(out, v)
+ return out
+ }
+
+ return []string{}
+}
+
+// MustStrings returns the []string slice value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustStrings(path string) []string {
+ val := ko.Strings(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// StringMap returns the map[string]string value of a given key path
+// or an empty map[string]string if the path does not exist or if the
+// value is not a valid string map.
+func (ko *Koanf) StringMap(path string) map[string]string {
+ var (
+ out = map[string]string{}
+ o = ko.Get(path)
+ )
+ if o == nil {
+ return out
+ }
+
+ switch mp := o.(type) {
+ case map[string]string:
+ out = make(map[string]string, len(mp))
+ for k, v := range mp {
+ out[k] = v
+ }
+ case map[string]interface{}:
+ out = make(map[string]string, len(mp))
+ for k, v := range mp {
+ switch s := v.(type) {
+ case string:
+ out[k] = s
+ default:
+ // There's a non-string type. Return.
+ return map[string]string{}
+ }
+ }
+ }
+
+ return out
+}
+
+// MustStringMap returns the map[string]string value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustStringMap(path string) map[string]string {
+ val := ko.StringMap(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// StringsMap returns the map[string][]string value of a given key path
+// or an empty map[string][]string if the path does not exist or if the
+// value is not a valid strings map.
+func (ko *Koanf) StringsMap(path string) map[string][]string {
+ var (
+ out = map[string][]string{}
+ o = ko.Get(path)
+ )
+ if o == nil {
+ return out
+ }
+
+ switch mp := o.(type) {
+ case map[string][]string:
+ out = make(map[string][]string, len(mp))
+ for k, v := range mp {
+ out[k] = append(out[k], v...)
+ }
+ case map[string][]interface{}:
+ out = make(map[string][]string, len(mp))
+ for k, v := range mp {
+ for _, v := range v {
+ switch sv := v.(type) {
+ case string:
+ out[k] = append(out[k], sv)
+ default:
+ return map[string][]string{}
+ }
+ }
+ }
+ case map[string]interface{}:
+ out = make(map[string][]string, len(mp))
+ for k, v := range mp {
+ switch s := v.(type) {
+ case []string:
+ out[k] = append(out[k], s...)
+ case []interface{}:
+ for _, v := range s {
+ switch sv := v.(type) {
+ case string:
+ out[k] = append(out[k], sv)
+ default:
+ return map[string][]string{}
+ }
+ }
+ default:
+ // There's a non-[]interface{} type. Return.
+ return map[string][]string{}
+ }
+ }
+ }
+
+ return out
+}
+
+// MustStringsMap returns the map[string][]string value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustStringsMap(path string) map[string][]string {
+ val := ko.StringsMap(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Bytes returns the []byte value of a given key path or an empty
+// []byte slice if the path does not exist or if the value is not a valid string.
+func (ko *Koanf) Bytes(path string) []byte {
+ return []byte(ko.String(path))
+}
+
+// MustBytes returns the []byte value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustBytes(path string) []byte {
+ val := ko.Bytes(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// Bool returns the bool value of a given key path or false if the path
+// does not exist or if the value is not a valid bool representation.
+// Accepted string representations of bool are the ones supported by strconv.ParseBool.
+func (ko *Koanf) Bool(path string) bool {
+ if v := ko.Get(path); v != nil {
+ b, _ := toBool(v)
+ return b
+ }
+ return false
+}
+
+// Bools returns the []bool slice value of a given key path or an
+// empty []bool slice if the path does not exist or if the value
+// is not a valid bool slice.
+func (ko *Koanf) Bools(path string) []bool {
+ o := ko.Get(path)
+ if o == nil {
+ return []bool{}
+ }
+
+ var out []bool
+ switch v := o.(type) {
+ case []interface{}:
+ out = make([]bool, 0, len(v))
+ for _, u := range v {
+ b, err := toBool(u)
+ if err != nil {
+ return nil
+ }
+ out = append(out, b)
+ }
+ return out
+ case []bool:
+ return v
+ }
+ return nil
+}
+
+// MustBools returns the []bool value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustBools(path string) []bool {
+ val := ko.Bools(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
+
+// BoolMap returns the map[string]bool value of a given key path
+// or an empty map[string]bool if the path does not exist or if the
+// value is not a valid bool map.
+func (ko *Koanf) BoolMap(path string) map[string]bool {
+ var (
+ out = map[string]bool{}
+ o = ko.Get(path)
+ )
+ if o == nil {
+ return out
+ }
+
+ mp, ok := o.(map[string]interface{})
+ if !ok {
+ return out
+ }
+ out = make(map[string]bool, len(mp))
+ for k, v := range mp {
+ switch i := v.(type) {
+ case bool:
+ out[k] = i
+ default:
+ // Attempt a conversion.
+ b, err := toBool(i)
+ if err != nil {
+ return map[string]bool{}
+ }
+ out[k] = b
+ }
+ }
+
+ return out
+}
+
+// MustBoolMap returns the map[string]bool value of a given key path or panics
+// if the value is not set or set to default value.
+func (ko *Koanf) MustBoolMap(path string) map[string]bool {
+ val := ko.BoolMap(path)
+ if len(val) == 0 {
+ panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+ }
+ return val
+}
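+
+// A short usage sketch of the getter pairs above (key names are illustrative):
+//
+//	// Int returns the zero value if "server.port" is missing or invalid...
+//	port := k.Int("server.port")
+//
+//	// ...while MustInt panics instead, which suits required keys.
+//	port = k.MustInt("server.port")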
diff --git a/vendor/github.com/knadh/koanf/v2/go.work b/vendor/github.com/knadh/koanf/v2/go.work
new file mode 100644
index 0000000000..b5337e02c9
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/go.work
@@ -0,0 +1,31 @@
+go 1.18
+
+use (
+ .
+ ./examples
+ ./maps
+ ./parsers/dotenv
+ ./parsers/hcl
+ ./parsers/hjson
+ ./parsers/json
+ ./parsers/kdl
+ ./parsers/nestedtext
+ ./parsers/toml
+ ./parsers/yaml
+ ./providers/appconfig
+ ./providers/basicflag
+ ./providers/confmap
+ ./providers/consul
+ ./providers/env
+ ./providers/etcd
+ ./providers/file
+ ./providers/fs
+ ./providers/nats
+ ./providers/parameterstore
+ ./providers/posflag
+ ./providers/rawbytes
+ ./providers/s3
+ ./providers/structs
+ ./providers/vault
+ ./tests
+)
diff --git a/vendor/github.com/knadh/koanf/v2/go.work.sum b/vendor/github.com/knadh/koanf/v2/go.work.sum
new file mode 100644
index 0000000000..eab7bcf845
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/go.work.sum
@@ -0,0 +1,154 @@
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/github.com/knadh/koanf/v2/interfaces.go b/vendor/github.com/knadh/koanf/v2/interfaces.go
new file mode 100644
index 0000000000..ba69a2443a
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/interfaces.go
@@ -0,0 +1,20 @@
+package koanf
+
+// Provider represents a configuration provider. Providers can
+// read configuration from a source (file, HTTP etc.)
+type Provider interface {
+ // ReadBytes returns the entire configuration as raw []byte to be parsed
+ // with a Parser.
+ ReadBytes() ([]byte, error)
+
+ // Read returns the parsed configuration as a nested map[string]interface{}.
+ // It is important to note that the string keys should not be flat delimited
+ // keys like `parent.child.key`, but nested like `{parent: {child: {key: 1}}}`.
+ Read() (map[string]interface{}, error)
+}
+
+// Parser represents a configuration format parser.
+type Parser interface {
+ Unmarshal([]byte) (map[string]interface{}, error)
+ Marshal(map[string]interface{}) ([]byte, error)
+}
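+
+// A minimal sketch of a custom Provider serving an in-memory nested map
+// (the type name is illustrative; maps.Copy is from github.com/knadh/koanf/maps):
+//
+//	type confProvider struct{ mp map[string]interface{} }
+//
+//	func (p confProvider) ReadBytes() ([]byte, error) {
+//		return nil, errors.New("confProvider does not support ReadBytes")
+//	}
+//
+//	func (p confProvider) Read() (map[string]interface{}, error) {
+//		return maps.Copy(p.mp), nil
+//	}
+//
+// Such a provider is loaded with a nil Parser: k.Load(confProvider{mp}, nil).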
diff --git a/vendor/github.com/knadh/koanf/v2/koanf.go b/vendor/github.com/knadh/koanf/v2/koanf.go
new file mode 100644
index 0000000000..bd06a2d7e5
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/koanf.go
@@ -0,0 +1,577 @@
+package koanf
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+
+ "github.com/knadh/koanf/maps"
+ "github.com/mitchellh/copystructure"
+ "github.com/go-viper/mapstructure/v2"
+)
+
+// Koanf is the configuration apparatus.
+type Koanf struct {
+ confMap map[string]interface{}
+ confMapFlat map[string]interface{}
+ keyMap KeyMap
+ conf Conf
+}
+
+// Conf is the Koanf configuration.
+type Conf struct {
+ // Delim is the delimiter to use
+ // when specifying config key paths, for instance a . for `parent.child.key`
+ // or a / for `parent/child/key`.
+ Delim string
+
+ // StrictMerge makes the merging behavior strict.
+ // When two loaded files share a key, the first loaded file defines the
+ // expected type; if the second file provides a value of a different
+ // type, loading fails with an error.
+ StrictMerge bool
+}
+
+// KeyMap represents a map of flattened delimited keys and the non-delimited
+// parts as their slices. For nested keys, the map holds all levels of path combinations.
+// For example, the nested structure `parent -> child -> key` will produce the map:
+// parent.child.key => [parent, child, key]
+// parent.child => [parent, child]
+// parent => [parent]
+type KeyMap map[string][]string
+
+// UnmarshalConf represents configuration options used by
+// Unmarshal() to unmarshal conf maps into arbitrary structs.
+type UnmarshalConf struct {
+ // Tag is the struct field tag to unmarshal.
+ // `koanf` is used if left empty.
+ Tag string
+
+ // If this is set to true, instead of unmarshalling nested structures
+ // based on the key path, keys are taken literally to unmarshal into
+ // a flat struct. For example:
+ // ```
+ // type MyStuff struct {
+ // Child1Name string `koanf:"parent1.child1.name"`
+ // Child2Name string `koanf:"parent2.child2.name"`
+ // Type string `koanf:"json"`
+ // }
+ // ```
+ FlatPaths bool
+ DecoderConfig *mapstructure.DecoderConfig
+}
+
+// New returns a new instance of Koanf. delim is the delimiter to use
+// when specifying config key paths, for instance a . for `parent.child.key`
+// or a / for `parent/child/key`.
+func New(delim string) *Koanf {
+ return NewWithConf(Conf{
+ Delim: delim,
+ StrictMerge: false,
+ })
+}
+
+// NewWithConf returns a new instance of Koanf based on the Conf.
+func NewWithConf(conf Conf) *Koanf {
+ return &Koanf{
+ confMap: make(map[string]interface{}),
+ confMapFlat: make(map[string]interface{}),
+ keyMap: make(KeyMap),
+ conf: conf,
+ }
+}
+
+// Load takes a Provider that either returns a parsed config map[string]interface{},
+// in which case pa (Parser) can be nil, or raw bytes that the given Parser
+// parses. Additionally, options can be passed which modify the load behavior,
+// such as a custom merge function.
+func (ko *Koanf) Load(p Provider, pa Parser, opts ...Option) error {
+ var (
+ mp map[string]interface{}
+ err error
+ )
+
+ if p == nil {
+ return fmt.Errorf("load received a nil provider")
+ }
+
+ // No Parser is given. Call the Provider's Read() method to get
+ // the config map.
+ if pa == nil {
+ mp, err = p.Read()
+ if err != nil {
+ return err
+ }
+ } else {
+ // There's a Parser. Get raw bytes from the Provider to parse.
+ b, err := p.ReadBytes()
+ if err != nil {
+ return err
+ }
+ mp, err = pa.Unmarshal(b)
+ if err != nil {
+ return err
+ }
+ }
+
+ return ko.merge(mp, newOptions(opts))
+}
+
+// Keys returns the slice of all flattened keys in the loaded configuration
+// sorted alphabetically.
+func (ko *Koanf) Keys() []string {
+ out := make([]string, 0, len(ko.confMapFlat))
+ for k := range ko.confMapFlat {
+ out = append(out, k)
+ }
+ sort.Strings(out)
+ return out
+}
+
+// KeyMap returns a map of flattened keys and the individual parts of the
+// key as slices. eg: "parent.child.key" => ["parent", "child", "key"].
+func (ko *Koanf) KeyMap() KeyMap {
+ out := make(KeyMap, len(ko.keyMap))
+ for key, parts := range ko.keyMap {
+ out[key] = make([]string, len(parts))
+ copy(out[key], parts)
+ }
+ return out
+}
+
+// All returns a map of all flattened key paths and their values.
+// Note that it returns a copy made with maps.Copy, which internally uses
+// json.Marshal and therefore converts numeric types to float64.
+func (ko *Koanf) All() map[string]interface{} {
+ return maps.Copy(ko.confMapFlat)
+}
+
+// Raw returns a copy of the full raw conf map.
+// Note that it returns a copy made with maps.Copy, which internally uses
+// json.Marshal and therefore converts numeric types to float64.
+func (ko *Koanf) Raw() map[string]interface{} {
+ return maps.Copy(ko.confMap)
+}
+
+// Sprint returns a key -> value string representation
+// of the config map with keys sorted alphabetically.
+func (ko *Koanf) Sprint() string {
+ b := bytes.Buffer{}
+ for _, k := range ko.Keys() {
+ b.WriteString(fmt.Sprintf("%s -> %v\n", k, ko.confMapFlat[k]))
+ }
+ return b.String()
+}
+
+// Print prints a key -> value string representation
+// of the config map with keys sorted alphabetically.
+func (ko *Koanf) Print() {
+ fmt.Print(ko.Sprint())
+}
+
+// Cut cuts the config map at a given key path into a sub map and
+// returns a new Koanf instance with the cut config map loaded.
+// For instance, if the loaded config has a path that looks like
+// parent.child.sub.a.b, `Cut("parent.child")` returns a new Koanf
+// instance with the config map `sub.a.b` where everything above
+// `parent.child` are cut out.
+func (ko *Koanf) Cut(path string) *Koanf {
+ out := make(map[string]interface{})
+
+ // Cut only makes sense if the requested key path is a map.
+ if v, ok := ko.Get(path).(map[string]interface{}); ok {
+ out = v
+ }
+
+ n := New(ko.conf.Delim)
+ _ = n.merge(out, new(options))
+ return n
+}
+
+// Copy returns a copy of the Koanf instance.
+func (ko *Koanf) Copy() *Koanf {
+ return ko.Cut("")
+}
+
+// Merge merges the config map of a given Koanf instance into
+// the current instance.
+func (ko *Koanf) Merge(in *Koanf) error {
+ return ko.merge(in.Raw(), new(options))
+}
+
+// MergeAt merges the config map of a given Koanf instance into
+// the current instance as a sub map, at the given key path.
+// If all or part of the key path is missing, it will be created.
+// If the key path is `""`, this is equivalent to Merge.
+func (ko *Koanf) MergeAt(in *Koanf, path string) error {
+ // No path. Merge the two config maps.
+ if path == "" {
+ return ko.Merge(in)
+ }
+
+ // Unflatten the config map with the given key path.
+ n := maps.Unflatten(map[string]interface{}{
+ path: in.Raw(),
+ }, ko.conf.Delim)
+
+ return ko.merge(n, new(options))
+}
+
+// Set sets the value at a specific key.
+func (ko *Koanf) Set(key string, val interface{}) error {
+ // Unflatten the config map with the given key path.
+ n := maps.Unflatten(map[string]interface{}{
+ key: val,
+ }, ko.conf.Delim)
+
+ return ko.merge(n, new(options))
+}
+
+// Marshal takes a Parser implementation and marshals the config map into bytes,
+// for example, to TOML or JSON bytes.
+func (ko *Koanf) Marshal(p Parser) ([]byte, error) {
+ return p.Marshal(ko.Raw())
+}
+
+// Unmarshal unmarshals a given key path into the given struct using
+// the mapstructure package. If no path is specified, the whole map is
+// unmarshalled. `koanf` is the struct field tag used to match field names.
+// To customize, use UnmarshalWithConf().
+func (ko *Koanf) Unmarshal(path string, o interface{}) error {
+ return ko.UnmarshalWithConf(path, o, UnmarshalConf{})
+}
+
+// UnmarshalWithConf is like Unmarshal but takes configuration params in UnmarshalConf.
+// See mapstructure's DecoderConfig for advanced customization
+// of the unmarshalling behaviour.
+func (ko *Koanf) UnmarshalWithConf(path string, o interface{}, c UnmarshalConf) error {
+ if c.DecoderConfig == nil {
+ c.DecoderConfig = &mapstructure.DecoderConfig{
+ DecodeHook: mapstructure.ComposeDecodeHookFunc(
+ mapstructure.StringToTimeDurationHookFunc(),
+ textUnmarshalerHookFunc()),
+ Metadata: nil,
+ Result: o,
+ WeaklyTypedInput: true,
+ }
+ }
+
+ if c.Tag == "" {
+ c.DecoderConfig.TagName = "koanf"
+ } else {
+ c.DecoderConfig.TagName = c.Tag
+ }
+
+ d, err := mapstructure.NewDecoder(c.DecoderConfig)
+ if err != nil {
+ return err
+ }
+
+ // Unmarshal using flat key paths.
+ mp := ko.Get(path)
+ if c.FlatPaths {
+ if f, ok := mp.(map[string]interface{}); ok {
+ fmp, _ := maps.Flatten(f, nil, ko.conf.Delim)
+ mp = fmp
+ }
+ }
+
+ return d.Decode(mp)
+}
+
+// Delete removes all nested values from a given path.
+// Clears all keys/values if no path is specified.
+// Every key on the path that is left empty is recursively deleted.
+func (ko *Koanf) Delete(path string) {
+ // No path. Erase the entire map.
+ if path == "" {
+ ko.confMap = make(map[string]interface{})
+ ko.confMapFlat = make(map[string]interface{})
+ ko.keyMap = make(KeyMap)
+ return
+ }
+
+ // Does the path exist?
+ p, ok := ko.keyMap[path]
+ if !ok {
+ return
+ }
+ maps.Delete(ko.confMap, p)
+
+ // Update the flattened version as well.
+ ko.confMapFlat, ko.keyMap = maps.Flatten(ko.confMap, nil, ko.conf.Delim)
+ ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim)
+}
+
+// Get returns the raw, uncast interface{} value of a given key path
+// in the config map. If the key path does not exist, nil is returned.
+func (ko *Koanf) Get(path string) interface{} {
+ // No path. Return the whole conf map.
+ if path == "" {
+ return ko.Raw()
+ }
+
+ // Does the path exist?
+ p, ok := ko.keyMap[path]
+ if !ok {
+ return nil
+ }
+ res := maps.Search(ko.confMap, p)
+
+ // Non-reference types are okay to return directly.
+ // Other types are copied with maps.Copy (backed by json.Marshal, which
+ // converts numeric types to float64) or with copystructure.Copy.
+
+ switch v := res.(type) {
+ case int, int8, int16, int32, int64, float32, float64, string, bool:
+ return v
+ case map[string]interface{}:
+ return maps.Copy(v)
+ }
+
+ out, _ := copystructure.Copy(&res)
+ if ptrOut, ok := out.(*interface{}); ok {
+ return *ptrOut
+ }
+ return out
+}
+
+// Slices returns a list of Koanf instances constructed out of a
+// []map[string]interface{} interface at the given path.
+func (ko *Koanf) Slices(path string) []*Koanf {
+ out := []*Koanf{}
+ if path == "" {
+ return out
+ }
+
+ // Does the path exist?
+ sl, ok := ko.Get(path).([]interface{})
+ if !ok {
+ return out
+ }
+
+ for _, s := range sl {
+ mp, ok := s.(map[string]interface{})
+ if !ok {
+ continue
+ }
+
+ k := New(ko.conf.Delim)
+ _ = k.merge(mp, new(options))
+ out = append(out, k)
+ }
+
+ return out
+}
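+
+// For example (a sketch), given the config `servers: [{host: a}, {host: b}]`,
+// ko.Slices("servers") returns two instances whose String("host") values are
+// "a" and "b" respectively.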
+
+// Exists returns true if the given key path exists in the conf map.
+func (ko *Koanf) Exists(path string) bool {
+ _, ok := ko.keyMap[path]
+ return ok
+}
+
+// MapKeys returns a sorted string list of keys in a map addressed by the
+// given path. If the path is not a map, an empty string slice is
+// returned.
+func (ko *Koanf) MapKeys(path string) []string {
+ var (
+ out = []string{}
+ o = ko.Get(path)
+ )
+ if o == nil {
+ return out
+ }
+
+ mp, ok := o.(map[string]interface{})
+ if !ok {
+ return out
+ }
+ out = make([]string, 0, len(mp))
+ for k := range mp {
+ out = append(out, k)
+ }
+ sort.Strings(out)
+ return out
+}
+
+// Delim returns the delimiter used by this instance of Koanf.
+func (ko *Koanf) Delim() string {
+ return ko.conf.Delim
+}
+
+func (ko *Koanf) merge(c map[string]interface{}, opts *options) error {
+ maps.IntfaceKeysToStrings(c)
+ if opts.merge != nil {
+ if err := opts.merge(c, ko.confMap); err != nil {
+ return err
+ }
+ } else if ko.conf.StrictMerge {
+ if err := maps.MergeStrict(c, ko.confMap); err != nil {
+ return err
+ }
+ } else {
+ maps.Merge(c, ko.confMap)
+ }
+
+ // Maintain a flattened version as well.
+ ko.confMapFlat, ko.keyMap = maps.Flatten(ko.confMap, nil, ko.conf.Delim)
+ ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim)
+
+ return nil
+}
+
+// toInt64 takes an interface value and if it is an integer type,
+// converts and returns int64. If it's any other type, it is forced to a
+// string and parsed with strconv.ParseFloat, truncating the result
+// to an integer.
+func toInt64(v interface{}) (int64, error) {
+ switch i := v.(type) {
+ case int:
+ return int64(i), nil
+ case int8:
+ return int64(i), nil
+ case int16:
+ return int64(i), nil
+ case int32:
+ return int64(i), nil
+ case int64:
+ return i, nil
+ }
+
+ // Force it to a string and try to convert.
+ f, err := strconv.ParseFloat(fmt.Sprintf("%v", v), 64)
+ if err != nil {
+ return 0, err
+ }
+
+ return int64(f), nil
+}
+
+// toFloat64 takes a `v interface{}` value and if it is a float type,
+// converts and returns a `float64`. If it's any other type, forces it to a
+// string and attempts to get a float out using `strconv.ParseFloat`.
+func toFloat64(v interface{}) (float64, error) {
+ switch i := v.(type) {
+ case float32:
+ return float64(i), nil
+ case float64:
+ return i, nil
+ }
+
+ // Force it to a string and try to convert.
+ f, err := strconv.ParseFloat(fmt.Sprintf("%v", v), 64)
+ if err != nil {
+ return f, err
+ }
+
+ return f, nil
+}
+
+// toBool takes an interface value and if it is a bool type,
+// returns it. If it's any other type, forces it to a string and attempts
+// to parse it as a bool using strconv.ParseBool.
+func toBool(v interface{}) (bool, error) {
+ if b, ok := v.(bool); ok {
+ return b, nil
+ }
+
+ // Force it to a string and try to convert.
+ b, err := strconv.ParseBool(fmt.Sprintf("%v", v))
+ if err != nil {
+ return b, err
+ }
+ return b, nil
+}
+
+// populateKeyParts iterates a key map and generates all possible
+// traversal paths. For instance, `parent.child.key` generates
+// `parent`, and `parent.child`.
+func populateKeyParts(m KeyMap, delim string) KeyMap {
+ out := make(KeyMap, len(m)) // The result has at least as many entries as the input KeyMap.
+ for _, parts := range m {
+ // parts is a slice of [parent, child, key]
+ var nk string
+
+ for i := range parts {
+ if i == 0 {
+ // On first iteration only use first part
+ nk = parts[i]
+ } else {
+ // If nk already contains a part (e.g. `parent`) append delim + `child`
+ nk += delim + parts[i]
+ }
+ if _, ok := out[nk]; ok {
+ continue
+ }
+ out[nk] = make([]string, i+1)
+ copy(out[nk], parts[0:i+1])
+ }
+ }
+ return out
+}
+
+// textUnmarshalerHookFunc is a fixed version of mapstructure.TextUnmarshallerHookFunc.
+// This hook additionally allows unmarshalling text into custom string types that
+// implement the encoding.Text(Un)Marshaler interface(s).
+func textUnmarshalerHookFunc() mapstructure.DecodeHookFuncType {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{},
+ ) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ result := reflect.New(t).Interface()
+ unmarshaller, ok := result.(encoding.TextUnmarshaler)
+ if !ok {
+ return data, nil
+ }
+
+ // default text representation is the actual value of the `from` string
+ var (
+ dataVal = reflect.ValueOf(data)
+ text = []byte(dataVal.String())
+ )
+ if f.Kind() == t.Kind() {
+ // source and target are of underlying type string
+ var (
+ err error
+ ptrVal = reflect.New(dataVal.Type())
+ )
+ if !ptrVal.Elem().CanSet() {
+ // cannot set, skip, this should not happen
+ if err := unmarshaller.UnmarshalText(text); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+ ptrVal.Elem().Set(dataVal)
+
+ // We need to check whether the value type or the pointer type
+ // implements the TextMarshaler interface before proceeding and simply
+ // using the string value of the string type, because the internal
+ // string representation might differ from the (un)marshalled string.
+
+ for _, v := range []reflect.Value{dataVal, ptrVal} {
+ if marshaller, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err = marshaller.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ break
+ }
+ }
+ }
+
+ // text is either the source string's value or the source string type's marshaled value
+ // which may differ from its internal string value.
+ if err := unmarshaller.UnmarshalText(text); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+}
diff --git a/vendor/github.com/knadh/koanf/v2/options.go b/vendor/github.com/knadh/koanf/v2/options.go
new file mode 100644
index 0000000000..63cea203e6
--- /dev/null
+++ b/vendor/github.com/knadh/koanf/v2/options.go
@@ -0,0 +1,33 @@
+package koanf
+
+// options contains options to modify the behavior of Koanf.Load.
+type options struct {
+ merge func(a, b map[string]interface{}) error
+}
+
+// newOptions creates a new options instance.
+func newOptions(opts []Option) *options {
+ o := new(options)
+ o.apply(opts)
+ return o
+}
+
+// Option is a generic type used to modify the behavior of Koanf.Load.
+type Option func(*options)
+
+// apply the given options.
+func (o *options) apply(opts []Option) {
+ for _, opt := range opts {
+ opt(o)
+ }
+}
+
+// WithMergeFunc is an option to modify the merge behavior of Koanf.Load.
+// If unset, the default merge function is used.
+//
+// The merge function is expected to merge map src into dest (left to right).
+func WithMergeFunc(merge func(src, dest map[string]interface{}) error) Option {
+ return func(o *options) {
+ o.merge = merge
+ }
+}
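+
+// A minimal usage sketch (the file provider and json parser are the modules
+// under providers/ and parsers/ in this repository; the filename is illustrative):
+//
+//	err := k.Load(file.Provider("override.json"), json.Parser(),
+//		WithMergeFunc(func(src, dest map[string]interface{}) error {
+//			for key, val := range src {
+//				if _, ok := dest[key]; ok {
+//					return fmt.Errorf("conflicting key: %s", key)
+//				}
+//				dest[key] = val
+//			}
+//			return nil
+//		}))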
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index 9831c37baf..0e42858aed 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -86,7 +86,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://linuxcontainers.org/incus/
* https://ifconfig.es
* https://github.com/zmap/zdns
-
+* https://framagit.org/bortzmeyer/check-soa
Send pull request if you want to be listed here.
@@ -193,6 +193,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 9460 - Service Binding and Parameter Specification via the DNS
* 9461 - Service Binding Mapping for DNS Servers
* 9462 - Discovery of Designated Resolvers
+* 9606 - DNS Resolver Information
+* Draft - Compact Denial of Existence in DNSSEC
## Loosely Based Upon
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index 0447fd826a..91793b9069 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -27,6 +27,7 @@ const (
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
_DO = 1 << 15 // DNSSEC OK
+ _CO = 1 << 14 // Compact Answers OK
)
// makeDataOpt is used to unpack the EDNS0 option(s) from a message.
@@ -75,7 +76,11 @@ type OPT struct {
func (rr *OPT) String() string {
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
if rr.Do() {
- s += "flags: do; "
+ if rr.Co() {
+ s += "flags: do, co; "
+ } else {
+ s += "flags: do; "
+ }
} else {
s += "flags:; "
}
@@ -195,14 +200,34 @@ func (rr *OPT) SetDo(do ...bool) {
}
}
-// Z returns the Z part of the OPT RR as a uint16 with only the 15 least significant bits used.
+// Co returns the value of the CO (Compact Answers OK) bit.
+func (rr *OPT) Co() bool {
+ return rr.Hdr.Ttl&_CO == _CO
+}
+
+// SetCo sets the CO (Compact Answers OK) bit.
+// If exactly one argument is passed, the CO bit is set to that value.
+// Otherwise the bit is set, and any extra arguments are ignored.
+func (rr *OPT) SetCo(co ...bool) {
+ if len(co) == 1 {
+ if co[0] {
+ rr.Hdr.Ttl |= _CO
+ } else {
+ rr.Hdr.Ttl &^= _CO
+ }
+ } else {
+ rr.Hdr.Ttl |= _CO
+ }
+}
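+
+// A minimal usage sketch (the query name is illustrative):
+//
+//	m := new(Msg)
+//	m.SetQuestion("example.org.", TypeSOA)
+//	m.SetEdns0(4096, true) // attach an OPT RR with the DO bit set
+//	if opt := m.IsEdns0(); opt != nil {
+//		opt.SetCo() // additionally signal Compact Answers OK
+//	}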
+
+// Z returns the Z part of the OPT RR as a uint16 with only the 14 least significant bits used.
func (rr *OPT) Z() uint16 {
- return uint16(rr.Hdr.Ttl & 0x7FFF)
+ return uint16(rr.Hdr.Ttl & 0x3FFF)
}
-// SetZ sets the Z part of the OPT RR, note only the 15 least significant bits of z are used.
+// SetZ sets the Z part of the OPT RR, note only the 14 least significant bits of z are used.
func (rr *OPT) SetZ(z uint16) {
- rr.Hdr.Ttl = rr.Hdr.Ttl&^0x7FFF | uint32(z&0x7FFF)
+ rr.Hdr.Ttl = rr.Hdr.Ttl&^0x3FFF | uint32(z&0x3FFF)
}
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
index e26e8027a4..fa8a332eda 100644
--- a/vendor/github.com/miekg/dns/scan.go
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -108,6 +108,8 @@ type ttlState struct {
// origin for resolving relative domain names defaults to the DNS root (.).
// Full zone file syntax is supported, including directives like $TTL and $ORIGIN.
// All fields of the returned RR are set from the read data, except RR.Header().Rdlength which is set to 0.
+// If you need a partial resource record with no rdata - for instance, for dynamic updates - see the [ANY]
+// documentation.
func NewRR(s string) (RR, error) {
if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
return ReadRR(strings.NewReader(s+"\n"), "")
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index c1a76995e7..ac885f66fe 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -1620,6 +1620,16 @@ func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
return nil
}
+// Uses the same format as TXT
+func (rr *RESINFO) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToTxtSlice(c, "bad RESINFO Resinfo")
+ if e != nil {
+ return e
+ }
+ rr.Txt = s
+ return nil
+}
+
func (rr *URI) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go
index 310c7d11f5..d1baeea992 100644
--- a/vendor/github.com/miekg/dns/svcb.go
+++ b/vendor/github.com/miekg/dns/svcb.go
@@ -214,11 +214,7 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue {
}
}
-// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-08).
-//
-// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
-// The API, including constants and types related to SVCBKeyValues, may
-// change in future versions in accordance with the latest drafts.
+// SVCB RR. See RFC 9460.
type SVCB struct {
Hdr RR_Header
Priority uint16 // If zero, Value must be empty or discarded by the user of this library
@@ -226,12 +222,8 @@ type SVCB struct {
Value []SVCBKeyValue `dns:"pairs"`
}
-// HTTPS RR. Everything valid for SVCB applies to HTTPS as well.
+// HTTPS RR. See RFC 9460. Everything valid for SVCB applies to HTTPS as well.
// Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols.
-//
-// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
-// The API, including constants and types related to SVCBKeyValues, may
-// change in future versions in accordance with the latest drafts.
type HTTPS struct {
SVCB
}
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index 7a34c14ca0..e39cf2fecf 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -101,6 +101,7 @@ const (
TypeCAA uint16 = 257
TypeAVC uint16 = 258
TypeAMTRELAY uint16 = 260
+ TypeRESINFO uint16 = 261
TypeTKEY uint16 = 249
TypeTSIG uint16 = 250
@@ -267,11 +268,20 @@ func (q *Question) String() (s string) {
return s
}
-// ANY is a wild card record. See RFC 1035, Section 3.2.3. ANY
-// is named "*" there.
+// ANY is a wild card record. See RFC 1035, Section 3.2.3. ANY is named "*" there.
+// The ANY records can be (ab)used to create resource records without any rdata, that
+// can be used in dynamic update requests. Basic use pattern:
+//
+// a := &ANY{RR_Header{
+// Name: "example.org.",
+// Rrtype: TypeA,
+// Class: ClassINET,
+// }}
+//
+// Results in an A record without rdata.
type ANY struct {
Hdr RR_Header
- // Does not have any rdata
+ // Does not have any rdata.
}
func (rr *ANY) String() string { return rr.Hdr.String() }
@@ -1508,6 +1518,15 @@ func (rr *ZONEMD) String() string {
" " + rr.Digest
}
+// RESINFO RR. See RFC 9606.
+type RESINFO struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *RESINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
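+
+// RESINFO shares TXT's presentation format, so records can be built with
+// NewRR (a sketch; the key/value strings follow RFC 9606):
+//
+//	rr, err := NewRR(`resolver.example. 3600 IN RESINFO "qnamemin" "exterr=15-17"`)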
+
// APL RR. See RFC 3123.
type APL struct {
Hdr RR_Header
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
index c018ad43da..d226718595 100644
--- a/vendor/github.com/miekg/dns/udp.go
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -1,5 +1,5 @@
-//go:build !windows
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
package dns
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_no_control.go
similarity index 85%
rename from vendor/github.com/miekg/dns/udp_windows.go
rename to vendor/github.com/miekg/dns/udp_no_control.go
index a259b67e4d..ca3d4a633b 100644
--- a/vendor/github.com/miekg/dns/udp_windows.go
+++ b/vendor/github.com/miekg/dns/udp_no_control.go
@@ -1,9 +1,11 @@
-//go:build windows
-// +build windows
+//go:build windows || darwin
+// +build windows darwin
// TODO(tmthrgd): Remove this Windows-specific code if go.dev/issue/7175 and
// go.dev/issue/7174 are ever fixed.
+// NOTICE(stek29): darwin supports PKTINFO in sendmsg, but it unbinds sockets, see https://github.com/miekg/dns/issues/724
+
package dns
import "net"
diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go
index 16f9ee85a5..2fef1461f5 100644
--- a/vendor/github.com/miekg/dns/update.go
+++ b/vendor/github.com/miekg/dns/update.go
@@ -2,6 +2,7 @@ package dns
// NameUsed sets the RRs in the prereq section to
// "Name is in use" RRs. RFC 2136 section 2.4.4.
+// See [ANY] on how to make RRs without rdata.
func (u *Msg) NameUsed(rr []RR) {
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
@@ -41,6 +42,7 @@ func (u *Msg) Used(rr []RR) {
// RRsetUsed sets the RRs in the prereq section to
// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
+// See [ANY] on how to make RRs without rdata.
func (u *Msg) RRsetUsed(rr []RR) {
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
@@ -53,6 +55,7 @@ func (u *Msg) RRsetUsed(rr []RR) {
// RRsetNotUsed sets the RRs in the prereq section to
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
+// See [ANY] on how to make RRs without rdata.
func (u *Msg) RRsetNotUsed(rr []RR) {
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
@@ -64,6 +67,7 @@ func (u *Msg) RRsetNotUsed(rr []RR) {
}
// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1.
+// See [ANY] on how to make RRs without rdata.
func (u *Msg) Insert(rr []RR) {
if len(u.Question) == 0 {
panic("dns: empty question section")
@@ -78,6 +82,7 @@ func (u *Msg) Insert(rr []RR) {
}
// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
+// See [ANY] on how to make RRs without rdata.
func (u *Msg) RemoveRRset(rr []RR) {
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
@@ -89,6 +94,7 @@ func (u *Msg) RemoveRRset(rr []RR) {
}
// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
+// See [ANY] on how to make RRs without rdata.
func (u *Msg) RemoveName(rr []RR) {
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
@@ -99,6 +105,7 @@ func (u *Msg) RemoveName(rr []RR) {
}
// Remove creates a dynamic update packet that deletes RR from an RRset, see RFC 2136 section 2.5.4
+// See [ANY] on how to make RRs without rdata.
func (u *Msg) Remove(rr []RR) {
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
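
The doc comments above all point at [ANY] for building rdata-less records. A minimal sketch of that pattern in a dynamic update (the zone and owner names are illustrative):

```go
package main

import "github.com/miekg/dns"

func main() {
	m := new(dns.Msg)
	m.SetUpdate("example.org.")

	// Header-only record with no rdata, per the ANY doc comment.
	rr := &dns.ANY{Hdr: dns.RR_Header{
		Name:   "old.example.org.",
		Rrtype: dns.TypeA,
		Class:  dns.ClassINET,
	}}

	// Delete the A RRset at old.example.org. (RFC 2136 section 2.5.2).
	m.RemoveRRset([]dns.RR{rr})
}
```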
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index e290e3dff7..73e34edc31 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = v{1, 1, 63}
+var Version = v{1, 1, 65}
// v holds the version of this library.
type v struct {
diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go
index 330c05395f..ebd9e02970 100644
--- a/vendor/github.com/miekg/dns/zduplicate.go
+++ b/vendor/github.com/miekg/dns/zduplicate.go
@@ -957,6 +957,23 @@ func (r1 *PX) isDuplicate(_r2 RR) bool {
return true
}
+func (r1 *RESINFO) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*RESINFO)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if len(r1.Txt) != len(r2.Txt) {
+ return false
+ }
+ for i := 0; i < len(r1.Txt); i++ {
+ if r1.Txt[i] != r2.Txt[i] {
+ return false
+ }
+ }
+ return true
+}
+
func (r1 *RFC3597) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*RFC3597)
if !ok {
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
index 5a6cf4c6ad..cc09810fb1 100644
--- a/vendor/github.com/miekg/dns/zmsg.go
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -762,6 +762,14 @@ func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress boo
return off, nil
}
+func (rr *RESINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringTxt(rr.Txt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
off, err = packStringHex(rr.Rdata, msg, off)
if err != nil {
@@ -2353,6 +2361,17 @@ func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) {
return off, nil
}
+func (rr *RESINFO) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Txt, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off
_ = rdStart
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
index 11f13ecf9c..cea79ae772 100644
--- a/vendor/github.com/miekg/dns/ztypes.go
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -66,6 +66,7 @@ var TypeToRR = map[uint16]func() RR{
TypeOPT: func() RR { return new(OPT) },
TypePTR: func() RR { return new(PTR) },
TypePX: func() RR { return new(PX) },
+ TypeRESINFO: func() RR { return new(RESINFO) },
TypeRKEY: func() RR { return new(RKEY) },
TypeRP: func() RR { return new(RP) },
TypeRRSIG: func() RR { return new(RRSIG) },
@@ -154,6 +155,7 @@ var TypeToString = map[uint16]string{
TypeOPT: "OPT",
TypePTR: "PTR",
TypePX: "PX",
+ TypeRESINFO: "RESINFO",
TypeRKEY: "RKEY",
TypeRP: "RP",
TypeRRSIG: "RRSIG",
@@ -238,6 +240,7 @@ func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
func (rr *PX) Header() *RR_Header { return &rr.Hdr }
+func (rr *RESINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *RP) Header() *RR_Header { return &rr.Hdr }
@@ -622,6 +625,14 @@ func (rr *PX) len(off int, compression map[string]struct{}) int {
return l
}
+func (rr *RESINFO) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ for _, x := range rr.Txt {
+ l += len(x) + 1
+ }
+ return l
+}
+
func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Rdata) / 2
@@ -1148,6 +1159,10 @@ func (rr *PX) copy() RR {
}
}
+func (rr *RESINFO) copy() RR {
+ return &RESINFO{rr.Hdr, cloneSlice(rr.Txt)}
+}
+
func (rr *RFC3597) copy() RR {
return &RFC3597{rr.Hdr, rr.Rdata}
}
diff --git a/vendor/github.com/oklog/ulid/v2/.gitignore b/vendor/github.com/oklog/ulid/v2/.gitignore
new file mode 100644
index 0000000000..c92c4d5608
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/.gitignore
@@ -0,0 +1,29 @@
+#### joe made this: http://goel.io/joe
+
+#####=== Go ===#####
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
diff --git a/vendor/github.com/oklog/ulid/v2/AUTHORS.md b/vendor/github.com/oklog/ulid/v2/AUTHORS.md
new file mode 100644
index 0000000000..95581c78b0
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/AUTHORS.md
@@ -0,0 +1,2 @@
+- Peter Bourgon (@peterbourgon)
+- Tomás Senart (@tsenart)
diff --git a/vendor/github.com/oklog/ulid/v2/CHANGELOG.md b/vendor/github.com/oklog/ulid/v2/CHANGELOG.md
new file mode 100644
index 0000000000..8da38c6b00
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/CHANGELOG.md
@@ -0,0 +1,33 @@
+## 1.3.1 / 2018-10-02
+
+* Use underlying entropy source for random increments in Monotonic (#32)
+
+## 1.3.0 / 2018-09-29
+
+* Monotonic entropy support (#31)
+
+## 1.2.0 / 2018-09-09
+
+* Add a function to convert Unix time in milliseconds back to time.Time (#30)
+
+## 1.1.0 / 2018-08-15
+
+* Ensure random part is always read from the entropy reader in full (#28)
+
+## 1.0.0 / 2018-07-29
+
+* Add ParseStrict and MustParseStrict functions (#26)
+* Enforce overflow checking when parsing (#20)
+
+## 0.3.0 / 2017-01-03
+
+* Implement ULID.Compare method
+
+## 0.2.0 / 2016-12-13
+
+* Remove year 2262 Timestamp bug. (#1)
+* Gracefully handle invalid encodings when parsing.
+
+## 0.1.0 / 2016-12-06
+
+* First ULID release
diff --git a/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..68f03f26eb
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+We use GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first propose your ideas
+ in a Github issue. This will avoid unnecessary work and surely give
+ you and us a good deal of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/oklog/ulid/v2/LICENSE b/vendor/github.com/oklog/ulid/v2/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/oklog/ulid/v2/README.md b/vendor/github.com/oklog/ulid/v2/README.md
new file mode 100644
index 0000000000..c0094ce881
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/README.md
@@ -0,0 +1,234 @@
+# Universally Unique Lexicographically Sortable Identifier
+
+[Latest release](https://github.com/oklog/ulid/releases/latest)
+
+[Go Report Card](https://goreportcard.com/report/oklog/ulid)
+[Coverage Status](https://coveralls.io/github/oklog/ulid?branch=master)
+[Go Reference](https://pkg.go.dev/github.com/oklog/ulid/v2)
+[License](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)
+
+A Go port of [ulid/javascript](https://github.com/ulid/javascript) with binary format implemented.
+
+## Background
+
+A GUID/UUID can be suboptimal for many use-cases because:
+
+- It isn't the most character efficient way of encoding 128 bits
+- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
+- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
+- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures
+
+A ULID however:
+
+- Is compatible with UUID/GUID's
+- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
+- Lexicographically sortable
+- Canonically encoded as a 26 character string, as opposed to the 36 character UUID
+- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
+- Case insensitive
+- No special characters (URL safe)
+- Monotonic sort order (correctly detects and handles the same millisecond)
+
+## Install
+
+This package requires Go modules.
+
+```shell
+go get github.com/oklog/ulid/v2
+```
+
+## Usage
+
+ULIDs are constructed from two things: a timestamp with millisecond precision,
+and some random data.
+
+Timestamps are modeled as uint64 values representing a Unix time in milliseconds.
+They can be produced by passing a [time.Time](https://pkg.go.dev/time#Time) to
+[ulid.Timestamp](https://pkg.go.dev/github.com/oklog/ulid/v2#Timestamp),
+or by calling [time.Time.UnixMilli](https://pkg.go.dev/time#Time.UnixMilli)
+and converting the returned value to `uint64`.
+
+Random data is taken from a provided [io.Reader](https://pkg.go.dev/io#Reader).
+This design allows for greater flexibility when choosing trade-offs, but can be
+a bit confusing to newcomers.
+
+If you just want to generate a ULID and don't (yet) care about details like
+performance, cryptographic security, monotonicity, etc., use the
+[ulid.Make](https://pkg.go.dev/github.com/oklog/ulid/v2#Make) helper function.
+This function calls [time.Now](https://pkg.go.dev/time#Now) to get a timestamp,
+and uses a source of entropy which is process-global,
+[pseudo-random](https://pkg.go.dev/math/rand), and
+[monotonic](https://pkg.go.dev/github.com/oklog/ulid/v2#LockedMonotonicReader).
+
+```go
+fmt.Println(ulid.Make())
+// 01G65Z755AFWAKHE12NY0CQ9FH
+```
+
+More advanced use cases should utilize
+[ulid.New](https://pkg.go.dev/github.com/oklog/ulid/v2#New).
+
+```go
+entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
+ms := ulid.Timestamp(time.Now())
+fmt.Println(ulid.MustNew(ms, entropy))
+// 01G65Z755AFWAKHE12NY0CQ9FH
+```
+
+Care should be taken when providing a source of entropy.
+
+The above example utilizes [math/rand.Rand](https://pkg.go.dev/math/rand#Rand),
+which is not safe for concurrent use by multiple goroutines. Consider
+alternatives such as
+[x/exp/rand](https://pkg.go.dev/golang.org/x/exp/rand#LockedSource).
+Security-sensitive use cases should always use cryptographically secure entropy
+provided by [crypto/rand](https://pkg.go.dev/crypto/rand).
+
+Performance-sensitive use cases should avoid synchronization when generating
+IDs. One option is to use a unique source of entropy for each concurrent
+goroutine, which results in no lock contention, but cannot provide strong
+guarantees about the random data, and does not provide monotonicity within a
+given millisecond. One common performance optimization is to pool sources of
+entropy using a [sync.Pool](https://pkg.go.dev/sync#Pool).
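+
+A minimal sketch of that pooling approach (the `pool` variable and `newULID`
+helper are illustrative, not part of this package):
+
+```go
+var pool = sync.Pool{
+	New: func() interface{} {
+		return ulid.Monotonic(rand.New(rand.NewSource(time.Now().UnixNano())), 0)
+	},
+}
+
+func newULID() ulid.ULID {
+	e := pool.Get().(*ulid.MonotonicEntropy)
+	defer pool.Put(e)
+	// Monotonicity holds per entropy source, not across the pool as a whole.
+	return ulid.MustNew(ulid.Now(), e)
+}
+```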
+
+Monotonicity is a property that says each ULID is "bigger than" the previous
+one. ULIDs are automatically monotonic, but only to millisecond precision. ULIDs
+generated within the same millisecond are ordered by their random component,
+which means they are by default unordered. You can use
+[ulid.Monotonic](https://pkg.go.dev/github.com/oklog/ulid/v2#Monotonic) or
+[ulid.LockedMonotonicReader](https://pkg.go.dev/github.com/oklog/ulid/v2#LockedMonotonicReader)
+to create ULIDs that are monotonic within a given millisecond, with caveats. See
+the documentation for details.
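+
+A minimal sketch of monotonic generation within one millisecond (single
+goroutine; wrap the source in `ulid.LockedMonotonicReader` for concurrent use):
+
+```go
+entropy := ulid.Monotonic(rand.New(rand.NewSource(time.Now().UnixNano())), 0)
+ms := ulid.Timestamp(time.Now())
+a := ulid.MustNew(ms, entropy)
+b := ulid.MustNew(ms, entropy)
+fmt.Println(a.Compare(b) < 0) // true: same millisecond, strictly increasing
+```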
+
+If you don't care about time-based ordering of generated IDs, then there's no
+reason to use ULIDs! There are many other kinds of IDs that are easier, faster,
+smaller, etc. Consider UUIDs.
+
+## Commandline tool
+
+This repo also provides a tool to generate and parse ULIDs at the command line.
+These commands should install the latest version of the tool at `bin/ulid`:
+
+```shell
+cd $(mktemp -d)
+env GOPATH=$(pwd) GO111MODULE=on go get -v github.com/oklog/ulid/v2/cmd/ulid
+```
+
+Usage:
+
+```shell
+Usage: ulid [-hlqz] [-f <format>] [parameters ...]
+ -f, --format=<format> when parsing, show times in this format: default, rfc3339, unix, ms
+ -h, --help print this help text
+ -l, --local when parsing, show local time instead of UTC
+ -q, --quick when generating, use non-crypto-grade entropy
+ -z, --zero when generating, fix entropy to all-zeroes
+```
+
+Examples:
+
+```shell
+$ ulid
+01D78XYFJ1PRM1WPBCBT3VHMNV
+$ ulid -z
+01D78XZ44G0000000000000000
+$ ulid 01D78XZ44G0000000000000000
+Sun Mar 31 03:51:23.536 UTC 2019
+$ ulid --format=rfc3339 --local 01D78XZ44G0000000000000000
+2019-03-31T05:51:23.536+02:00
+```
+
+## Specification
+
+Below is the current specification of ULID as implemented in this repository.
+
+### Components
+
+**Timestamp**
+- 48 bits
+- UNIX-time in milliseconds
+- Won't run out of space till the year 10889 AD
+
+**Entropy**
+- 80 bits
+- User defined entropy source.
+- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)
+
+### Encoding
+
+[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
+This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.
+
+```
+0123456789ABCDEFGHJKMNPQRSTVWXYZ
+```
+
+### Binary Layout and Byte Order
+
+The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order).
+
+```
+0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_time_high |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 16_bit_uint_time_low | 16_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+```
+
+### String Representation
+
+```
+ 01AN4Z07BY 79KA1307SR9X4MV3
+|----------| |----------------|
+ Timestamp Entropy
+ 10 chars 16 chars
+ 48bits 80bits
+ base32 base32
+```
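+
+A small sketch of recovering the embedded time from the string form (using the
+example ULID shown above):
+
+```go
+id := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")
+fmt.Println(ulid.Time(id.Time()).UTC()) // time encoded by the 10 Timestamp characters
+```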
+
+## Test
+
+```shell
+go test ./...
+```
+
+## Benchmarks
+
+On an Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1
+
+```
+BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op
+BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op
+BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op
+BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op
+BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op
+BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op
+BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op
+BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op
+BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op
+BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op
+BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op
+BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op
+BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op
+BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op
+BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op
+BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op
+BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op
+BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op
+BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op
+```
+
+## Prior Art
+
+- [ulid/javascript](https://github.com/ulid/javascript)
+- [RobThree/NUlid](https://github.com/RobThree/NUlid)
+- [imdario/go-ulid](https://github.com/imdario/go-ulid)
diff --git a/vendor/github.com/oklog/ulid/v2/ulid.go b/vendor/github.com/oklog/ulid/v2/ulid.go
new file mode 100644
index 0000000000..0cb258d431
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/ulid.go
@@ -0,0 +1,696 @@
+// Copyright 2016 The Oklog Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ulid
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "io"
+ "math"
+ "math/bits"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+/*
+An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier
+
+ The components are encoded as 16 octets.
+ Each component is encoded with the MSB first (network byte order).
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 16_bit_uint_time_low | 16_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+type ULID [16]byte
+
+var (
+ // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong
+ // data size.
+ ErrDataSize = errors.New("ulid: bad data size when unmarshaling")
+
+ // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with
+ // invalid Base32 encodings.
+ ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling")
+
+ // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient
+ // size.
+ ErrBufferSize = errors.New("ulid: bad buffer size when marshaling")
+
+ // ErrBigTime is returned when constructing an ULID with a time that is larger
+ // than MaxTime.
+ ErrBigTime = errors.New("ulid: time too big")
+
+ // ErrOverflow is returned when unmarshaling a ULID whose first character is
+ // larger than 7, thereby exceeding the valid bit depth of 128.
+ ErrOverflow = errors.New("ulid: overflow when unmarshaling")
+
+ // ErrMonotonicOverflow is returned by a Monotonic entropy source when
+ // incrementing the previous ULID's entropy bytes would result in overflow.
+ ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow")
+
+ // ErrScanValue is returned when the value passed to scan cannot be unmarshaled
+ // into the ULID.
+ ErrScanValue = errors.New("ulid: source value must be a string or byte slice")
+)
+
+// MonotonicReader is an interface that should yield monotonically increasing
+// entropy into the provided slice for all calls with the same ms parameter. If
+// a MonotonicReader is provided to the New constructor, its MonotonicRead
+// method will be used instead of Read.
+type MonotonicReader interface {
+ io.Reader
+ MonotonicRead(ms uint64, p []byte) error
+}
+
+// New returns an ULID with the given Unix milliseconds timestamp and an
+// optional entropy source. Use the Timestamp function to convert
+// a time.Time to Unix milliseconds.
+//
+// ErrBigTime is returned when passing a timestamp bigger than MaxTime.
+// Reading from the entropy source may also return an error.
+//
+// Safety for concurrent use is only dependent on the safety of the
+// entropy source.
+func New(ms uint64, entropy io.Reader) (id ULID, err error) {
+ if err = id.SetTime(ms); err != nil {
+ return id, err
+ }
+
+ switch e := entropy.(type) {
+ case nil:
+ return id, err
+ case MonotonicReader:
+ err = e.MonotonicRead(ms, id[6:])
+ default:
+ _, err = io.ReadFull(e, id[6:])
+ }
+
+ return id, err
+}
+
+// MustNew is a convenience function equivalent to New that panics on failure
+// instead of returning an error.
+func MustNew(ms uint64, entropy io.Reader) ULID {
+ id, err := New(ms, entropy)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+var (
+ entropy io.Reader
+ entropyOnce sync.Once
+)
+
+// DefaultEntropy returns a thread-safe per process monotonically increasing
+// entropy source.
+func DefaultEntropy() io.Reader {
+ entropyOnce.Do(func() {
+ rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+ entropy = &LockedMonotonicReader{
+ MonotonicReader: Monotonic(rng, 0),
+ }
+ })
+ return entropy
+}
+
+// Make returns an ULID with the current time in Unix milliseconds and
+// monotonically increasing entropy for the same millisecond.
+// It is safe for concurrent use, leveraging a sync.Pool underneath for minimal
+// contention.
+func Make() (id ULID) {
+ // NOTE: MustNew can't panic since DefaultEntropy never returns an error.
+ return MustNew(Now(), DefaultEntropy())
+}
+
+// Parse parses an encoded ULID, returning an error in case of failure.
+//
+// ErrDataSize is returned if the len(ulid) is different from an encoded
+// ULID's length. Invalid encodings produce undefined ULIDs. For a version that
+// returns an error instead, see ParseStrict.
+func Parse(ulid string) (id ULID, err error) {
+ return id, parse([]byte(ulid), false, &id)
+}
+
+// ParseStrict parses an encoded ULID, returning an error in case of failure.
+//
+// It is like Parse, but additionally validates that the parsed ULID consists
+// only of valid base32 characters. It is slightly slower than Parse.
+//
+// ErrDataSize is returned if the len(ulid) is different from an encoded
+// ULID's length. Invalid encodings return ErrInvalidCharacters.
+func ParseStrict(ulid string) (id ULID, err error) {
+ return id, parse([]byte(ulid), true, &id)
+}
+
+func parse(v []byte, strict bool, id *ULID) error {
+ // Check if a base32 encoded ULID is the right length.
+ if len(v) != EncodedSize {
+ return ErrDataSize
+ }
+
+ // Check if all the characters in a base32 encoded ULID are part of the
+ // expected base32 character set.
+ if strict &&
+ (dec[v[0]] == 0xFF ||
+ dec[v[1]] == 0xFF ||
+ dec[v[2]] == 0xFF ||
+ dec[v[3]] == 0xFF ||
+ dec[v[4]] == 0xFF ||
+ dec[v[5]] == 0xFF ||
+ dec[v[6]] == 0xFF ||
+ dec[v[7]] == 0xFF ||
+ dec[v[8]] == 0xFF ||
+ dec[v[9]] == 0xFF ||
+ dec[v[10]] == 0xFF ||
+ dec[v[11]] == 0xFF ||
+ dec[v[12]] == 0xFF ||
+ dec[v[13]] == 0xFF ||
+ dec[v[14]] == 0xFF ||
+ dec[v[15]] == 0xFF ||
+ dec[v[16]] == 0xFF ||
+ dec[v[17]] == 0xFF ||
+ dec[v[18]] == 0xFF ||
+ dec[v[19]] == 0xFF ||
+ dec[v[20]] == 0xFF ||
+ dec[v[21]] == 0xFF ||
+ dec[v[22]] == 0xFF ||
+ dec[v[23]] == 0xFF ||
+ dec[v[24]] == 0xFF ||
+ dec[v[25]] == 0xFF) {
+ return ErrInvalidCharacters
+ }
+
+ // Check if the first character in a base32 encoded ULID will overflow. This
+ // happens because the base32 representation encodes 130 bits, while the
+ // ULID is only 128 bits.
+ //
+ // See https://github.com/oklog/ulid/issues/9 for details.
+ if v[0] > '7' {
+ return ErrOverflow
+ }
+
+ // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid)
+ // to decode a base32 ULID.
+
+ // 6 bytes timestamp (48 bits)
+ (*id)[0] = (dec[v[0]] << 5) | dec[v[1]]
+ (*id)[1] = (dec[v[2]] << 3) | (dec[v[3]] >> 2)
+ (*id)[2] = (dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)
+ (*id)[3] = (dec[v[5]] << 4) | (dec[v[6]] >> 1)
+ (*id)[4] = (dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)
+ (*id)[5] = (dec[v[8]] << 5) | dec[v[9]]
+
+ // 10 bytes of entropy (80 bits)
+ (*id)[6] = (dec[v[10]] << 3) | (dec[v[11]] >> 2)
+ (*id)[7] = (dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)
+ (*id)[8] = (dec[v[13]] << 4) | (dec[v[14]] >> 1)
+ (*id)[9] = (dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)
+ (*id)[10] = (dec[v[16]] << 5) | dec[v[17]]
+ (*id)[11] = (dec[v[18]] << 3) | dec[v[19]]>>2
+ (*id)[12] = (dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)
+ (*id)[13] = (dec[v[21]] << 4) | (dec[v[22]] >> 1)
+ (*id)[14] = (dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)
+ (*id)[15] = (dec[v[24]] << 5) | dec[v[25]]
+
+ return nil
+}
+
+// MustParse is a convenience function equivalent to Parse that panics on failure
+// instead of returning an error.
+func MustParse(ulid string) ULID {
+ id, err := Parse(ulid)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// MustParseStrict is a convenience function equivalent to ParseStrict that
+// panics on failure instead of returning an error.
+func MustParseStrict(ulid string) ULID {
+ id, err := ParseStrict(ulid)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// Bytes returns bytes slice representation of ULID.
+func (id ULID) Bytes() []byte {
+ return id[:]
+}
+
+// String returns a lexicographically sortable string encoded ULID
+// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3.
+// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy.
+func (id ULID) String() string {
+ ulid := make([]byte, EncodedSize)
+ _ = id.MarshalTextTo(ulid)
+ return string(ulid)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface by
+// returning the ULID as a byte slice.
+func (id ULID) MarshalBinary() ([]byte, error) {
+ ulid := make([]byte, len(id))
+ return ulid, id.MarshalBinaryTo(ulid)
+}
+
+// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer.
+// ErrBufferSize is returned when the len(dst) != 16.
+func (id ULID) MarshalBinaryTo(dst []byte) error {
+ if len(dst) != len(id) {
+ return ErrBufferSize
+ }
+
+ copy(dst, id[:])
+ return nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by
+// copying the passed data and converting it to an ULID. ErrDataSize is
+// returned if the data length is different from ULID length.
+func (id *ULID) UnmarshalBinary(data []byte) error {
+ if len(data) != len(*id) {
+ return ErrDataSize
+ }
+
+ copy((*id)[:], data)
+ return nil
+}
+
+// Encoding is the base 32 encoding alphabet used in ULID strings.
+const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
+
+// MarshalText implements the encoding.TextMarshaler interface by
+// returning the string encoded ULID.
+func (id ULID) MarshalText() ([]byte, error) {
+ ulid := make([]byte, EncodedSize)
+ return ulid, id.MarshalTextTo(ulid)
+}
+
+// MarshalTextTo writes the ULID as a string to the given buffer.
+// ErrBufferSize is returned when the len(dst) != 26.
+func (id ULID) MarshalTextTo(dst []byte) error {
+ // Optimized unrolled loop ahead.
+ // From https://github.com/RobThree/NUlid
+
+ if len(dst) != EncodedSize {
+ return ErrBufferSize
+ }
+
+ // 10 byte timestamp
+ dst[0] = Encoding[(id[0]&224)>>5]
+ dst[1] = Encoding[id[0]&31]
+ dst[2] = Encoding[(id[1]&248)>>3]
+ dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)]
+ dst[4] = Encoding[(id[2]&62)>>1]
+ dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)]
+ dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)]
+ dst[7] = Encoding[(id[4]&124)>>2]
+ dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)]
+ dst[9] = Encoding[id[5]&31]
+
+ // 16 bytes of entropy
+ dst[10] = Encoding[(id[6]&248)>>3]
+ dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)]
+ dst[12] = Encoding[(id[7]&62)>>1]
+ dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)]
+ dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)]
+ dst[15] = Encoding[(id[9]&124)>>2]
+ dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)]
+ dst[17] = Encoding[id[10]&31]
+ dst[18] = Encoding[(id[11]&248)>>3]
+ dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)]
+ dst[20] = Encoding[(id[12]&62)>>1]
+ dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)]
+ dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)]
+ dst[23] = Encoding[(id[14]&124)>>2]
+ dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)]
+ dst[25] = Encoding[id[15]&31]
+
+ return nil
+}
+
+// Byte to index table for O(1) lookups when unmarshaling.
+// We use 0xFF as sentinel value for invalid indexes.
+var dec = [...]byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
+ 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
+ 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
+ 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+}
+
+// EncodedSize is the length of a text encoded ULID.
+const EncodedSize = 26
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface by
+// parsing the data as string encoded ULID.
+//
+// ErrDataSize is returned if the len(v) is different from an encoded
+// ULID's length. Invalid encodings produce undefined ULIDs.
+func (id *ULID) UnmarshalText(v []byte) error {
+ return parse(v, false, id)
+}
+
+// Time returns the Unix time in milliseconds encoded in the ULID.
+// Use the top level Time function to convert the returned value to
+// a time.Time.
+func (id ULID) Time() uint64 {
+ return uint64(id[5]) | uint64(id[4])<<8 |
+ uint64(id[3])<<16 | uint64(id[2])<<24 |
+ uint64(id[1])<<32 | uint64(id[0])<<40
+}
+
+// maxTime is the maximum Unix time in milliseconds that can be
+// represented in an ULID.
+var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time()
+
+// MaxTime returns the maximum Unix time in milliseconds that
+// can be encoded in an ULID.
+func MaxTime() uint64 { return maxTime }
+
+// Now is a convenience function that returns the current
+// UTC time in Unix milliseconds. Equivalent to:
+// Timestamp(time.Now().UTC())
+func Now() uint64 { return Timestamp(time.Now().UTC()) }
+
+// Timestamp converts a time.Time to Unix milliseconds.
+//
+// Because of the way ULID stores time, times from the year
+// 10889 produce undefined results.
+func Timestamp(t time.Time) uint64 {
+ return uint64(t.Unix())*1000 +
+ uint64(t.Nanosecond()/int(time.Millisecond))
+}
+
+// Time converts Unix milliseconds in the format
+// returned by the Timestamp function to a time.Time.
+func Time(ms uint64) time.Time {
+ s := int64(ms / 1e3)
+ ns := int64((ms % 1e3) * 1e6)
+ return time.Unix(s, ns)
+}
+
+// SetTime sets the time component of the ULID to the given Unix time
+// in milliseconds.
+func (id *ULID) SetTime(ms uint64) error {
+ if ms > maxTime {
+ return ErrBigTime
+ }
+
+ (*id)[0] = byte(ms >> 40)
+ (*id)[1] = byte(ms >> 32)
+ (*id)[2] = byte(ms >> 24)
+ (*id)[3] = byte(ms >> 16)
+ (*id)[4] = byte(ms >> 8)
+ (*id)[5] = byte(ms)
+
+ return nil
+}
+
+// Entropy returns the entropy from the ULID.
+func (id ULID) Entropy() []byte {
+ e := make([]byte, 10)
+ copy(e, id[6:])
+ return e
+}
+
+// SetEntropy sets the ULID entropy to the passed byte slice.
+// ErrDataSize is returned if len(e) != 10.
+func (id *ULID) SetEntropy(e []byte) error {
+ if len(e) != 10 {
+ return ErrDataSize
+ }
+
+ copy((*id)[6:], e)
+ return nil
+}
+
+// Compare returns an integer comparing id and other lexicographically.
+// The result will be 0 if id==other, -1 if id < other, and +1 if id > other.
+func (id ULID) Compare(other ULID) int {
+ return bytes.Compare(id[:], other[:])
+}
+
+// Scan implements the sql.Scanner interface. It supports scanning
+// a string or byte slice.
+func (id *ULID) Scan(src interface{}) error {
+ switch x := src.(type) {
+ case nil:
+ return nil
+ case string:
+ return id.UnmarshalText([]byte(x))
+ case []byte:
+ return id.UnmarshalBinary(x)
+ }
+
+ return ErrScanValue
+}
+
+// Value implements the sql/driver.Valuer interface, returning the ULID as a
+// slice of bytes, by invoking MarshalBinary. If your use case requires a string
+// representation instead, you can create a wrapper type that calls String()
+// instead.
+//
+// type stringValuer ulid.ULID
+//
+// func (v stringValuer) Value() (driver.Value, error) {
+// return ulid.ULID(v).String(), nil
+// }
+//
+// // Example usage.
+// db.Exec("...", stringValuer(id))
+//
+// All valid ULIDs, including zero-value ULIDs, return a valid Value with a nil
+// error. If your use case requires zero-value ULIDs to return a non-nil error,
+// you can create a wrapper type that special-cases this behavior.
+//
+// var zeroValueULID ulid.ULID
+//
+// type invalidZeroValuer ulid.ULID
+//
+// func (v invalidZeroValuer) Value() (driver.Value, error) {
+// if ulid.ULID(v).Compare(zeroValueULID) == 0 {
+// return nil, fmt.Errorf("zero value")
+// }
+// return ulid.ULID(v).Value()
+// }
+//
+// // Example usage.
+// db.Exec("...", invalidZeroValuer(id))
+//
+func (id ULID) Value() (driver.Value, error) {
+ return id.MarshalBinary()
+}
+
+// Monotonic returns an entropy source that is guaranteed to yield
+// strictly increasing entropy bytes for the same ULID timestamp.
+// On conflicts, the previous ULID entropy is incremented with a
+// random number between 1 and `inc` (inclusive).
+//
+// The provided entropy source must actually yield random bytes or else
+// monotonic reads are not guaranteed to terminate, since there isn't
+// enough randomness to compute an increment number.
+//
+// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`.
+// The lower the value of `inc`, the easier the next ULID within the
+// same millisecond is to guess. If your code depends on ULIDs having
+// secure entropy bytes, then don't go under this default unless you know
+// what you're doing.
+//
+// The returned type isn't safe for concurrent use.
+func Monotonic(entropy io.Reader, inc uint64) *MonotonicEntropy {
+ m := MonotonicEntropy{
+ Reader: bufio.NewReader(entropy),
+ inc: inc,
+ }
+
+ if m.inc == 0 {
+ m.inc = math.MaxUint32
+ }
+
+ if rng, ok := entropy.(rng); ok {
+ m.rng = rng
+ }
+
+ return &m
+}
+
+type rng interface{ Int63n(n int64) int64 }
+
+// LockedMonotonicReader wraps a MonotonicReader with a sync.Mutex for
+// safe concurrent use.
+type LockedMonotonicReader struct {
+ mu sync.Mutex
+ MonotonicReader
+}
+
+// MonotonicRead synchronizes calls to the wrapped MonotonicReader.
+func (r *LockedMonotonicReader) MonotonicRead(ms uint64, p []byte) (err error) {
+ r.mu.Lock()
+ err = r.MonotonicReader.MonotonicRead(ms, p)
+ r.mu.Unlock()
+ return err
+}
+
+// MonotonicEntropy is an opaque type that provides monotonic entropy.
+type MonotonicEntropy struct {
+ io.Reader
+ ms uint64
+ inc uint64
+ entropy uint80
+ rand [8]byte
+ rng rng
+}
+
+// MonotonicRead implements the MonotonicReader interface.
+func (m *MonotonicEntropy) MonotonicRead(ms uint64, entropy []byte) (err error) {
+ if !m.entropy.IsZero() && m.ms == ms {
+ err = m.increment()
+ m.entropy.AppendTo(entropy)
+ } else if _, err = io.ReadFull(m.Reader, entropy); err == nil {
+ m.ms = ms
+ m.entropy.SetBytes(entropy)
+ }
+ return err
+}
+
+// increment the previous entropy number with a random number
+// of up to m.inc (inclusive).
+func (m *MonotonicEntropy) increment() error {
+ if inc, err := m.random(); err != nil {
+ return err
+ } else if m.entropy.Add(inc) {
+ return ErrMonotonicOverflow
+ }
+ return nil
+}
+
+// random returns a uniform random value in [1, m.inc), reading entropy
+// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1.
+// Adapted from: https://golang.org/pkg/crypto/rand/#Int
+func (m *MonotonicEntropy) random() (inc uint64, err error) {
+ if m.inc <= 1 {
+ return 1, nil
+ }
+
+ // Fast path for using a underlying rand.Rand directly.
+ if m.rng != nil {
+ // Range: [1, m.inc)
+ return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil
+ }
+
+ // bitLen is the maximum bit length needed to encode a value < m.inc.
+ bitLen := bits.Len64(m.inc)
+
+ // byteLen is the maximum byte length needed to encode a value < m.inc.
+ byteLen := uint(bitLen+7) / 8
+
+ // msbitLen is the number of bits in the most significant byte of m.inc-1.
+ msbitLen := uint(bitLen % 8)
+ if msbitLen == 0 {
+ msbitLen = 8
+ }
+
+ for inc == 0 || inc >= m.inc {
+ if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil {
+ return 0, err
+ }
+
+ // Clear bits in the first byte to increase the probability
+ // that the candidate is < m.inc.
+		m.rand[0] &= uint8(int(1<<msbitLen) - 1)
-	for stale.pq.Len() > 0 {
- _, ts := stale.pq.Peek()
- if now.Sub(ts) < max {
- break
- }
- id, _ := stale.pq.Pop()
- ids = append(ids, id)
- }
-
- return ids
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go
deleted file mode 100644
index 5f0d715b69..0000000000
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams/streams.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package streams // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/streams"
-
-import (
- "go.opentelemetry.io/collector/pdata/pcommon"
-
- "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity"
-)
-
-// Sequence of streams that can be iterated upon
-type Seq[T any] func(yield func(identity.Stream, T) bool) bool
-
-// Map defines a collection of items tracked by a stream-id and the operations
-// on it
-type Map[T any] interface {
- Load(identity.Stream) (T, bool)
- Store(identity.Stream, T) error
- Delete(identity.Stream)
- Items() func(yield func(identity.Stream, T) bool) bool
- Len() int
- Clear()
-}
-
-var _ Map[any] = HashMap[any](nil)
-
-type HashMap[T any] map[identity.Stream]T
-
-func (m HashMap[T]) Load(id identity.Stream) (T, bool) {
- v, ok := (map[identity.Stream]T)(m)[id]
- return v, ok
-}
-
-func (m HashMap[T]) Store(id identity.Stream, v T) error {
- (map[identity.Stream]T)(m)[id] = v
- return nil
-}
-
-func (m HashMap[T]) Delete(id identity.Stream) {
- delete((map[identity.Stream]T)(m), id)
-}
-
-func (m HashMap[T]) Items() func(yield func(identity.Stream, T) bool) bool {
- return func(yield func(identity.Stream, T) bool) bool {
- for id, v := range (map[identity.Stream]T)(m) {
- if !yield(id, v) {
- break
- }
- }
- return false
- }
-}
-
-func (m HashMap[T]) Len() int {
- return len((map[identity.Stream]T)(m))
-}
-
-func (m HashMap[T]) Clear() {
- clear(m)
-}
-
-// Evictors remove the "least important" stream based on some strategy such as
-// the oldest, least active, etc.
-//
-// Returns whether a stream was evicted and if so the now gone stream id
-type Evictor interface {
- Evict() (gone identity.Stream, ok bool)
-}
-
-type DataPointSlice[DP DataPoint[DP]] interface {
- Len() int
- At(i int) DP
- AppendEmpty() DP
-}
-
-type DataPoint[Self any] interface {
- Timestamp() pcommon.Timestamp
- Attributes() pcommon.Map
- CopyTo(dest Self)
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go
index 172789c607..9f08870936 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go
@@ -130,10 +130,9 @@ func (hw *hashWriter) writeMapHash(m pcommon.Map) {
// on the first call due to it being cleared of any added keys at the end of the function.
nextIndex := len(hw.keysBuf)
- m.Range(func(k string, _ pcommon.Value) bool {
+ for k := range m.All() {
hw.keysBuf = append(hw.keysBuf, k)
- return true
- })
+ }
// Get only the newly added keys from the buffer by slicing the buffer from nextIndex to the end
workingKeySet := hw.keysBuf[nextIndex:]
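
The change above swaps the `Range` callback for the `All` iterator. A minimal sketch of the new iteration style (assumes a pdata release that exposes `Map.All` and Go 1.23+ range-over-func support):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	m := pcommon.NewMap()
	m.PutStr("service.name", "loki") // hypothetical attribute
	for k, v := range m.All() {      // yields each key/value pair in turn
		fmt.Println(k, v.AsString())
	}
}
```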
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml
index f726a58cdf..ac3c7541c0 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/metadata.yaml
@@ -1,3 +1,4 @@
status:
+ class: pkg
codeowners:
active: [dmitryax]
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go
index f5b5c1c59d..589bdc744d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/config.go
@@ -10,11 +10,12 @@ import (
"time"
"go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/confmap/xconfmap"
telemetry "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry"
)
-var _ component.ConfigValidator = (*Config)(nil)
+var _ xconfmap.Validator = (*Config)(nil)
type Config struct {
MaxStale time.Duration `mapstructure:"max_stale"`
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
index f9c560a870..f11a329c32 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
@@ -6,15 +6,7 @@
The following telemetry is emitted by this component.
-### otelcol_deltatocumulative.datapoints.dropped
-
-number of datapoints dropped due to given 'reason'
-
-| Unit | Metric Type | Value Type | Monotonic |
-| ---- | ----------- | ---------- | --------- |
-| {datapoint} | Sum | Int | true |
-
-### otelcol_deltatocumulative.datapoints.linear
+### otelcol_deltatocumulative_datapoints
total number of datapoints processed. may have 'error' attribute, if processing failed
@@ -22,31 +14,7 @@ total number of datapoints processed. may have 'error' attribute, if processing
| ---- | ----------- | ---------- | --------- |
| {datapoint} | Sum | Int | true |
-### otelcol_deltatocumulative.datapoints.processed
-
-number of datapoints processed
-
-| Unit | Metric Type | Value Type | Monotonic |
-| ---- | ----------- | ---------- | --------- |
-| {datapoint} | Sum | Int | true |
-
-### otelcol_deltatocumulative.gaps.length
-
-total duration where data was expected but not received
-
-| Unit | Metric Type | Value Type | Monotonic |
-| ---- | ----------- | ---------- | --------- |
-| s | Sum | Int | true |
-
-### otelcol_deltatocumulative.streams.evicted
-
-number of streams evicted
-
-| Unit | Metric Type | Value Type | Monotonic |
-| ---- | ----------- | ---------- | --------- |
-| {stream} | Sum | Int | true |
-
-### otelcol_deltatocumulative.streams.limit
+### otelcol_deltatocumulative_streams_limit
upper limit of tracked streams
@@ -54,7 +22,7 @@ upper limit of tracked streams
| ---- | ----------- | ---------- |
| {stream} | Gauge | Int |
-### otelcol_deltatocumulative.streams.max_stale
+### otelcol_deltatocumulative_streams_max_stale
duration after which inactive streams are dropped
@@ -62,15 +30,7 @@ duration after which streams inactive streams are dropped
| ---- | ----------- | ---------- |
| s | Gauge | Int |
-### otelcol_deltatocumulative.streams.tracked
-
-number of streams tracked
-
-| Unit | Metric Type | Value Type | Monotonic |
-| ---- | ----------- | ---------- | --------- |
-| {dps} | Sum | Int | false |
-
-### otelcol_deltatocumulative.streams.tracked.linear
+### otelcol_deltatocumulative_streams_tracked
number of streams tracked
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go
index 904ae1ee68..9e05d41965 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/factory.go
@@ -5,7 +5,7 @@ package deltatocumulativeprocessor // import "github.com/open-telemetry/opentele
import (
"context"
- "fmt"
+ "errors"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
@@ -26,7 +26,7 @@ func NewFactory() processor.Factory {
func createMetricsProcessor(_ context.Context, set processor.Settings, cfg component.Config, next consumer.Metrics) (processor.Metrics, error) {
pcfg, ok := cfg.(*Config)
if !ok {
- return nil, fmt.Errorf("configuration parsing error")
+ return nil, errors.New("configuration parsing error")
}
tel, err := telemetry.New(set.TelemetrySettings)
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
index 33c2f283c8..1da69b6c62 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
@@ -12,103 +12,130 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice"
)
-func (dp Number) Add(in Number) Number {
- switch in.ValueType() {
+// Aggregator performs an operation on two datapoints.
+// Given [pmetric] types are mutable by nature, this logically works as follows:
+//
+// *state = op(state, dp)
+//
+// See [Adder] for an implementation.
+type Aggregator interface {
+ Numbers(state, dp pmetric.NumberDataPoint) error
+ Histograms(state, dp pmetric.HistogramDataPoint) error
+ Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error
+}
+
+var _ Aggregator = (*Adder)(nil)
+
+// Adder adds (+) datapoints.
+type Adder struct{}
+
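+// maxBuckets caps how many exponential-histogram buckets are kept after a merge; exceeding it triggers a downscale.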
+var maxBuckets = 160
+
+func (add Adder) Numbers(state, dp pmetric.NumberDataPoint) error {
+ switch dp.ValueType() {
case pmetric.NumberDataPointValueTypeDouble:
- v := dp.DoubleValue() + in.DoubleValue()
- dp.SetDoubleValue(v)
+ v := state.DoubleValue() + dp.DoubleValue()
+ state.SetDoubleValue(v)
case pmetric.NumberDataPointValueTypeInt:
- v := dp.IntValue() + in.IntValue()
- dp.SetIntValue(v)
+ v := state.IntValue() + dp.IntValue()
+ state.SetIntValue(v)
}
- dp.SetTimestamp(in.Timestamp())
- return dp
+ return nil
}
-func (dp Histogram) Add(in Histogram) Histogram {
+func (add Adder) Histograms(state, dp pmetric.HistogramDataPoint) error {
// bounds different: no way to merge, so reset observation to new boundaries
- if !pslice.Equal(dp.ExplicitBounds(), in.ExplicitBounds()) {
- in.MoveTo(dp.HistogramDataPoint)
- return dp
+ if !pslice.Equal(state.ExplicitBounds(), dp.ExplicitBounds()) {
+ dp.CopyTo(state)
+ return nil
}
// spec requires len(BucketCounts) == len(ExplicitBounds)+1.
// given we have limited error handling at this stage (and already verified boundaries are correct),
// doing a best-effort add of whatever we have appears reasonable.
- n := min(dp.BucketCounts().Len(), in.BucketCounts().Len())
+ n := min(state.BucketCounts().Len(), dp.BucketCounts().Len())
for i := 0; i < n; i++ {
- sum := dp.BucketCounts().At(i) + in.BucketCounts().At(i)
- dp.BucketCounts().SetAt(i, sum)
+ sum := state.BucketCounts().At(i) + dp.BucketCounts().At(i)
+ state.BucketCounts().SetAt(i, sum)
}
- dp.SetTimestamp(in.Timestamp())
- dp.SetCount(dp.Count() + in.Count())
+ state.SetCount(state.Count() + dp.Count())
- if dp.HasSum() && in.HasSum() {
- dp.SetSum(dp.Sum() + in.Sum())
+ if state.HasSum() && dp.HasSum() {
+ state.SetSum(state.Sum() + dp.Sum())
} else {
- dp.RemoveSum()
+ state.RemoveSum()
}
- if dp.HasMin() && in.HasMin() {
- dp.SetMin(math.Min(dp.Min(), in.Min()))
+ if state.HasMin() && dp.HasMin() {
+ state.SetMin(math.Min(state.Min(), dp.Min()))
} else {
- dp.RemoveMin()
+ state.RemoveMin()
}
- if dp.HasMax() && in.HasMax() {
- dp.SetMax(math.Max(dp.Max(), in.Max()))
+ if state.HasMax() && dp.HasMax() {
+ state.SetMax(math.Max(state.Max(), dp.Max()))
} else {
- dp.RemoveMax()
+ state.RemoveMax()
}
- return dp
+ return nil
}
-func (dp ExpHistogram) Add(in ExpHistogram) ExpHistogram {
- type H = ExpHistogram
+func (add Adder) Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error {
+ type H = pmetric.ExponentialHistogramDataPoint
- if dp.Scale() != in.Scale() {
- hi, lo := expo.HiLo(dp, in, H.Scale)
+ if state.Scale() != dp.Scale() {
+ hi, lo := expo.HiLo(state, dp, H.Scale)
from, to := expo.Scale(hi.Scale()), expo.Scale(lo.Scale())
expo.Downscale(hi.Positive(), from, to)
expo.Downscale(hi.Negative(), from, to)
hi.SetScale(lo.Scale())
}
- if dp.ZeroThreshold() != in.ZeroThreshold() {
- hi, lo := expo.HiLo(dp, in, H.ZeroThreshold)
- expo.WidenZero(lo.DataPoint, hi.ZeroThreshold())
+ // Downscale if the expected number of buckets after the merge is too large.
+ from := expo.Scale(state.Scale())
+ to := min(
+ expo.Limit(maxBuckets, from, state.Positive(), dp.Positive()),
+ expo.Limit(maxBuckets, from, state.Negative(), dp.Negative()),
+ )
+ if from != to {
+ expo.Downscale(state.Positive(), from, to)
+ expo.Downscale(state.Negative(), from, to)
+ expo.Downscale(dp.Positive(), from, to)
+ expo.Downscale(dp.Negative(), from, to)
+ state.SetScale(int32(to))
+ dp.SetScale(int32(to))
}
- expo.Merge(dp.Positive(), in.Positive())
- expo.Merge(dp.Negative(), in.Negative())
+ if state.ZeroThreshold() != dp.ZeroThreshold() {
+ hi, lo := expo.HiLo(state, dp, H.ZeroThreshold)
+ expo.WidenZero(lo, hi.ZeroThreshold())
+ }
+
+ expo.Merge(state.Positive(), dp.Positive())
+ expo.Merge(state.Negative(), dp.Negative())
- dp.SetTimestamp(in.Timestamp())
- dp.SetCount(dp.Count() + in.Count())
- dp.SetZeroCount(dp.ZeroCount() + in.ZeroCount())
+ state.SetCount(state.Count() + dp.Count())
+ state.SetZeroCount(state.ZeroCount() + dp.ZeroCount())
- if dp.HasSum() && in.HasSum() {
- dp.SetSum(dp.Sum() + in.Sum())
+ if state.HasSum() && dp.HasSum() {
+ state.SetSum(state.Sum() + dp.Sum())
} else {
- dp.RemoveSum()
+ state.RemoveSum()
}
- if dp.HasMin() && in.HasMin() {
- dp.SetMin(math.Min(dp.Min(), in.Min()))
+ if state.HasMin() && dp.HasMin() {
+ state.SetMin(math.Min(state.Min(), dp.Min()))
} else {
- dp.RemoveMin()
+ state.RemoveMin()
}
- if dp.HasMax() && in.HasMax() {
- dp.SetMax(math.Max(dp.Max(), in.Max()))
+ if state.HasMax() && dp.HasMax() {
+ state.SetMax(math.Max(state.Max(), dp.Max()))
} else {
- dp.RemoveMax()
+ state.RemoveMax()
}
- return dp
-}
-
-func (dp Summary) Add(Summary) Summary {
- panic("todo")
+ return nil
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go
deleted file mode 100644
index 3a36f6d552..0000000000
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/data.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data"
-
-import (
- "go.opentelemetry.io/collector/pdata/pmetric"
-
- "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo"
-)
-
-type Number struct {
- pmetric.NumberDataPoint
-}
-
-type Histogram struct {
- pmetric.HistogramDataPoint
-}
-
-type ExpHistogram struct {
- expo.DataPoint
-}
-
-type Summary struct {
- pmetric.SummaryDataPoint
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go
index 150e29a658..82536ea1fa 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/merge.go
@@ -23,6 +23,15 @@ func Merge(arel, brel Buckets) {
lo := min(a.Lower(), b.Lower())
up := max(a.Upper(), b.Upper())
+ // Skip leading and trailing zeros to reduce number of buckets.
+ // As we cap the number of buckets, this lets us keep a higher scale.
+ for lo < up && a.Abs(lo) == 0 && b.Abs(lo) == 0 {
+ lo++
+ }
+ for lo < up-1 && a.Abs(up-1) == 0 && b.Abs(up-1) == 0 {
+ up--
+ }
+
size := up - lo
counts := pcommon.NewUInt64Slice()
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
index 5201806fb8..50fdef75c9 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
@@ -6,6 +6,8 @@ package expo // import "github.com/open-telemetry/opentelemetry-collector-contri
import (
"fmt"
"math"
+
+ "go.opentelemetry.io/collector/pdata/pmetric"
)
type Scale int32
@@ -29,7 +31,7 @@ func (scale Scale) Idx(v float64) int {
// This means a value min < v <= max belongs to this bucket.
//
// NOTE: this is different from Go slice intervals, which are [a,b)
-func (scale Scale) Bounds(index int) (min, max float64) {
+func (scale Scale) Bounds(index int) (minVal, maxVal float64) {
// from: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#all-scales-use-the-logarithm-function
lower := func(index int) float64 {
inverseFactor := math.Ldexp(math.Ln2, int(-scale))
@@ -47,7 +49,7 @@ func Downscale(bs Buckets, from, to Scale) {
case from < to:
// because even distribution within the buckets cannot be assumed, it is
// not possible to correctly upscale (split) buckets.
- // any attempt to do so would yield erronous data.
+ // any attempt to do so would yield erroneous data.
panic(fmt.Sprintf("cannot upscale without introducing error (%d -> %d)", from, to))
}
@@ -107,9 +109,35 @@ func Collapse(bs Buckets) {
// zero the excess area. it's not needed to represent the observation
// anymore, but kept for two reasons:
// 1. future observations may need it, no need to re-alloc then if kept
- // 2. [pcommon.Uint64Slice] can not, in fact, be sliced, so getting rid
+ // 2. [pcommon.Uint64Slice] cannot, in fact, be sliced, so getting rid
// of it would alloc ¯\_(ツ)_/¯
for i := size; i < counts.Len(); i++ {
counts.SetAt(i, 0)
}
}
+
+// Limit returns a target Scale such that, when downscaled to it, the total
+// bucket count after [Merge] never exceeds maxBuckets.
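+//
+// For example, merging index ranges [0, 100) and [60, 200) at scale 2 with
+// maxBuckets=160 spans 200 buckets, so Limit returns scale 1 (the merged
+// range shrinks to ~100 buckets).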
+func Limit(maxBuckets int, scale Scale, arel, brel pmetric.ExponentialHistogramDataPointBuckets) Scale {
+ a, b := Abs(arel), Abs(brel)
+
+ lo := min(a.Lower(), b.Lower())
+ up := max(a.Upper(), b.Upper())
+
+ // Skip leading and trailing zeros.
+ for lo < up && a.Abs(lo) == 0 && b.Abs(lo) == 0 {
+ lo++
+ }
+ for lo < up-1 && a.Abs(up-1) == 0 && b.Abs(up-1) == 0 {
+ up--
+ }
+
+ // Keep downscaling until the number of buckets is within the limit.
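+ // Each scale decrement merges neighboring bucket pairs, halving the index range.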
+ for up-lo > maxBuckets {
+ lo /= 2
+ up /= 2
+ scale--
+ }
+
+ return scale
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go
index 2d5401b39f..969c5f2734 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo/zero.go
@@ -37,8 +37,8 @@ func WidenZero(dp DataPoint, width float64) {
widen(dp.Positive())
widen(dp.Negative())
- _, max := scale.Bounds(zero)
- dp.SetZeroThreshold(max)
+ _, maxVal := scale.Bounds(zero)
+ dp.SetZeroThreshold(maxVal)
}
// Slice drops data outside the range from <= i < to from the bucket counts. It behaves the same as Go's [a:b]
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go
index f2a759e9bf..608932e722 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta/delta.go
@@ -30,18 +30,25 @@ func (e ErrOutOfOrder) Error() string {
return fmt.Sprintf("out of order: dropped sample from time=%s, because series is already at time=%s", e.Sample, e.Last)
}
-type Type interface {
+type Type[Self any] interface {
pmetric.NumberDataPoint | pmetric.HistogramDataPoint | pmetric.ExponentialHistogramDataPoint
StartTimestamp() pcommon.Timestamp
Timestamp() pcommon.Timestamp
+ SetTimestamp(pcommon.Timestamp)
+ CopyTo(Self)
}
-// AccumulateInto adds state and dp, storing the result in state
-//
-// state = state + dp
-func AccumulateInto[T Type](state, dp T) error {
+type Aggregator struct {
+ data.Aggregator
+}
+
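+// Aggregate merges dp into state using the given aggregate function, enforcing
+// stream ordering: the first sample seeds the state, while samples from an
+// older series start or with out-of-order timestamps are rejected with an error.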
+func Aggregate[T Type[T]](state, dp T, aggregate func(state, dp T) error) error {
switch {
+ case state.Timestamp() == 0:
+ // first sample of series, no state to aggregate with
+ dp.CopyTo(state)
+ return nil
case dp.StartTimestamp() < state.StartTimestamp():
// belongs to older series
return ErrOlderStart{Start: state.StartTimestamp(), Sample: dp.StartTimestamp()}
@@ -50,16 +57,22 @@ func AccumulateInto[T Type](state, dp T) error {
return ErrOutOfOrder{Last: state.Timestamp(), Sample: dp.Timestamp()}
}
- switch dp := any(dp).(type) {
- case pmetric.NumberDataPoint:
- state := any(state).(pmetric.NumberDataPoint)
- data.Number{NumberDataPoint: state}.Add(data.Number{NumberDataPoint: dp})
- case pmetric.HistogramDataPoint:
- state := any(state).(pmetric.HistogramDataPoint)
- data.Histogram{HistogramDataPoint: state}.Add(data.Histogram{HistogramDataPoint: dp})
- case pmetric.ExponentialHistogramDataPoint:
- state := any(state).(pmetric.ExponentialHistogramDataPoint)
- data.ExpHistogram{DataPoint: state}.Add(data.ExpHistogram{DataPoint: dp})
+ if err := aggregate(state, dp); err != nil {
+ return err
}
+
+ state.SetTimestamp(dp.Timestamp())
return nil
}
+
+func (aggr Aggregator) Numbers(state, dp pmetric.NumberDataPoint) error {
+ return Aggregate(state, dp, aggr.Aggregator.Numbers)
+}
+
+func (aggr Aggregator) Histograms(state, dp pmetric.HistogramDataPoint) error {
+ return Aggregate(state, dp, aggr.Aggregator.Histograms)
+}
+
+func (aggr Aggregator) Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error {
+ return Aggregate(state, dp, aggr.Aggregator.Exponential)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go
new file mode 100644
index 0000000000..b93f9935d6
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package maps // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps"
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/puzpuzpuz/xsync/v3"
+)
+
+func Limit(limit int64) Context {
+ return Context{limit: limit, guard: new(atomic.Int64), total: new(atomic.Int64)}
+}
+
+func New[K comparable, V any](ctx Context) *Parallel[K, V] {
+ return &Parallel[K, V]{ctx: ctx, elems: *xsync.NewMapOf[K, V]()}
+}
+
+// lowercase alias for unexported embedding
+type ctx = Context
+
+// Parallel is a lock-free map-like structure. It can be safely used by multiple
+// routines concurrently.
+//
+// Due to the lock-free nature, typical get, put, delete operations are not
+// available. Instead, [Parallel.LoadOrStore] returns an existing value or
+// inserts a new one if missing. As such, values themselves should be mutable by
+// being reference types (pointers or pmetric.* types).
+//
+// Parallel enforces the [Context] size limit.
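+//
+// Usage sketch:
+//
+//	size := Limit(1000)
+//	m := New[string, *int](size)
+//	v, loaded := m.LoadOrStore("key", new(int))
+//	if Exceeded(v, loaded) {
+//		// limit reached, value was rejected
+//	}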
+type Parallel[K comparable, V any] struct {
+ ctx
+ elems xsync.MapOf[K, V]
+}
+
+// Context holds size information about one or more maps.
+// Can be shared across maps for a common limit.
+type Context struct {
+ limit int64
+ guard *atomic.Int64
+ total *atomic.Int64
+}
+
+func (ctx Context) String() string {
+ return fmt.Sprintf("(%d, %d)", ctx.guard.Load(), ctx.total.Load())
+}
+
+// LoadOrStore loads existing values from the map or creates missing ones initialized to def.
+//
+// Return Value:
+//   - v, true: m[k] already existed and was loaded as v
+//   - def, false: m[k] was created and initialized to def
+//   - zero, false: m[k] did not exist but was not created due to size limit
+func (m *Parallel[K, V]) LoadOrStore(k K, def V) (_ V, loaded bool) {
+ // multiple routines may attempt to LoadOrStore the same value at once. as
+ // such, we cannot use data-dependent instructions such as if(not exist)
+ // {...}, because the map may have changed right after we checked
+ // it.
+
+ v, ok := m.elems.Load(k)
+ if ok {
+ return v, true
+ }
+
+ // as long as there appears to be actual space, try to store
+ for m.total.Load() < m.limit {
+ // multiple routines may do this. to enforce the limit, try to claim a
+ // "slot" below the limit
+ slot := m.guard.Add(1)
+ if slot > m.limit {
+ // slot we got is above the limit. either the map is now full (loop
+ // will exit) or routines that won't actually store hold slots, in
+ // which case we will try again.
+ m.guard.Add(-1)
+ continue
+ }
+
+ // we got a valid slot. others may too. as such, we try to store, but
+ // may end up loading instead if another routine stored just before us.
+ v, loaded = m.elems.LoadOrStore(k, def)
+ if loaded {
+ // another routine stored, but we got a value. give up slot
+ m.guard.Add(-1)
+ } else {
+ // we stored. increase the total size
+ m.total.Add(1)
+ }
+ return v, loaded
+ }
+
+ // we didn't store, because we hit the limit. attempt another load, just in
+ // case another routine stored by now.
+ return m.elems.Load(k)
+}
+
+// LoadAndDelete deletes m[k], returning the value it had if it existed
+func (m *Parallel[K, V]) LoadAndDelete(k K) (_ V, loaded bool) {
+ v, loaded := m.elems.LoadAndDelete(k)
+ if loaded {
+ // m[k] did exist. decrease size and open up a slot
+ m.total.Add(-1)
+ m.guard.Add(-1)
+ }
+ return v, loaded
+}
+
+func (ctx Context) Size() int64 {
+ return ctx.total.Load()
+}
+
+// Exceeded reports whether a [Parallel.LoadOrStore] failed due to the limit being exceeded.
+func Exceeded[T comparable](v T, loaded bool) bool {
+ return !loaded && v == *new(T)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go
index 82a4476ba9..c7134638b6 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go
@@ -5,13 +5,13 @@ package metadata
import (
"context"
"errors"
+ "sync"
"go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/metric/embedded"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/config/configtelemetry"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
@@ -25,17 +25,13 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer {
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
- meter metric.Meter
- DeltatocumulativeDatapointsDropped metric.Int64Counter
- DeltatocumulativeDatapointsLinear metric.Int64Counter
- DeltatocumulativeDatapointsProcessed metric.Int64Counter
- DeltatocumulativeGapsLength metric.Int64Counter
- DeltatocumulativeStreamsEvicted metric.Int64Counter
- DeltatocumulativeStreamsLimit metric.Int64Gauge
- DeltatocumulativeStreamsMaxStale metric.Int64Gauge
- DeltatocumulativeStreamsTracked metric.Int64UpDownCounter
- DeltatocumulativeStreamsTrackedLinear metric.Int64ObservableUpDownCounter
- observeDeltatocumulativeStreamsTrackedLinear func(context.Context, metric.Observer) error
+ meter metric.Meter
+ mu sync.Mutex
+ registrations []metric.Registration
+ DeltatocumulativeDatapoints metric.Int64Counter
+ DeltatocumulativeStreamsLimit metric.Int64Gauge
+ DeltatocumulativeStreamsMaxStale metric.Int64Gauge
+ DeltatocumulativeStreamsTracked metric.Int64ObservableUpDownCounter
}
// TelemetryBuilderOption applies changes to default builder.
@@ -49,14 +45,38 @@ func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
tbof(mb)
}
-// WithDeltatocumulativeStreamsTrackedLinearCallback sets callback for observable DeltatocumulativeStreamsTrackedLinear metric.
-func WithDeltatocumulativeStreamsTrackedLinearCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption {
- return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) {
- builder.observeDeltatocumulativeStreamsTrackedLinear = func(_ context.Context, o metric.Observer) error {
- o.ObserveInt64(builder.DeltatocumulativeStreamsTrackedLinear, cb(), opts...)
- return nil
- }
- })
+// RegisterDeltatocumulativeStreamsTrackedCallback sets callback for observable DeltatocumulativeStreamsTracked metric.
+func (builder *TelemetryBuilder) RegisterDeltatocumulativeStreamsTrackedCallback(cb metric.Int64Callback) error {
+ reg, err := builder.meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
+ cb(ctx, &observerInt64{inst: builder.DeltatocumulativeStreamsTracked, obs: o})
+ return nil
+ }, builder.DeltatocumulativeStreamsTracked)
+ if err != nil {
+ return err
+ }
+ builder.mu.Lock()
+ defer builder.mu.Unlock()
+ builder.registrations = append(builder.registrations, reg)
+ return nil
+}
+
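+// observerInt64 bridges a metric.Observer to the metric.Int64Observer
+// expected by metric.Int64Callback.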
+type observerInt64 struct {
+ embedded.Int64Observer
+ inst metric.Int64Observable
+ obs metric.Observer
+}
+
+func (oi *observerInt64) Observe(value int64, opts ...metric.ObserveOption) {
+ oi.obs.ObserveInt64(oi.inst, value, opts...)
+}
+
+// Shutdown unregisters all registered callbacks for async instruments.
+func (builder *TelemetryBuilder) Shutdown() {
+ builder.mu.Lock()
+ defer builder.mu.Unlock()
+ for _, reg := range builder.registrations {
+ reg.Unregister()
+ }
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
@@ -68,68 +88,29 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme
}
builder.meter = Meter(settings)
var err, errs error
- builder.DeltatocumulativeDatapointsDropped, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
- "otelcol_deltatocumulative.datapoints.dropped",
- metric.WithDescription("number of datapoints dropped due to given 'reason'"),
- metric.WithUnit("{datapoint}"),
- )
- errs = errors.Join(errs, err)
- builder.DeltatocumulativeDatapointsLinear, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
- "otelcol_deltatocumulative.datapoints.linear",
+ builder.DeltatocumulativeDatapoints, err = builder.meter.Int64Counter(
+ "otelcol_deltatocumulative_datapoints",
metric.WithDescription("total number of datapoints processed. may have 'error' attribute, if processing failed"),
metric.WithUnit("{datapoint}"),
)
errs = errors.Join(errs, err)
- builder.DeltatocumulativeDatapointsProcessed, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
- "otelcol_deltatocumulative.datapoints.processed",
- metric.WithDescription("number of datapoints processed"),
- metric.WithUnit("{datapoint}"),
- )
- errs = errors.Join(errs, err)
- builder.DeltatocumulativeGapsLength, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
- "otelcol_deltatocumulative.gaps.length",
- metric.WithDescription("total duration where data was expected but not received"),
- metric.WithUnit("s"),
- )
- errs = errors.Join(errs, err)
- builder.DeltatocumulativeStreamsEvicted, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter(
- "otelcol_deltatocumulative.streams.evicted",
- metric.WithDescription("number of streams evicted"),
- metric.WithUnit("{stream}"),
- )
- errs = errors.Join(errs, err)
- builder.DeltatocumulativeStreamsLimit, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge(
- "otelcol_deltatocumulative.streams.limit",
+ builder.DeltatocumulativeStreamsLimit, err = builder.meter.Int64Gauge(
+ "otelcol_deltatocumulative_streams_limit",
metric.WithDescription("upper limit of tracked streams"),
metric.WithUnit("{stream}"),
)
errs = errors.Join(errs, err)
- builder.DeltatocumulativeStreamsMaxStale, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge(
- "otelcol_deltatocumulative.streams.max_stale",
+ builder.DeltatocumulativeStreamsMaxStale, err = builder.meter.Int64Gauge(
+ "otelcol_deltatocumulative_streams_max_stale",
metric.WithDescription("duration after which streams inactive streams are dropped"),
metric.WithUnit("s"),
)
errs = errors.Join(errs, err)
- builder.DeltatocumulativeStreamsTracked, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64UpDownCounter(
- "otelcol_deltatocumulative.streams.tracked",
- metric.WithDescription("number of streams tracked"),
- metric.WithUnit("{dps}"),
- )
- errs = errors.Join(errs, err)
- builder.DeltatocumulativeStreamsTrackedLinear, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableUpDownCounter(
- "otelcol_deltatocumulative.streams.tracked.linear",
+ builder.DeltatocumulativeStreamsTracked, err = builder.meter.Int64ObservableUpDownCounter(
+ "otelcol_deltatocumulative_streams_tracked",
metric.WithDescription("number of streams tracked"),
metric.WithUnit("{dps}"),
)
errs = errors.Join(errs, err)
- _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeDeltatocumulativeStreamsTrackedLinear, builder.DeltatocumulativeStreamsTrackedLinear)
- errs = errors.Join(errs, err)
return &builder, errs
}
-
-func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter {
- if cfgLevel <= srvLevel {
- return meter
- }
- return noop.Meter{}
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go
index ab6fde6550..401478fb54 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go
@@ -21,27 +21,30 @@ func New(set component.TelemetrySettings) (Metrics, error) {
tracked: &zero,
}
- trackedCb := metadata.WithDeltatocumulativeStreamsTrackedLinearCallback(func() int64 {
- return int64((*m.tracked)())
+ telb, err := metadata.NewTelemetryBuilder(set)
+ if err != nil {
+ return Metrics{}, err
+ }
+ err = telb.RegisterDeltatocumulativeStreamsTrackedCallback(func(_ context.Context, observer metric.Int64Observer) error {
+ observer.Observe(int64((*m.tracked)()))
+ return nil
})
-
- telb, err := metadata.NewTelemetryBuilder(set, trackedCb)
if err != nil {
return Metrics{}, err
}
- m.TelemetryBuilder = *telb
+ m.TelemetryBuilder = telb
return m, nil
}
type Metrics struct {
- metadata.TelemetryBuilder
+ *metadata.TelemetryBuilder
tracked *func() int
}
-func (m Metrics) Datapoints() Counter {
- return Counter{Int64Counter: m.DeltatocumulativeDatapointsLinear}
+func (m *Metrics) Datapoints() Counter {
+ return Counter{Int64Counter: m.DeltatocumulativeDatapoints}
}
func (m *Metrics) WithTracked(streams func() int) {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
index be925197db..3269c0b011 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
@@ -12,14 +12,7 @@ status:
telemetry:
metrics:
# streams
- deltatocumulative.streams.tracked:
- description: number of streams tracked
- unit: "{dps}"
- sum:
- value_type: int
- monotonic: false
- enabled: true
- deltatocumulative.streams.tracked.linear:
+ deltatocumulative_streams_tracked:
description: number of streams tracked
unit: "{dps}"
sum:
@@ -27,52 +20,23 @@ telemetry:
monotonic: false
async: true
enabled: true
- deltatocumulative.streams.limit:
+ deltatocumulative_streams_limit:
description: upper limit of tracked streams
unit: "{stream}"
gauge:
value_type: int
enabled: true
- deltatocumulative.streams.evicted:
- description: number of streams evicted
- unit: "{stream}"
- sum:
- value_type: int
- monotonic: true
- enabled: true
- deltatocumulative.streams.max_stale:
+ deltatocumulative_streams_max_stale:
description: duration after which inactive streams are dropped
unit: "s"
gauge:
value_type: int
enabled: true
# datapoints
- deltatocumulative.datapoints.processed:
- description: number of datapoints processed
- unit: "{datapoint}"
- sum:
- value_type: int
- monotonic: true
- enabled: true
- deltatocumulative.datapoints.dropped:
- description: number of datapoints dropped due to given 'reason'
- unit: "{datapoint}"
- sum:
- value_type: int
- monotonic: true
- enabled: true
-
- deltatocumulative.datapoints.linear:
+ deltatocumulative_datapoints:
description: total number of datapoints processed. may have 'error' attribute, if processing failed
unit: "{datapoint}"
sum:
value_type: int
monotonic: true
enabled: true
- deltatocumulative.gaps.length:
- description: total duration where data was expected but not received
- unit: "s"
- sum:
- value_type: int
- monotonic: true
- enabled: true
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
index 149431b897..5fd061355d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
@@ -8,14 +8,16 @@ import (
"sync"
"time"
+ "github.com/puzpuzpuz/xsync/v3"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/processor"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity"
- "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/staleness"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/delta"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry"
)
@@ -27,43 +29,49 @@ type Processor struct {
cfg Config
last state
- mtx sync.Mutex
+ aggr data.Aggregator
ctx context.Context
cancel context.CancelFunc
- stale staleness.Tracker
+ stale *xsync.MapOf[identity.Stream, time.Time]
tel telemetry.Metrics
}
func newProcessor(cfg *Config, tel telemetry.Metrics, next consumer.Metrics) *Processor {
ctx, cancel := context.WithCancel(context.Background())
+ limit := maps.Limit(int64(cfg.MaxStreams))
proc := Processor{
next: next,
cfg: *cfg,
last: state{
- nums: make(map[identity.Stream]pmetric.NumberDataPoint),
- hist: make(map[identity.Stream]pmetric.HistogramDataPoint),
- expo: make(map[identity.Stream]pmetric.ExponentialHistogramDataPoint),
+ ctx: limit,
+ nums: maps.New[identity.Stream, *mutex[pmetric.NumberDataPoint]](limit),
+ hist: maps.New[identity.Stream, *mutex[pmetric.HistogramDataPoint]](limit),
+ expo: maps.New[identity.Stream, *mutex[pmetric.ExponentialHistogramDataPoint]](limit),
},
+ aggr: delta.Aggregator{Aggregator: new(data.Adder)},
ctx: ctx,
cancel: cancel,
- stale: staleness.NewTracker(),
+ stale: xsync.NewMapOf[identity.Stream, time.Time](),
tel: tel,
}
- tel.WithTracked(proc.last.Len)
+ tel.WithTracked(proc.last.Size)
cfg.Metrics(tel)
return &proc
}
-func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
- p.mtx.Lock()
- defer p.mtx.Unlock()
+type vals struct {
+ nums *mutex[pmetric.NumberDataPoint]
+ hist *mutex[pmetric.HistogramDataPoint]
+ expo *mutex[pmetric.ExponentialHistogramDataPoint]
+}
+func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
now := time.Now()
const (
@@ -71,6 +79,12 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro
drop = false
)
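+ // zero holds pre-allocated blank datapoints handed to LoadOrStore; when one
+ // is actually stored, a fresh replacement is allocated further below.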
+ zero := vals{
+ nums: guard(pmetric.NewNumberDataPoint()),
+ hist: guard(pmetric.NewHistogramDataPoint()),
+ expo: guard(pmetric.NewExponentialHistogramDataPoint()),
+ }
+
metrics.Filter(md, func(m metrics.Metric) bool {
if m.AggregationTemporality() != pmetric.AggregationTemporalityDelta {
return keep
@@ -85,41 +99,70 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro
var attrs telemetry.Attributes
defer func() { p.tel.Datapoints().Inc(ctx, attrs...) }()
- // if stream new and state capacity reached, reject
- exist := p.last.Has(id)
- if !exist && p.last.Len() >= p.cfg.MaxStreams {
- attrs.Set(telemetry.Error("limit"))
- return drop
- }
-
- // stream is ok and active, update stale tracker
- p.stale.Refresh(now, id)
-
- // this is the first sample of the stream. there is nothing to
- // aggregate with, so clone this value into the state and done
- if !exist {
- p.last.BeginWith(id, dp)
- return keep
- }
-
- // aggregate with state from previous requests.
- // delta.AccumulateInto(state, dp) stores result in `state`.
- // this is then copied into `dp` (the value passed onto the pipeline)
var err error
switch dp := dp.(type) {
case pmetric.NumberDataPoint:
- state := p.last.nums[id]
- err = delta.AccumulateInto(state, dp)
- state.CopyTo(dp)
+ last, loaded := p.last.nums.LoadOrStore(id, zero.nums)
+ if maps.Exceeded(last, loaded) {
+ // state is full, reject stream
+ attrs.Set(telemetry.Error("limit"))
+ return drop
+ }
+
+ // stream is ok and active, update stale tracker
+ p.stale.Store(id, now)
+
+ if !loaded {
+ // cached zero was stored, alloc new one
+ zero.nums = guard(pmetric.NewNumberDataPoint())
+ }
+
+ last.use(func(last pmetric.NumberDataPoint) {
+ err = p.aggr.Numbers(last, dp)
+ last.CopyTo(dp)
+ })
case pmetric.HistogramDataPoint:
- state := p.last.hist[id]
- err = delta.AccumulateInto(state, dp)
- state.CopyTo(dp)
+ last, loaded := p.last.hist.LoadOrStore(id, zero.hist)
+ if maps.Exceeded(last, loaded) {
+ // state is full, reject stream
+ attrs.Set(telemetry.Error("limit"))
+ return drop
+ }
+
+ // stream is ok and active, update stale tracker
+ p.stale.Store(id, now)
+
+ if !loaded {
+ // cached zero was stored, alloc new one
+ zero.hist = guard(pmetric.NewHistogramDataPoint())
+ }
+
+ last.use(func(last pmetric.HistogramDataPoint) {
+ err = p.aggr.Histograms(last, dp)
+ last.CopyTo(dp)
+ })
case pmetric.ExponentialHistogramDataPoint:
- state := p.last.expo[id]
- err = delta.AccumulateInto(state, dp)
- state.CopyTo(dp)
+ last, loaded := p.last.expo.LoadOrStore(id, zero.expo)
+ if maps.Exceeded(last, loaded) {
+ // state is full, reject stream
+ attrs.Set(telemetry.Error("limit"))
+ return drop
+ }
+
+ // stream is ok and active, update stale tracker
+ p.stale.Store(id, now)
+
+ if !loaded {
+ // cached zero was stored, alloc new one
+ zero.expo = guard(pmetric.NewExponentialHistogramDataPoint())
+ }
+
+ last.use(func(last pmetric.ExponentialHistogramDataPoint) {
+ err = p.aggr.Exponential(last, dp)
+ last.CopyTo(dp)
+ })
}
+
if err != nil {
attrs.Set(telemetry.Cause(err))
return drop
@@ -152,12 +195,16 @@ func (p *Processor) Start(_ context.Context, _ component.Host) error {
case <-p.ctx.Done():
return
case <-tick.C:
- p.mtx.Lock()
- stale := p.stale.Collect(p.cfg.MaxStale)
- for _, id := range stale {
- p.last.Delete(id)
- }
- p.mtx.Unlock()
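+ // Sweep every tracked stream and drop state for any idle longer than MaxStale.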
+ now := time.Now()
+ p.stale.Range(func(id identity.Stream, last time.Time) bool {
+ if now.Sub(last) > p.cfg.MaxStale {
+ p.last.nums.LoadAndDelete(id)
+ p.last.hist.LoadAndDelete(id)
+ p.last.expo.LoadAndDelete(id)
+ p.stale.Delete(id)
+ }
+ return true
+ })
}
}
}()
@@ -177,38 +224,27 @@ func (p *Processor) Capabilities() consumer.Capabilities {
// state keeps a cumulative value, aggregated over time, per stream
type state struct {
- nums map[identity.Stream]pmetric.NumberDataPoint
- hist map[identity.Stream]pmetric.HistogramDataPoint
- expo map[identity.Stream]pmetric.ExponentialHistogramDataPoint
+ ctx maps.Context
+ nums *maps.Parallel[identity.Stream, *mutex[pmetric.NumberDataPoint]]
+ hist *maps.Parallel[identity.Stream, *mutex[pmetric.HistogramDataPoint]]
+ expo *maps.Parallel[identity.Stream, *mutex[pmetric.ExponentialHistogramDataPoint]]
}
-func (m state) Len() int {
- return len(m.nums) + len(m.hist) + len(m.expo)
+func (s state) Size() int {
+ return int(s.ctx.Size())
}
-func (m state) Has(id identity.Stream) bool {
- _, nok := m.nums[id]
- _, hok := m.hist[id]
- _, eok := m.expo[id]
- return nok || hok || eok
+type mutex[T any] struct {
+ mtx sync.Mutex
+ v T
}
-func (m state) Delete(id identity.Stream) {
- delete(m.nums, id)
- delete(m.hist, id)
- delete(m.expo, id)
+func (mtx *mutex[T]) use(do func(T)) {
+ mtx.mtx.Lock()
+ do(mtx.v)
+ mtx.mtx.Unlock()
}
-func (m state) BeginWith(id identity.Stream, dp any) {
- switch dp := dp.(type) {
- case pmetric.NumberDataPoint:
- m.nums[id] = pmetric.NewNumberDataPoint()
- dp.CopyTo(m.nums[id])
- case pmetric.HistogramDataPoint:
- m.hist[id] = pmetric.NewHistogramDataPoint()
- dp.CopyTo(m.hist[id])
- case pmetric.ExponentialHistogramDataPoint:
- m.expo[id] = pmetric.NewExponentialHistogramDataPoint()
- dp.CopyTo(m.expo[id])
- }
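+// guard wraps v in a mutex so concurrent ConsumeMetrics calls serialize
+// access to the same stream state.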
+func guard[T any](v T) *mutex[T] {
+ return &mutex[T]{v: v}
}
diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go
index 7276742ec9..9beaae26c2 100644
--- a/vendor/github.com/prometheus/common/config/headers.go
+++ b/vendor/github.com/prometheus/common/config/headers.go
@@ -24,9 +24,9 @@ import (
"strings"
)
-// reservedHeaders that change the connection, are set by Prometheus, or can
+// ReservedHeaders that change the connection, are set by Prometheus, or can
// be changed otherwise.
-var reservedHeaders = map[string]struct{}{
+var ReservedHeaders = map[string]struct{}{
"Authorization": {},
"Host": {},
"Content-Encoding": {},
@@ -72,7 +72,7 @@ func (h *Headers) SetDirectory(dir string) {
// Validate validates the Headers config.
func (h *Headers) Validate() error {
for n := range h.Headers {
- if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok {
+ if _, ok := ReservedHeaders[http.CanonicalHeaderKey(n)]; ok {
return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n))
}
}
diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go
index 63809083ac..5d3f1941bb 100644
--- a/vendor/github.com/prometheus/common/config/http_config.go
+++ b/vendor/github.com/prometheus/common/config/http_config.go
@@ -225,7 +225,7 @@ func (u *URL) UnmarshalJSON(data []byte) error {
// MarshalJSON implements the json.Marshaler interface for URL.
func (u URL) MarshalJSON() ([]byte, error) {
if u.URL != nil {
- return json.Marshal(u.URL.String())
+ return json.Marshal(u.String())
}
return []byte("null"), nil
}
@@ -251,7 +251,7 @@ func (o *OAuth2) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal((*plain)(o)); err != nil {
return err
}
- return o.ProxyConfig.Validate()
+ return o.Validate()
}
// UnmarshalJSON implements the json.Marshaler interface for URL.
@@ -260,7 +260,7 @@ func (o *OAuth2) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, (*plain)(o)); err != nil {
return err
}
- return o.ProxyConfig.Validate()
+ return o.Validate()
}
// SetDirectory joins any relative file paths with dir.
@@ -604,8 +604,8 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon
// The only timeout we care about is the configured scrape timeout.
// It is applied on request. So we leave out any timings here.
var rt http.RoundTripper = &http.Transport{
- Proxy: cfg.ProxyConfig.Proxy(),
- ProxyConnectHeader: cfg.ProxyConfig.GetProxyConnectHeader(),
+ Proxy: cfg.Proxy(),
+ ProxyConnectHeader: cfg.GetProxyConnectHeader(),
MaxIdleConns: 20000,
MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
DisableKeepAlives: !opts.keepAlivesEnabled,
@@ -914,8 +914,8 @@ func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret str
tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) {
return &http.Transport{
TLSClientConfig: tlsConfig,
- Proxy: rt.config.ProxyConfig.Proxy(),
- ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(),
+ Proxy: rt.config.Proxy(),
+ ProxyConnectHeader: rt.config.GetProxyConnectHeader(),
DisableKeepAlives: !rt.opts.keepAlivesEnabled,
MaxIdleConns: 20,
MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801
@@ -1508,7 +1508,7 @@ func (c *ProxyConfig) Proxy() (fn func(*http.Request) (*url.URL, error)) {
}
return
}
- if c.ProxyURL.URL != nil && c.ProxyURL.URL.String() != "" {
+ if c.ProxyURL.URL != nil && c.ProxyURL.String() != "" {
if c.NoProxy == "" {
c.proxyFunc = http.ProxyURL(c.ProxyURL.URL)
return
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b4607fe4d2..4067978a17 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn {
}
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
// labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+ (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index bd3a39e3e1..460f554f29 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
return a.ResolvedAt(time.Now())
}
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 73b7aa3e60..de83afe93e 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@ import (
)
const (
- // AlertNameLabel is the name of the label containing the an alert's name.
+ // AlertNameLabel is the name of the label containing the alert's name.
AlertNameLabel = "alertname"
// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool {
return false
}
for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ // TODO: Apply De Morgan's law. Make sure there are tests for this.
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
return false
}
}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 5766107cf9..a6b01755bd 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -27,13 +27,25 @@ import (
)
var (
- // NameValidationScheme determines the method of name validation to be used by
- // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
- // mode in isolation from other components that don't support UTF-8 may result
- // in bugs or other undefined behavior. This value can be set to
- // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
- // avoid need for locking, this value should be set once, ideally in an
- // init(), before multiple goroutines are started.
+ // NameValidationScheme determines the global default method of the name
+ // validation to be used by all calls to IsValidMetricName() and LabelName
+ // IsValid().
+ //
+ // Deprecated: This variable should not be used and might be removed in the
+ // far future. If you wish to stick to the legacy name validation use
+ // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+ // instead. This variable is here as an escape hatch for emergency cases,
+ // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+ // to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+ // the change. In such a case, a temporary assignment to the `LegacyValidation`
+ // value in an `init()` function in your main.go could be considered.
+ //
+ // Historically we opted for a global variable for feature gating different
+ // validation schemes in operations that were not otherwise easily adjustable
+ // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+ // Labels structure or package might have been a better choice. Given the
+ // change was made and many upgraded the common already, we live this as-is
+ // with this warning and learning for the future.
NameValidationScheme = UTF8Validation
// NameEscapingScheme defines the default way that names will be escaped when
@@ -50,7 +62,7 @@ var (
type ValidationScheme int
const (
- // LegacyValidation is a setting that requirets that metric and label names
+ // LegacyValidation is a setting that requires that all metric and label names
// conform to the original Prometheus character requirements described by
// MetricNameRE and LabelNameRE.
LegacyValidation ValidationScheme = iota
diff --git a/vendor/github.com/prometheus/common/promslog/slog.go b/vendor/github.com/prometheus/common/promslog/slog.go
index 6e8fbabce5..3bd817328a 100644
--- a/vendor/github.com/prometheus/common/promslog/slog.go
+++ b/vendor/github.com/prometheus/common/promslog/slog.go
@@ -25,73 +25,43 @@ import (
"path/filepath"
"strconv"
"strings"
+ "time"
)
+// LogStyle represents the common logging formats in the Prometheus ecosystem.
type LogStyle string
const (
SlogStyle LogStyle = "slog"
GoKitStyle LogStyle = "go-kit"
+
+ reservedKeyPrefix = "logged_"
)
var (
- LevelFlagOptions = []string{"debug", "info", "warn", "error"}
+ // LevelFlagOptions represents allowed logging levels.
+ LevelFlagOptions = []string{"debug", "info", "warn", "error"}
+ // FormatFlagOptions represents allowed formats.
FormatFlagOptions = []string{"logfmt", "json"}
- callerAddFunc = false
- defaultWriter = os.Stderr
- goKitStyleReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr {
- key := a.Key
- switch key {
- case slog.TimeKey:
- a.Key = "ts"
-
- // This timestamp format differs from RFC3339Nano by using .000 instead
- // of .999999999 which changes the timestamp from 9 variable to 3 fixed
- // decimals (.130 instead of .130987456).
- t := a.Value.Time()
- a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00"))
- case slog.SourceKey:
- a.Key = "caller"
- src, _ := a.Value.Any().(*slog.Source)
-
- switch callerAddFunc {
- case true:
- a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line))
- default:
- a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
- }
- case slog.LevelKey:
- a.Value = slog.StringValue(strings.ToLower(a.Value.String()))
- default:
- }
-
- return a
- }
- defaultReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr {
- key := a.Key
- switch key {
- case slog.TimeKey:
- t := a.Value.Time()
- a.Value = slog.TimeValue(t.UTC())
- case slog.SourceKey:
- src, _ := a.Value.Any().(*slog.Source)
- a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
- default:
- }
-
- return a
- }
+ defaultWriter = os.Stderr
)
-// AllowedLevel is a settable identifier for the minimum level a log entry
-// must be have.
-type AllowedLevel struct {
- s string
+// Level controls a logging level, with an info default.
+// It wraps slog.LevelVar with string-based level control.
+// Level is safe to be used concurrently.
+type Level struct {
lvl *slog.LevelVar
}
-func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
+// NewLevel returns a new Level.
+func NewLevel() *Level {
+ return &Level{
+ lvl: &slog.LevelVar{},
+ }
+}
+
+func (l *Level) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
type plain string
if err := unmarshal((*plain)(&s)); err != nil {
@@ -100,55 +70,60 @@ func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
if s == "" {
return nil
}
- lo := &AllowedLevel{}
- if err := lo.Set(s); err != nil {
+ if err := l.Set(s); err != nil {
return err
}
- *l = *lo
return nil
}
-func (l *AllowedLevel) String() string {
- return l.s
-}
-
-// Set updates the value of the allowed level.
-func (l *AllowedLevel) Set(s string) error {
- if l.lvl == nil {
- l.lvl = &slog.LevelVar{}
+// String returns the current level.
+func (l *Level) String() string {
+ switch l.lvl.Level() {
+ case slog.LevelDebug:
+ return "debug"
+ case slog.LevelInfo:
+ return "info"
+ case slog.LevelWarn:
+ return "warn"
+ case slog.LevelError:
+ return "error"
+ default:
+ return ""
}
+}
+// Set updates the logging level with the validation.
+func (l *Level) Set(s string) error {
switch strings.ToLower(s) {
case "debug":
l.lvl.Set(slog.LevelDebug)
- callerAddFunc = true
case "info":
l.lvl.Set(slog.LevelInfo)
- callerAddFunc = false
case "warn":
l.lvl.Set(slog.LevelWarn)
- callerAddFunc = false
case "error":
l.lvl.Set(slog.LevelError)
- callerAddFunc = false
default:
return fmt.Errorf("unrecognized log level %s", s)
}
- l.s = s
return nil
}
-// AllowedFormat is a settable identifier for the output format that the logger can have.
-type AllowedFormat struct {
+// Format controls a logging output format.
+// Not concurrency-safe.
+type Format struct {
s string
}
-func (f *AllowedFormat) String() string {
+// NewFormat creates a new Format.
+func NewFormat() *Format { return &Format{} }
+
+func (f *Format) String() string {
return f.s
}
// Set updates the value of the allowed format.
-func (f *AllowedFormat) Set(s string) error {
+func (f *Format) Set(s string) error {
switch s {
case "logfmt", "json":
f.s = s
@@ -160,18 +135,112 @@ func (f *AllowedFormat) Set(s string) error {
// Config is a struct containing configurable settings for the logger
type Config struct {
- Level *AllowedLevel
- Format *AllowedFormat
+ Level *Level
+ Format *Format
Style LogStyle
Writer io.Writer
}
+func newGoKitStyleReplaceAttrFunc(lvl *Level) func(groups []string, a slog.Attr) slog.Attr {
+ return func(groups []string, a slog.Attr) slog.Attr {
+ key := a.Key
+ switch key {
+ case slog.TimeKey, "ts":
+ if t, ok := a.Value.Any().(time.Time); ok {
+ a.Key = "ts"
+
+ // This timestamp format differs from RFC3339Nano by using .000 instead
+ // of .999999999 which changes the timestamp from 9 variable to 3 fixed
+ // decimals (.130 instead of .130987456).
+ a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00"))
+ } else {
+ // If we can't cast the any from the value to a
+ // time.Time, it means the caller logged
+ // another attribute with a key of `ts`.
+ // Prevent duplicate keys (necessary for proper
+ // JSON) by renaming the key to `logged_ts`.
+ a.Key = reservedKeyPrefix + key
+ }
+ case slog.SourceKey, "caller":
+ if src, ok := a.Value.Any().(*slog.Source); ok {
+ a.Key = "caller"
+ switch lvl.String() {
+ case "debug":
+ a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line))
+ default:
+ a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
+ }
+ } else {
+ // If we can't cast the any from the value to
+ // an *slog.Source, it means the caller logged
+ // another attribute with a key of `caller`.
+ // Prevent duplicate keys (necessary for proper
+ // JSON) by renaming the key to
+ // `logged_caller`.
+ a.Key = reservedKeyPrefix + key
+ }
+ case slog.LevelKey:
+ if lvl, ok := a.Value.Any().(slog.Level); ok {
+ a.Value = slog.StringValue(strings.ToLower(lvl.String()))
+ } else {
+ // If we can't cast the any from the value to
+ // an slog.Level, it means the caller logged
+ // another attribute with a key of `level`.
+ // Prevent duplicate keys (necessary for proper
+ // JSON) by renaming the key to `logged_level`.
+ a.Key = reservedKeyPrefix + key
+ }
+ default:
+ }
+ return a
+ }
+}
+
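The hook became a constructor taking the Level precisely because the caller format now depends on the configured level, which the old package-level goKitStyleReplaceAttrFunc could not observe. A sketch of the observable difference (import path assumed as above; timestamp and line numbers illustrative):

package main

import "github.com/prometheus/common/promslog"

func main() {
	lvl := promslog.NewLevel()
	_ = lvl.Set("debug")

	logger := promslog.New(&promslog.Config{
		Level: lvl,
		Style: promslog.GoKitStyle,
	})
	logger.Debug("hello")
	// At debug level the caller carries the function name:
	//   ts=2025-06-01T12:00:00.000Z caller=main.go(main.main):13 level=debug msg=hello
	// At info and above it collapses to file:line:
	//   ts=2025-06-01T12:00:00.000Z caller=main.go:13 level=info msg=hello
}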
+func defaultReplaceAttr(_ []string, a slog.Attr) slog.Attr {
+ key := a.Key
+ switch key {
+ case slog.TimeKey:
+ // Note that we do not change the timezone to UTC anymore.
+ if _, ok := a.Value.Any().(time.Time); !ok {
+ // If we can't cast the any from the value to a
+ // time.Time, it means the caller logged
+ // another attribute with a key of `time`.
+ // Prevent duplicate keys (necessary for proper
+ // JSON) by renaming the key to `logged_time`.
+ a.Key = reservedKeyPrefix + key
+ }
+ case slog.SourceKey:
+ if src, ok := a.Value.Any().(*slog.Source); ok {
+ a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
+ } else {
+ // If we can't cast the any from the value to
+ // an *slog.Source, it means the caller logged
+ // another attribute with a key of `source`.
+ // Prevent duplicate keys (necessary for proper
+ // JSON) by renaming the key to
+ // `logged_source`.
+ a.Key = reservedKeyPrefix + key
+ }
+ case slog.LevelKey:
+ if _, ok := a.Value.Any().(slog.Level); !ok {
+ // If we can't cast the any from the value to
+ // an slog.Level, it means the caller logged
+ // another attribute with a key of `level`.
+ // Prevent duplicate keys (necessary for proper
+ // JSON) by renaming the key to
+ // `logged_level`.
+ a.Key = reservedKeyPrefix + key
+ }
+ default:
+ }
+ return a
+}
+
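In both hooks the reservedKeyPrefix renaming (per the comments above, "logged_") exists to keep handler-owned keys unique, since duplicate keys would produce invalid JSON. A sketch of the effect with the default style (output shape illustrative):

package main

import "github.com/prometheus/common/promslog"

func main() {
	format := promslog.NewFormat()
	_ = format.Set("json")

	logger := promslog.New(&promslog.Config{Format: format})
	logger.Info("collision", "level", "custom")
	// The handler-owned level passes through; the user attribute is renamed:
	//   {"time":"...","level":"INFO","source":"main.go:11","msg":"collision","logged_level":"custom"}
}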
// New returns a new slog.Logger. Each logged line will be annotated
// with a timestamp. The output always goes to stderr.
func New(config *Config) *slog.Logger {
if config.Level == nil {
- config.Level = &AllowedLevel{}
- _ = config.Level.Set("info")
+ config.Level = NewLevel()
}
if config.Writer == nil {
@@ -181,11 +250,11 @@ func New(config *Config) *slog.Logger {
logHandlerOpts := &slog.HandlerOptions{
Level: config.Level.lvl,
AddSource: true,
- ReplaceAttr: defaultReplaceAttrFunc,
+ ReplaceAttr: defaultReplaceAttr,
}
if config.Style == GoKitStyle {
- logHandlerOpts.ReplaceAttr = goKitStyleReplaceAttrFunc
+ logHandlerOpts.ReplaceAttr = newGoKitStyleReplaceAttrFunc(config.Level)
}
if config.Format != nil && config.Format.s == "json" {
@@ -197,5 +266,5 @@ func New(config *Config) *slog.Logger {
// NewNopLogger is a convenience function to return an slog.Logger that writes
// to io.Discard.
func NewNopLogger() *slog.Logger {
- return slog.New(slog.NewTextHandler(io.Discard, nil))
+ return New(&Config{Writer: io.Discard})
}
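Taken together, a sketch of composing the renamed pieces (import path assumed as above). Note that NewNopLogger now routes through New instead of building a bare TextHandler, so even discarded output exercises the same handler and ReplaceAttr path:

package main

import "github.com/prometheus/common/promslog"

func main() {
	lvl := promslog.NewLevel()
	_ = lvl.Set("info")

	format := promslog.NewFormat()
	_ = format.Set("logfmt")

	logger := promslog.New(&promslog.Config{
		Level:  lvl,
		Format: format, // Writer left nil: output goes to stderr per New's doc comment
	})
	logger.Info("ready", "component", "promtail")

	// Same handler stack as New, but writing to io.Discard.
	nop := promslog.NewNopLogger()
	nop.Error("never seen")
}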
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go
index d417c15e0d..86ee6c3b73 100644
--- a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go
@@ -22,11 +22,13 @@ import (
"bytes"
_ "embed"
"net/http"
+ "strings"
"text/template"
)
// Config represents the configuration of the web listener.
type LandingConfig struct {
+ RoutePrefix string // The route prefix for the exporter.
HeaderColor string // Used for the landing page header.
CSS string // CSS style tag for the landing page.
Name string // The name of the exporter, generally suffixed by _exporter.
@@ -62,6 +64,7 @@ type LandingLinks struct {
type LandingPageHandler struct {
landingPage []byte
+ routePrefix string
}
var (
@@ -93,6 +96,15 @@ func NewLandingPage(c LandingConfig) (*LandingPageHandler, error) {
}
c.CSS = buf.String()
}
+ if c.RoutePrefix == "" {
+ c.RoutePrefix = "/"
+ } else if !strings.HasSuffix(c.RoutePrefix, "/") {
+ c.RoutePrefix += "/"
+ }
+ // Strip leading '/' from Links if present
+ for i, link := range c.Links {
+ c.Links[i].Address = strings.TrimPrefix(link.Address, "/")
+ }
t := template.Must(template.New("landing page").Parse(landingPagehtmlContent))
buf.Reset()
@@ -102,11 +114,12 @@ func NewLandingPage(c LandingConfig) (*LandingPageHandler, error) {
return &LandingPageHandler{
landingPage: buf.Bytes(),
+ routePrefix: c.RoutePrefix,
}, nil
}
func (h *LandingPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path != "/" {
+ if r.URL.Path != h.routePrefix {
http.NotFound(w, r)
return
}
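For the exporter-toolkit change, a sketch of serving the landing page under a non-root prefix (the "/example" prefix and port are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/exporter-toolkit/web"
)

func main() {
	landingConfig := web.LandingConfig{
		Name:        "Example Exporter",
		Description: "Landing page behind a reverse-proxy prefix.",
		RoutePrefix: "/example", // normalized to "/example/" by NewLandingPage
		Links: []web.LandingLinks{
			// The leading '/' is stripped so the link resolves relative to the prefix.
			{Address: "/metrics", Text: "Metrics"},
		},
	}
	landingPage, err := web.NewLandingPage(landingConfig)
	if err != nil {
		panic(err)
	}
	// ServeHTTP matches the prefix exactly: "/example" without the trailing
	// slash, and any deeper path, both return 404.
	http.Handle("/example/", landingPage)
	_ = http.ListenAndServe(":9100", nil)
}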
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html
index e1ac0aecdd..829f4a9c73 100644
--- a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html
@@ -15,13 +15,13 @@