fix(deps): update module github.com/prometheus/common to v0.64.0 (main) (#16750)

Signed-off-by: Paul Rogers <129207811+paul1r@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Paul Rogers <paul.rogers@grafana.com>
Co-authored-by: Paul Rogers <129207811+paul1r@users.noreply.github.com>
pull/18104/head
renovate[bot] 7 months ago committed by GitHub
parent 1effa30851
commit 8b265ede15
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 2
      clients/pkg/promtail/promtail_test.go
  2. 3
      clients/pkg/promtail/wal/wal.go
  3. 54
      go.mod
  4. 181
      go.sum
  5. 6
      pkg/chunkenc/symbols.go
  6. 3
      pkg/compactor/deletion/job_runner_test.go
  7. 18
      pkg/distributor/distributor.go
  8. 10
      pkg/distributor/distributor_test.go
  9. 3
      pkg/ingester/checkpoint.go
  10. 3
      pkg/ingester/wal.go
  11. 3
      pkg/loghttp/push/otlp.go
  12. 6
      pkg/querier/queryrange/queryrangebase/results_cache.go
  13. 84
      pkg/ruler/base/api_test.go
  14. 14
      pkg/ruler/base/manager.go
  15. 110
      pkg/ruler/base/mapper_test.go
  16. 34
      pkg/ruler/compat.go
  17. 38
      pkg/ruler/compat_test.go
  18. 6
      pkg/ruler/grouploader.go
  19. 14
      pkg/ruler/grouploader_test.go
  20. 26
      pkg/ruler/rulespb/compat.go
  21. 2
      pkg/ruler/rulestore/bucketclient/bucket_client_test.go
  22. 6
      pkg/ruler/rulestore/local/local_test.go
  23. 6
      pkg/ruler/storage/instance/instance.go
  24. 5
      pkg/ruler/storage/wal/wal.go
  25. 3
      pkg/storage/stores/shipper/indexshipper/tsdb/head_wal.go
  26. 10
      pkg/tool/commands/rules.go
  27. 21
      pkg/tool/commands/rules_test.go
  28. 8
      pkg/tool/rules/compare.go
  29. 129
      pkg/tool/rules/compare_test.go
  30. 8
      pkg/tool/rules/parser_test.go
  31. 42
      pkg/tool/rules/rules.go
  32. 78
      pkg/tool/rules/rules_test.go
  33. 14
      tools/lambda-promtail/go.mod
  34. 28
      tools/lambda-promtail/go.sum
  35. 14
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
  36. 7
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
  37. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
  38. 14
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
  39. 12
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
  40. 18
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
  41. 11
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
  42. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
  43. 5
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
  44. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
  45. 7
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
  46. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
  47. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
  48. 433
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
  49. 28
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
  50. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
  51. 4
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
  52. 12
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
  53. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
  54. 2
      vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
  55. 51
      vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
  56. 11
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
  57. 9
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
  58. 61
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
  59. 7
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
  60. 3
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
  61. 8
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
  62. 2
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
  63. 3
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
  64. 20
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
  65. 55
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
  66. 66
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
  67. 4
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
  68. 2
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
  69. 28
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go
  70. 37
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go
  71. 717
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go
  72. 25
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go
  73. 13
      vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
  74. 67
      vendor/github.com/digitalocean/godo/CHANGELOG.md
  75. 4
      vendor/github.com/digitalocean/godo/apps.gen.go
  76. 8
      vendor/github.com/digitalocean/godo/apps_accessors.go
  77. 76
      vendor/github.com/digitalocean/godo/databases.go
  78. 6
      vendor/github.com/digitalocean/godo/godo.go
  79. 134
      vendor/github.com/digitalocean/godo/kubernetes.go
  80. 82
      vendor/github.com/digitalocean/godo/load_balancers.go
  81. 415
      vendor/github.com/digitalocean/godo/partner_network_connect.go
  82. 8
      vendor/github.com/digitalocean/godo/snapshots.go
  83. 186
      vendor/github.com/digitalocean/godo/spaces_keys.go
  84. 18
      vendor/github.com/go-viper/mapstructure/v2/.editorconfig
  85. 4
      vendor/github.com/go-viper/mapstructure/v2/.envrc
  86. 6
      vendor/github.com/go-viper/mapstructure/v2/.gitignore
  87. 23
      vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
  88. 104
      vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
  89. 21
      vendor/github.com/go-viper/mapstructure/v2/LICENSE
  90. 80
      vendor/github.com/go-viper/mapstructure/v2/README.md
  91. 630
      vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
  92. 472
      vendor/github.com/go-viper/mapstructure/v2/flake.lock
  93. 39
      vendor/github.com/go-viper/mapstructure/v2/flake.nix
  94. 11
      vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
  95. 9
      vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
  96. 61
      vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
  97. 1620
      vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
  98. 44
      vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
  99. 10
      vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
  100. 26
      vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md
  101. Some files were not shown because too many files have changed in this diff Show More

@ -536,7 +536,7 @@ func parsePromMetrics(t *testing.T, bytes []byte, contentType string, metricName
case textparse.EntrySeries:
var res labels.Labels
_, _, v := pr.Series()
pr.Metric(&res)
pr.Labels(&res)
switch res.Get(labels.MetricName) {
case metricName:
rb[res.Get(label)] = v

@ -8,6 +8,7 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/grafana/loki/v3/pkg/ingester/wal"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@ -38,7 +39,7 @@ type wrapper struct {
func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (WAL, error) {
// TODO: We should fine-tune the WAL instantiated here to allow some buffering of written entries, but not written to disk
// yet. This will attest for the lack of buffering in the channel Writer exposes.
tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(log), registerer, cfg.Dir, wlog.DefaultSegmentSize, wlog.CompressionNone)
tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(log), registerer, cfg.Dir, wlog.DefaultSegmentSize, compression.None)
if err != nil {
return nil, fmt.Errorf("failed to create tsdb WAL: %w", err)
}

@ -84,8 +84,8 @@ require (
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.62.0
github.com/prometheus/prometheus v0.302.1
github.com/prometheus/common v0.64.0
github.com/prometheus/prometheus v0.304.1
github.com/redis/go-redis/v9 v9.10.0
github.com/segmentio/fasthash v1.0.3
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
@ -138,7 +138,7 @@ require (
github.com/parquet-go/parquet-go v0.25.1
github.com/prometheus/alertmanager v0.28.1
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea
github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb
github.com/prometheus/sigv4 v0.1.2
github.com/richardartoul/molecule v1.0.0
github.com/schollz/progressbar/v3 v3.18.0
@ -195,15 +195,20 @@ require (
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-redsync/redsync/v4 v4.13.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/google/flatbuffers v25.2.10+incompatible // indirect
github.com/gophercloud/gophercloud/v2 v2.4.0 // indirect
github.com/gophercloud/gophercloud/v2 v2.7.0 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
github.com/grafana/otel-profiling-go v0.5.1 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/jaegertracing/jaeger-idl v0.5.0 // indirect
github.com/kamstrup/intmap v0.5.1 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.1.2 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
@ -220,14 +225,16 @@ require (
github.com/muesli/termenv v0.16.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/ncw/swift v1.0.53 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 // indirect
github.com/oklog/ulid/v2 v2.1.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/pires/go-proxyproto v0.7.0 // indirect
github.com/pkg/xattr v0.4.10 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sahilm/fuzzy v0.1.1 // indirect
@ -243,15 +250,20 @@ require (
github.com/zeebo/errs v1.4.0 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/component v0.118.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect
go.opentelemetry.io/collector/consumer v1.24.0 // indirect
go.opentelemetry.io/collector/pipeline v0.118.0 // indirect
go.opentelemetry.io/collector/processor v0.118.0 // indirect
go.opentelemetry.io/collector/component v1.30.0 // indirect
go.opentelemetry.io/collector/confmap v1.30.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect
go.opentelemetry.io/collector/consumer v1.30.0 // indirect
go.opentelemetry.io/collector/featuregate v1.30.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.124.0 // indirect
go.opentelemetry.io/collector/pipeline v0.124.0 // indirect
go.opentelemetry.io/collector/processor v1.30.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 // indirect
go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 // indirect
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
go.opentelemetry.io/otel/log v0.11.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
@ -268,9 +280,9 @@ require (
cloud.google.com/go/compute/metadata v0.7.0 // indirect
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/longrunning v0.6.7 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 // indirect
@ -280,7 +292,7 @@ require (
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.1 // indirect
@ -309,7 +321,7 @@ require (
github.com/dennwc/varint v1.0.0 // indirect
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/digitalocean/godo v1.132.0 // indirect
github.com/digitalocean/godo v1.144.0 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
@ -379,7 +391,7 @@ require (
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.63 // indirect
github.com/miekg/dns v1.1.65 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
@ -393,7 +405,7 @@ require (
github.com/oschwald/maxminddb-golang v1.13.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/exporter-toolkit v0.13.2 // indirect
github.com/prometheus/exporter-toolkit v0.14.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rs/xid v1.6.0 // indirect
@ -413,7 +425,7 @@ require (
go.etcd.io/etcd/client/v3 v3.5.4 // indirect
go.mongodb.org/mongo-driver v1.17.2 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/collector/semconv v0.118.0 // indirect
go.opentelemetry.io/collector/semconv v0.124.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
go.opentelemetry.io/otel v1.36.0
@ -431,7 +443,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
k8s.io/api v0.32.3 // indirect
k8s.io/client-go v0.32.1 // indirect
k8s.io/client-go v0.32.3 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
rsc.io/binaryregexp v0.2.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect

181
go.sum

@ -66,14 +66,14 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0/go.mod h1:kUjrAo8bgEwLeZ/CmHqNl3Z/kPm7y6FKfxxK0izYUg4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@ -113,8 +113,8 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 h1:H5xDQaE3XowWfhZRUpnfC+rGZMEVoSiji+b+/HFAPU4=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
@ -355,8 +355,8 @@ github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsY
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ=
github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/digitalocean/godo v1.144.0 h1:rDCsmpwcDe5egFQ3Ae45HTde685/GzX037mWRMPufW0=
github.com/digitalocean/godo v1.144.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
@ -513,8 +513,8 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA=
github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ=
github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E=
github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@ -647,8 +647,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/gophercloud/gophercloud/v2 v2.4.0 h1:XhP5tVEH3ni66NSNK1+0iSO6kaGPH/6srtx6Cr+8eCg=
github.com/gophercloud/gophercloud/v2 v2.4.0/go.mod h1:uJWNpTgJPSl2gyzJqcU/pIAhFUWvIkp8eE8M15n9rs4=
github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
@ -695,8 +695,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
@ -763,8 +763,8 @@ github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc
github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY=
github.com/heroku/x v0.4.3 h1:HF1P4Mu79BKDVk4pt+oRDpcOSTRTpHq28RYAOkuJmds=
github.com/heroku/x v0.4.3/go.mod h1:htQnSDQPP7rNbrOQ8rczL7tbdNtQHXCPoSxYomu+eI8=
github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOydS7Ju4BERB4Q=
github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
github.com/hetznercloud/hcloud-go/v2 v2.21.0 h1:wUpQT+fgAxIcdMtFvuCJ78ziqc/VARubpOQPQyj4Q84=
github.com/hetznercloud/hcloud-go/v2 v2.21.0/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@ -781,8 +781,8 @@ github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68Z
github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
github.com/influxdata/telegraf v1.34.1 h1:BWnIm52buIBv1hPRoMFNBE/wuoSZ0Yeny4EP0ngMSbE=
github.com/influxdata/telegraf v1.34.1/go.mod h1:F/4F/nmAKRZlDNhrD5aIQi+AaiHaiNKku0kJFsF6iag=
github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k=
github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/ionos-cloud/sdk-go/v6 v6.3.3 h1:q33Sw1ZqsvqDkFaKG53dGk7BCOvPCPbGZpYqsF6tdjw=
github.com/ionos-cloud/sdk-go/v6 v6.3.3/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE=
github.com/jaegertracing/jaeger-idl v0.5.0/go.mod h1:ON90zFo9eoyXrt9F/KN8YeF3zxcnujaisMweFY/rg5k=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
@ -825,8 +825,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kamstrup/intmap v0.5.1 h1:ENGAowczZA+PJPYYlreoqJvWgQVtAmX1l899WfYFVK0=
github.com/kamstrup/intmap v0.5.1/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@ -839,8 +839,8 @@ github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
@ -870,8 +870,8 @@ github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39
github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU=
github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
github.com/linode/linodego v1.49.0 h1:MNd3qwvQzbXB5mCpvdCqlUIu1RPA9oC+50LyB9kK+GQ=
github.com/linode/linodego v1.49.0/go.mod h1:B+HAM3//4w1wOS0BwdaQBKwBxlfe6kYJ7bSC6jJ/xtc=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI=
@ -906,8 +906,8 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
@ -990,6 +990,8 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -1001,14 +1003,14 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 h1:jOG1ceAx+IATloKXHsE2Cy88XTgqPB/hiXicOrxENx8=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1/go.mod h1:mtNCoy09iO1f2zy5bEqkyRfRPaNKea57yK63cfHixts=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1 h1:G2daAIXiQhAwQSz9RK71QsBH9rmH/m/vdkFuGIEPfS4=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1/go.mod h1:/WAA1PKvHNz7E5SrtGg2KfAWl/PrmS0FVYOanoGxk0I=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 h1:mMVzpkpy6rKL1Q/xXNogZVtWebIlxTRzhsgp3b9ioCM=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1/go.mod h1:jM8Gsd0fIiwRzWrzd7Gm6PZYi5AgHPRkz0625Rtqyxo=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 h1:gmmzhgewk2fU0Md0vmaDEFgfRycfCfjgPvMA4SEdKiU=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1/go.mod h1:AsQJBuUUY1/yqK2c87hv4deeteaKwktwLIfQCN2OGk4=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@ -1035,14 +1037,15 @@ github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII
github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU=
github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o=
github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw=
github.com/ovh/go-ovh v1.7.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo=
github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@ -1101,14 +1104,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ=
github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g=
github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea h1:NacrTIqDsM6iOtfex6OAFvVmtxjbiLC2a34/ba6nM9Q=
github.com/prometheus/otlptranslator v0.0.0-20250604181132-1aca92dfe1ea/go.mod h1:v1PzmPjSnNkmZSDvKJ9OmsWcmWMEF5+JdllEcXrRfzM=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb h1:wuS7VydG/rDWTbYMp07paPv3R1hiPC9WgingWs+xgi0=
github.com/prometheus/otlptranslator v0.0.0-20250414121140-35db323fe9fb/go.mod h1:M7gjuJF83qnpgElJIPfhiK+YAHlvot5epcAV+Rie7eo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -1119,10 +1122,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.302.1 h1:xqVdrwrB4WNpdgJqxsz5loqFWNUZitsK8myqLuSZ6Ag=
github.com/prometheus/prometheus v0.302.1/go.mod h1:YcyCoTbUR/TM8rY3Aoeqr0AWTu/pu1Ehh+trpX3eRzg=
github.com/prometheus/prometheus v0.304.1 h1:e4kpJMb2Vh/PcR6LInake+ofcvFYHT+bCfmBvOkaZbY=
github.com/prometheus/prometheus v0.304.1/go.mod h1:ioGx2SGKTY+fLnJSQCdTHqARVldGNS8OlIe3kvp98so=
github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8=
github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@ -1149,8 +1154,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA=
github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA=
github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
@ -1312,38 +1317,44 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w=
go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M=
go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE=
go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504=
go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8=
go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU=
go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM=
go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE=
go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w=
go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec=
go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU=
go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s=
go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY=
go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ=
go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY=
go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg=
go.opentelemetry.io/collector/component v1.30.0 h1:HXjqBHaQ47/EEuWdnkjr4Y3kRWvmyWIDvqa1Q262Fls=
go.opentelemetry.io/collector/component v1.30.0/go.mod h1:vfM9kN+BM6oHBXWibquiprz8CVawxd4/aYy3nbhme3E=
go.opentelemetry.io/collector/component/componentstatus v0.124.0 h1:0WHaANNktxLIk+lN+CtgPBESI1MJBrfVW/LvNCbnMQ4=
go.opentelemetry.io/collector/component/componentstatus v0.124.0/go.mod h1:a/wa8nxJGWOGuLwCN8gHCzFHCaUVZ+VyUYuKz9Yaq38=
go.opentelemetry.io/collector/component/componenttest v0.124.0 h1:Wsc+DmDrWTFs/aEyjDA3slNwV+h/0NOyIR5Aywvr6Zw=
go.opentelemetry.io/collector/component/componenttest v0.124.0/go.mod h1:NQ4ATOzMFc7QA06B993tq8o27DR0cu/JR/zK7slGJ3E=
go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7YVnyAEN2dAcM=
go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA=
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8=
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0=
go.opentelemetry.io/collector/consumer v1.30.0 h1:Nn6kFTH+EJbv13E0W+sNvWrTgbiFCRv8f6DaA2F1DQs=
go.opentelemetry.io/collector/consumer v1.30.0/go.mod h1:edRyfk61ugdhCQ93PBLRZfYMVWjdMPpKP8z5QLyESf0=
go.opentelemetry.io/collector/consumer/consumertest v0.124.0 h1:2arChG4RPrHW3lfVWlK/KDF7Y7qkUm/YAiBXh8oTue0=
go.opentelemetry.io/collector/consumer/consumertest v0.124.0/go.mod h1:Hlu+EXbINHxVAyIT1baKO2d0j5odR3fLlLAiaP+JqQg=
go.opentelemetry.io/collector/consumer/xconsumer v0.124.0 h1:/cut96EWVNoz6lIeGI9+EzS6UClMtnZkx5YIpkD0Xe0=
go.opentelemetry.io/collector/consumer/xconsumer v0.124.0/go.mod h1:fHH/MpzFCRNk/4foiYE6BoXQCAMf5sJTO35uvzVrrd4=
go.opentelemetry.io/collector/featuregate v1.30.0 h1:mx7+iP/FQnY7KO8qw/xE3Qd1MQkWcU8VgcqLNrJ8EU8=
go.opentelemetry.io/collector/featuregate v1.30.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
go.opentelemetry.io/collector/internal/telemetry v0.124.0 h1:kzd1/ZYhLj4bt2pDB529mL4rIRrRacemXodFNxfhdWk=
go.opentelemetry.io/collector/internal/telemetry v0.124.0/go.mod h1:ZjXjqV0dJ+6D4XGhTOxg/WHjnhdmXsmwmUSgALea66Y=
go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8=
go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI=
go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8=
go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ=
go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM=
go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw=
go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc=
go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74=
go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws=
go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4=
go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU=
go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU=
go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM=
go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU=
go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w=
go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
go.opentelemetry.io/collector/pdata/pprofile v0.124.0 h1:ZjL9wKqzP4BHj0/F1jfGxs1Va8B7xmYayipZeNVoWJE=
go.opentelemetry.io/collector/pdata/pprofile v0.124.0/go.mod h1:1EN3Gw5LSI4fSVma/Yfv/6nqeuYgRTm1/kmG5nE5Oyo=
go.opentelemetry.io/collector/pdata/testdata v0.124.0 h1:vY+pWG7CQfzzGSB5+zGYHQOltRQr59Ek9QiPe+rI+NY=
go.opentelemetry.io/collector/pdata/testdata v0.124.0/go.mod h1:lNH48lGhGv4CYk27fJecpsR1zYHmZjKgNrAprwjym0o=
go.opentelemetry.io/collector/pipeline v0.124.0 h1:hKvhDyH2GPnNO8LGL34ugf36sY7EOXPjBvlrvBhsOdw=
go.opentelemetry.io/collector/pipeline v0.124.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
go.opentelemetry.io/collector/processor v1.30.0 h1:dxmu+sO6MzQydyrf2CON5Hm1KU7yV4ofH1stmreUtPk=
go.opentelemetry.io/collector/processor v1.30.0/go.mod h1:DjXAgelT8rfIWCTJP5kiPpxPqz4JLE1mJwsE2kJMTk8=
go.opentelemetry.io/collector/processor/processortest v0.124.0 h1:qcyo0dSWmgpNFxjObsKk3Rd/wWV8CkMevd+jApkTQWE=
go.opentelemetry.io/collector/processor/processortest v0.124.0/go.mod h1:1YDTxd4c/uVU3Ui1+AzvYW94mo5DbhNmB1xSof6zvD0=
go.opentelemetry.io/collector/processor/xprocessor v0.124.0 h1:KAe8gIje8TcB8varZ4PDy0HV5xX5rNdaQ7q46BE915w=
go.opentelemetry.io/collector/processor/xprocessor v0.124.0/go.mod h1:ItJBBlR6/141vg1v4iRrcsBrGjPCgmXAztxS2x2YkdI=
go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA=
go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU=
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0=
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
@ -1361,12 +1372,14 @@ go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y=
go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
@ -1850,8 +1863,8 @@ k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=

@ -45,14 +45,12 @@ type symbolizer struct {
readOnly bool
// Runtime-only map to track which symbols are label names and have been normalized
normalizedNames map[uint32]string
normalizer *otlptranslator.LabelNamer
}
func newSymbolizer() *symbolizer {
return &symbolizer{
symbolsMap: map[string]uint32{},
normalizedNames: map[uint32]string{},
normalizer: &otlptranslator.LabelNamer{},
}
}
@ -125,7 +123,7 @@ func (s *symbolizer) Lookup(syms symbols, buf *log.BufferedLabelsBuilder) labels
} else {
// If we haven't seen this name before, look it up and normalize it
name = s.lookup(symbol.Name)
normalized := s.normalizer.Build(name)
normalized := otlptranslator.NormalizeLabel(name)
s.mtx.Lock()
s.normalizedNames[symbol.Name] = normalized
s.mtx.Unlock()
@ -340,7 +338,6 @@ func symbolizerFromCheckpoint(b []byte) *symbolizer {
// Labels are key-value pairs, preallocate to half the number to store just the keys,
// likely less memory than the exponential growth Go will do.
normalizedNames: make(map[uint32]string, numLabels/2),
normalizer: &otlptranslator.LabelNamer{},
}
for i := 0; i < numLabels; i++ {
@ -371,7 +368,6 @@ func symbolizerFromEnc(b []byte, pool compression.ReaderPool) (*symbolizer, erro
labels: make([]string, 0, numLabels),
// Same as symbolizerFromCheckpoint
normalizedNames: make(map[uint32]string, numLabels/2),
normalizer: &otlptranslator.LabelNamer{},
compressedSize: len(b),
readOnly: true,
}

@ -11,7 +11,6 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/push"
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/compactor/jobqueue"
"github.com/grafana/loki/v3/pkg/compactor/retention"
@ -20,6 +19,8 @@ import (
"github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/pkg/push"
)
type mockChunkClient struct {

@ -15,9 +15,16 @@ import (
"time"
"unicode/utf8"
otlptranslate "github.com/prometheus/otlptranslator"
"go.opentelemetry.io/otel/trace"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gogo/status"
"github.com/prometheus/prometheus/model/labels"
"github.com/twmb/franz-go/pkg/kgo"
"google.golang.org/grpc/codes"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/kv"
"github.com/grafana/dskit/limiter"
@ -29,12 +36,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/otlptranslator"
"github.com/prometheus/prometheus/model/labels"
"github.com/twmb/franz-go/pkg/kgo"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"google.golang.org/grpc/codes"
"github.com/grafana/loki/v3/pkg/analytics"
"github.com/grafana/loki/v3/pkg/compactor/retention"
@ -211,9 +213,6 @@ type Distributor struct {
kafkaWriteBytesTotal prometheus.Counter
kafkaWriteLatency prometheus.Histogram
kafkaRecordsPerRequest prometheus.Histogram
// OTLP Label Normalizer
normalizer *otlptranslator.LabelNamer
}
// New a distributor creates.
@ -374,7 +373,6 @@ func New(
partitionRing: partitionRing,
ingestLimits: newIngestLimits(limitsFrontendClient, registerer),
numMetadataPartitions: numMetadataPartitions,
normalizer: &otlptranslator.LabelNamer{},
}
if overrides.IngestionRateStrategy() == validation.GlobalIngestionRateStrategy {
@ -648,7 +646,7 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
var normalized string
structuredMetadata := logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)
for i := range entry.StructuredMetadata {
normalized = d.normalizer.Build(structuredMetadata[i].Name)
normalized = otlptranslate.NormalizeLabel(structuredMetadata[i].Name)
if normalized != structuredMetadata[i].Name {
structuredMetadata[i].Name = normalized
d.tenantPushSanitizedStructuredMetadata.WithLabelValues(tenantID).Inc()

@ -14,6 +14,10 @@ import (
"time"
"unicode/utf8"
"github.com/prometheus/client_golang/prometheus/testutil"
otlptranslate "github.com/prometheus/otlptranslator"
"github.com/c2h5oh/datasize"
"github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
@ -26,9 +30,7 @@ import (
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/user"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/prometheus/otlptranslator"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -2067,8 +2069,6 @@ func (i *mockIngester) Push(_ context.Context, in *logproto.PushRequest, _ ...gr
time.Sleep(i.succeedAfter)
}
normalizer := &otlptranslator.LabelNamer{}
i.mu.Lock()
defer i.mu.Unlock()
for _, s := range in.Streams {
@ -2077,7 +2077,7 @@ func (i *mockIngester) Push(_ context.Context, in *logproto.PushRequest, _ ...gr
if strings.ContainsRune(sm.Value, utf8.RuneError) {
return nil, fmt.Errorf("sm value was not sanitized before being pushed to ignester, invalid utf 8 rune %d", utf8.RuneError)
}
if sm.Name != normalizer.Build(sm.Name) {
if sm.Name != otlptranslate.NormalizeLabel(sm.Name) {
return nil, fmt.Errorf("sm name was not sanitized before being sent to ingester, contained characters %s", sm.Name)
}

@ -18,6 +18,7 @@ import (
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
prompool "github.com/prometheus/prometheus/util/pool"
"github.com/grafana/loki/v3/pkg/chunkenc"
@ -348,7 +349,7 @@ func (w *WALCheckpointWriter) Advance() (bool, error) {
return false, fmt.Errorf("create checkpoint dir: %w", err)
}
checkpoint, err := wlog.NewSize(util_log.SlogFromGoKit(log.With(util_log.Logger, "component", "checkpoint_wal")), nil, checkpointDirTemp, walSegmentSize, wlog.CompressionNone)
checkpoint, err := wlog.NewSize(util_log.SlogFromGoKit(log.With(util_log.Logger, "component", "checkpoint_wal")), nil, checkpointDirTemp, walSegmentSize, compression.None)
if err != nil {
return false, fmt.Errorf("open checkpoint: %w", err)
}

@ -10,6 +10,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/grafana/loki/v3/pkg/ingester/wal"
"github.com/grafana/loki/v3/pkg/util/flagext"
@ -82,7 +83,7 @@ func newWAL(cfg WALConfig, registerer prometheus.Registerer, metrics *ingesterMe
return noopWAL{}, nil
}
tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(util_log.Logger), registerer, cfg.Dir, walSegmentSize, wlog.CompressionNone)
tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(util_log.Logger), registerer, cfg.Dir, walSegmentSize, compression.None)
if err != nil {
return nil, err
}

@ -522,13 +522,12 @@ func attributesToLabels(attrs pcommon.Map, prefix string) push.LabelsAdapter {
func attributeToLabels(k string, v pcommon.Value, prefix string) push.LabelsAdapter {
var labelsAdapter push.LabelsAdapter
normalizer := &otlptranslator.LabelNamer{}
keyWithPrefix := k
if prefix != "" {
keyWithPrefix = prefix + "_" + k
}
keyWithPrefix = normalizer.Build(keyWithPrefix)
keyWithPrefix = otlptranslator.NormalizeLabel(keyWithPrefix)
typ := v.Type()
if typ == pcommon.ValueTypeMap {

@ -267,7 +267,11 @@ func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool {
}
// This resolves the start() and end() used with the @ modifier.
expr = promql.PreprocessExpr(expr, r.GetStart(), r.GetEnd())
expr, err = promql.PreprocessExpr(expr, r.GetStart(), r.GetEnd())
if err != nil {
level.Warn(s.logger).Log("msg", "failed to preprocess query, considering @ modifier as not cachable", "query", query, "err", err)
return false
}
end := r.GetEnd().UnixMilli()
atModCachable := true

@ -654,55 +654,19 @@ func TestRuler_GetRulesLabelFilter(t *testing.T) {
"test": {
{
Name: "group1",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{
Value: "UP_RULE",
Tag: "!!str",
Kind: 8,
Line: 5,
Column: 19,
},
Expr: yaml.Node{
Value: "up",
Tag: "!!str",
Kind: 8,
Line: 6,
Column: 17,
},
Record: "UP_RULE",
Expr: "up",
},
{
Alert: yaml.Node{
Value: "UP_ALERT",
Tag: "!!str",
Kind: 8,
Line: 7,
Column: 18,
},
Expr: yaml.Node{
Value: "up < 1",
Tag: "!!str",
Kind: 8,
Line: 8,
Column: 17,
},
Alert: "UP_ALERT",
Expr: "up < 1",
Labels: map[string]string{"foo": "bar"},
},
{
Alert: yaml.Node{
Value: "DOWN_ALERT",
Tag: "!!str",
Kind: 8,
Line: 11,
Column: 18,
},
Expr: yaml.Node{
Value: "down < 1",
Tag: "!!str",
Kind: 8,
Line: 12,
Column: 17,
},
Alert: "DOWN_ALERT",
Expr: "down < 1",
Labels: map[string]string{"namespace": "delta"},
},
},
@ -714,39 +678,15 @@ func TestRuler_GetRulesLabelFilter(t *testing.T) {
"test": {
{
Name: "group1",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{
Value: "UP_ALERT",
Tag: "!!str",
Kind: 8,
Line: 5,
Column: 18,
},
Expr: yaml.Node{
Value: "up < 1",
Tag: "!!str",
Kind: 8,
Line: 6,
Column: 17,
},
Alert: "UP_ALERT",
Expr: "up < 1",
Labels: map[string]string{"foo": "bar"},
},
{
Alert: yaml.Node{
Value: "DOWN_ALERT",
Tag: "!!str",
Kind: 8,
Line: 9,
Column: 18,
},
Expr: yaml.Node{
Value: "down < 1",
Tag: "!!str",
Kind: 8,
Line: 10,
Column: 17,
},
Alert: "DOWN_ALERT",
Expr: "down < 1",
Labels: map[string]string{"namespace": "delta"},
},
},

@ -20,6 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/net/context/ctxhttp"
"gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/ruler/rulespb"
)
@ -300,12 +301,17 @@ func (*DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []error
}
for i, r := range g.Rules {
for _, err := range r.Validate() {
ruleNode := rulefmt.RuleNode{
Record: yaml.Node{Value: r.Record},
Alert: yaml.Node{Value: r.Alert},
Expr: yaml.Node{Value: r.Expr},
}
for _, err := range r.Validate(ruleNode) {
var ruleName string
if r.Alert.Value != "" {
ruleName = r.Alert.Value
if r.Alert != "" {
ruleName = r.Alert
} else {
ruleName = r.Record.Value
ruleName = r.Record
}
errs = append(errs, &rulefmt.Error{
Group: g.Name,

@ -10,7 +10,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
var (
@ -36,31 +35,28 @@ var (
)
func setupRuleSets() {
recordNode := yaml.Node{}
recordNode.SetString("example_rule")
exprNode := yaml.Node{}
exprNode.SetString("example_expr")
recordNodeUpdated := yaml.Node{}
recordNodeUpdated.SetString("example_ruleupdated")
exprNodeUpdated := yaml.Node{}
exprNodeUpdated.SetString("example_exprupdated")
record := "example_rule"
expr := "example_expr"
recordUpdated := "example_ruleupdated"
exprUpdated := "example_exprupdated"
initialRuleSet = map[string][]rulefmt.RuleGroup{
"file /one": {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
{
Name: "rulegroup_two",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
@ -70,19 +66,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_two",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
@ -92,28 +88,28 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
{
Name: "rulegroup_two",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
{
Name: "rulegroup_three",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
@ -123,19 +119,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
{
Name: "rulegroup_two",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
@ -143,10 +139,10 @@ func setupRuleSets() {
"file /two": {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
@ -156,19 +152,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
{
Name: "rulegroup_two",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
@ -176,10 +172,10 @@ func setupRuleSets() {
"file /two": {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNodeUpdated,
Expr: exprNodeUpdated,
Record: recordUpdated,
Expr: exprUpdated,
},
},
},
@ -189,19 +185,19 @@ func setupRuleSets() {
"file /one": {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
{
Name: "rulegroup_two",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},
@ -211,10 +207,10 @@ func setupRuleSets() {
specialCharFile: {
{
Name: "rulegroup_one",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: recordNode,
Expr: exprNode,
Record: record,
Expr: expr,
},
},
},

@ -236,7 +236,7 @@ func ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) {
set[g.Name] = struct{}{}
for _, r := range g.Rules {
if err := validateRuleNode(&r, g.Name); err != nil {
if err := validateRule(&r, g.Name); err != nil {
errs = append(errs, err)
}
}
@ -245,38 +245,38 @@ func ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) {
return errs
}
func validateRuleNode(r *rulefmt.RuleNode, groupName string) error {
if r.Record.Value != "" && r.Alert.Value != "" {
func validateRule(r *rulefmt.Rule, groupName string) error {
if r.Record != "" && r.Alert != "" {
return errors.Errorf("only one of 'record' and 'alert' must be set")
}
if r.Record.Value == "" && r.Alert.Value == "" {
if r.Record == "" && r.Alert == "" {
return errors.Errorf("one of 'record' or 'alert' must be set")
}
if r.Expr.Value == "" {
if r.Expr == "" {
return errors.Errorf("field 'expr' must be set in rule")
} else if _, err := syntax.ParseExpr(r.Expr.Value); err != nil {
if r.Record.Value != "" {
return errors.Wrapf(err, "could not parse expression for record '%s' in group '%s'", r.Record.Value, groupName)
} else if _, err := syntax.ParseExpr(r.Expr); err != nil {
if r.Record != "" {
return errors.Wrapf(err, "could not parse expression for record '%s' in group '%s'", r.Record, groupName)
}
return errors.Wrapf(err, "could not parse expression for alert '%s' in group '%s'", r.Alert.Value, groupName)
return errors.Wrapf(err, "could not parse expression for alert '%s' in group '%s'", r.Alert, groupName)
}
if r.Record.Value != "" {
if r.Record != "" {
if len(r.Annotations) > 0 {
return errors.Errorf("invalid field 'annotations' in recording rule")
}
if r.For != 0 {
return errors.Errorf("invalid field 'for' in recording rule")
}
if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
return errors.Errorf("invalid recording rule name: %s", r.Record.Value)
if !model.IsValidLegacyMetricName(r.Record) {
return errors.Errorf("invalid recording rule name: %s", r.Record)
}
}
for k, v := range r.Labels {
if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
if !model.LabelName(k).IsValidLegacy() || k == model.MetricNameLabel {
return errors.Errorf("invalid label name: %s", k)
}
@ -286,7 +286,7 @@ func validateRuleNode(r *rulefmt.RuleNode, groupName string) error {
}
for k := range r.Annotations {
if !model.LabelName(k).IsValid() {
if !model.LabelName(k).IsValidLegacy() {
return errors.Errorf("invalid annotation name: %s", k)
}
}
@ -300,8 +300,8 @@ func validateRuleNode(r *rulefmt.RuleNode, groupName string) error {
// testTemplateParsing checks if the templates used in labels and annotations
// of the alerting rules are parsed correctly.
func testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) {
if rl.Alert.Value == "" {
func testTemplateParsing(rl *rulefmt.Rule) (errs []error) {
if rl.Alert == "" {
// Not an alerting rule.
return errs
}
@ -317,7 +317,7 @@ func testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) {
tmpl := template.NewTemplateExpander(
context.TODO(),
strings.Join(append(defs, text), ""),
"__alert_"+rl.Alert.Value,
"__alert_"+rl.Alert,
tmplData,
model.Time(timestamp.FromTime(time.Now())),
nil,

@ -10,8 +10,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logql"
rulerbase "github.com/grafana/loki/v3/pkg/ruler/base"
@ -23,14 +21,14 @@ import (
func TestInvalidRuleGroup(t *testing.T) {
ruleGroupValid := rulefmt.RuleGroup{
Name: "test",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{Value: "alert-1-name"},
Expr: yaml.Node{Value: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)"},
Alert: "alert-1-name",
Expr: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)",
},
{
Alert: yaml.Node{Value: "record-1-name"},
Expr: yaml.Node{Value: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)"},
Alert: "record-1-name",
Expr: "sum by (job) (rate({namespace=~\"test\"} [5m]) > 0)",
},
},
}
@ -38,14 +36,14 @@ func TestInvalidRuleGroup(t *testing.T) {
ruleGroupInValid := rulefmt.RuleGroup{
Name: "test",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{Value: "alert-1-name"},
Expr: yaml.Node{Value: "bad_value"},
Alert: "alert-1-name",
Expr: "bad_value",
},
{
Record: yaml.Node{Value: "record-1-name"},
Expr: yaml.Node{Value: "bad_value"},
Record: "record-1-name",
Expr: "bad_value",
},
},
}
@ -56,21 +54,21 @@ func TestInvalidRuleGroup(t *testing.T) {
// TestInvalidRuleExprParsing tests that a validation error is raised when rule expression is invalid
func TestInvalidRuleExprParsing(t *testing.T) {
expectedAlertErrorMsg := "could not parse expression for alert 'alert-1-name' in group 'test': parse error"
alertRuleExprInvalid := &rulefmt.RuleNode{
Alert: yaml.Node{Value: "alert-1-name"},
Expr: yaml.Node{Value: "bad_value"},
alertRuleExprInvalid := &rulefmt.Rule{
Alert: "alert-1-name",
Expr: "bad_value",
}
alertErr := validateRuleNode(alertRuleExprInvalid, "test")
alertErr := validateRule(alertRuleExprInvalid, "test")
assert.Containsf(t, alertErr.Error(), expectedAlertErrorMsg, "expected error containing '%s', got '%s'", expectedAlertErrorMsg, alertErr)
expectedRecordErrorMsg := "could not parse expression for record 'record-1-name' in group 'test': parse error"
recordRuleExprInvalid := &rulefmt.RuleNode{
Record: yaml.Node{Value: "record-1-name"},
Expr: yaml.Node{Value: "bad_value"},
recordRuleExprInvalid := &rulefmt.Rule{
Record: "record-1-name",
Expr: "bad_value",
}
recordErr := validateRuleNode(recordRuleExprInvalid, "test")
recordErr := validateRule(recordRuleExprInvalid, "test")
assert.Containsf(t, recordErr.Error(), expectedRecordErrorMsg, "expected error containing '%s', got '%s'", expectedRecordErrorMsg, recordErr)
}

@ -109,9 +109,9 @@ func (l *CachingGroupLoader) AlertingRules() []rulefmt.Rule {
for _, g := range group.Groups {
for _, rule := range g.Rules {
rules = append(rules, rulefmt.Rule{
Record: rule.Record.Value,
Alert: rule.Alert.Value,
Expr: rule.Expr.Value,
Record: rule.Record,
Alert: rule.Alert,
Expr: rule.Expr,
For: rule.For,
Labels: rule.Labels,
Annotations: rule.Annotations,

@ -7,17 +7,11 @@ import (
"testing"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/promql/parser"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func init() {
model.NameValidationScheme = model.LegacyValidation
}
func Test_GroupLoader(t *testing.T) {
for _, tc := range []struct {
desc string
@ -364,8 +358,8 @@ var (
ruleGroup1 = &rulefmt.RuleGroups{
Groups: []rulefmt.RuleGroup{
{
Rules: []rulefmt.RuleNode{
{Alert: yaml.Node{Value: "alert-1-name"}},
Rules: []rulefmt.Rule{
{Alert: "alert-1-name"},
},
},
},
@ -373,8 +367,8 @@ var (
ruleGroup2 = &rulefmt.RuleGroups{
Groups: []rulefmt.RuleGroup{
{
Rules: []rulefmt.RuleNode{
{Alert: yaml.Node{Value: "alert-2-name"}},
Rules: []rulefmt.Rule{
{Alert: "alert-2-name"},
},
},
},

@ -6,7 +6,6 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/logproto" //lint:ignore faillint allowed to import other protobuf
)
@ -24,13 +23,13 @@ func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc
return &rg
}
func formattedRuleToProto(rls []rulefmt.RuleNode) []*RuleDesc {
func formattedRuleToProto(rls []rulefmt.Rule) []*RuleDesc {
rules := make([]*RuleDesc, len(rls))
for i := range rls {
rules[i] = &RuleDesc{
Expr: rls[i].Expr.Value,
Record: rls[i].Record.Value,
Alert: rls[i].Alert.Value,
Expr: rls[i].Expr,
Record: rls[i].Record,
Alert: rls[i].Alert,
For: time.Duration(rls[i].For),
Labels: logproto.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Labels)),
Annotations: logproto.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Annotations)),
@ -45,29 +44,24 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup {
formattedRuleGroup := rulefmt.RuleGroup{
Name: rg.GetName(),
Interval: model.Duration(rg.Interval),
Rules: make([]rulefmt.RuleNode, len(rg.GetRules())),
Rules: make([]rulefmt.Rule, len(rg.GetRules())),
Limit: int(rg.GetLimit()),
}
for i, rl := range rg.GetRules() {
exprNode := yaml.Node{}
exprNode.SetString(rl.GetExpr())
expr := rl.GetExpr()
newRule := rulefmt.RuleNode{
Expr: exprNode,
newRule := rulefmt.Rule{
Expr: expr,
Labels: logproto.FromLabelAdaptersToLabels(rl.Labels).Map(),
Annotations: logproto.FromLabelAdaptersToLabels(rl.Annotations).Map(),
For: model.Duration(rl.GetFor()),
}
if rl.GetRecord() != "" {
recordNode := yaml.Node{}
recordNode.SetString(rl.GetRecord())
newRule.Record = recordNode
newRule.Record = rl.GetRecord()
} else {
alertNode := yaml.Node{}
alertNode.SetString(rl.GetAlert())
newRule.Alert = alertNode
newRule.Alert = rl.GetAlert()
}
formattedRuleGroup.Rules[i] = newRule

@ -106,7 +106,7 @@ func TestListRules(t *testing.T) {
func TestLoadRules(t *testing.T) {
runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ interface{}) {
groups := []testGroup{
{user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.RuleNode{{
{user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.Rule{{
For: model.Duration(5 * time.Minute),
Labels: map[string]string{"label1": "value1"},
}}, Limit: 10}},

@ -30,10 +30,10 @@ func TestClient_LoadAllRuleGroups(t *testing.T) {
{
Name: "rule",
Interval: model.Duration(100 * time.Second),
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Kind: yaml.ScalarNode, Value: "test_rule"},
Expr: yaml.Node{Kind: yaml.ScalarNode, Value: "up"},
Record: "test_rule",
Expr: "up",
},
},
},

@ -284,6 +284,10 @@ func (n noopScrapeManager) Get() (*scrape.Manager, error) {
return nil, errors.New("No-op Scrape manager not ready")
}
func (n noopScrapeManager) Ready() bool {
return false
}
// initialize sets up the various Prometheus components with their initial
// settings. initialize will be called each time the Instance is run. Prometheus
// components cannot be reused after they are stopped so we need to recreate them
@ -304,7 +308,7 @@ func (i *Instance) initialize(_ context.Context, reg prometheus.Registerer, cfg
// Setup the remote storage
remoteLogger := log.With(i.logger, "component", "remote")
i.remoteStore = remote.NewStorage(util_log.SlogFromGoKit(remoteLogger), reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, noopScrapeManager{}, false)
i.remoteStore = remote.NewStorage(util_log.SlogFromGoKit(remoteLogger), reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, noopScrapeManager{})
err = i.remoteStore.ApplyConfig(&config.Config{
RemoteWriteConfigs: cfg.RemoteWrite,
})

@ -28,6 +28,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"go.uber.org/atomic"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@ -70,7 +71,7 @@ type Storage struct {
// NewStorage makes a new Storage.
func NewStorage(logger log.Logger, metrics *Metrics, registerer prometheus.Registerer, path string, enableReplay bool) (*Storage, error) {
w, err := wlog.NewSize(util_log.SlogFromGoKit(logger), registerer, SubDirectory(path), wlog.DefaultSegmentSize, wlog.CompressionSnappy)
w, err := wlog.NewSize(util_log.SlogFromGoKit(logger), registerer, SubDirectory(path), wlog.DefaultSegmentSize, compression.Snappy)
if err != nil {
return nil, err
}
@ -373,7 +374,7 @@ func (w *Storage) Truncate(mint int64) error {
return nil
}
keep := func(id chunks.HeadSeriesRef) bool {
keep := func(id chunks.HeadSeriesRef, _ int) bool {
if w.series.getByID(id) != nil {
return true
}

@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
"github.com/grafana/loki/v3/pkg/util/encoding"
@ -205,7 +206,7 @@ func newHeadWAL(log log.Logger, dir string, t time.Time) (*headWAL, error) {
// NB: if we use a non-nil Prometheus Registerer, ensure
// that the underlying metrics won't conflict with existing WAL metrics in the ingester.
// Likely, this can be done by adding extra label(s)
wal, err := wlog.NewSize(util_log.SlogFromGoKit(log), nil, dir, walSegmentSize, wlog.CompressionNone)
wal, err := wlog.NewSize(util_log.SlogFromGoKit(log), nil, dir, walSegmentSize, compression.None)
if err != nil {
return nil, err
}

@ -628,7 +628,7 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error {
}
// Do not apply the aggregation label to excluded rule groups.
applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool {
_, excluded := r.aggregationLabelExcludedRuleGroupsList[group.Name]
return !excluded
}
@ -749,11 +749,11 @@ func checkDuplicates(groups []rwrulefmt.RuleGroup) []compareRuleType {
return duplicates
}
func ruleMetric(rule rulefmt.RuleNode) string {
if rule.Alert.Value != "" {
return rule.Alert.Value
func ruleMetric(rule rulefmt.Rule) string {
if rule.Alert != "" {
return rule.Alert
}
return rule.Record.Value
return rule.Record
}
// End taken from https://github.com/prometheus/prometheus/blob/8c8de46003d1800c9d40121b4a5e5de8582ef6e1/cmd/promtool/main.go#L403

@ -5,7 +5,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
@ -21,14 +20,14 @@ func TestCheckDuplicates(t *testing.T) {
in: []rwrulefmt.RuleGroup{{
RuleGroup: rulefmt.RuleGroup{
Name: "rulegroup",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "up"},
Expr: yaml.Node{Value: "up==1"},
Record: "up",
Expr: "up==1",
},
{
Record: yaml.Node{Value: "down"},
Expr: yaml.Node{Value: "up==0"},
Record: "down",
Expr: "up==0",
},
},
},
@ -41,14 +40,14 @@ func TestCheckDuplicates(t *testing.T) {
in: []rwrulefmt.RuleGroup{{
RuleGroup: rulefmt.RuleGroup{
Name: "rulegroup",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "up"},
Expr: yaml.Node{Value: "up==1"},
Record: "up",
Expr: "up==1",
},
{
Record: yaml.Node{Value: "up"},
Expr: yaml.Node{Value: "up==0"},
Record: "up",
Expr: "up==0",
},
},
},

@ -102,10 +102,10 @@ func CompareGroups(groupOne, groupTwo rwrulefmt.RuleGroup) error {
return nil
}
func rulesEqual(a, b *rulefmt.RuleNode) bool {
if a.Alert.Value != b.Alert.Value ||
a.Record.Value != b.Record.Value ||
a.Expr.Value != b.Expr.Value ||
func rulesEqual(a, b *rulefmt.Rule) bool {
if a.Alert != b.Alert ||
a.Record != b.Record ||
a.Expr != b.Expr ||
a.For != b.For {
return false
}

@ -4,7 +4,6 @@ import (
"testing"
"github.com/prometheus/prometheus/model/rulefmt"
yaml "gopkg.in/yaml.v3"
"github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
@ -12,21 +11,21 @@ import (
func Test_rulesEqual(t *testing.T) {
tests := []struct {
name string
a *rulefmt.RuleNode
b *rulefmt.RuleNode
a *rulefmt.Rule
b *rulefmt.Rule
want bool
}{
{
name: "rule_node_identical",
a: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
a: &rulefmt.Rule{
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
b: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
b: &rulefmt.Rule{
Record: "one",
Expr: "up",
Annotations: map[string]string{"c": "d", "a": "b"},
Labels: nil,
},
@ -34,53 +33,53 @@ func Test_rulesEqual(t *testing.T) {
},
{
name: "rule_node_diff",
a: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
a: &rulefmt.Rule{
Record: "one",
Expr: "up",
},
b: &rulefmt.RuleNode{
Record: yaml.Node{Value: "two"},
Expr: yaml.Node{Value: "up"},
b: &rulefmt.Rule{
Record: "two",
Expr: "up",
},
want: false,
},
{
name: "rule_node_annotations_diff",
a: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
a: &rulefmt.Rule{
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b"},
},
b: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one", Column: 10},
Expr: yaml.Node{Value: "up"},
b: &rulefmt.Rule{
Record: "one",
Expr: "up",
Annotations: map[string]string{"c": "d"},
},
want: false,
},
{
name: "rule_node_annotations_nil_diff",
a: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
a: &rulefmt.Rule{
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b"},
},
b: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one", Column: 10},
Expr: yaml.Node{Value: "up"},
b: &rulefmt.Rule{
Record: "one",
Expr: "up",
Annotations: nil,
},
want: false,
},
{
name: "rule_node_yaml_diff",
a: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
a: &rulefmt.Rule{
Record: "one",
Expr: "up",
},
b: &rulefmt.RuleNode{
Record: yaml.Node{Value: "one", Column: 10},
Expr: yaml.Node{Value: "up"},
b: &rulefmt.Rule{
Record: "one",
Expr: "up",
},
want: true,
},
@ -106,10 +105,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -119,10 +118,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -136,10 +135,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -149,16 +148,16 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -172,10 +171,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -188,10 +187,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -208,10 +207,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -224,10 +223,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -245,10 +244,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@ -261,10 +260,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: "one"},
Expr: yaml.Node{Value: "up"},
Record: "one",
Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},

@ -28,7 +28,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "testgrp2",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@ -51,7 +51,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "testgrp2",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@ -81,7 +81,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "testgrp2",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@ -96,7 +96,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "other_testgrp2",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
// currently the tests only check length
},

@ -7,6 +7,7 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/promql/parser"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
logql "github.com/grafana/loki/v3/pkg/logql/syntax"
@ -40,13 +41,13 @@ func (r RuleNamespace) LintExpressions() (int, int, error) {
for i, group := range r.Groups {
for j, rule := range group.Rules {
log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("linting %s", queryLanguage)
exp, err := parseFn(rule.Expr.Value)
exp, err := parseFn(rule.Expr)
if err != nil {
return count, mod, err
}
count++
if rule.Expr.Value != exp.String() {
if rule.Expr != exp.String() {
log.WithFields(log.Fields{
"rule": getRuleName(rule),
"currentExpr": rule.Expr,
@ -54,7 +55,7 @@ func (r RuleNamespace) LintExpressions() (int, int, error) {
}).Debugf("expression differs")
mod++
r.Groups[i].Rules[j].Expr.Value = exp.String()
r.Groups[i].Rules[j].Expr = exp.String()
}
}
}
@ -75,10 +76,10 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int {
for _, group := range r.Groups {
for _, rule := range group.Rules {
// Assume if there is a rule.Record that this is a recording rule.
if rule.Record.Value == "" {
if rule.Record == "" {
continue
}
name = rule.Record.Value
name = rule.Record
log.WithFields(log.Fields{"rule": name}).Debugf("linting recording rule name")
chunks := strings.Split(name, ":")
if len(chunks) < reqChunks {
@ -98,7 +99,7 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int {
// AggregateBy modifies the aggregation rules in groups to include a given Label.
// If the applyTo function is provided, the aggregation is applied only to rules
// for which the applyTo function returns true.
func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool) (int, int, error) {
func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool) (int, int, error) {
// `count` represents the number of rules we evaluated.
// `mod` represents the number of rules we modified - a modification can either be a lint or adding the
// label in the aggregation.
@ -118,7 +119,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
}
log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("evaluating...")
exp, err := parser.ParseExpr(rule.Expr.Value)
exp, err := parser.ParseExpr(rule.Expr)
if err != nil {
return count, mod, err
}
@ -130,14 +131,14 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
parser.Inspect(exp, f)
// Only modify the ones that actually changed.
if rule.Expr.Value != exp.String() {
if rule.Expr != exp.String() {
log.WithFields(log.Fields{
"rule": getRuleName(rule),
"currentExpr": rule.Expr,
"afterExpr": exp.String(),
}).Debugf("expression differs")
mod++
r.Groups[i].Rules[j].Expr.Value = exp.String()
r.Groups[i].Rules[j].Expr = exp.String()
}
}
}
@ -147,7 +148,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
// exprNodeInspectorFunc returns a PromQL inspector.
// It modifies most PromQL expressions to include a given label.
func exprNodeInspectorFunc(rule rulefmt.RuleNode, label string) func(node parser.Node, path []parser.Node) error {
func exprNodeInspectorFunc(rule rulefmt.Rule, label string) func(node parser.Node, path []parser.Node) error {
return func(node parser.Node, _ []parser.Node) error {
var err error
switch n := node.(type) {
@ -239,12 +240,17 @@ func (r RuleNamespace) Validate() []error {
func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error {
var errs []error
for i, r := range g.Rules {
for _, err := range r.Validate() {
ruleNode := rulefmt.RuleNode{
Record: yaml.Node{Value: r.Record},
Alert: yaml.Node{Value: r.Alert},
Expr: yaml.Node{Value: r.Expr},
}
for _, err := range r.Validate(ruleNode) {
var ruleName string
if r.Alert.Value != "" {
ruleName = r.Alert.Value
if r.Alert != "" {
ruleName = r.Alert
} else {
ruleName = r.Record.Value
ruleName = r.Record
}
errs = append(errs, &rulefmt.Error{
Group: g.Name,
@ -258,10 +264,10 @@ func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error {
return errs
}
func getRuleName(r rulefmt.RuleNode) string {
if r.Record.Value != "" {
return r.Record.Value
func getRuleName(r rulefmt.Rule) string {
if r.Record != "" {
return r.Record
}
return r.Alert.Value
return r.Alert
}

@ -5,7 +5,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
"gotest.tools/assert"
"github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
@ -15,7 +14,7 @@ func TestAggregateBy(t *testing.T) {
tt := []struct {
name string
rn RuleNamespace
applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool
applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool
expectedExpr []string
count, modified int
expect error
@ -31,8 +30,8 @@ func TestAggregateBy(t *testing.T) {
Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
Name: "WithoutAggregation", Rules: []rulefmt.RuleNode{
{Alert: yaml.Node{Value: "WithoutAggregation"}, Expr: yaml.Node{Value: "up != 1"}},
Name: "WithoutAggregation", Rules: []rulefmt.Rule{
{Alert: "WithoutAggregation", Expr: "up != 1"},
},
},
},
@ -48,11 +47,10 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "SkipWithout",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{Value: "SkipWithout"},
Expr: yaml.Node{
Value: `
Alert: "SkipWithout",
Expr: `
min without (alertmanager) (
rate(prometheus_notifications_errors_total{job="default/prometheus"}[5m])
/
@ -60,7 +58,6 @@ func TestAggregateBy(t *testing.T) {
)
* 100
> 3`,
},
},
},
},
@ -77,16 +74,14 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "WithAggregation",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{Value: "WithAggregation"},
Expr: yaml.Node{
Value: `
Alert: "WithAggregation",
Expr: `
sum(rate(cortex_prometheus_rule_evaluation_failures_total[1m])) by (namespace, job)
/
sum(rate(cortex_prometheus_rule_evaluations_total[1m])) by (namespace, job)
> 0.01`,
},
},
},
},
@ -103,15 +98,11 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "CountAggregation",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{
Value: "CountAggregation",
},
Expr: yaml.Node{
Value: `
Alert: "CountAggregation",
Expr: `
count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1`,
},
},
},
},
@ -128,10 +119,10 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "BinaryExpressions",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{Value: "VectorMatching"},
Expr: yaml.Node{Value: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`},
Alert: "VectorMatching",
Expr: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`,
},
},
},
@ -148,35 +139,27 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "CountAggregation",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{
Value: "CountAggregation",
},
Expr: yaml.Node{
Value: `count by (namespace) (test_series) > 1`,
},
Alert: "CountAggregation",
Expr: `count by (namespace) (test_series) > 1`,
},
},
},
}, {
RuleGroup: rulefmt.RuleGroup{
Name: "CountSkipped",
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{
Value: "CountSkipped",
},
Expr: yaml.Node{
Value: `count by (namespace) (test_series) > 1`,
},
Alert: "CountSkipped",
Expr: `count by (namespace) (test_series) > 1`,
},
},
},
},
},
},
applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool {
return group.Name != "CountSkipped"
},
expectedExpr: []string{`count by (namespace, cluster) (test_series) > 1`, `count by (namespace) (test_series) > 1`},
@ -196,7 +179,7 @@ func TestAggregateBy(t *testing.T) {
expectedIdx := 0
for _, g := range tc.rn.Groups {
for _, r := range g.Rules {
require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr.Value)
require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr)
expectedIdx++
}
}
@ -255,10 +238,10 @@ func TestLintExpressions(t *testing.T) {
r := RuleNamespace{Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Alert: yaml.Node{Value: "AName"},
Expr: yaml.Node{Value: tc.expr},
Alert: "AName",
Expr: tc.expr,
},
},
},
@ -267,7 +250,7 @@ func TestLintExpressions(t *testing.T) {
}
c, m, err := r.LintExpressions()
rexpr := r.Groups[0].Rules[0].Expr.Value
rexpr := r.Groups[0].Rules[0].Expr
require.Equal(t, tc.count, c)
require.Equal(t, tc.modified, m)
@ -325,10 +308,11 @@ func TestCheckRecordingRules(t *testing.T) {
Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
Rules: []rulefmt.RuleNode{
Rules: []rulefmt.Rule{
{
Record: yaml.Node{Value: tc.ruleName},
Expr: yaml.Node{Value: "rate(some_metric_total)[5m]"}},
Record: tc.ruleName,
Expr: "rate(some_metric_total)[5m]",
},
},
},
},

@ -106,7 +106,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.21.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/exporter-toolkit v0.13.2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/redis/go-redis/v9 v9.7.3 // indirect
@ -131,14 +131,14 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.28.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.29.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect

@ -416,8 +416,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
@ -531,8 +531,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -573,13 +573,13 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -588,8 +588,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -622,16 +622,16 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

@ -1,5 +1,19 @@
# Release History
## 1.18.0 (2025-04-03)
### Features Added
* Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token
## 1.17.1 (2025-03-20)
### Other Changes
* Upgraded to Go 1.23
* Upgraded dependencies
## 1.17.0 (2025-01-07)
### Features Added

@ -47,8 +47,13 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
// AccessToken represents an Azure service bearer access token with expiry information.
// Exported as azcore.AccessToken.
type AccessToken struct {
Token string
// Token is the access token
Token string
// ExpiresOn indicates when the token expires
ExpiresOn time.Time
// RefreshOn is a suggested time to refresh the token.
// Clients should ignore this value when it's zero.
RefreshOn time.Time
}
// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.

@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.17.0"
Version = "v1.18.0"
)

@ -51,6 +51,15 @@ func acquire(state acquiringResourceState) (newResource exported.AccessToken, ne
return tk, tk.ExpiresOn, nil
}
// shouldRefresh determines whether the token should be refreshed. It's a variable so tests can replace it.
var shouldRefresh = func(tk exported.AccessToken, _ acquiringResourceState) bool {
if tk.RefreshOn.IsZero() {
return tk.ExpiresOn.Add(-5 * time.Minute).Before(time.Now())
}
// no offset in this case because the authority suggested a refresh window--between RefreshOn and ExpiresOn
return tk.RefreshOn.Before(time.Now())
}
// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
@ -69,11 +78,14 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
return authNZ(policy.TokenRequestOptions{Scopes: scopes})
}
}
mr := temporal.NewResourceWithOptions(acquire, temporal.ResourceOptions[exported.AccessToken, acquiringResourceState]{
ShouldRefresh: shouldRefresh,
})
return &BearerTokenPolicy{
authzHandler: ah,
cred: cred,
scopes: scopes,
mainResource: temporal.NewResource(acquire),
mainResource: mr,
allowHTTP: opts.InsecureAllowCredentialWithHTTP,
}
}

@ -1,5 +1,17 @@
# Release History
## 1.9.0 (2025-04-08)
### Features Added
* `GetToken()` sets `AccessToken.RefreshOn` when the token provider specifies a value
### Other Changes
* `NewManagedIdentityCredential` logs the configured user-assigned identity, if any
* Deprecated `UsernamePasswordCredential` because it can't support multifactor
authentication (MFA), which Microsoft Entra ID requires for most tenants. See
https://aka.ms/azsdk/identity/mfa for migration guidance.
* Updated dependencies
## 1.8.2 (2025-02-12)
### Other Changes

@ -21,7 +21,7 @@ go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity
## Prerequisites
- an [Azure subscription](https://azure.microsoft.com/free/)
- Go 1.18
- [Supported](https://aka.ms/azsdk/go/supported-versions) version of Go
### Authenticating during local development
@ -146,7 +146,6 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|-|-
|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser
|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI
|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password
### Authenticating via Development Tools
@ -159,7 +158,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables:
#### Service principal with secret
### Service principal with secret
|variable name|value
|-|-
@ -167,7 +166,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_SECRET`|one of the application's client secrets
#### Service principal with certificate
### Service principal with certificate
|variable name|value
|-|-
@ -176,16 +175,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key
|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any
#### Username and password
|variable name|value
|-|-
|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
|`AZURE_USERNAME`|a username (usually an email address)
|`AZURE_PASSWORD`|that user's password
Configuration is attempted in the above order. For example, if values for a
client secret and certificate are both present, the client secret will be used.
Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used.
## Token caching

@ -22,12 +22,11 @@ Some credential types support opt-in persistent token caching (see [the below ta
Persistent caches are encrypted at rest using a mechanism that depends on the operating system:
| Operating system | Encryption facility |
| ---------------- | ---------------------------------------------- |
| Linux | kernel key retention service (keyctl) |
| macOS | Keychain (requires cgo and native build tools) |
| Windows | Data Protection API (DPAPI) |
| Operating system | Encryption facility | Limitations |
| ---------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. |
| macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). |
| Windows | Data Protection API (DPAPI) | No specific limitations. |
Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example].
### Credentials supporting token caching

@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/azidentity",
"Tag": "go/azidentity_c55452bbf6"
"Tag": "go/azidentity_191110b0dd"
}

@ -22,6 +22,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)
@ -208,6 +209,10 @@ type msalConfidentialClient interface {
AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error)
}
type msalManagedIdentityClient interface {
AcquireToken(context.Context, string, ...managedidentity.AcquireTokenOption) (managedidentity.AuthResult, error)
}
// enables fakes for test scenarios
type msalPublicClient interface {
AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error)

@ -118,7 +118,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", "))
log.Write(EventAuthentication, msg)
}
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {

@ -60,7 +60,10 @@ type EnvironmentCredentialOptions struct {
// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this
// function isn't able to parse your certificate, use [ClientCertificateCredential] instead.
//
// # User with username and password
// # Deprecated: User with username and password
//
// User password authentication is deprecated because it can't support multifactor authentication. See
// [Entra ID documentation] for migration guidance.
//
// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
//
@ -75,6 +78,8 @@ type EnvironmentCredentialOptions struct {
// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants
// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set
// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant.
//
// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type EnvironmentCredential struct {
cred azcore.TokenCredential
}

@ -1,4 +1,4 @@
go 1.18
go 1.23.0
use (
.

@ -9,7 +9,7 @@
}
},
"GoVersion": [
"1.22.1"
"env:GO_VERSION_PREVIOUS"
],
"IDENTITY_IMDS_AVAILABLE": "1"
}

@ -8,24 +8,18 @@ package azidentity
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
)
const (
@ -41,59 +35,20 @@ const (
msiResID = "msi_res_id"
msiSecret = "MSI_SECRET"
imdsAPIVersion = "2018-02-01"
azureArcAPIVersion = "2019-08-15"
azureArcAPIVersion = "2020-06-01"
qpClientID = "client_id"
serviceFabricAPIVersion = "2019-07-01-preview"
)
var imdsProbeTimeout = time.Second
type msiType int
const (
msiTypeAppService msiType = iota
msiTypeAzureArc
msiTypeAzureML
msiTypeCloudShell
msiTypeIMDS
msiTypeServiceFabric
)
type managedIdentityClient struct {
azClient *azcore.Client
endpoint string
id ManagedIDKind
msiType msiType
probeIMDS bool
azClient *azcore.Client
imds, probeIMDS, userAssigned bool
// chained indicates whether the client is part of a credential chain. If true, the client will return
// a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response.
chained bool
}
// arcKeyDirectory returns the directory expected to contain Azure Arc keys
var arcKeyDirectory = func() (string, error) {
switch runtime.GOOS {
case "linux":
return "/var/opt/azcmagent/tokens", nil
case "windows":
pd := os.Getenv("ProgramData")
if pd == "" {
return "", errors.New("environment variable ProgramData has no value")
}
return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil
default:
return "", fmt.Errorf("unsupported OS %q", runtime.GOOS)
}
}
type wrappedNumber json.Number
func (n *wrappedNumber) UnmarshalJSON(b []byte) error {
c := string(b)
if c == "\"\"" {
return nil
}
return json.Unmarshal(b, (*json.Number)(n))
chained bool
msalClient msalManagedIdentityClient
}
// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
@ -141,51 +96,20 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
options = &ManagedIdentityCredentialOptions{}
}
cp := options.ClientOptions
c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS}
env := "IMDS"
if endpoint, ok := os.LookupEnv(identityEndpoint); ok {
if _, ok := os.LookupEnv(identityHeader); ok {
if _, ok := os.LookupEnv(identityServerThumbprint); ok {
if options.ID != nil {
return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
}
env = "Service Fabric"
c.endpoint = endpoint
c.msiType = msiTypeServiceFabric
} else {
env = "App Service"
c.endpoint = endpoint
c.msiType = msiTypeAppService
}
} else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok {
if options.ID != nil {
return nil, errors.New("the Azure Arc API doesn't support specifying a user-assigned managed identity at runtime")
}
env = "Azure Arc"
c.endpoint = endpoint
c.msiType = msiTypeAzureArc
}
} else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
c.endpoint = endpoint
if _, ok := os.LookupEnv(msiSecret); ok {
if options.ID != nil && options.ID.idKind() != miClientID {
return nil, errors.New("the Azure ML API supports specifying a user-assigned managed identity by client ID only")
}
env = "Azure ML"
c.msiType = msiTypeAzureML
} else {
if options.ID != nil {
return nil, errors.New("the Cloud Shell API doesn't support user-assigned managed identities")
}
env = "Cloud Shell"
c.msiType = msiTypeCloudShell
}
} else {
c := managedIdentityClient{}
source, err := managedidentity.GetSource()
if err != nil {
return nil, err
}
env := string(source)
if source == managedidentity.DefaultToIMDS {
env = "IMDS"
c.imds = true
c.probeIMDS = options.dac
setIMDSRetryOptionDefaults(&cp.Retry)
}
client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{
c.azClient, err = azcore.NewClient(module, version, azruntime.PipelineOptions{
Tracing: azruntime.TracingOptions{
Namespace: traceNamespace,
},
@ -193,28 +117,53 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
if err != nil {
return nil, err
}
c.azClient = client
id := managedidentity.SystemAssigned()
if options.ID != nil {
c.userAssigned = true
switch s := options.ID.String(); options.ID.idKind() {
case miClientID:
id = managedidentity.UserAssignedClientID(s)
case miObjectID:
id = managedidentity.UserAssignedObjectID(s)
case miResourceID:
id = managedidentity.UserAssignedResourceID(s)
}
}
msalClient, err := managedidentity.New(id, managedidentity.WithHTTPClient(&c), managedidentity.WithRetryPolicyDisabled())
if err != nil {
return nil, err
}
c.msalClient = &msalClient
if log.Should(EventAuthentication) {
log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env)
msg := fmt.Sprintf("%s will use %s managed identity", credNameManagedIdentity, env)
if options.ID != nil {
kind := "client"
switch options.ID.(type) {
case ObjectID:
kind = "object"
case ResourceID:
kind = "resource"
}
msg += fmt.Sprintf(" with %s ID %q", kind, options.ID.String())
}
log.Write(EventAuthentication, msg)
}
return &c, nil
}
// provideToken acquires a token for MSAL's confidential.Client, which caches the token
func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) {
result := confidential.TokenProviderResult{}
tk, err := c.authenticate(ctx, c.id, params.Scopes)
if err == nil {
result.AccessToken = tk.Token
result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds())
}
return result, err
func (*managedIdentityClient) CloseIdleConnections() {
// do nothing
}
func (c *managedIdentityClient) Do(r *http.Request) (*http.Response, error) {
return doForClient(c.azClient, r)
}
// authenticate acquires an access token
func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) {
func (c *managedIdentityClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
// no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client,
// and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block
if c.probeIMDS {
@ -222,7 +171,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout)
defer cancel()
cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})
req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint)
req, err := azruntime.NewRequest(cx, http.MethodGet, imdsEndpoint)
if err != nil {
return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err)
}
@ -237,32 +186,26 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
c.probeIMDS = false
}
msg, err := c.createAuthRequest(ctx, id, scopes)
if err != nil {
return azcore.AccessToken{}, err
}
resp, err := c.azClient.Pipeline().Do(msg)
if err != nil {
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil)
}
if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
tk, err := c.createAccessToken(resp)
if err != nil && c.chained && c.msiType == msiTypeIMDS {
// failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at
ar, err := c.msalClient.AcquireToken(ctx, tro.Scopes[0], managedidentity.WithClaims(tro.Claims))
if err == nil {
msg := fmt.Sprintf(scopeLogFmt, credNameManagedIdentity, strings.Join(ar.GrantedScopes, ", "))
log.Write(EventAuthentication, msg)
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
if c.imds {
var ije msalerrors.InvalidJsonErr
if c.chained && errors.As(err, &ije) {
// an unmarshaling error implies the response is from something other than IMDS such as a proxy listening at
// the same address. Return a credentialUnavailableError so credential chains continue to their next credential
err = newCredentialUnavailableError(credNameManagedIdentity, err.Error())
return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
}
resp := getResponseFromError(err)
if resp == nil {
return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err)
}
return tk, err
}
if c.msiType == msiTypeIMDS {
switch resp.StatusCode {
case http.StatusBadRequest:
if id != nil {
// return authenticationFailedError, halting any encompassing credential chain,
// because the explicit user-assigned identity implies the developer expected this to work
if c.userAssigned {
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
}
msg := "failed to authenticate a system assigned identity"
@ -278,237 +221,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body)))
}
}
if c.chained {
// the response may be from something other than IMDS, for example a proxy returning
// 404. Return credentialUnavailableError so credential chains continue to their
// next credential, include the response in the error message to help debugging
err = newAuthenticationFailedError(credNameManagedIdentity, "", resp)
return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
}
}
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp)
}
func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) {
value := struct {
// these are the only fields that we use
Token string `json:"access_token,omitempty"`
RefreshToken string `json:"refresh_token,omitempty"`
ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid
ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
}{}
if err := azruntime.UnmarshalAsJSON(res, &value); err != nil {
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res)
}
if value.ExpiresIn != "" {
expiresIn, err := json.Number(value.ExpiresIn).Int64()
if err != nil {
return azcore.AccessToken{}, err
}
return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil
}
switch v := value.ExpiresOn.(type) {
case float64:
return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil
case string:
if expiresOn, err := strconv.Atoi(v); err == nil {
return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
}
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res)
default:
msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res)
}
}
// createAuthRequest builds the token request appropriate for the configured
// managed identity source (IMDS, App Service, Azure Arc, Azure ML, Service
// Fabric, or Cloud Shell). It returns a credentialUnavailableError when the
// environment doesn't match any supported source.
func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
	switch c.msiType {
	case msiTypeIMDS:
		return c.createIMDSAuthRequest(ctx, id, scopes)
	case msiTypeAppService:
		return c.createAppServiceAuthRequest(ctx, id, scopes)
	case msiTypeAzureArc:
		// need to perform a preliminary request to retrieve the secret key challenge provided by the HIMDS service
		key, err := c.getAzureArcSecretKey(ctx, scopes)
		if err != nil {
			msg := fmt.Sprintf("failed to retrieve secret key from the identity endpoint: %v", err)
			return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil)
		}
		return c.createAzureArcAuthRequest(ctx, scopes, key)
	case msiTypeAzureML:
		return c.createAzureMLAuthRequest(ctx, id, scopes)
	case msiTypeServiceFabric:
		return c.createServiceFabricAuthRequest(ctx, scopes)
	case msiTypeCloudShell:
		return c.createCloudShellAuthRequest(ctx, scopes)
	default:
		return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
	}
}
// createIMDSAuthRequest builds a GET token request for the IMDS endpoint.
func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
	req, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
	if err != nil {
		return nil, err
	}
	// IMDS requires this header on every request
	req.Raw().Header.Set(headerMetadata, "true")
	params := req.Raw().URL.Query()
	params.Set("api-version", imdsAPIVersion)
	params.Set("resource", strings.Join(scopes, " "))
	if id != nil {
		// select the query parameter matching the kind of user-assigned identity
		switch id.idKind() {
		case miClientID:
			params.Set(qpClientID, id.String())
		case miObjectID:
			params.Set("object_id", id.String())
		case miResourceID:
			params.Set(msiResID, id.String())
		}
	}
	req.Raw().URL.RawQuery = params.Encode()
	return req, nil
}
// createAppServiceAuthRequest builds a GET token request for the App Service
// endpoint, authenticating with the X-IDENTITY-HEADER value from the environment.
func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
	req, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
	if err != nil {
		return nil, err
	}
	req.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
	query := req.Raw().URL.Query()
	query.Set("api-version", "2019-08-01")
	query.Set("resource", scopes[0])
	if id != nil {
		// select the query parameter matching the kind of user-assigned identity
		switch kind := id.idKind(); kind {
		case miClientID:
			query.Set(qpClientID, id.String())
		case miObjectID:
			query.Set("principal_id", id.String())
		case miResourceID:
			query.Set(miResID, id.String())
		}
	}
	req.Raw().URL.RawQuery = query.Encode()
	return req, nil
}
// createAzureMLAuthRequest builds a GET token request for the Azure ML
// endpoint. Azure ML identifies managed identities by client ID only, so
// object- and resource-ID selections are rejected.
func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
	req, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
	if err != nil {
		return nil, err
	}
	req.Raw().Header.Set("secret", os.Getenv(msiSecret))
	query := req.Raw().URL.Query()
	query.Set("api-version", "2017-09-01")
	query.Set("resource", strings.Join(scopes, " "))
	// default to the client ID from the environment; an explicit ID overrides it below
	query.Set("clientid", os.Getenv(defaultIdentityClientID))
	if id != nil {
		switch id.idKind() {
		case miClientID:
			query.Set("clientid", id.String())
		case miObjectID:
			return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by object ID", nil)
		case miResourceID:
			return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by resource ID", nil)
		}
	}
	req.Raw().URL.RawQuery = query.Encode()
	return req, nil
}
// createServiceFabricAuthRequest builds a GET token request for the Service
// Fabric endpoint, authenticating with the secret from the environment.
func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) {
	req, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
	if err != nil {
		return nil, err
	}
	headers := req.Raw().Header
	headers.Set("Accept", "application/json")
	headers.Set("Secret", os.Getenv(identityHeader))
	values := req.Raw().URL.Query()
	values.Set("api-version", serviceFabricAPIVersion)
	values.Set("resource", strings.Join(scopes, " "))
	req.Raw().URL.RawQuery = values.Encode()
	return req, nil
}
// getAzureArcSecretKey performs the Azure Arc challenge handshake: it sends an
// unauthenticated request to the HIMDS endpoint, expects a 401 whose
// WWW-Authenticate header names a .key file, validates that file's location
// and size, and returns its contents for use as a Basic authorization secret.
func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) {
	// create the request to retrieve the secret key challenge provided by the HIMDS service
	request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
	if err != nil {
		return "", err
	}
	request.Raw().Header.Set(headerMetadata, "true")
	q := request.Raw().URL.Query()
	q.Set("api-version", azureArcAPIVersion)
	q.Set("resource", strings.Join(resources, " "))
	request.Raw().URL.RawQuery = q.Encode()
	// send the initial request to get the short-lived secret key
	response, err := c.azClient.Pipeline().Do(request)
	if err != nil {
		return "", err
	}
	// the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location
	// of the secret key file. Any other status code indicates an error in the request.
	if response.StatusCode != 401 {
		msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
		return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response)
	}
	header := response.Header.Get("WWW-Authenticate")
	if len(header) == 0 {
		return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil)
	}
	// the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
	_, p, found := strings.Cut(header, "=")
	if !found {
		return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil)
	}
	expected, err := arcKeyDirectory()
	if err != nil {
		return "", err
	}
	// accept only .key files inside the expected directory, to avoid reading an arbitrary path
	if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") {
		return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil)
	}
	f, err := os.Stat(p)
	if err != nil {
		return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil)
	}
	// cap the key size as a defense against reading an unexpectedly large file
	if s := f.Size(); s > 4096 {
		return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil)
	}
	key, err := os.ReadFile(p)
	if err != nil {
		return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil)
	}
	return string(key), nil
}
// createAzureArcAuthRequest builds the authenticated Azure Arc token request,
// presenting the previously acquired secret key in a Basic Authorization header.
func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, resources []string, key string) (*policy.Request, error) {
	req, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
	if err != nil {
		return nil, err
	}
	headers := req.Raw().Header
	headers.Set(headerMetadata, "true")
	headers.Set("Authorization", fmt.Sprintf("Basic %s", key))
	query := req.Raw().URL.Query()
	query.Set("api-version", azureArcAPIVersion)
	query.Set("resource", strings.Join(resources, " "))
	req.Raw().URL.RawQuery = query.Encode()
	return req, nil
}
// createCloudShellAuthRequest builds a POST token request for the Cloud Shell
// endpoint, sending the requested resource as a form-encoded body.
//
// NOTE(review): the original block contained two unreachable lines after the
// final return that also returned azcore.AccessToken from a function declared
// to return *policy.Request (a compile error) — apparently merge residue.
// They are removed here.
func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) {
	request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint)
	if err != nil {
		return nil, err
	}
	request.Raw().Header.Set(headerMetadata, "true")
	data := url.Values{}
	data.Set("resource", strings.Join(scopes, " "))
	dataEncoded := data.Encode()
	body := streaming.NopCloser(strings.NewReader(dataEncoded))
	if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil {
		return nil, err
	}
	return request, nil
}

@ -14,7 +14,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
const credNameManagedIdentity = "ManagedIdentityCredential"
@ -110,8 +109,7 @@ type ManagedIdentityCredentialOptions struct {
//
// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
type ManagedIdentityCredential struct {
client *confidentialClient
mic *managedIdentityClient
mic *managedIdentityClient
}
// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options.
@ -123,38 +121,22 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M
if err != nil {
return nil, err
}
cred := confidential.NewCredFromTokenProvider(mic.provideToken)
// It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key.
// ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL.
clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY"
if options.ID != nil {
clientID = options.ID.String()
}
// similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value
c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{
ClientOptions: options.ClientOptions,
})
if err != nil {
return nil, err
}
return &ManagedIdentityCredential{client: c, mic: mic}, nil
return &ManagedIdentityCredential{mic: mic}, nil
}
// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients.
func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.mic.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
if len(opts.Scopes) != 1 {
err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity)
return azcore.AccessToken{}, err
}
// managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
// managed identity endpoints require a v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
tk, err := c.client.GetToken(ctx, opts)
return tk, err
return c.mic.GetToken(ctx, opts)
}
var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil)

@ -243,7 +243,7 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke
} else {
err = newAuthenticationFailedErrorFromMSAL(p.name, err)
}
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
// resolveTenant returns the correct WithTenantID() argument for a token request given the client's

@ -72,6 +72,7 @@ az container create -g $rg -n $aciName --image $image `
--acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
--assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
--cpu 1 `
--ip-address Public `
--memory 1.0 `
--os-type Linux `
--role "Storage Blob Data Reader" `
@ -82,7 +83,8 @@ az container create -g $rg -n $aciName --image $image `
AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) `
AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) `
FUNCTIONS_CUSTOMHANDLER_PORT=80
Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName"
$aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv
Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP"
# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip
Write-Host "Deploying to Azure Functions"

@ -17,6 +17,11 @@ import (
const credNameUserPassword = "UsernamePasswordCredential"
// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential.
//
// Deprecated: UsernamePasswordCredential is deprecated because it can't support multifactor
// authentication. See [Entra ID documentation] for migration guidance.
//
// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredentialOptions struct {
azcore.ClientOptions
@ -43,8 +48,13 @@ type UsernamePasswordCredentialOptions struct {
// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible
// with any form of multi-factor authentication, and the application must already have user or admin consent.
// with any form of multifactor authentication, and the application must already have user or admin consent.
// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts.
//
// Deprecated: this credential is deprecated because it can't support multifactor authentication. See [Entra ID documentation]
// for migration guidance.
//
// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredential struct {
client *publicClient
}

@ -14,5 +14,5 @@ const (
module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
// Version is the semantic version (see http://semver.org) of this module.
version = "v1.8.2"
version = "v1.9.0"
)

@ -44,7 +44,7 @@ func Should(cls Event) bool {
if log.lst == nil {
return false
}
if log.cls == nil || len(log.cls) == 0 {
if len(log.cls) == 0 {
return true
}
for _, c := range log.cls {

@ -11,9 +11,17 @@ import (
"time"
)
// backoff sets a minimum wait time between eager update attempts. It's a variable so tests can manipulate it.
var backoff = func(now, lastAttempt time.Time) bool {
return lastAttempt.Add(30 * time.Second).After(now)
}
// AcquireResource abstracts a method for refreshing a temporal resource.
type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
// ShouldRefresh abstracts a method for indicating whether a resource should be refreshed before expiration.
type ShouldRefresh[TResource, TState any] func(TResource, TState) bool
// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
type Resource[TResource, TState any] struct {
// cond is used to synchronize access to the shared resource embodied by the remaining fields
@ -31,24 +39,43 @@ type Resource[TResource, TState any] struct {
// lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource
lastAttempt time.Time
// shouldRefresh indicates whether the resource should be refreshed before expiration
shouldRefresh ShouldRefresh[TResource, TState]
// acquireResource is the callback function that actually acquires the resource
acquireResource AcquireResource[TResource, TState]
}
// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
r := &Resource[TResource, TState]{acquireResource: ar, cond: sync.NewCond(&sync.Mutex{})}
r.shouldRefresh = r.expiringSoon
return r
}
// ResourceOptions contains optional configuration for Resource
type ResourceOptions[TResource, TState any] struct {
// ShouldRefresh indicates whether [Resource.Get] should acquire an updated resource despite
// the currently held resource not having expired. [Resource.Get] ignores all errors from
// refresh attempts triggered by ShouldRefresh returning true, and doesn't call ShouldRefresh
// when the resource has expired (it unconditionally updates expired resources). When
// ShouldRefresh is nil, [Resource.Get] refreshes the resource if it will expire within 5
// minutes.
ShouldRefresh ShouldRefresh[TResource, TState]
}
// NewResourceWithOptions creates a new Resource that uses the specified AcquireResource for refreshing.
func NewResourceWithOptions[TResource, TState any](ar AcquireResource[TResource, TState], opts ResourceOptions[TResource, TState]) *Resource[TResource, TState] {
r := NewResource(ar)
if opts.ShouldRefresh != nil {
r.shouldRefresh = opts.ShouldRefresh
}
return r
}
// Get returns the underlying resource.
// If the resource is fresh, no refresh is performed.
func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
// If the resource is expiring within this time window, update it eagerly.
// This allows other threads/goroutines to keep running by using the not-yet-expired
// resource value while one thread/goroutine updates the resource.
const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration
const backoff = 30 * time.Second // Minimum wait time between eager update attempts
now, acquire, expired := time.Now(), false, false
// acquire exclusive lock
@ -65,9 +92,8 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
break
}
// Getting here means that this thread/goroutine will wait for the updated resource
} else if er.expiration.Add(-window).Before(now) {
// The resource is valid but is expiring within the time window
if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) {
} else if er.shouldRefresh(resource, state) {
if !(er.acquiring || backoff(now, er.lastAttempt)) {
// If another thread/goroutine is not acquiring/renewing the resource, and none has attempted
// to do so within the last 30 seconds, this thread/goroutine will do it
er.acquiring, acquire = true, true
@ -121,3 +147,8 @@ func (er *Resource[TResource, TState]) Expire() {
// Reset the expiration as if we never got this resource to begin with
er.expiration = time.Time{}
}
func (er *Resource[TResource, TState]) expiringSoon(TResource, TState) bool {
// call time.Now() instead of using Get's value so ShouldRefresh doesn't need a time.Time parameter
return er.expiration.Add(-5 * time.Minute).Before(time.Now())
}

@ -65,6 +65,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
type TokenSource = base.TokenSource
const (
TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
TokenSourceCache = base.TokenSourceCache
)
// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
// must contain the public certificate and the private key. If a PEM block is encrypted and
// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
@ -639,7 +646,7 @@ func (cca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
if err != nil {
return AuthResult{}, err
}
return cca.base.AuthResultFromToken(ctx, authParams, token, true)
return cca.base.AuthResultFromToken(ctx, authParams, token)
}
// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
@ -733,7 +740,7 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string,
if err != nil {
return AuthResult{}, err
}
return cca.base.AuthResultFromToken(ctx, authParams, token, true)
return cca.base.AuthResultFromToken(ctx, authParams, token)
}
// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf

@ -64,11 +64,20 @@ type CallErr struct {
Err error
}
type InvalidJsonErr struct {
Err error
}
// Errors implements error.Error().
func (e CallErr) Error() string {
return e.Err.Error()
}
// Errors implements error.Error().
func (e InvalidJsonErr) Error() string {
return e.Err.Error()
}
// Verbose prints a verbose error message with the request or response.
func (e CallErr) Verbose() string {
e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need

@ -5,16 +5,17 @@ package base
import (
"context"
"errors"
"fmt"
"net/url"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
@ -94,6 +95,7 @@ type AuthResult struct {
// AuthResultMetadata which contains meta data for the AuthResult
type AuthResultMetadata struct {
RefreshOn time.Time
TokenSource TokenSource
}
@ -101,9 +103,8 @@ type TokenSource int
// These are all the types of token flows.
const (
SourceUnknown TokenSource = 0
IdentityProvider TokenSource = 1
Cache TokenSource = 2
TokenSourceIdentityProvider TokenSource = 0
TokenSourceCache TokenSource = 1
)
// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
@ -111,7 +112,6 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
if err := storageTokenResponse.AccessToken.Validate(); err != nil {
return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
}
account := storageTokenResponse.Account
accessToken := storageTokenResponse.AccessToken.Secret
grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
@ -132,7 +132,8 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
GrantedScopes: grantedScopes,
DeclinedScopes: nil,
Metadata: AuthResultMetadata{
TokenSource: Cache,
TokenSource: TokenSourceCache,
RefreshOn: storageTokenResponse.AccessToken.RefreshOn.T,
},
}, nil
}
@ -146,10 +147,11 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco
Account: account,
IDToken: tokenResponse.IDToken,
AccessToken: tokenResponse.AccessToken,
ExpiresOn: tokenResponse.ExpiresOn.T,
ExpiresOn: tokenResponse.ExpiresOn,
GrantedScopes: tokenResponse.GrantedScopes.Slice,
Metadata: AuthResultMetadata{
TokenSource: IdentityProvider,
TokenSource: TokenSourceIdentityProvider,
RefreshOn: tokenResponse.RefreshOn.T,
},
}, nil
}
@ -165,6 +167,8 @@ type Client struct {
AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
cacheAccessor cache.ExportReplace
cacheAccessorMu *sync.RWMutex
canRefresh map[string]*atomic.Value
canRefreshMu *sync.Mutex
}
// Option is an optional argument to the New constructor.
@ -241,6 +245,8 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O
cacheAccessorMu: &sync.RWMutex{},
manager: storage.New(token),
pmanager: storage.NewPartitionedManager(token),
canRefresh: make(map[string]*atomic.Value),
canRefreshMu: &sync.Mutex{},
}
for _, o := range options {
if err = o(&client); err != nil {
@ -345,6 +351,28 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if silent.Claims == "" {
ar, err = AuthResultFromStorage(storageTokenResponse)
if err == nil {
if rt := storageTokenResponse.AccessToken.RefreshOn.T; !rt.IsZero() && Now().After(rt) {
b.canRefreshMu.Lock()
refreshValue, ok := b.canRefresh[tenant]
if !ok {
refreshValue = &atomic.Value{}
refreshValue.Store(false)
b.canRefresh[tenant] = refreshValue
}
b.canRefreshMu.Unlock()
if refreshValue.CompareAndSwap(false, true) {
defer refreshValue.Store(false)
// Re-read the cached token to check whether another goroutine has
// already refreshed it. If the cached secret no longer matches the
// token we were about to return, a refresh has already happened and
// this goroutine doesn't need to perform another.
if str, err := m.Read(ctx, authParams); err == nil && str.AccessToken.Secret == ar.AccessToken {
if tr, er := b.Token.Credential(ctx, authParams, silent.Credential); er == nil {
return b.AuthResultFromToken(ctx, authParams, tr)
}
}
}
}
ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
return ar, err
}
@ -362,7 +390,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if err != nil {
return ar, err
}
return b.AuthResultFromToken(ctx, authParams, token, true)
return b.AuthResultFromToken(ctx, authParams, token)
}
func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
@ -391,7 +419,7 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui
return AuthResult{}, err
}
return b.AuthResultFromToken(ctx, authParams, token, true)
return b.AuthResultFromToken(ctx, authParams, token)
}
// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
@ -420,15 +448,12 @@ func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams Acq
authParams.UserAssertion = onBehalfOfParams.UserAssertion
token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
if err == nil {
ar, err = b.AuthResultFromToken(ctx, authParams, token, true)
ar, err = b.AuthResultFromToken(ctx, authParams, token)
}
return ar, err
}
func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
if !cacheWrite {
return NewAuthResult(token, shared.Account{})
}
func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
var m manager = b.manager
if authParams.AuthorizationType == authority.ATOnBehalfOf {
m = b.pmanager
@ -458,6 +483,10 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au
return ar, err
}
// Now wraps time.Now. It's a package-level variable so tests can replace it
// to control the clock when exercising token refresh behavior.
var Now = time.Now
func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) {
if b.cacheAccessor != nil {
b.cacheAccessorMu.RLock()

@ -72,6 +72,7 @@ type AccessToken struct {
ClientID string `json:"client_id,omitempty"`
Secret string `json:"secret,omitempty"`
Scopes string `json:"target,omitempty"`
RefreshOn internalTime.Unix `json:"refresh_on,omitempty"`
ExpiresOn internalTime.Unix `json:"expires_on,omitempty"`
ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
CachedAt internalTime.Unix `json:"cached_at,omitempty"`
@ -83,7 +84,7 @@ type AccessToken struct {
}
// NewAccessToken is the constructor for AccessToken.
func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
func NewAccessToken(homeID, env, realm, clientID string, cachedAt, refreshOn, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
return AccessToken{
HomeAccountID: homeID,
Environment: env,
@ -93,6 +94,7 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
Secret: token,
Scopes: scopes,
CachedAt: internalTime.Unix{T: cachedAt.UTC()},
RefreshOn: internalTime.Unix{T: refreshOn.UTC()},
ExpiresOn: internalTime.Unix{T: expiresOn.UTC()},
ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
TokenType: tokenType,
@ -102,8 +104,9 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
// Key outputs the key that can be used to uniquely look up this entry in a map.
func (a AccessToken) Key() string {
ks := []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}
key := strings.Join(
[]string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes},
ks,
shared.CacheKeySeparator,
)
// add token type to key for new access tokens types. skip for bearer token type to

@ -114,7 +114,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes
realm,
clientID,
cachedAt,
tokenResponse.ExpiresOn.T,
tokenResponse.RefreshOn.T,
tokenResponse.ExpiresOn,
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,

@ -173,6 +173,7 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces
environment := authParameters.AuthorityInfo.Host
realm := authParameters.AuthorityInfo.Tenant
clientID := authParameters.ClientID
target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
cachedAt := time.Now()
authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
@ -193,7 +194,8 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces
realm,
clientID,
cachedAt,
tokenResponse.ExpiresOn.T,
tokenResponse.RefreshOn.T,
tokenResponse.ExpiresOn,
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,
@ -265,6 +267,9 @@ func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo author
}
func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
if m.requests == nil {
return authority.InstanceDiscoveryMetadata{}, fmt.Errorf("httpclient in oauth instance for fetching metadata is nil")
}
m.aadCacheMu.Lock()
defer m.aadCacheMu.Unlock()
discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
@ -459,6 +464,7 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s
func (m *Manager) writeAccount(account shared.Account) error {
key := account.Key()
m.contractMu.Lock()
defer m.contractMu.Unlock()
m.contract.Accounts[key] = account

@ -31,4 +31,6 @@ type TokenProviderResult struct {
AccessToken string
// ExpiresInSeconds is the lifetime of the token in seconds
ExpiresInSeconds int
// RefreshInSeconds indicates the suggested time to refresh the token, if any
RefreshInSeconds int
}

@ -146,7 +146,8 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
// change this to s.error() and make s.error() write the failPage instead of an error code.
_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
s.putResult(Result{Err: fmt.Errorf(desc)})
s.putResult(Result{Err: fmt.Errorf("%s", desc)})
return
}

@ -111,7 +111,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
Scopes: scopes,
TenantID: authParams.AuthorityInfo.Tenant,
}
tr, err := cred.TokenProvider(ctx, params)
pr, err := cred.TokenProvider(ctx, params)
if err != nil {
if len(scopes) == 0 {
err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
@ -119,14 +119,18 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
}
return accesstokens.TokenResponse{}, err
}
return accesstokens.TokenResponse{
TokenType: authParams.AuthnScheme.AccessTokenType(),
AccessToken: tr.AccessToken,
ExpiresOn: internalTime.DurationTime{
T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
},
tr := accesstokens.TokenResponse{
TokenType: authParams.AuthnScheme.AccessTokenType(),
AccessToken: pr.AccessToken,
ExpiresOn: now.Add(time.Duration(pr.ExpiresInSeconds) * time.Second),
GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
}, nil
}
if pr.RefreshInSeconds > 0 {
tr.RefreshOn = internalTime.DurationTime{
T: now.Add(time.Duration(pr.RefreshInSeconds) * time.Second),
}
}
return tr, nil
}
if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {

@ -17,6 +17,7 @@ import (
/* #nosec */
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
@ -68,7 +69,7 @@ type DeviceCodeResponse struct {
UserCode string `json:"user_code"`
DeviceCode string `json:"device_code"`
VerificationURL string `json:"verification_url"`
VerificationURL string `json:"verification_uri"`
ExpiresIn int `json:"expires_in"`
Interval int `json:"interval"`
Message string `json:"message"`
@ -112,19 +113,31 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
}
return c.AssertionCallback(ctx, options)
}
token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
claims := jwt.MapClaims{
"aud": authParams.Endpoints.TokenEndpoint,
"exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)),
"iss": authParams.ClientID,
"jti": uuid.New().String(),
"nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),
"sub": authParams.ClientID,
})
}
isADFSorDSTS := authParams.AuthorityInfo.AuthorityType == authority.ADFS ||
authParams.AuthorityInfo.AuthorityType == authority.DSTS
var signingMethod jwt.SigningMethod = jwt.SigningMethodPS256
thumbprintKey := "x5t#S256"
if isADFSorDSTS {
signingMethod = jwt.SigningMethodRS256
thumbprintKey = "x5t"
}
token := jwt.NewWithClaims(signingMethod, claims)
token.Header = map[string]interface{}{
"alg": "RS256",
"typ": "JWT",
"x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)),
"alg": signingMethod.Alg(),
"typ": "JWT",
thumbprintKey: base64.StdEncoding.EncodeToString(thumbprint(c.Cert, signingMethod.Alg())),
}
if authParams.SendX5C {
@ -133,17 +146,23 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
assertion, err := token.SignedString(c.Key)
if err != nil {
return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err)
return "", fmt.Errorf("unable to sign JWT token: %w", err)
}
return assertion, nil
}
// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT.
// https://tools.ietf.org/html/rfc7517#section-4.8
func thumbprint(cert *x509.Certificate) []byte {
/* #nosec */
a := sha1.Sum(cert.Raw)
return a[:]
func thumbprint(cert *x509.Certificate, alg string) []byte {
switch alg {
case jwt.SigningMethodRS256.Name: // identity providers like ADFS don't support SHA256 assertions, so need to support this
hash := sha1.Sum(cert.Raw) /* #nosec */
return hash[:]
default:
hash := sha256.Sum256(cert.Raw)
return hash[:]
}
}
// Client represents the REST calls to get tokens from token generator backends.
@ -262,11 +281,7 @@ func (c Client) FromClientSecret(ctx context.Context, authParameters authority.A
qv.Set(clientID, authParameters.ClientID)
addScopeQueryParam(qv, authParameters)
token, err := c.doTokenResp(ctx, authParameters, qv)
if err != nil {
return token, fmt.Errorf("FromClientSecret(): %w", err)
}
return token, nil
return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) {
@ -281,11 +296,7 @@ func (c Client) FromAssertion(ctx context.Context, authParameters authority.Auth
qv.Set(clientInfo, clientInfoVal)
addScopeQueryParam(qv, authParameters)
token, err := c.doTokenResp(ctx, authParameters, qv)
if err != nil {
return token, fmt.Errorf("FromAssertion(): %w", err)
}
return token, nil
return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) {

@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"time"
@ -173,14 +174,75 @@ type TokenResponse struct {
FamilyID string `json:"foci"`
IDToken IDToken `json:"id_token"`
ClientInfo ClientInfo `json:"client_info"`
ExpiresOn internalTime.DurationTime `json:"expires_in"`
RefreshOn internalTime.DurationTime `json:"refresh_in,omitempty"`
ExpiresOn time.Time `json:"-"`
ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"`
GrantedScopes Scopes `json:"scope"`
DeclinedScopes []string // This is derived
AdditionalFields map[string]interface{}
scopesComputed bool
}
func (tr *TokenResponse) UnmarshalJSON(data []byte) error {
type Alias TokenResponse
aux := &struct {
ExpiresIn internalTime.DurationTime `json:"expires_in,omitempty"`
ExpiresOn any `json:"expires_on,omitempty"`
*Alias
}{
Alias: (*Alias)(tr),
}
// Unmarshal the JSON data into the aux struct
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
// Function to parse different date formats
// This is a workaround for the issue described here:
// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/4963
parseExpiresOn := func(expiresOn string) (time.Time, error) {
var formats = []string{
"01/02/2006 15:04:05", // MM/dd/yyyy HH:mm:ss
"2006-01-02 15:04:05", // yyyy-MM-dd HH:mm:ss
time.RFC3339Nano, // ISO 8601 (with nanosecond precision)
}
for _, format := range formats {
if t, err := time.Parse(format, expiresOn); err == nil {
return t, nil
}
}
return time.Time{}, fmt.Errorf("invalid ExpiresOn format: %s", expiresOn)
}
scopesComputed bool
if expiresOnStr, ok := aux.ExpiresOn.(string); ok {
if ts, err := strconv.ParseInt(expiresOnStr, 10, 64); err == nil {
tr.ExpiresOn = time.Unix(ts, 0)
return nil
}
if expiresOnStr != "" {
if t, err := parseExpiresOn(expiresOnStr); err != nil {
return err
} else {
tr.ExpiresOn = t
return nil
}
}
}
// Check if ExpiresOn is a number (Unix timestamp or ISO 8601)
if expiresOnNum, ok := aux.ExpiresOn.(float64); ok {
tr.ExpiresOn = time.Unix(int64(expiresOnNum), 0)
return nil
}
if !aux.ExpiresIn.T.IsZero() {
tr.ExpiresOn = aux.ExpiresIn.T
return nil
}
return errors.New("expires_in and expires_on are both missing or invalid")
}
// ComputeScope computes the final scopes based on what was granted by the server and

@ -98,7 +98,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
if resp != nil {
if err := unmarshal(data, resp); err != nil {
return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))}
}
}
return nil
@ -221,7 +221,7 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values
}
if resp != nil {
if err := unmarshal(data, resp); err != nil {
return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))
return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))}
}
}
return nil

@ -5,4 +5,4 @@
package version
// Version is the version of this client package that is communicated to the server.
const Version = "1.2.0"
const Version = "1.4.2"

@ -0,0 +1,28 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
package managedidentity
import (
"context"
"net/http"
"os"
)
// createAzureMLAuthRequest builds the GET request for the Azure ML MSI
// endpoint. The endpoint URL and shared secret come from the MSI environment
// variables; the identity defaults to the environment's default client ID
// unless a user-assigned client ID was supplied.
func createAzureMLAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(msiEndpointEnvVar), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("secret", os.Getenv(msiSecretEnvVar))
	query := req.URL.Query()
	query.Set(apiVersionQueryParameterName, azureMLAPIVersion)
	query.Set(resourceQueryParameterName, resource)
	// Default to the environment's identity; a user-assigned client ID overrides it.
	clientID := os.Getenv("DEFAULT_IDENTITY_CLIENT_ID")
	if cid, ok := id.(UserAssignedClientID); ok {
		clientID = string(cid)
	}
	query.Set("clientid", clientID)
	req.URL.RawQuery = query.Encode()
	return req, nil
}

@ -0,0 +1,37 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
package managedidentity
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
)
// createCloudShellAuthRequest builds the POST request for the Cloud Shell MSI
// endpoint. The requested resource is sent as a form-encoded body rather than
// a query parameter.
func createCloudShellAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
	endpoint := os.Getenv(msiEndpointEnvVar)
	parsed, err := url.Parse(endpoint)
	if err != nil {
		return nil, fmt.Errorf("couldn't parse %q: %s", endpoint, err)
	}
	form := url.Values{}
	form.Set(resourceQueryParameterName, resource)
	body := io.NopCloser(strings.NewReader(form.Encode()))
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, parsed.String(), body)
	if err != nil {
		return nil, fmt.Errorf("error creating http request %s", err)
	}
	req.Header.Set(metaHTTPHeaderName, "true")
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	return req, nil
}

@ -0,0 +1,717 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
/*
Package managedidentity provides a client for retrieval of Managed Identity applications.
The Managed Identity Client is used to acquire a token for managed identity assigned to
an azure resource such as Azure function, app service, virtual machine, etc. to acquire a token
without using credentials.
*/
package managedidentity
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"strings"
"sync/atomic"
"time"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
)
// AuthResult contains the results of one token acquisition operation.
// For details see https://aka.ms/msal-net-authenticationresult
type AuthResult = base.AuthResult
type TokenSource = base.TokenSource
const (
TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
TokenSourceCache = base.TokenSourceCache
)
const (
// DefaultToIMDS indicates that the source is defaulted to IMDS when no environment variables are set.
DefaultToIMDS Source = "DefaultToIMDS"
AzureArc Source = "AzureArc"
ServiceFabric Source = "ServiceFabric"
CloudShell Source = "CloudShell"
AzureML Source = "AzureML"
AppService Source = "AppService"
// General request query parameter names
metaHTTPHeaderName = "Metadata"
apiVersionQueryParameterName = "api-version"
resourceQueryParameterName = "resource"
wwwAuthenticateHeaderName = "www-authenticate"
// UAMI query parameter name
miQueryParameterClientId = "client_id"
miQueryParameterObjectId = "object_id"
miQueryParameterPrincipalId = "principal_id"
miQueryParameterResourceIdIMDS = "msi_res_id"
miQueryParameterResourceId = "mi_res_id"
// IMDS
imdsDefaultEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
imdsAPIVersion = "2018-02-01"
systemAssignedManagedIdentity = "system_assigned_managed_identity"
// Azure Arc
azureArcEndpoint = "http://127.0.0.1:40342/metadata/identity/oauth2/token"
azureArcAPIVersion = "2020-06-01"
azureArcFileExtension = ".key"
azureArcMaxFileSizeBytes int64 = 4096
linuxTokenPath = "/var/opt/azcmagent/tokens" // #nosec G101
linuxHimdsPath = "/opt/azcmagent/bin/himds"
azureConnectedMachine = "AzureConnectedMachineAgent"
himdsExecutableName = "himds.exe"
tokenName = "Tokens"
// App Service
appServiceAPIVersion = "2019-08-01"
// AzureML
azureMLAPIVersion = "2017-09-01"
// Service Fabric
serviceFabricAPIVersion = "2019-07-01-preview"
// Environment Variables
identityEndpointEnvVar = "IDENTITY_ENDPOINT"
identityHeaderEnvVar = "IDENTITY_HEADER"
azurePodIdentityAuthorityHostEnvVar = "AZURE_POD_IDENTITY_AUTHORITY_HOST"
imdsEndVar = "IMDS_ENDPOINT"
msiEndpointEnvVar = "MSI_ENDPOINT"
msiSecretEnvVar = "MSI_SECRET"
identityServerThumbprintEnvVar = "IDENTITY_SERVER_THUMBPRINT"
defaultRetryCount = 3
)
var retryCodesForIMDS = []int{
http.StatusNotFound, // 404
http.StatusGone, // 410
http.StatusTooManyRequests, // 429
http.StatusInternalServerError, // 500
http.StatusNotImplemented, // 501
http.StatusBadGateway, // 502
http.StatusServiceUnavailable, // 503
http.StatusGatewayTimeout, // 504
http.StatusHTTPVersionNotSupported, // 505
http.StatusVariantAlsoNegotiates, // 506
http.StatusInsufficientStorage, // 507
http.StatusLoopDetected, // 508
http.StatusNotExtended, // 510
http.StatusNetworkAuthenticationRequired, // 511
}
var retryStatusCodes = []int{
http.StatusRequestTimeout, // 408
http.StatusTooManyRequests, // 429
http.StatusInternalServerError, // 500
http.StatusBadGateway, // 502
http.StatusServiceUnavailable, // 503
http.StatusGatewayTimeout, // 504
}
// getAzureArcPlatformPath returns the directory in which the Azure Arc agent
// stores token secret files for the given platform ("windows" or "linux");
// it returns "" for unsupported platforms. Declared as a variable so tests
// can substitute it.
var getAzureArcPlatformPath = func(platform string) string {
	switch platform {
	case "windows":
		return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, tokenName)
	case "linux":
		return linuxTokenPath
	default:
		return ""
	}
}

// getAzureArcHimdsFilePath returns the expected location of the himds agent
// executable for the given platform; it returns "" for unsupported platforms.
// Declared as a variable so tests can substitute it.
var getAzureArcHimdsFilePath = func(platform string) string {
	switch platform {
	case "windows":
		return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, himdsExecutableName)
	case "linux":
		return linuxHimdsPath
	default:
		return ""
	}
}
// Source identifies the managed identity platform a token is acquired from
// (IMDS, App Service, Azure Arc, Cloud Shell, Azure ML, or Service Fabric).
type Source string

// ID identifies which managed identity to use: the system-assigned identity
// or a user-assigned identity selected by client, object, or resource ID.
type ID interface {
	value() string
}

type systemAssignedValue string // its private for a reason to make the input consistent.

// UserAssignedClientID selects a user-assigned managed identity by its client ID.
type UserAssignedClientID string

// UserAssignedObjectID selects a user-assigned managed identity by its object ID.
type UserAssignedObjectID string

// UserAssignedResourceID selects a user-assigned managed identity by its ARM resource ID.
type UserAssignedResourceID string

func (s systemAssignedValue) value() string  { return string(s) }
func (c UserAssignedClientID) value() string  { return string(c) }
func (o UserAssignedObjectID) value() string  { return string(o) }
func (r UserAssignedResourceID) value() string { return string(r) }

// SystemAssigned returns the ID for the resource's system-assigned managed identity.
func SystemAssigned() ID {
	return systemAssignedValue(systemAssignedManagedIdentity)
}
// cache never uses the client because instance discovery is always disabled.
var cacheManager *storage.Manager = storage.New(nil)

// Client acquires tokens for one managed identity from one detected source.
type Client struct {
	httpClient ops.HTTPClient // transport, replaceable via WithHTTPClient
	miType     ID             // the identity tokens are requested for
	source     Source         // environment detected by GetSource at construction
	authParams authority.AuthParams
	// retryPolicyEnabled controls whether requests go through retry();
	// it defaults to true and is disabled by WithRetryPolicyDisabled.
	retryPolicyEnabled bool
	// canRefresh is a bool used as a CAS flag so only one in-flight call
	// performs a proactive token refresh at a time.
	canRefresh *atomic.Value
}
// AcquireTokenOptions holds per-call options for AcquireToken.
type AcquireTokenOptions struct {
	claims string // decoded claims challenge, set by WithClaims
}

// ClientOption configures a Client at construction time.
type ClientOption func(*Client)

// AcquireTokenOption configures a single AcquireToken call.
type AcquireTokenOption func(o *AcquireTokenOptions)

// WithClaims sets additional claims to request for the token, such as those required by token revocation or conditional access policies.
// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded.
func WithClaims(claims string) AcquireTokenOption {
	return func(o *AcquireTokenOptions) {
		o.claims = claims
	}
}

// WithHTTPClient allows for a custom HTTP client to be set.
func WithHTTPClient(httpClient ops.HTTPClient) ClientOption {
	return func(c *Client) {
		c.httpClient = httpClient
	}
}

// WithRetryPolicyDisabled turns off the client's default retry behavior,
// so each token request is attempted exactly once.
func WithRetryPolicyDisabled() ClientOption {
	return func(c *Client) {
		c.retryPolicyEnabled = false
	}
}
// New returns a Client that acquires tokens for the given managed identity.
// ID: [SystemAssigned], [UserAssignedClientID], [UserAssignedResourceID], [UserAssignedObjectID]
//
// Options: [WithHTTPClient]
func New(id ID, options ...ClientOption) (Client, error) {
	src, err := GetSource()
	if err != nil {
		return Client{}, err
	}
	// Some hosting environments support only the system-assigned identity;
	// reject user-assigned IDs up front for those sources.
	switch src {
	case AzureArc:
		switch id.(type) {
		case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
			return Client{}, errors.New("Azure Arc doesn't support user-assigned managed identities")
		}
	case AzureML:
		switch id.(type) {
		case UserAssignedObjectID, UserAssignedResourceID:
			return Client{}, errors.New("Azure ML supports specifying a user-assigned managed identity by client ID only")
		}
	case CloudShell:
		switch id.(type) {
		case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
			return Client{}, errors.New("Cloud Shell doesn't support user-assigned managed identities")
		}
	case ServiceFabric:
		switch id.(type) {
		case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
			return Client{}, errors.New("Service Fabric API doesn't support specifying a user-assigned identity. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
		}
	}
	// Validate the ID itself: known type and, for user-assigned IDs, non-empty.
	switch t := id.(type) {
	case systemAssignedValue:
		// nothing to validate
	case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
		if id.value() == "" {
			return Client{}, fmt.Errorf("empty %T", t)
		}
	default:
		return Client{}, fmt.Errorf("unsupported type %T", id)
	}
	refreshing := atomic.Value{}
	refreshing.Store(false)
	c := Client{
		miType:             id,
		httpClient:         shared.DefaultClient,
		retryPolicyEnabled: true,
		source:             src,
		canRefresh:         &refreshing,
	}
	for _, opt := range options {
		opt(&c)
	}
	// Instance discovery is always disabled for managed identity, so a fixed
	// pseudo-authority is sufficient for cache keying.
	info, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/managed_identity", false, true)
	if err != nil {
		return Client{}, err
	}
	c.authParams = authority.NewAuthParams(c.miType.value(), info)
	return c, nil
}
// GetSource detects and returns the managed identity source available on the environment.
// Detection is driven purely by environment variables, checked in priority
// order; IMDS is the fallback when nothing else matches.
func GetSource() (Source, error) {
	var (
		identityEndpoint         = os.Getenv(identityEndpointEnvVar)
		identityHeader           = os.Getenv(identityHeaderEnvVar)
		identityServerThumbprint = os.Getenv(identityServerThumbprintEnvVar)
		msiEndpoint              = os.Getenv(msiEndpointEnvVar)
		msiSecret                = os.Getenv(msiSecretEnvVar)
		imdsEndpoint             = os.Getenv(imdsEndVar)
	)
	switch {
	case identityEndpoint != "" && identityHeader != "":
		// Service Fabric additionally exposes the server TLS thumbprint.
		if identityServerThumbprint != "" {
			return ServiceFabric, nil
		}
		return AppService, nil
	case msiEndpoint != "":
		// Azure ML sets both the endpoint and a secret; Cloud Shell only the endpoint.
		if msiSecret != "" {
			return AzureML, nil
		}
		return CloudShell, nil
	case isAzureArcEnvironment(identityEndpoint, imdsEndpoint):
		return AzureArc, nil
	}
	return DefaultToIMDS, nil
}
// now wraps time.Now so tests can inject a fixed clock when exercising the
// proactive token-refresh logic.
var now = time.Now
// AcquireToken acquires a token from the configured managed identity on an azure resource.
//
// Resource: scopes application is requesting access to
// Options: [WithClaims]
func (c Client) AcquireToken(ctx context.Context, resource string, options ...AcquireTokenOption) (AuthResult, error) {
	// Managed identity endpoints take a bare resource URI, not a ".default" scope.
	resource = strings.TrimSuffix(resource, "/.default")
	o := AcquireTokenOptions{}
	for _, option := range options {
		option(&o)
	}
	c.authParams.Scopes = []string{resource}

	// ignore cached access tokens when given claims
	if o.claims == "" {
		stResp, err := cacheManager.Read(ctx, c.authParams)
		if err != nil {
			return AuthResult{}, err
		}
		ar, err := base.AuthResultFromStorage(stResp)
		if err == nil {
			// Cache hit. If the token has passed its refresh-on time, try a
			// proactive refresh; the CAS on canRefresh ensures only one caller
			// refreshes at a time, and a failed refresh silently falls back to
			// the still-valid cached token.
			if !stResp.AccessToken.RefreshOn.T.IsZero() && !stResp.AccessToken.RefreshOn.T.After(now()) && c.canRefresh.CompareAndSwap(false, true) {
				defer c.canRefresh.Store(false)
				if tr, er := c.getToken(ctx, resource); er == nil {
					return tr, nil
				}
			}
			ar.AccessToken, err = c.authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
			return ar, err
		}
	}
	// Cache miss, claims supplied, or the cached entry couldn't be converted:
	// go to the identity provider.
	return c.getToken(ctx, resource)
}
// getToken dispatches a token request to the acquire function matching the
// source detected when the client was constructed.
func (c Client) getToken(ctx context.Context, resource string) (AuthResult, error) {
	switch c.source {
	case AppService:
		return c.acquireTokenForAppService(ctx, resource)
	case AzureArc:
		return c.acquireTokenForAzureArc(ctx, resource)
	case AzureML:
		return c.acquireTokenForAzureML(ctx, resource)
	case CloudShell:
		return c.acquireTokenForCloudShell(ctx, resource)
	case DefaultToIMDS:
		return c.acquireTokenForIMDS(ctx, resource)
	case ServiceFabric:
		return c.acquireTokenForServiceFabric(ctx, resource)
	}
	return AuthResult{}, fmt.Errorf("unsupported source %q", c.source)
}
// completeTokenRequest finishes a managed identity acquisition: it
// short-circuits on a request-construction error, exchanges the request for a
// token response, and converts that response into a cached AuthResult.
// Factoring this out removes five byte-identical function bodies.
func (c Client) completeTokenRequest(req *http.Request, err error, resource string) (AuthResult, error) {
	if err != nil {
		return AuthResult{}, err
	}
	tokenResponse, err := c.getTokenForRequest(req, resource)
	if err != nil {
		return AuthResult{}, err
	}
	return authResultFromToken(c.authParams, tokenResponse)
}

// acquireTokenForAppService acquires a token from the App Service identity endpoint.
func (c Client) acquireTokenForAppService(ctx context.Context, resource string) (AuthResult, error) {
	req, err := createAppServiceAuthRequest(ctx, c.miType, resource)
	return c.completeTokenRequest(req, err, resource)
}

// acquireTokenForIMDS acquires a token from the IMDS endpoint.
func (c Client) acquireTokenForIMDS(ctx context.Context, resource string) (AuthResult, error) {
	req, err := createIMDSAuthRequest(ctx, c.miType, resource)
	return c.completeTokenRequest(req, err, resource)
}

// acquireTokenForCloudShell acquires a token from the Cloud Shell MSI endpoint.
func (c Client) acquireTokenForCloudShell(ctx context.Context, resource string) (AuthResult, error) {
	req, err := createCloudShellAuthRequest(ctx, resource)
	return c.completeTokenRequest(req, err, resource)
}

// acquireTokenForAzureML acquires a token from the Azure ML MSI endpoint.
func (c Client) acquireTokenForAzureML(ctx context.Context, resource string) (AuthResult, error) {
	req, err := createAzureMLAuthRequest(ctx, c.miType, resource)
	return c.completeTokenRequest(req, err, resource)
}

// acquireTokenForServiceFabric acquires a token from the Service Fabric MSI endpoint.
func (c Client) acquireTokenForServiceFabric(ctx context.Context, resource string) (AuthResult, error) {
	req, err := createServiceFabricAuthRequest(ctx, resource)
	return c.completeTokenRequest(req, err, resource)
}
// acquireTokenForAzureArc implements the Azure Arc challenge-response flow:
// an unauthenticated probe is expected to return 401 with a www-authenticate
// header naming a secret key file on the local disk; that secret is then sent
// as Basic authorization on a second, real token request.
func (c Client) acquireTokenForAzureArc(ctx context.Context, resource string) (AuthResult, error) {
	// First request intentionally carries no key; it exists only to obtain the challenge.
	req, err := createAzureArcAuthRequest(ctx, resource, "")
	if err != nil {
		return AuthResult{}, err
	}
	response, err := c.httpClient.Do(req)
	if err != nil {
		return AuthResult{}, err
	}
	defer response.Body.Close()
	// Anything other than 401 means the endpoint did not issue the expected challenge.
	if response.StatusCode != http.StatusUnauthorized {
		return AuthResult{}, fmt.Errorf("expected a 401 response, received %d", response.StatusCode)
	}
	// Read the secret from the file path advertised in the challenge header.
	secret, err := c.getAzureArcSecretKey(response, runtime.GOOS)
	if err != nil {
		return AuthResult{}, err
	}
	secondRequest, err := createAzureArcAuthRequest(ctx, resource, string(secret))
	if err != nil {
		return AuthResult{}, err
	}
	tokenResponse, err := c.getTokenForRequest(secondRequest, resource)
	if err != nil {
		return AuthResult{}, err
	}
	return authResultFromToken(c.authParams, tokenResponse)
}
// authResultFromToken writes a freshly acquired token response to the cache
// and converts it into an AuthResult, formatting the access token per the
// configured authentication scheme.
func authResultFromToken(authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
	if cacheManager == nil {
		return AuthResult{}, errors.New("cache instance is nil")
	}
	account, err := cacheManager.Write(authParams, token)
	if err != nil {
		return AuthResult{}, err
	}
	// if refreshOn is not set, set it to half of the time until expiry if expiry is more than 2 hours away
	if token.RefreshOn.T.IsZero() {
		if lifetime := time.Until(token.ExpiresOn); lifetime > 2*time.Hour {
			token.RefreshOn.T = time.Now().Add(lifetime / 2)
		}
	}
	ar, err := base.NewAuthResult(token, account)
	if err != nil {
		return AuthResult{}, err
	}
	ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
	return ar, err
}
// contains reports whether element occurs in list.
func contains[T comparable](list []T, element T) bool {
	for i := range list {
		if list[i] == element {
			return true
		}
	}
	return false
}
// retry performs an HTTP request with retries based on the provided options.
// It retries on transport errors and on retryable status codes (a wider list
// for IMDS), waiting one second between attempts and honoring cancellation of
// the request's context.
func (c Client) retry(maxRetries int, req *http.Request) (*http.Response, error) {
	var resp *http.Response
	var err error
	for attempt := 0; attempt < maxRetries; attempt++ {
		// Each attempt gets a one-minute timeout derived from the request context.
		// NOTE(review): the cancel is deferred inside the loop, so cancels
		// accumulate until retry returns, and the returned response's context
		// is cancelled at that point — which can abort a caller's in-progress
		// body read. Confirm callers buffer the body promptly (getTokenForRequest
		// does ReadAll immediately) before relying on the returned response.
		tryCtx, tryCancel := context.WithTimeout(req.Context(), time.Minute)
		defer tryCancel()
		// Drain and close the previous attempt's body so the connection can be reused.
		if resp != nil && resp.Body != nil {
			_, _ = io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
		}
		cloneReq := req.Clone(tryCtx)
		resp, err = c.httpClient.Do(cloneReq)
		// IMDS documents a broader set of retryable status codes.
		retrylist := retryStatusCodes
		if c.source == DefaultToIMDS {
			retrylist = retryCodesForIMDS
		}
		if err == nil && !contains(retrylist, resp.StatusCode) {
			return resp, nil
		}

		// Back off one second between attempts, but bail out immediately if the
		// parent context is done.
		select {
		case <-time.After(time.Second):
		case <-req.Context().Done():
			err = req.Context().Err()
			return resp, err
		}
	}
	// Out of attempts: return the last response/error pair as-is.
	return resp, err
}
// getTokenForRequest executes req (with or without the retry policy), decodes
// the JSON token response, and appends the requested resource to the granted
// scopes. Non-2xx replies are wrapped in errors.CallErr; undecodable bodies in
// errors.InvalidJsonErr.
func (c Client) getTokenForRequest(req *http.Request, resource string) (accesstokens.TokenResponse, error) {
	r := accesstokens.TokenResponse{}
	var resp *http.Response
	var err error
	if c.retryPolicyEnabled {
		resp, err = c.retry(defaultRetryCount, req)
	} else {
		resp, err = c.httpClient.Do(req)
	}
	if err != nil {
		return r, err
	}
	// Body is buffered in full before any status handling so the error paths
	// below can include it in their messages.
	responseBytes, err := io.ReadAll(resp.Body)
	defer resp.Body.Close()
	if err != nil {
		return r, err
	}
	switch resp.StatusCode {
	case http.StatusOK, http.StatusAccepted:
	default:
		// Include the (trimmed) body text in the error when the server sent one.
		sd := strings.TrimSpace(string(responseBytes))
		if sd != "" {
			return r, errors.CallErr{
				Req:  req,
				Resp: resp,
				Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s",
					req.URL.String(),
					req.Method,
					resp.StatusCode,
					sd),
			}
		}
		return r, errors.CallErr{
			Req:  req,
			Resp: resp,
			Err:  fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, resp.StatusCode),
		}
	}
	err = json.Unmarshal(responseBytes, &r)
	if err != nil {
		return r, errors.InvalidJsonErr{
			Err: fmt.Errorf("error parsing the json error: %s", err),
		}
	}
	// Managed identity responses don't echo scopes back; record the requested
	// resource as the granted scope.
	r.GrantedScopes.Slice = append(r.GrantedScopes.Slice, resource)
	return r, err
}
// createAppServiceAuthRequest builds the GET request for the App Service
// identity endpoint, authenticating with the X-IDENTITY-HEADER secret and
// selecting the identity via the appropriate query parameter.
func createAppServiceAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(identityEndpointEnvVar), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeaderEnvVar))
	query := req.URL.Query()
	query.Set("api-version", appServiceAPIVersion)
	query.Set("resource", resource)
	switch t := id.(type) {
	case systemAssignedValue:
		// system-assigned identity needs no extra parameter
	case UserAssignedClientID:
		query.Set(miQueryParameterClientId, string(t))
	case UserAssignedResourceID:
		query.Set(miQueryParameterResourceId, string(t))
	case UserAssignedObjectID:
		query.Set(miQueryParameterObjectId, string(t))
	default:
		return nil, fmt.Errorf("unsupported type %T", id)
	}
	req.URL.RawQuery = query.Encode()
	return req, nil
}
// createIMDSAuthRequest builds the GET request for the IMDS token endpoint.
// Note IMDS uses msi_res_id (not mi_res_id) for user-assigned resource IDs.
func createIMDSAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
	endpoint, err := url.Parse(imdsDefaultEndpoint)
	if err != nil {
		return nil, fmt.Errorf("couldn't parse %q: %s", imdsDefaultEndpoint, err)
	}
	values := endpoint.Query()
	values.Set(apiVersionQueryParameterName, imdsAPIVersion)
	values.Set(resourceQueryParameterName, resource)
	switch t := id.(type) {
	case systemAssignedValue: // not adding anything
	case UserAssignedClientID:
		values.Set(miQueryParameterClientId, string(t))
	case UserAssignedResourceID:
		values.Set(miQueryParameterResourceIdIMDS, string(t))
	case UserAssignedObjectID:
		values.Set(miQueryParameterObjectId, string(t))
	default:
		return nil, fmt.Errorf("unsupported type %T", id)
	}
	endpoint.RawQuery = values.Encode()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint.String(), nil)
	if err != nil {
		return nil, fmt.Errorf("error creating http request %s", err)
	}
	req.Header.Set(metaHTTPHeaderName, "true")
	return req, nil
}
// createAzureArcAuthRequest builds the GET request for the Azure Arc himds
// endpoint. An empty key produces the unauthenticated challenge probe; a
// non-empty key is sent as Basic authorization.
func createAzureArcAuthRequest(ctx context.Context, resource string, key string) (*http.Request, error) {
	endpoint := os.Getenv(identityEndpointEnvVar)
	if endpoint == "" {
		endpoint = azureArcEndpoint
	}
	parsed, err := url.Parse(endpoint)
	if err != nil {
		return nil, fmt.Errorf("couldn't parse %q: %s", endpoint, err)
	}
	values := parsed.Query()
	values.Set(apiVersionQueryParameterName, azureArcAPIVersion)
	values.Set(resourceQueryParameterName, resource)
	parsed.RawQuery = values.Encode()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, parsed.String(), nil)
	if err != nil {
		return nil, fmt.Errorf("error creating http request %s", err)
	}
	req.Header.Set(metaHTTPHeaderName, "true")
	if key != "" {
		req.Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
	}
	return req, nil
}
// isAzureArcEnvironment reports whether the process appears to be running on
// an Azure Arc-enabled machine: either both Arc environment variables are set,
// or the himds agent binary exists at its platform-specific path.
func isAzureArcEnvironment(identityEndpoint, imdsEndpoint string) bool {
	if identityEndpoint != "" && imdsEndpoint != "" {
		return true
	}
	// Fall back to probing for the himds agent executable on disk.
	if himdsPath := getAzureArcHimdsFilePath(runtime.GOOS); himdsPath != "" {
		_, err := os.Stat(himdsPath)
		return err == nil
	}
	return false
}
// getAzureArcSecretKey extracts the secret-file path from the 401 challenge's
// www-authenticate header, validates the path (extension, directory, size)
// against the platform's expected location, and returns the file's contents.
func (c *Client) getAzureArcSecretKey(response *http.Response, platform string) (string, error) {
	header := response.Header.Get(wwwAuthenticateHeaderName)
	if header == "" {
		return "", errors.New("response has no www-authenticate header")
	}
	// check if the platform is supported
	expectedDir := getAzureArcPlatformPath(platform)
	if expectedDir == "" {
		return "", errors.New("platform not supported, expected linux or windows")
	}
	parts := strings.Split(header, "Basic realm=")
	if len(parts) < 2 {
		return "", fmt.Errorf("basic realm= not found in the string, instead found: %s", header)
	}
	keyPath := parts[1]
	// the advertised file must be a .key file...
	fileName := filepath.Base(keyPath)
	if !strings.HasSuffix(fileName, azureArcFileExtension) {
		return "", fmt.Errorf("invalid file extension, expected %s, got %s", azureArcFileExtension, filepath.Ext(fileName))
	}
	// ...and must live in the platform's expected token directory.
	if expectedDir != filepath.Dir(keyPath) {
		return "", fmt.Errorf("invalid file path, expected %s, got %s", expectedDir, filepath.Dir(keyPath))
	}
	info, err := os.Stat(keyPath)
	if err != nil {
		return "", fmt.Errorf("failed to get metadata for %s due to error: %s", keyPath, err)
	}
	// Reject secret files larger than the 4096-byte limit.
	if size := info.Size(); size > azureArcMaxFileSizeBytes {
		return "", fmt.Errorf("invalid secret file size, expected %d, file size was %d", azureArcMaxFileSizeBytes, size)
	}
	secret, err := os.ReadFile(keyPath)
	if err != nil {
		return "", fmt.Errorf("failed to read %q due to error: %s", keyPath, err)
	}
	return string(secret), nil
}

@ -0,0 +1,25 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
package managedidentity
import (
"context"
"net/http"
"os"
)
// createServiceFabricAuthRequest builds the GET request for the Service
// Fabric identity endpoint, authenticating with the Secret header from the
// identity-header environment variable.
func createServiceFabricAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(identityEndpointEnvVar), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Secret", os.Getenv(identityHeaderEnvVar))
	query := req.URL.Query()
	query.Set("api-version", serviceFabricAPIVersion)
	query.Set("resource", resource)
	req.URL.RawQuery = query.Encode()
	return req, nil
}

@ -51,6 +51,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
type TokenSource = base.TokenSource
const (
TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
TokenSourceCache = base.TokenSourceCache
)
var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
// clientOptions configures the Client's behavior.
@ -387,7 +394,7 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
if err != nil {
return AuthResult{}, err
}
return pca.base.AuthResultFromToken(ctx, authParams, token, true)
return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type DeviceCodeResult = accesstokens.DeviceCodeResult
@ -412,7 +419,7 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error
if err != nil {
return AuthResult{}, err
}
return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
return d.client.base.AuthResultFromToken(ctx, d.authParams, token)
}
// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode
@ -687,7 +694,7 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string,
return AuthResult{}, err
}
return pca.base.AuthResultFromToken(ctx, authParams, token, true)
return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type interactiveAuthResult struct {

@ -1,5 +1,72 @@
# Change Log
## [v1.144.0] - 2025-04-24
- #818 - @dweinshenker - Support Valkey in DatabaseOptions
## [v1.143.0] - 2025-04-22
- #815 - @StephenVarela - Support Load Balancers tls-cipher-policy
## [v1.142.0] - 2025-03-27
- #813 - @lfundaro-do - partner-network-connect: fix typo
- #811 - @lfundaro-do - fix partner attachment rename
- #810 - @apinonformoso - VPC-4359: remove custom unmarshaler for PNCs
- #809 - @apinonformoso - hotfix: json field name
- #808 - @apinonformoso - fix partner network connect json tags
- #807 - @bentranter - Bump Go version to v1.23
## [v1.141.0] - 2025-03-20
- #805 - @singhsaubhikdo - BLOCK-4316: Adds region param in ListSnapshot for resource type volume
- #802 - @apinonformoso - VPC-4312: rename partner interconnect attachment to partner network connect
- #774 - @blesswinsamuel - APPS-10284 Remove "closed beta" note in archive feature to prep for GA release
- #797 - @kperath - add support for cluster status messages
## [v1.140.0] - 2025-03-14
- #800 - @lee-aaron - support Spaces Keys GET by Access Key ID
## [v1.139.0] - 2025-03-12
- #798 - @dylanrhysscott - Fix: Update godo to use simplified template response and provide consistent struct naming
- #796 - @apinonformoso - fix partner interconnect attachment json request response
- #795 - @dylanrhysscott - CON-11904 Ensure taints are correctly returned via node template endpoint
- #794 - @brunograsselli - Update partner interconnect attachment comments
- #793 - @apinonformoso - add auth_key field
- #789 - @guptado - [VPC-3917] Update get service key response model
## [v1.138.0] - 2025-02-18
- #785 - @guptado - Support partner interconnect GetBgpAuthKey and RegenerateServiceKey operations
- #787 - @andrewsomething - ci: upgrade to actions/cache@v4
- #786 - @m3co-code - add flags for doks routing-agent plugin
- #784 - @asaha2 - Support name and id filters for list op
## [v1.137.0] - 2025-02-12
- #782 - @apinonformoso - fix partner interconnect json tag
- #781 - @dylanrhysscott - CON-11810 Implement GetNodePoolTemplate endpoint for DOKS godo client
## [v1.136.0] - 2025-01-28
- #776 - @danaelhe - Databases: Support online-migrations
- #777 - @apinonformoso - update bgp to be a pointer
## [v1.135.0] - 2025-01-27
- #766 - @dhij - kubernetes: add cluster autoscaler config
- #775 - @jvasilevsky - LBASA-3620: add network_stack field to load balancers model
- #773 - @blesswinsamuel - Add field to customize the offline page during app maintenance
## [v1.134.0] - 2025-01-15
- #771 - @d-honeybadger - add ID field to KubernetesClusterUser response
- #768 - @lee-aaron - support Spaces Keys API
## [v1.133.0] - 2025-01-10
- #769 - @guptado - support partner interconnect attachment operations
- #767 - @loosla - [kubernetes]: make kubernetes maintenance_policy day case insensitive
## [v1.132.0] - 2024-12-17
- #764 - @greeshmapill - APPS-9365: Add bitbucket source to App Spec

@ -468,8 +468,10 @@ type AppLogDestinationSpecPapertrail struct {
type AppMaintenanceSpec struct {
	// Indicates whether maintenance mode should be enabled for the app.
	Enabled bool `json:"enabled,omitempty"`
	// Indicates whether the app should be archived. Setting this to true implies that enabled is set to true.
	Archive bool `json:"archive,omitempty"`
	// A custom offline page to display when maintenance mode is enabled or the app is archived.
	OfflinePageURL string `json:"offline_page_url,omitempty"`
}
// AppRouteSpec struct for AppRouteSpec

@ -1453,6 +1453,14 @@ func (a *AppMaintenanceSpec) GetEnabled() bool {
return a.Enabled
}
// GetOfflinePageURL returns the OfflinePageURL field.
func (a *AppMaintenanceSpec) GetOfflinePageURL() string {
	if a != nil {
		return a.OfflinePageURL
	}
	return ""
}
// GetAppID returns the AppID field.
func (a *AppProposeRequest) GetAppID() string {
if a == nil {

@ -42,6 +42,8 @@ const (
databaseIndexPath = databaseBasePath + "/%s/indexes/%s"
databaseLogsinkPath = databaseBasePath + "/%s/logsink/%s"
databaseLogsinksPath = databaseBasePath + "/%s/logsink"
databaseOnlineMigrationsPath = databaseBasePath + "/%s/online-migration"
databaseOnlineMigrationPath = databaseBasePath + "/%s/online-migration/%s"
)
// SQL Mode constants allow for MySQL-specific SQL flavor configuration.
@ -179,6 +181,9 @@ type DatabasesService interface {
ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error)
UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error)
DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error)
StartOnlineMigration(ctx context.Context, databaseID string, onlineMigrationRequest *DatabaseStartOnlineMigrationRequest) (*DatabaseOnlineMigrationStatus, *Response, error)
StopOnlineMigration(ctx context.Context, databaseID, migrationID string) (*Response, error)
GetOnlineMigrationStatus(ctx context.Context, databaseID string) (*DatabaseOnlineMigrationStatus, *Response, error)
}
// DatabasesServiceOp handles communication with the Databases related methods
@ -366,6 +371,13 @@ type DatabaseLogsink struct {
Config *DatabaseLogsinkConfig `json:"config,omitempty"`
}
// DatabaseOnlineMigrationStatus represents an online migration status
type DatabaseOnlineMigrationStatus struct {
	// ID of the online migration.
	ID string `json:"id"`
	// Status reported by the API for the migration (opaque string here; see the API docs for possible states).
	Status string `json:"status"`
	// CreatedAt is the creation timestamp exactly as returned by the API (kept as a string, not parsed into time.Time).
	CreatedAt string `json:"created_at"`
}
// TopicPartition represents the state of a Kafka topic partition
type TopicPartition struct {
EarliestOffset uint64 `json:"earliest_offset,omitempty"`
@ -515,6 +527,13 @@ type DatabaseFirewallRule struct {
CreatedAt time.Time `json:"created_at"`
}
// DatabaseStartOnlineMigrationRequest is used to start an online migration for a database cluster
type DatabaseStartOnlineMigrationRequest struct {
	// Source describes the existing cluster to replicate from.
	Source *DatabaseOnlineMigrationConfig `json:"source"`
	// DisableSSL, when true, disables SSL for the migration connection.
	DisableSSL bool `json:"disable_ssl,omitempty"`
	// IgnoreDBs lists database names to exclude from the migration.
	IgnoreDBs []string `json:"ignore_dbs,omitempty"`
}
// DatabaseCreateLogsinkRequest is used to create logsink for a database cluster
type DatabaseCreateLogsinkRequest struct {
Name string `json:"sink_name"`
@ -544,6 +563,15 @@ type DatabaseLogsinkConfig struct {
Cert string `json:"cert,omitempty"`
}
// DatabaseOnlineMigrationConfig represents the configuration options for database online migrations.
type DatabaseOnlineMigrationConfig struct {
	// Host of the source database cluster.
	Host string `json:"host,omitempty"`
	// Port of the source database cluster.
	Port int `json:"port,omitempty"`
	// DatabaseName is the source database name; note it serializes as "dbname" on the wire.
	DatabaseName string `json:"dbname,omitempty"`
	// Username used to authenticate against the source cluster.
	Username string `json:"username,omitempty"`
	// Password used to authenticate against the source cluster.
	Password string `json:"password,omitempty"`
}
// PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters.
type PostgreSQLConfig struct {
AutovacuumFreezeMaxAge *int `json:"autovacuum_freeze_max_age,omitempty"`
@ -871,6 +899,7 @@ type DatabaseOptions struct {
RedisOptions DatabaseEngineOptions `json:"redis"`
KafkaOptions DatabaseEngineOptions `json:"kafka"`
OpensearchOptions DatabaseEngineOptions `json:"opensearch"`
ValkeyOptions DatabaseEngineOptions `json:"valkey"`
}
// DatabaseEngineOptions represents the configuration options that are available for a given database engine
@ -1975,3 +2004,50 @@ func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, lo
}
return resp, nil
}
// StartOnlineMigration starts an online migration for a database. Migrating a
// cluster establishes a connection with an existing cluster and replicates its
// contents to the target cluster. Online migration is only available for
// MySQL, PostgreSQL, and Redis clusters.
func (svc *DatabasesServiceOp) StartOnlineMigration(ctx context.Context, databaseID string, onlineMigration *DatabaseStartOnlineMigrationRequest) (*DatabaseOnlineMigrationStatus, *Response, error) {
	// The API uses PUT on the collection path to begin a migration.
	req, err := svc.client.NewRequest(ctx, http.MethodPut, fmt.Sprintf(databaseOnlineMigrationsPath, databaseID), onlineMigration)
	if err != nil {
		return nil, nil, err
	}
	status := new(DatabaseOnlineMigrationStatus)
	resp, err := svc.client.Do(ctx, req, status)
	if err != nil {
		return nil, resp, err
	}
	return status, resp, nil
}
// GetOnlineMigrationStatus retrieves the status of the most recent online migration
func (svc *DatabasesServiceOp) GetOnlineMigrationStatus(ctx context.Context, databaseID string) (*DatabaseOnlineMigrationStatus, *Response, error) {
	req, err := svc.client.NewRequest(ctx, http.MethodGet, fmt.Sprintf(databaseOnlineMigrationsPath, databaseID), nil)
	if err != nil {
		return nil, nil, err
	}
	status := new(DatabaseOnlineMigrationStatus)
	resp, err := svc.client.Do(ctx, req, status)
	if err != nil {
		return nil, resp, err
	}
	return status, resp, nil
}
// StopOnlineMigration stops an online migration
func (svc *DatabasesServiceOp) StopOnlineMigration(ctx context.Context, databaseID, migrationID string) (*Response, error) {
	path := fmt.Sprintf(databaseOnlineMigrationPath, databaseID, migrationID)
	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}
	// No response body is expected; only the *Response metadata is returned.
	return svc.client.Do(ctx, req, nil)
}

@ -21,7 +21,7 @@ import (
)
const (
libraryVersion = "1.132.0"
libraryVersion = "1.144.0"
defaultBaseURL = "https://api.digitalocean.com/"
userAgent = "godo/" + libraryVersion
mediaType = "application/json"
@ -88,11 +88,13 @@ type Client struct {
ReservedIPV6Actions ReservedIPV6ActionsService
Sizes SizesService
Snapshots SnapshotsService
SpacesKeys SpacesKeysService
Storage StorageService
StorageActions StorageActionsService
Tags TagsService
UptimeChecks UptimeChecksService
VPCs VPCsService
PartnerAttachment PartnerAttachmentService
// Optional function called after every successful request made to the DO APIs
onRequestCompleted RequestCompletionCallback
@ -302,11 +304,13 @@ func NewClient(httpClient *http.Client) *Client {
c.ReservedIPV6Actions = &ReservedIPV6ActionsServiceOp{client: c}
c.Sizes = &SizesServiceOp{client: c}
c.Snapshots = &SnapshotsServiceOp{client: c}
c.SpacesKeys = &SpacesKeysServiceOp{client: c}
c.Storage = &StorageServiceOp{client: c}
c.StorageActions = &StorageActionsServiceOp{client: c}
c.Tags = &TagsServiceOp{client: c}
c.UptimeChecks = &UptimeChecksServiceOp{client: c}
c.VPCs = &VPCsServiceOp{client: c}
c.PartnerAttachment = &PartnerAttachmentServiceOp{client: c}
c.headers = make(map[string]string)

@ -40,6 +40,7 @@ type KubernetesService interface {
CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error)
GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error)
GetNodePoolTemplate(ctx context.Context, clusterID string, nodePoolName string) (*KubernetesNodePoolTemplate, *Response, error)
ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error)
UpdateNodePool(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error)
// RecycleNodePoolNodes is DEPRECATED please use DeleteNode
@ -54,6 +55,8 @@ type KubernetesService interface {
RunClusterlint(ctx context.Context, clusterID string, req *KubernetesRunClusterlintRequest) (string, *Response, error)
GetClusterlintResults(ctx context.Context, clusterID string, req *KubernetesGetClusterlintRequest) ([]*ClusterlintDiagnostic, *Response, error)
GetClusterStatusMessages(ctx context.Context, clusterID string, req *KubernetesGetClusterStatusMessagesRequest) ([]*KubernetesClusterStatusMessage, *Response, error)
}
var _ KubernetesService = &KubernetesServiceOp{}
@ -78,20 +81,24 @@ type KubernetesClusterCreateRequest struct {
NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"`
AutoUpgrade bool `json:"auto_upgrade"`
SurgeUpgrade bool `json:"surge_upgrade"`
ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"`
AutoUpgrade bool `json:"auto_upgrade"`
SurgeUpgrade bool `json:"surge_upgrade"`
ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"`
RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"`
}
// KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster.
type KubernetesClusterUpdateRequest struct {
Name string `json:"name,omitempty"`
Tags []string `json:"tags,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
AutoUpgrade *bool `json:"auto_upgrade,omitempty"`
SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
Name string `json:"name,omitempty"`
Tags []string `json:"tags,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
AutoUpgrade *bool `json:"auto_upgrade,omitempty"`
SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"`
RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"`
// Convert cluster to run highly available control plane
HA *bool `json:"ha,omitempty"`
@ -187,6 +194,19 @@ type KubernetesGetClusterlintRequest struct {
RunId string `json:"run_id"`
}
// clusterStatusMessagesRoot wraps the "messages" array returned by the
// cluster status messages endpoint.
type clusterStatusMessagesRoot struct {
	Messages []*KubernetesClusterStatusMessage `json:"messages"`
}

// KubernetesClusterStatusMessage is a single status message emitted for a cluster.
type KubernetesClusterStatusMessage struct {
	Message   string    `json:"message"`
	Timestamp time.Time `json:"timestamp"`
}

// KubernetesGetClusterStatusMessagesRequest configures GetClusterStatusMessages.
// Since, when non-nil, is sent as the "since" query parameter in RFC 3339 form.
type KubernetesGetClusterStatusMessagesRequest struct {
	Since *time.Time `json:"since"`
}
// KubernetesCluster represents a Kubernetes cluster.
type KubernetesCluster struct {
ID string `json:"id,omitempty"`
@ -205,11 +225,13 @@ type KubernetesCluster struct {
NodePools []*KubernetesNodePool `json:"node_pools,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
AutoUpgrade bool `json:"auto_upgrade,omitempty"`
SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
RegistryEnabled bool `json:"registry_enabled,omitempty"`
ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
AutoUpgrade bool `json:"auto_upgrade,omitempty"`
SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
RegistryEnabled bool `json:"registry_enabled,omitempty"`
ControlPlaneFirewall *KubernetesControlPlaneFirewall `json:"control_plane_firewall,omitempty"`
ClusterAutoscalerConfiguration *KubernetesClusterAutoscalerConfiguration `json:"cluster_autoscaler_configuration,omitempty"`
RoutingAgent *KubernetesRoutingAgent `json:"routing_agent,omitempty"`
Status *KubernetesClusterStatus `json:"status,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
@ -223,6 +245,7 @@ func (kc KubernetesCluster) URN() string {
// KubernetesClusterUser represents a Kubernetes cluster user.
type KubernetesClusterUser struct {
ID string `json:"id,omitempty"`
Username string `json:"username,omitempty"`
Groups []string `json:"groups,omitempty"`
}
@ -251,6 +274,17 @@ type KubernetesControlPlaneFirewall struct {
AllowedAddresses []string `json:"allowed_addresses"`
}
// KubernetesRoutingAgent represents information about the routing-agent cluster plugin.
type KubernetesRoutingAgent struct {
Enabled *bool `json:"enabled"`
}
// KubernetesClusterAutoscalerConfiguration represents Kubernetes cluster autoscaler configuration.
type KubernetesClusterAutoscalerConfiguration struct {
ScaleDownUtilizationThreshold *float64 `json:"scale_down_utilization_threshold"`
ScaleDownUnneededTime *string `json:"scale_down_unneeded_time"`
}
// KubernetesMaintenancePolicyDay represents the possible days of a maintenance
// window
type KubernetesMaintenancePolicyDay int
@ -315,7 +349,7 @@ var (
// KubernetesMaintenanceToDay returns the appropriate KubernetesMaintenancePolicyDay for the given string.
func KubernetesMaintenanceToDay(day string) (KubernetesMaintenancePolicyDay, error) {
d, ok := toDay[day]
d, ok := toDay[strings.ToLower(day)]
if !ok {
return 0, fmt.Errorf("unknown day: %q", day)
}
@ -416,6 +450,20 @@ type KubernetesNodePool struct {
Nodes []*KubernetesNode `json:"nodes,omitempty"`
}
// KubernetesNodePoolTemplate represents the node pool template data for a given pool.
type KubernetesNodePoolTemplate struct {
	// NOTE(review): no json tag here; decoding the API's "template" key relies
	// on encoding/json's case-insensitive field-name match — confirm intended.
	Template *KubernetesNodeTemplate
}

// KubernetesNodePoolResources represents the resources within a given template for a node pool
// This follows https://pkg.go.dev/k8s.io/kubernetes@v1.32.1/pkg/scheduler/framework#Resource to represent
// node resources within the node object.
type KubernetesNodePoolResources struct {
	// CPU capacity — presumably whole cores per the referenced Resource model; TODO confirm against the API.
	CPU int64 `json:"cpu,omitempty"`
	// Memory as a quantity string — units not shown here; TODO confirm format (e.g. "4Gi").
	Memory string `json:"memory,omitempty"`
	// Pods is the pod count capacity.
	Pods int64 `json:"pods,omitempty"`
}
// KubernetesNode represents a Node in a node pool in a Kubernetes cluster.
type KubernetesNode struct {
ID string `json:"id,omitempty"`
@ -427,6 +475,17 @@ type KubernetesNode struct {
UpdatedAt time.Time `json:"updated_at,omitempty"`
}
// KubernetesNodeTemplate represents a template in a node pool in a Kubernetes cluster.
type KubernetesNodeTemplate struct {
	// ClusterUUID identifies the cluster this template belongs to.
	ClusterUUID string `json:"cluster_uuid,omitempty"`
	// Name of the node pool the template describes.
	Name string `json:"name,omitempty"`
	// Slug of the node size — presumably a droplet size slug; confirm against API docs.
	Slug string `json:"slug,omitempty"`
	// Labels applied to nodes created from this template.
	Labels map[string]string `json:"labels,omitempty"`
	// Taints applied to nodes created from this template.
	Taints []string `json:"taints,omitempty"`
	// Capacity is the node's total resources (see KubernetesNodePoolResources).
	Capacity *KubernetesNodePoolResources `json:"capacity,omitempty"`
	// Allocatable is the portion of capacity available to pods, per the
	// k8s Resource model referenced on KubernetesNodePoolResources.
	Allocatable *KubernetesNodePoolResources `json:"allocatable,omitempty"`
}
// KubernetesNodeStatus represents the status of a particular Node in a Kubernetes cluster.
type KubernetesNodeStatus struct {
State string `json:"state,omitempty"`
@ -794,6 +853,24 @@ func (svc *KubernetesServiceOp) GetNodePool(ctx context.Context, clusterID, pool
return root.NodePool, resp, nil
}
// GetNodePoolTemplate retrieves the template used for a given node pool to scale up from zero.
func (svc *KubernetesServiceOp) GetNodePoolTemplate(ctx context.Context, clusterID string, nodePoolName string) (*KubernetesNodePoolTemplate, *Response, error) {
	// url.JoinPath also percent-escapes the path segments.
	path, err := url.JoinPath(kubernetesClustersPath, clusterID, "node_pools_template", nodePoolName)
	if err != nil {
		return nil, nil, err
	}
	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	tmpl := new(KubernetesNodePoolTemplate)
	resp, err := svc.client.Do(ctx, req, tmpl)
	if err != nil {
		return nil, resp, err
	}
	return tmpl, resp, nil
}
// ListNodePools lists all the node pools found in a Kubernetes cluster.
func (svc *KubernetesServiceOp) ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) {
path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID)
@ -980,3 +1057,28 @@ func (svc *KubernetesServiceOp) GetClusterlintResults(ctx context.Context, clust
}
return root.Diagnostics, resp, nil
}
// GetClusterStatusMessages fetches status messages for a cluster. When
// req.Since is set, it is forwarded as the RFC 3339 "since" query parameter.
func (svc *KubernetesServiceOp) GetClusterStatusMessages(ctx context.Context, clusterID string, req *KubernetesGetClusterStatusMessagesRequest) ([]*KubernetesClusterStatusMessage, *Response, error) {
	path := fmt.Sprintf("%s/%s/status_messages", kubernetesClustersPath, clusterID)
	// Only Since produces a query string, so build it just in that case.
	if req != nil && req.Since != nil {
		params := url.Values{}
		params.Set("since", req.Since.Format(time.RFC3339))
		path += "?" + params.Encode()
	}
	request, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(clusterStatusMessagesRoot)
	resp, err := svc.client.Do(ctx, request, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Messages, resp, nil
}

@ -22,6 +22,14 @@ const (
// Load Balancer network types
LoadBalancerNetworkTypeExternal = "EXTERNAL"
LoadBalancerNetworkTypeInternal = "INTERNAL"
// Load Balancer network_stack types
LoadBalancerNetworkStackIPv4 = "IPV4"
LoadBalancerNetworkStackDualstack = "DUALSTACK"
// Supported TLS Cipher policies
LoadBalancerTLSCipherPolicyDefault = "DEFAULT"
LoadBalancerTLSCipherPolicyStrong = "STRONG"
)
// LoadBalancersService is an interface for managing load balancers with the DigitalOcean API.
@ -29,6 +37,8 @@ const (
type LoadBalancersService interface {
Get(context.Context, string) (*LoadBalancer, *Response, error)
List(context.Context, *ListOptions) ([]LoadBalancer, *Response, error)
ListByNames(context.Context, []string, *ListOptions) ([]LoadBalancer, *Response, error)
ListByUUIDs(context.Context, []string, *ListOptions) ([]LoadBalancer, *Response, error)
Create(context.Context, *LoadBalancerRequest) (*LoadBalancer, *Response, error)
Update(ctx context.Context, lbID string, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error)
Delete(ctx context.Context, lbID string) (*Response, error)
@ -74,6 +84,8 @@ type LoadBalancer struct {
GLBSettings *GLBSettings `json:"glb_settings,omitempty"`
TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"`
Network string `json:"network,omitempty"`
NetworkStack string `json:"network_stack,omitempty"`
TLSCipherPolicy string `json:"tls_cipher_policy,omitempty"`
}
// String creates a human-readable description of a LoadBalancer.
@ -108,6 +120,8 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest {
HTTPIdleTimeoutSeconds: l.HTTPIdleTimeoutSeconds,
TargetLoadBalancerIDs: append([]string(nil), l.TargetLoadBalancerIDs...),
Network: l.Network,
NetworkStack: l.NetworkStack,
TLSCipherPolicy: l.TLSCipherPolicy,
}
if l.DisableLetsEncryptDNSRecords != nil {
@ -247,6 +261,8 @@ type LoadBalancerRequest struct {
GLBSettings *GLBSettings `json:"glb_settings,omitempty"`
TargetLoadBalancerIDs []string `json:"target_load_balancer_ids,omitempty"`
Network string `json:"network,omitempty"`
NetworkStack string `json:"network_stack,omitempty"`
TLSCipherPolicy string `json:"tls_cipher_policy,omitempty"`
}
// String creates a human-readable description of a LoadBalancerRequest.
@ -396,6 +412,72 @@ func (l *LoadBalancersServiceOp) List(ctx context.Context, opt *ListOptions) ([]
return root.LoadBalancers, resp, err
}
// ListByNames lists load balancers filtered by resource names, with optional pagination.
func (l *LoadBalancersServiceOp) ListByNames(ctx context.Context, names []string, opt *ListOptions) ([]LoadBalancer, *Response, error) {
	path, err := addOptions(loadBalancersBasePath, opt)
	if err != nil {
		return nil, nil, err
	}
	req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	// Each name becomes a repeated "names" query parameter.
	query := req.URL.Query()
	for _, name := range names {
		query.Add("names", name)
	}
	req.URL.RawQuery = query.Encode()
	root := new(loadBalancersRoot)
	resp, err := l.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	// Surface pagination links/meta on the response when present.
	if root.Links != nil {
		resp.Links = root.Links
	}
	if root.Meta != nil {
		resp.Meta = root.Meta
	}
	return root.LoadBalancers, resp, err
}
// ListByUUIDs lists load balancers filtered by resource UUIDs, with optional pagination.
func (l *LoadBalancersServiceOp) ListByUUIDs(ctx context.Context, uuids []string, opt *ListOptions) ([]LoadBalancer, *Response, error) {
	path, err := addOptions(loadBalancersBasePath, opt)
	if err != nil {
		return nil, nil, err
	}
	req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	// Each UUID becomes a repeated "uuids" query parameter.
	query := req.URL.Query()
	for _, uuid := range uuids {
		query.Add("uuids", uuid)
	}
	req.URL.RawQuery = query.Encode()
	root := new(loadBalancersRoot)
	resp, err := l.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	// Surface pagination links/meta on the response when present.
	if root.Links != nil {
		resp.Links = root.Links
	}
	if root.Meta != nil {
		resp.Meta = root.Meta
	}
	return root.LoadBalancers, resp, err
}
// Create a new load balancer with a given configuration.
func (l *LoadBalancersServiceOp) Create(ctx context.Context, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) {
req, err := l.client.NewRequest(ctx, http.MethodPost, loadBalancersBasePath, lbr)

@ -0,0 +1,415 @@
package godo
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
)
const partnerNetworkConnectBasePath = "/v2/partner_network_connect/attachments"
// PartnerAttachmentService is an interface for managing Partner Attachments with the
// DigitalOcean API.
// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/PartnerNetworkConnect
type PartnerAttachmentService interface {
	// List returns all Partner Attachments, with optional pagination.
	List(context.Context, *ListOptions) ([]*PartnerAttachment, *Response, error)
	// Create provisions a new Partner Attachment from the given request.
	Create(context.Context, *PartnerAttachmentCreateRequest) (*PartnerAttachment, *Response, error)
	// Get fetches a single Partner Attachment by ID.
	Get(context.Context, string) (*PartnerAttachment, *Response, error)
	// Update changes a Partner Attachment's name and/or connected VPCs.
	Update(context.Context, string, *PartnerAttachmentUpdateRequest) (*PartnerAttachment, *Response, error)
	// Delete removes a Partner Attachment by ID.
	Delete(context.Context, string) (*Response, error)
	// GetServiceKey returns the service key of a Partner Attachment.
	GetServiceKey(context.Context, string) (*ServiceKey, *Response, error)
	// SetRoutes replaces the routes of a Partner Attachment.
	SetRoutes(context.Context, string, *PartnerAttachmentSetRoutesRequest) (*PartnerAttachment, *Response, error)
	// ListRoutes lists the remote routes of a Partner Attachment, with optional pagination.
	ListRoutes(context.Context, string, *ListOptions) ([]*RemoteRoute, *Response, error)
	// GetBGPAuthKey returns the BGP authentication key of a Partner Attachment.
	GetBGPAuthKey(ctx context.Context, iaID string) (*BgpAuthKey, *Response, error)
	// RegenerateServiceKey regenerates the service key of a Partner Attachment.
	RegenerateServiceKey(ctx context.Context, iaID string) (*RegenerateServiceKey, *Response, error)
}
var _ PartnerAttachmentService = &PartnerAttachmentServiceOp{}
// PartnerAttachmentServiceOp interfaces with the Partner Attachment endpoints in the DigitalOcean API.
type PartnerAttachmentServiceOp struct {
client *Client
}
// PartnerAttachmentCreateRequest represents a request to create a Partner Attachment.
type PartnerAttachmentCreateRequest struct {
// Name is the name of the Partner Attachment
Name string `json:"name,omitempty"`
// ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps
ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"`
// Region is the region where the Partner Attachment is created
Region string `json:"region,omitempty"`
// NaaSProvider is the name of the Network as a Service provider
NaaSProvider string `json:"naas_provider,omitempty"`
// VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to
VPCIDs []string `json:"vpc_ids,omitempty"`
// BGP is the BGP configuration of the Partner Attachment
BGP BGP `json:"bgp,omitempty"`
}
type partnerAttachmentRequestBody struct {
// Name is the name of the Partner Attachment
Name string `json:"name,omitempty"`
// ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps
ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"`
// Region is the region where the Partner Attachment is created
Region string `json:"region,omitempty"`
// NaaSProvider is the name of the Network as a Service provider
NaaSProvider string `json:"naas_provider,omitempty"`
// VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to
VPCIDs []string `json:"vpc_ids,omitempty"`
// BGP is the BGP configuration of the Partner Attachment
BGP *BGPInput `json:"bgp,omitempty"`
}
// buildReq converts the public create request into its wire representation,
// mapping the BGP block onto BGPInput only when it is non-zero so that an
// unset BGP configuration is omitted from the request body entirely.
func (req *PartnerAttachmentCreateRequest) buildReq() *partnerAttachmentRequestBody {
	body := &partnerAttachmentRequestBody{
		Name:                      req.Name,
		ConnectionBandwidthInMbps: req.ConnectionBandwidthInMbps,
		Region:                    req.Region,
		NaaSProvider:              req.NaaSProvider,
		VPCIDs:                    req.VPCIDs,
	}
	if req.BGP == (BGP{}) {
		return body
	}
	body.BGP = &BGPInput{
		LocalASN:      req.BGP.LocalASN,
		LocalRouterIP: req.BGP.LocalRouterIP,
		PeerASN:       req.BGP.PeerASN,
		PeerRouterIP:  req.BGP.PeerRouterIP,
		AuthKey:       req.BGP.AuthKey,
	}
	return body
}
// PartnerAttachmentUpdateRequest represents a request to update a Partner Attachment.
type PartnerAttachmentUpdateRequest struct {
// Name is the name of the Partner Attachment
Name string `json:"name,omitempty"`
//VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to
VPCIDs []string `json:"vpc_ids,omitempty"`
}
type PartnerAttachmentSetRoutesRequest struct {
// Routes is the list of routes to be used for the Partner Attachment
Routes []string `json:"routes,omitempty"`
}
// BGP represents the BGP configuration of a Partner Attachment.
type BGP struct {
	// LocalASN is the local ASN
	LocalASN int `json:"local_asn,omitempty"`
	// LocalRouterIP is the local router IP
	LocalRouterIP string `json:"local_router_ip,omitempty"`
	// PeerASN is the peer ASN
	PeerASN int `json:"peer_asn,omitempty"`
	// PeerRouterIP is the peer router IP
	PeerRouterIP string `json:"peer_router_ip,omitempty"`
	// AuthKey is the authentication key
	AuthKey string `json:"auth_key,omitempty"`
}

// UnmarshalJSON decodes a BGP block, accepting both the canonical ASN keys
// ("local_asn"/"peer_asn") and the router-ASN aliases
// ("local_router_asn"/"peer_router_asn"). When both forms appear, the
// canonical key wins.
func (b *BGP) UnmarshalJSON(data []byte) error {
	type plain BGP
	wire := struct {
		LocalASN       *int `json:"local_asn,omitempty"`
		LocalRouterASN *int `json:"local_router_asn,omitempty"`
		PeerASN        *int `json:"peer_asn,omitempty"`
		PeerRouterASN  *int `json:"peer_router_asn,omitempty"`
		*plain
	}{plain: (*plain)(b)}
	if err := json.Unmarshal(data, &wire); err != nil {
		return err
	}
	switch {
	case wire.LocalASN != nil:
		b.LocalASN = *wire.LocalASN
	case wire.LocalRouterASN != nil:
		b.LocalASN = *wire.LocalRouterASN
	}
	switch {
	case wire.PeerASN != nil:
		b.PeerASN = *wire.PeerASN
	case wire.PeerRouterASN != nil:
		b.PeerASN = *wire.PeerRouterASN
	}
	return nil
}
// BGPInput represents the BGP configuration of a Partner Attachment.
type BGPInput struct {
// LocalASN is the local ASN
LocalASN int `json:"local_router_asn,omitempty"`
// LocalRouterIP is the local router IP
LocalRouterIP string `json:"local_router_ip,omitempty"`
// PeerASN is the peer ASN
PeerASN int `json:"peer_router_asn,omitempty"`
// PeerRouterIP is the peer router IP
PeerRouterIP string `json:"peer_router_ip,omitempty"`
// AuthKey is the authentication key
AuthKey string `json:"auth_key,omitempty"`
}
// ServiceKey represents the service key of a Partner Attachment.
type ServiceKey struct {
Value string `json:"value,omitempty"`
State string `json:"state,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
}
// RemoteRoute represents a route for a Partner Attachment.
type RemoteRoute struct {
// ID is the generated ID of the Route
ID string `json:"id,omitempty"`
// Cidr is the CIDR of the route
Cidr string `json:"cidr,omitempty"`
}
// PartnerAttachment represents a DigitalOcean Partner Attachment.
type PartnerAttachment struct {
// ID is the generated ID of the Partner Attachment
ID string `json:"id,omitempty"`
// Name is the name of the Partner Attachment
Name string `json:"name,omitempty"`
// State is the state of the Partner Attachment
State string `json:"state,omitempty"`
// ConnectionBandwidthInMbps is the bandwidth of the connection in Mbps
ConnectionBandwidthInMbps int `json:"connection_bandwidth_in_mbps,omitempty"`
// Region is the region where the Partner Attachment is created
Region string `json:"region,omitempty"`
// NaaSProvider is the name of the Network as a Service provider
NaaSProvider string `json:"naas_provider,omitempty"`
// VPCIDs is the IDs of the VPCs to which the Partner Attachment is connected to
VPCIDs []string `json:"vpc_ids,omitempty"`
// BGP is the BGP configuration of the Partner Attachment
BGP BGP `json:"bgp,omitempty"`
// CreatedAt is time when this Partner Attachment was first created
CreatedAt time.Time `json:"created_at,omitempty"`
}
type partnerNetworkConnectAttachmentRoot struct {
PartnerAttachment *PartnerAttachment `json:"partner_attachment"`
}
type partnerNetworkConnectAttachmentsRoot struct {
PartnerAttachments []*PartnerAttachment `json:"partner_attachments"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
type serviceKeyRoot struct {
ServiceKey *ServiceKey `json:"service_key"`
}
type remoteRoutesRoot struct {
RemoteRoutes []*RemoteRoute `json:"remote_routes"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
// BgpAuthKey holds the BGP auth key value returned by GetBGPAuthKey.
type BgpAuthKey struct {
	Value string `json:"value"`
}

// bgpAuthKeyRoot is the JSON envelope for a BGP auth key response.
type bgpAuthKeyRoot struct {
	BgpAuthKey *BgpAuthKey `json:"bgp_auth_key"`
}

// RegenerateServiceKey is the (empty) result type of a service key
// regeneration request.
type RegenerateServiceKey struct {
}

// regenerateServiceKeyRoot wraps RegenerateServiceKey. Note the `json:"-"`
// tag: the field is never populated from the response body.
type regenerateServiceKeyRoot struct {
	RegenerateServiceKey *RegenerateServiceKey `json:"-"`
}
// List returns all Partner Attachments, honoring optional pagination options.
func (s *PartnerAttachmentServiceOp) List(ctx context.Context, opt *ListOptions) ([]*PartnerAttachment, *Response, error) {
	listPath, err := addOptions(partnerNetworkConnectBasePath, opt)
	if err != nil {
		return nil, nil, err
	}
	req, err := s.client.NewRequest(ctx, http.MethodGet, listPath, nil)
	if err != nil {
		return nil, nil, err
	}
	out := new(partnerNetworkConnectAttachmentsRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	// Propagate pagination metadata onto the response when present.
	if out.Links != nil {
		resp.Links = out.Links
	}
	if out.Meta != nil {
		resp.Meta = out.Meta
	}
	return out.PartnerAttachments, resp, nil
}
// Create creates a new Partner Attachment from the given request.
func (s *PartnerAttachmentServiceOp) Create(ctx context.Context, create *PartnerAttachmentCreateRequest) (*PartnerAttachment, *Response, error) {
	req, err := s.client.NewRequest(ctx, http.MethodPost, partnerNetworkConnectBasePath, create.buildReq())
	if err != nil {
		return nil, nil, err
	}
	out := new(partnerNetworkConnectAttachmentRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out.PartnerAttachment, resp, nil
}
// Get returns the details of a single Partner Attachment by ID.
func (s *PartnerAttachmentServiceOp) Get(ctx context.Context, id string) (*PartnerAttachment, *Response, error) {
	urlPath := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id)
	req, err := s.client.NewRequest(ctx, http.MethodGet, urlPath, nil)
	if err != nil {
		return nil, nil, err
	}
	out := new(partnerNetworkConnectAttachmentRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out.PartnerAttachment, resp, nil
}
// Update patches the properties of a Partner Attachment.
func (s *PartnerAttachmentServiceOp) Update(ctx context.Context, id string, update *PartnerAttachmentUpdateRequest) (*PartnerAttachment, *Response, error) {
	urlPath := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id)
	req, err := s.client.NewRequest(ctx, http.MethodPatch, urlPath, update)
	if err != nil {
		return nil, nil, err
	}
	out := new(partnerNetworkConnectAttachmentRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out.PartnerAttachment, resp, nil
}
// Delete removes a Partner Attachment by ID.
func (s *PartnerAttachmentServiceOp) Delete(ctx context.Context, id string) (*Response, error) {
	urlPath := fmt.Sprintf("%s/%s", partnerNetworkConnectBasePath, id)
	req, err := s.client.NewRequest(ctx, http.MethodDelete, urlPath, nil)
	if err != nil {
		return nil, err
	}
	// Do already returns the *Response alongside any error, so the result
	// can be forwarded directly.
	return s.client.Do(ctx, req, nil)
}
// GetServiceKey returns the service key of a Partner Attachment.
func (s *PartnerAttachmentServiceOp) GetServiceKey(ctx context.Context, id string) (*ServiceKey, *Response, error) {
	path := fmt.Sprintf("%s/%s/service_key", partnerNetworkConnectBasePath, id)
	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(serviceKeyRoot)
	resp, err := s.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.ServiceKey, resp, nil
}
// ListRoutes lists all remote routes for a Partner Attachment, honoring
// optional pagination options.
func (s *PartnerAttachmentServiceOp) ListRoutes(ctx context.Context, id string, opt *ListOptions) ([]*RemoteRoute, *Response, error) {
	routesPath, err := addOptions(fmt.Sprintf("%s/%s/remote_routes", partnerNetworkConnectBasePath, id), opt)
	if err != nil {
		return nil, nil, err
	}
	req, err := s.client.NewRequest(ctx, http.MethodGet, routesPath, nil)
	if err != nil {
		return nil, nil, err
	}
	out := new(remoteRoutesRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	// Propagate pagination metadata onto the response when present.
	if out.Links != nil {
		resp.Links = out.Links
	}
	if out.Meta != nil {
		resp.Meta = out.Meta
	}
	return out.RemoteRoutes, resp, nil
}
// SetRoutes replaces the remote routes of a Partner Attachment via a PUT to
// the remote_routes endpoint and returns the updated attachment.
func (s *PartnerAttachmentServiceOp) SetRoutes(ctx context.Context, id string, set *PartnerAttachmentSetRoutesRequest) (*PartnerAttachment, *Response, error) {
	path := fmt.Sprintf("%s/%s/remote_routes", partnerNetworkConnectBasePath, id)
	req, err := s.client.NewRequest(ctx, http.MethodPut, path, set)
	if err != nil {
		return nil, nil, err
	}
	root := new(partnerNetworkConnectAttachmentRoot)
	resp, err := s.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.PartnerAttachment, resp, nil
}
// GetBGPAuthKey returns the BGP auth key of a Partner Attachment.
func (s *PartnerAttachmentServiceOp) GetBGPAuthKey(ctx context.Context, iaID string) (*BgpAuthKey, *Response, error) {
	urlPath := fmt.Sprintf("%s/%s/bgp_auth_key", partnerNetworkConnectBasePath, iaID)
	req, err := s.client.NewRequest(ctx, http.MethodGet, urlPath, nil)
	if err != nil {
		return nil, nil, err
	}
	out := new(bgpAuthKeyRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out.BgpAuthKey, resp, nil
}
// RegenerateServiceKey regenerates the service key of a Partner Attachment.
//
// NOTE(review): regenerateServiceKeyRoot tags its field `json:"-"`, so the
// returned *RegenerateServiceKey is never decoded from the response body —
// presumably callers only care about the *Response and error; confirm against
// the API docs before relying on the first return value.
func (s *PartnerAttachmentServiceOp) RegenerateServiceKey(ctx context.Context, iaID string) (*RegenerateServiceKey, *Response, error) {
	path := fmt.Sprintf("%s/%s/service_key", partnerNetworkConnectBasePath, iaID)
	req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil)
	if err != nil {
		return nil, nil, err
	}
	root := new(regenerateServiceKeyRoot)
	resp, err := s.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.RegenerateServiceKey, resp, nil
}

@ -14,6 +14,7 @@ const snapshotBasePath = "v2/snapshots"
type SnapshotsService interface {
List(context.Context, *ListOptions) ([]Snapshot, *Response, error)
ListVolume(context.Context, *ListOptions) ([]Snapshot, *Response, error)
ListVolumeSnapshotByRegion(context.Context, string, *ListOptions) ([]Snapshot, *Response, error)
ListDroplet(context.Context, *ListOptions) ([]Snapshot, *Response, error)
Get(context.Context, string) (*Snapshot, *Response, error)
Delete(context.Context, string) (*Response, error)
@ -52,6 +53,7 @@ type snapshotsRoot struct {
// listSnapshotOptions holds the query-string filters applied internally when
// listing snapshots by resource type and/or region.
type listSnapshotOptions struct {
	ResourceType string `url:"resource_type,omitempty"`
	Region       string `url:"region,omitempty"`
}
func (s Snapshot) String() string {
@ -75,6 +77,12 @@ func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) (
return s.list(ctx, opt, &listOpt)
}
// ListVolumeSnapshotByRegion lists all volume snapshots for the given region.
func (s *SnapshotsServiceOp) ListVolumeSnapshotByRegion(ctx context.Context, region string, opt *ListOptions) ([]Snapshot, *Response, error) {
	listOpt := listSnapshotOptions{ResourceType: "volume", Region: region}
	return s.list(ctx, opt, &listOpt)
}
// Get retrieves a snapshot by id.
func (s *SnapshotsServiceOp) Get(ctx context.Context, snapshotID string) (*Snapshot, *Response, error) {
return s.get(ctx, snapshotID)

@ -0,0 +1,186 @@
package godo
import (
"context"
"fmt"
"net/http"
)
// spacesKeysBasePath is the API path prefix for all Spaces key endpoints.
const spacesKeysBasePath = "v2/spaces/keys"

// SpacesKeysService is an interface for managing Spaces keys with the DigitalOcean API.
type SpacesKeysService interface {
	// List returns all Spaces keys, honoring optional pagination options.
	List(context.Context, *ListOptions) ([]*SpacesKey, *Response, error)
	// Update modifies the key identified by its access key.
	Update(context.Context, string, *SpacesKeyUpdateRequest) (*SpacesKey, *Response, error)
	// Create provisions a new key.
	Create(context.Context, *SpacesKeyCreateRequest) (*SpacesKey, *Response, error)
	// Delete removes the key identified by its access key.
	Delete(context.Context, string) (*Response, error)
	// Get fetches a single key by its access key.
	Get(context.Context, string) (*SpacesKey, *Response, error)
}
// SpacesKeysServiceOp handles communication with the Spaces key related methods of the
// DigitalOcean API.
type SpacesKeysServiceOp struct {
	client *Client
}

// Compile-time check that SpacesKeysServiceOp satisfies SpacesKeysService.
var _ SpacesKeysService = &SpacesKeysServiceOp{}
// SpacesKeyPermission represents a permission for a Spaces grant.
type SpacesKeyPermission string

// Known permission levels usable in a Grant.
const (
	// SpacesKeyRead grants read-only access to the Spaces bucket
	SpacesKeyRead SpacesKeyPermission = "read"
	// SpacesKeyReadWrite grants read and write access to the Spaces bucket
	SpacesKeyReadWrite SpacesKeyPermission = "readwrite"
	// SpacesKeyFullAccess grants full access to the Spaces bucket
	SpacesKeyFullAccess SpacesKeyPermission = "fullaccess"
)
// Grant represents a Grant for a Spaces key, scoping a permission to a bucket.
type Grant struct {
	Bucket     string              `json:"bucket"`
	Permission SpacesKeyPermission `json:"permission"`
}

// SpacesKey represents a DigitalOcean Spaces key.
// NOTE(review): secret_key is presumably only returned when the key is
// created — confirm against the API docs before persisting it elsewhere.
type SpacesKey struct {
	Name      string   `json:"name"`
	AccessKey string   `json:"access_key"`
	SecretKey string   `json:"secret_key"`
	Grants    []*Grant `json:"grants"`
	CreatedAt string   `json:"created_at"`
}

// spacesKeyRoot represents a single-key response envelope from the DigitalOcean API.
type spacesKeyRoot struct {
	Key *SpacesKey `json:"key"`
}

// SpacesKeyCreateRequest represents a request to create a Spaces key.
type SpacesKeyCreateRequest struct {
	Name   string   `json:"name"`
	Grants []*Grant `json:"grants"`
}

// SpacesKeyUpdateRequest represents a request to update a Spaces key.
type SpacesKeyUpdateRequest struct {
	Name   string   `json:"name"`
	Grants []*Grant `json:"grants"`
}

// spacesListKeysRoot represents a paginated list response envelope from the
// DigitalOcean API.
type spacesListKeysRoot struct {
	Keys  []*SpacesKey `json:"keys,omitempty"`
	Links *Links       `json:"links,omitempty"`
	Meta  *Meta        `json:"meta"`
}
// Create creates a new Spaces key from the given request.
func (s *SpacesKeysServiceOp) Create(ctx context.Context, createRequest *SpacesKeyCreateRequest) (*SpacesKey, *Response, error) {
	if createRequest == nil {
		return nil, nil, NewArgError("createRequest", "cannot be nil")
	}
	req, err := s.client.NewRequest(ctx, http.MethodPost, spacesKeysBasePath, createRequest)
	if err != nil {
		return nil, nil, err
	}
	out := new(spacesKeyRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out.Key, resp, nil
}
// Delete removes the Spaces key identified by its access key.
func (s *SpacesKeysServiceOp) Delete(ctx context.Context, accessKey string) (*Response, error) {
	if accessKey == "" {
		return nil, NewArgError("accessKey", "cannot be empty")
	}
	req, err := s.client.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey), nil)
	if err != nil {
		return nil, err
	}
	// Do already returns the *Response alongside any error, so forward it.
	return s.client.Do(ctx, req, nil)
}
// Update modifies the Spaces key identified by its access key.
func (s *SpacesKeysServiceOp) Update(ctx context.Context, accessKey string, updateRequest *SpacesKeyUpdateRequest) (*SpacesKey, *Response, error) {
	if accessKey == "" {
		return nil, nil, NewArgError("accessKey", "cannot be empty")
	}
	if updateRequest == nil {
		return nil, nil, NewArgError("updateRequest", "cannot be nil")
	}
	urlPath := fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey)
	req, err := s.client.NewRequest(ctx, http.MethodPut, urlPath, updateRequest)
	if err != nil {
		return nil, nil, err
	}
	out := new(spacesKeyRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out.Key, resp, nil
}
// List returns all Spaces keys, honoring optional pagination options.
func (s *SpacesKeysServiceOp) List(ctx context.Context, opts *ListOptions) ([]*SpacesKey, *Response, error) {
	listPath, err := addOptions(spacesKeysBasePath, opts)
	if err != nil {
		return nil, nil, err
	}
	req, err := s.client.NewRequest(ctx, http.MethodGet, listPath, nil)
	if err != nil {
		return nil, nil, err
	}
	out := new(spacesListKeysRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	// Propagate pagination metadata onto the response when present.
	if out.Links != nil {
		resp.Links = out.Links
	}
	if out.Meta != nil {
		resp.Meta = out.Meta
	}
	return out.Keys, resp, nil
}
// Get retrieves a single Spaces key by its access key.
func (s *SpacesKeysServiceOp) Get(ctx context.Context, accessKey string) (*SpacesKey, *Response, error) {
	if accessKey == "" {
		return nil, nil, NewArgError("accessKey", "cannot be empty")
	}
	req, err := s.client.NewRequest(ctx, http.MethodGet, fmt.Sprintf("%s/%s", spacesKeysBasePath, accessKey), nil)
	if err != nil {
		return nil, nil, err
	}
	out := new(spacesKeyRoot)
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out.Key, resp, nil
}

@ -0,0 +1,18 @@
root = true
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
[{Makefile,*.mk}]
indent_style = tab
[*.nix]
indent_size = 2

@ -0,0 +1,4 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi
use flake . --impure

@ -0,0 +1,6 @@
/.devenv/
/.direnv/
/.pre-commit-config.yaml
/bin/
/build/
/var/

@ -0,0 +1,23 @@
run:
timeout: 5m
linters-settings:
gci:
sections:
- standard
- default
- prefix(github.com/go-viper/mapstructure)
golint:
min-confidence: 0
goimports:
local-prefixes: github.com/go-viper/mapstructure
linters:
disable-all: true
enable:
- gci
- gofmt
- gofumpt
- goimports
- staticcheck
# - stylecheck

@ -0,0 +1,104 @@
> [!WARNING]
> As of v2 of this library, change log can be found in GitHub releases.
## 1.5.1
* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
* Fix map of slices not decoding properly in certain cases. [GH-266]
## 1.5.0
* New option `IgnoreUntaggedFields` to ignore decoding to any fields
without `mapstructure` (or the configured tag name) set [GH-277]
* New option `ErrorUnset` which makes it an error if any fields
in a target struct are not set by the decoding process. [GH-225]
* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
* Decoding to slice from array no longer crashes [GH-265]
* Decode nested struct pointers to map [GH-271]
* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
* Fix issue where fields with `,omitempty` would sometimes decode
into a map with an empty string key [GH-281]
## 1.4.3
* Fix cases where `json.Number` didn't decode properly [GH-261]
## 1.4.2
* Custom name matchers to support any sort of casing, formatting, etc. for
field names. [GH-250]
* Fix possible panic in ComposeDecodeHookFunc [GH-251]
## 1.4.1
* Fix regression where `*time.Time` value would be set to empty and not be sent
to decode hooks properly [GH-232]
## 1.4.0
* A new decode hook type `DecodeHookFuncValue` has been added that has
access to the full values. [GH-183]
* Squash is now supported with embedded fields that are struct pointers [GH-205]
* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
## 1.3.3
* Decoding maps from maps creates a settable value for decode hooks [GH-203]
## 1.3.2
* Decode into interface type with a struct value is supported [GH-187]
## 1.3.1
* Squash should only squash embedded structs. [GH-194]
## 1.3.0
* Added `",omitempty"` support. This will ignore zero values in the source
structure when encoding. [GH-145]
## 1.2.3
* Fix duplicate entries in Keys list with pointer values. [GH-185]
## 1.2.2
* Do not add unsettable (unexported) values to the unused metadata key
or "remain" value. [GH-150]
## 1.2.1
* Go modules checksum mismatch fix
## 1.2.0
* Added support to capture unused values in a field using the `",remain"` value
in the mapstructure tag. There is an example to showcase usage.
* Added `DecoderConfig` option to always squash embedded structs
* `json.Number` can decode into `uint` types
* Empty slices are preserved and not replaced with nil slices
* Fix panic that can occur in when decoding a map into a nil slice of structs
* Improved package documentation for godoc
## 1.1.2
* Fix error when decode hook decodes interface implementation into interface
type. [GH-140]
## 1.1.1
* Fix panic that can happen in `decodePtr`
## 1.1.0
* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
* Support struct to struct decoding [GH-137]
* If source map value is nil, then destination map value is nil (instead of empty)
* If source slice value is nil, then destination slice value is nil (instead of empty)
* If source pointer is nil, then destination pointer is set to nil (instead of
allocated zero value of type)
## 1.0.0
* Initial tagged stable release.

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@ -0,0 +1,80 @@
# mapstructure
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI)
[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.
## Installation
```shell
go get github.com/go-viper/mapstructure/v2
```
## Migrating from `github.com/mitchellh/mapstructure`
[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository has achieved the "blessed fork" status.
You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
The API is the same, so you don't need to change anything else.
Here is a script that can help you with the migration:
```shell
sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
```
If you need more time to migrate your code, that is absolutely fine.
Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:
```shell
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
```
## Usage & Example
For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
The `Decode` function has examples associated with it there.
## But Why?!
Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:
```json
{
"type": "person",
"name": "Mitchell"
}
```
Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.
## Credits
Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
This is a maintained fork of the original library.
Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
## License
The project is licensed under the [MIT License](LICENSE).

@ -0,0 +1,630 @@
package mapstructure
import (
"encoding"
"errors"
"fmt"
"net"
"net/netip"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
// It returns nil when h is not convertible to any known hook signature.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
	// Create variables here so we can reference them with the reflect pkg
	var f1 DecodeHookFuncType
	var f2 DecodeHookFuncKind
	var f3 DecodeHookFuncValue

	// Fill in the variables into this interface and the rest is done
	// automatically using the reflect package.
	potential := []interface{}{f1, f2, f3}

	v := reflect.ValueOf(h)
	vt := v.Type()
	// Try each candidate signature; the first one h converts to wins.
	for _, raw := range potential {
		pt := reflect.ValueOf(raw).Type()
		if vt.ConvertibleTo(pt) {
			return v.Convert(pt).Interface()
		}
	}

	return nil
}
// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into a closure to be used directly
// if the type fails to convert we return a closure always erroring to keep the previous behaviour
func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) {
	switch f := typedDecodeHook(raw).(type) {
	case DecodeHookFuncType:
		// Type-based hooks receive the source/target reflect.Types.
		return func(from reflect.Value, to reflect.Value) (interface{}, error) {
			return f(from.Type(), to.Type(), from.Interface())
		}
	case DecodeHookFuncKind:
		// Kind-based hooks receive only the reflect.Kinds.
		return func(from reflect.Value, to reflect.Value) (interface{}, error) {
			return f(from.Kind(), to.Kind(), from.Interface())
		}
	case DecodeHookFuncValue:
		// Value-based hooks get the full reflect.Values.
		return func(from reflect.Value, to reflect.Value) (interface{}, error) {
			return f(from, to)
		}
	default:
		// Unknown signature: defer the failure to call time so the error
		// surfaces where the hook is actually used.
		return func(from reflect.Value, to reflect.Value) (interface{}, error) {
			return nil, errors.New("invalid decode hook signature")
		}
	}
}
// DecodeHookExec executes the given decode hook. This should be used
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
// that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
	raw DecodeHookFunc,
	from reflect.Value, to reflect.Value,
) (interface{}, error) {
	// Dispatch on the concrete hook signature (see typedDecodeHook).
	switch f := typedDecodeHook(raw).(type) {
	case DecodeHookFuncType:
		return f(from.Type(), to.Type(), from.Interface())
	case DecodeHookFuncKind:
		return f(from.Kind(), to.Kind(), from.Interface())
	case DecodeHookFuncValue:
		return f(from, to)
	default:
		return nil, errors.New("invalid decode hook signature")
	}
}
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
// automatically composes multiple DecodeHookFuncs.
//
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
	// Convert every hook once up front so the reflection-based dispatch in
	// typedDecodeHook is not repeated on every invocation.
	cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs))
	for _, f := range fs {
		cached = append(cached, cachedDecodeHook(f))
	}
	return func(f reflect.Value, t reflect.Value) (interface{}, error) {
		var err error
		data := f.Interface()

		newFrom := f
		for _, c := range cached {
			data, err = c(newFrom, t)
			if err != nil {
				return nil, err
			}
			// Each subsequent hook sees the previous hook's output as its
			// source value.
			newFrom = reflect.ValueOf(data)
		}

		return data, nil
	}
}
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
	// Convert each hook once up front (see cachedDecodeHook).
	cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff))
	for _, f := range ff {
		cached = append(cached, cachedDecodeHook(f))
	}
	return func(a, b reflect.Value) (interface{}, error) {
		var allErrs string
		var out interface{}
		var err error
		for _, c := range cached {
			out, err = c(a, b)
			if err != nil {
				// Collect the message and fall through to the next hook.
				allErrs += err.Error() + "\n"
				continue
			}

			return out, nil
		}

		return nil, errors.New(allErrs)
	}
}
// StringToSliceHookFunc returns a DecodeHookFunc that converts a
// string into []string by splitting it on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.SliceOf(from) {
			return data, nil
		}

		s := data.(string)
		if s == "" {
			// An empty input yields an empty (non-nil) slice.
			return []string{}, nil
		}
		return strings.Split(s, sep), nil
	}
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.TypeOf(time.Duration(5)) {
			return data, nil
		}

		// Parse strings such as "300ms" or "2h45m".
		return time.ParseDuration(data.(string))
	}
}
// StringToURLHookFunc returns a DecodeHookFunc that converts
// strings to *url.URL.
func StringToURLHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.TypeOf(&url.URL{}) {
			return data, nil
		}

		// Delegate parsing (and error reporting) to net/url.
		return url.Parse(data.(string))
	}
}
// StringToIPHookFunc returns a DecodeHookFunc that converts
// strings to net.IP.
func StringToIPHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.TypeOf(net.IP{}) {
			return data, nil
		}

		// net.ParseIP signals failure with a nil result rather than an error.
		parsed := net.ParseIP(data.(string))
		if parsed == nil {
			return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
		}
		return parsed, nil
	}
}
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
// strings to net.IPNet.
func StringToIPNetHookFunc() DecodeHookFunc {
	return func(
		f reflect.Type,
		t reflect.Type,
		data interface{},
	) (interface{}, error) {
		if f.Kind() != reflect.String {
			return data, nil
		}
		if t != reflect.TypeOf(net.IPNet{}) {
			return data, nil
		}

		// Convert it by parsing. The result is named ipNet (not net, as
		// before) so the net package identifier is not shadowed in this
		// scope.
		_, ipNet, err := net.ParseCIDR(data.(string))
		return ipNet, err
	}
}
// StringToTimeHookFunc returns a DecodeHookFunc that converts
// strings to time.Time using the given layout.
func StringToTimeHookFunc(layout string) DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.TypeOf(time.Time{}) {
			return data, nil
		}

		// Delegate parsing (and error reporting) to the time package.
		return time.Parse(layout, data.(string))
	}
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook(
	f reflect.Kind,
	t reflect.Kind,
	data interface{},
) (interface{}, error) {
	dataVal := reflect.ValueOf(data)
	switch t {
	case reflect.String:
		switch f {
		case reflect.Bool:
			// Booleans render as "1"/"0" rather than "true"/"false".
			if dataVal.Bool() {
				return "1", nil
			}
			return "0", nil
		case reflect.Float32:
			// NOTE(review): bitSize 64 is passed here although the source
			// kind is Float32, and Float64 sources fall through unchanged —
			// this mirrors the long-standing upstream behavior; confirm
			// before changing.
			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
		case reflect.Int:
			return strconv.FormatInt(dataVal.Int(), 10), nil
		case reflect.Slice:
			dataType := dataVal.Type()
			elemKind := dataType.Elem().Kind()
			// Only []uint8 ([]byte) is converted; other slices pass through.
			if elemKind == reflect.Uint8 {
				return string(dataVal.Interface().([]uint8)), nil
			}
		case reflect.Uint:
			return strconv.FormatUint(dataVal.Uint(), 10), nil
		}
	}
	// Anything not handled above is returned unchanged.
	return data, nil
}
// RecursiveStructToMapHookFunc returns a DecodeHookFunc that, when a struct
// is being decoded into a bare interface{} target, seeds the target with an
// empty map[string]interface{}; all other sources/targets pass through
// unchanged.
func RecursiveStructToMapHookFunc() DecodeHookFunc {
	return func(f reflect.Value, t reflect.Value) (interface{}, error) {
		if f.Kind() != reflect.Struct {
			return f.Interface(), nil
		}

		// Only fire when the target is exactly the empty interface type.
		var i interface{} = struct{}{}
		if t.Type() != reflect.TypeOf(&i).Elem() {
			return f.Interface(), nil
		}

		// Seed the target with an empty map; the decoder then recurses
		// into it for the struct's fields.
		m := make(map[string]interface{})
		t.Set(reflect.ValueOf(m))
		return f.Interface(), nil
	}
}
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
// strings to the UnmarshalText function, when the target type
// implements the encoding.TextUnmarshaler interface
func TextUnmarshallerHookFunc() DecodeHookFuncType {
	return func(
		f reflect.Type,
		t reflect.Type,
		data interface{},
	) (interface{}, error) {
		if f.Kind() != reflect.String {
			return data, nil
		}
		// Allocate a fresh *T so UnmarshalText has an addressable target.
		result := reflect.New(t).Interface()
		unmarshaller, ok := result.(encoding.TextUnmarshaler)
		if !ok {
			return data, nil
		}
		str, ok := data.(string)
		if !ok {
			// Named string types fail the direct assertion; read the
			// underlying string via reflection instead.
			str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
		}
		if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
			return nil, err
		}
		return result, nil
	}
}
// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
// strings to netip.Addr.
func StringToNetIPAddrHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.TypeOf(netip.Addr{}) {
			return data, nil
		}

		// Delegate parsing (and error reporting) to net/netip.
		return netip.ParseAddr(data.(string))
	}
}
// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
// strings to netip.AddrPort.
func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() != reflect.String || to != reflect.TypeOf(netip.AddrPort{}) {
			return data, nil
		}

		// Delegate parsing (and error reporting) to net/netip.
		return netip.ParseAddrPort(data.(string))
	}
}
// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
// strings to basic types.
// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
func StringToBasicTypeHookFunc() DecodeHookFunc {
	// Each per-type hook is a no-op unless its exact target kind matches, so
	// composing them behaves like a single type-dispatched converter.
	return ComposeDecodeHookFunc(
		StringToInt8HookFunc(),
		StringToUint8HookFunc(),
		StringToInt16HookFunc(),
		StringToUint16HookFunc(),
		StringToInt32HookFunc(),
		StringToUint32HookFunc(),
		StringToInt64HookFunc(),
		StringToUint64HookFunc(),
		StringToIntHookFunc(),
		StringToUintHookFunc(),
		StringToFloat32HookFunc(),
		StringToFloat64HookFunc(),
		StringToBoolHookFunc(),
		// byte and rune are aliases for uint8 and int32 respectively
		// StringToByteHookFunc(),
		// StringToRuneHookFunc(),
		StringToComplex64HookFunc(),
		StringToComplex128HookFunc(),
	)
}
// StringToInt8HookFunc returns a DecodeHookFunc that parses strings
// into int8 values.
func StringToInt8HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Int8 {
			// Base 0 lets strconv honor 0x/0o/0b prefixes.
			parsed, err := strconv.ParseInt(data.(string), 0, 8)
			return int8(parsed), err
		}
		return data, nil
	}
}
// StringToUint8HookFunc returns a DecodeHookFunc that parses strings
// into uint8 values.
func StringToUint8HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Uint8 {
			// Base 0 lets strconv honor 0x/0o/0b prefixes.
			parsed, err := strconv.ParseUint(data.(string), 0, 8)
			return uint8(parsed), err
		}
		return data, nil
	}
}
// StringToInt16HookFunc returns a DecodeHookFunc that parses strings
// into int16 values.
func StringToInt16HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Int16 {
			// Base 0 lets strconv honor 0x/0o/0b prefixes.
			parsed, err := strconv.ParseInt(data.(string), 0, 16)
			return int16(parsed), err
		}
		return data, nil
	}
}
// StringToUint16HookFunc returns a DecodeHookFunc that parses strings
// into uint16 values.
func StringToUint16HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Uint16 {
			// Base 0 lets strconv honor 0x/0o/0b prefixes.
			parsed, err := strconv.ParseUint(data.(string), 0, 16)
			return uint16(parsed), err
		}
		return data, nil
	}
}
// StringToInt32HookFunc returns a DecodeHookFunc that parses string
// values into int32, leaving every other conversion untouched.
func StringToInt32HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Int32 {
			// Base 0 lets strconv honor 0x/0o/0b prefixes.
			v, err := strconv.ParseInt(data.(string), 0, 32)
			return int32(v), err
		}
		return data, nil
	}
}
// StringToUint32HookFunc returns a DecodeHookFunc that parses string
// values into uint32, leaving every other conversion untouched.
func StringToUint32HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Uint32 {
			// Base 0 lets strconv honor 0x/0o/0b prefixes.
			v, err := strconv.ParseUint(data.(string), 0, 32)
			return uint32(v), err
		}
		return data, nil
	}
}
// StringToInt64HookFunc returns a DecodeHookFunc that parses string
// values into int64, leaving every other conversion untouched.
func StringToInt64HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Int64 {
			// ParseInt already yields an int64, so no narrowing needed.
			return strconv.ParseInt(data.(string), 0, 64)
		}
		return data, nil
	}
}
// StringToUint64HookFunc returns a DecodeHookFunc that parses string
// values into uint64, leaving every other conversion untouched.
func StringToUint64HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Uint64 {
			// ParseUint already yields a uint64, so no narrowing needed.
			return strconv.ParseUint(data.(string), 0, 64)
		}
		return data, nil
	}
}
// StringToIntHookFunc returns a DecodeHookFunc that parses string
// values into int, leaving every other conversion untouched.
func StringToIntHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Int {
			// Bit size 0 means "fits in the platform's int".
			v, err := strconv.ParseInt(data.(string), 0, 0)
			return int(v), err
		}
		return data, nil
	}
}
// StringToUintHookFunc returns a DecodeHookFunc that parses string
// values into uint, leaving every other conversion untouched.
func StringToUintHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Uint {
			// Bit size 0 means "fits in the platform's uint".
			v, err := strconv.ParseUint(data.(string), 0, 0)
			return uint(v), err
		}
		return data, nil
	}
}
// StringToFloat32HookFunc returns a DecodeHookFunc that parses string
// values into float32, leaving every other conversion untouched.
func StringToFloat32HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Float32 {
			// Parse at 32-bit precision, then narrow the float64 result.
			v, err := strconv.ParseFloat(data.(string), 32)
			return float32(v), err
		}
		return data, nil
	}
}
// StringToFloat64HookFunc returns a DecodeHookFunc that parses string
// values into float64, leaving every other conversion untouched.
func StringToFloat64HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Float64 {
			// ParseFloat already yields a float64, so no narrowing needed.
			return strconv.ParseFloat(data.(string), 64)
		}
		return data, nil
	}
}
// StringToBoolHookFunc returns a DecodeHookFunc that parses string
// values ("true", "1", "f", ...) into bool, leaving every other
// conversion untouched.
func StringToBoolHookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Bool {
			return strconv.ParseBool(data.(string))
		}
		return data, nil
	}
}
// StringToByteHookFunc returns a DecodeHookFunc that converts
// strings to byte.
//
// byte is an alias for uint8, so this simply reuses the uint8 hook.
func StringToByteHookFunc() DecodeHookFunc {
	return StringToUint8HookFunc()
}
// StringToRuneHookFunc returns a DecodeHookFunc that converts
// strings to rune.
//
// rune is an alias for int32, so this simply reuses the int32 hook.
// Note the string is parsed as a number, not taken as a character.
func StringToRuneHookFunc() DecodeHookFunc {
	return StringToInt32HookFunc()
}
// StringToComplex64HookFunc returns a DecodeHookFunc that parses string
// values into complex64, leaving every other conversion untouched.
func StringToComplex64HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Complex64 {
			// Parse at 64-bit precision, then narrow the complex128 result.
			v, err := strconv.ParseComplex(data.(string), 64)
			return complex64(v), err
		}
		return data, nil
	}
}
// StringToComplex128HookFunc returns a DecodeHookFunc that parses string
// values into complex128, leaving every other conversion untouched.
func StringToComplex128HookFunc() DecodeHookFunc {
	return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
		if from.Kind() == reflect.String && to.Kind() == reflect.Complex128 {
			// ParseComplex already yields a complex128, so no narrowing needed.
			return strconv.ParseComplex(data.(string), 128)
		}
		return data, nil
	}
}

@ -0,0 +1,472 @@
{
"nodes": {
"cachix": {
"inputs": {
"devenv": "devenv_2",
"flake-compat": [
"devenv",
"flake-compat"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"pre-commit-hooks": [
"devenv",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1712055811,
"narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
"owner": "cachix",
"repo": "cachix",
"rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "cachix",
"type": "github"
}
},
"devenv": {
"inputs": {
"cachix": "cachix",
"flake-compat": "flake-compat_2",
"nix": "nix_2",
"nixpkgs": "nixpkgs_2",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1717245169,
"narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=",
"owner": "cachix",
"repo": "devenv",
"rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"devenv_2": {
"inputs": {
"flake-compat": [
"devenv",
"cachix",
"flake-compat"
],
"nix": "nix",
"nixpkgs": "nixpkgs",
"poetry2nix": "poetry2nix",
"pre-commit-hooks": [
"devenv",
"cachix",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1708704632,
"narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
"owner": "cachix",
"repo": "devenv",
"rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "python-rewrite",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1717285511,
"narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688870561,
"narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix_2": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression_2"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1692808169,
"narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1717284937,
"narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-regression_2": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1710695816,
"narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "614b4613980a522ba49f0d194531beddbb7220d3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1713361204,
"narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "rolling",
"repo": "devenv-nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1717112898,
"narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1692876271,
"narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1713775815,
"narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_3"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

@ -0,0 +1,39 @@
{
  # Flake inputs: unstable nixpkgs, flake-parts for the modular flake
  # layout, and devenv for the reproducible developer shell.
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    flake-parts.url = "github:hercules-ci/flake-parts";
    devenv.url = "github:cachix/devenv";
  };

  outputs = inputs@{ flake-parts, ... }:
    flake-parts.lib.mkFlake { inherit inputs; } {
      imports = [
        inputs.devenv.flakeModule
      ];
      # Platforms the dev shells are defined for.
      systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
      perSystem = { config, self', inputs', pkgs, system, ... }: rec {
        devenv.shells = {
          default = {
            languages = {
              go.enable = true;
            };
            pre-commit.hooks = {
              nixpkgs-fmt.enable = true;
            };
            # Extra tools available inside the shell.
            packages = with pkgs; [
              golangci-lint
            ];
            # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
            containers = pkgs.lib.mkForce { };
          };
          # CI reuses the exact same shell as local development.
          ci = devenv.shells.default;
        };
      };
    };
}

@ -0,0 +1,11 @@
package errors
import "errors"
// New returns an error that formats as the given text. It forwards
// directly to the standard library's errors.New.
func New(text string) error {
	return errors.New(text)
}
// As finds the first error in err's chain that matches target, and if
// one is found, sets target to that error value and reports true.
// It forwards directly to the standard library's errors.As.
func As(err error, target interface{}) bool {
	return errors.As(err, target)
}

@ -0,0 +1,9 @@
//go:build go1.20

package errors

import "errors"

// Join returns an error that wraps the given errors, discarding nil
// values. On Go 1.20+ it forwards directly to the standard library's
// errors.Join; older toolchains use the polyfill in the !go1.20 file.
func Join(errs ...error) error {
	return errors.Join(errs...)
}

@ -0,0 +1,61 @@
//go:build !go1.20
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errors
// Join returns an error that wraps the given errors.
// Any nil error values are discarded.
// Join returns nil if every value in errs is nil.
// The error formats as the concatenation of the strings obtained
// by calling the Error method of each element of errs, with a newline
// between each string.
//
// A non-nil error returned by Join implements the Unwrap() []error method.
func Join(errs ...error) error {
	// Count non-nil entries first so the backing slice is sized exactly.
	nonNil := 0
	for _, err := range errs {
		if err != nil {
			nonNil++
		}
	}
	if nonNil == 0 {
		return nil
	}
	joined := &joinError{errs: make([]error, 0, nonNil)}
	for _, err := range errs {
		if err != nil {
			joined.errs = append(joined.errs, err)
		}
	}
	return joined
}

// joinError is the concrete error type produced by Join; it carries
// the non-nil wrapped errors in order.
type joinError struct {
	errs []error
}

// Error concatenates the messages of the wrapped errors, separated by
// a newline. Join never constructs a joinError with an empty errs
// slice, so the slice always has at least one element.
func (e *joinError) Error() string {
	var msg []byte
	for i, err := range e.errs {
		if i > 0 {
			msg = append(msg, '\n')
		}
		msg = append(msg, err.Error()...)
	}
	return string(msg)
}

// Unwrap exposes the wrapped errors for errors.Is / errors.As traversal.
func (e *joinError) Unwrap() []error {
	return e.errs
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,44 @@
//go:build !go1.20
package mapstructure
import "reflect"
func isComparable(v reflect.Value) bool {
k := v.Kind()
switch k {
case reflect.Invalid:
return false
case reflect.Array:
switch v.Type().Elem().Kind() {
case reflect.Interface, reflect.Array, reflect.Struct:
for i := 0; i < v.Type().Len(); i++ {
// if !v.Index(i).Comparable() {
if !isComparable(v.Index(i)) {
return false
}
}
return true
}
return v.Type().Comparable()
case reflect.Interface:
// return v.Elem().Comparable()
return isComparable(v.Elem())
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
return false
// if !v.Field(i).Comparable() {
if !isComparable(v.Field(i)) {
return false
}
}
return true
default:
return v.Type().Comparable()
}
}

@ -0,0 +1,10 @@
//go:build go1.20

package mapstructure

import "reflect"

// isComparable reports whether the value v is comparable with ==.
// On Go 1.20+ this is provided directly by the reflect package.
//
// TODO: remove once we drop support for Go <1.20
func isComparable(v reflect.Value) bool {
	return v.Comparable()
}

@ -1,3 +1,29 @@
## v2.7.0 (2025-04-03)
* [GH-3306](https://github.com/gophercloud/gophercloud/pull/3306) [v2] identity: Add Get endpoint by ID
* [GH-3325](https://github.com/gophercloud/gophercloud/pull/3325) [v2] Switch to a version of gocovmerge compatible with go 1.22
* [GH-3327](https://github.com/gophercloud/gophercloud/pull/3327) Merge pull request #3209 from shiftstack/proper-service-discovery
* [GH-3328](https://github.com/gophercloud/gophercloud/pull/3328) [v2] Improve support for `network standard-attr-*` extensions
* [GH-3330](https://github.com/gophercloud/gophercloud/pull/3330) [v2] Enhance Snapshot struct and add ListDetail function in V3 blockstorage
* [GH-3333](https://github.com/gophercloud/gophercloud/pull/3333) [v2] vpnaas: add support for more ciphers (auth, encryption, pfs modes)
* [GH-3334](https://github.com/gophercloud/gophercloud/pull/3334) [v2] Added support for VIF's in Baremetal
* [GH-3335](https://github.com/gophercloud/gophercloud/pull/3335) [v2] Baremetal virtual media Get API
## v2.6.0 (2025-03-03)
* [GH-3309](https://github.com/gophercloud/gophercloud/pull/3309) Backport: Added support for hypervisor_hostname to v2
## v2.5.0 (2025-02-11)
* [GH-3278](https://github.com/gophercloud/gophercloud/pull/3278) [v2] test: Ensure that randomly created secgroup rules don't conflict
* [GH-3287](https://github.com/gophercloud/gophercloud/pull/3287) [v2] Fix panic in ExtractIntoStructPtr
* [GH-3288](https://github.com/gophercloud/gophercloud/pull/3288) [v2] Fix JSON field name hints in APIVersion structs
* [GH-3292](https://github.com/gophercloud/gophercloud/pull/3292) [v2] Add permissions to the label-issue workflow
* [GH-3294](https://github.com/gophercloud/gophercloud/pull/3294) [v2] Add support for zone sharing in DNS v2
* [GH-3296](https://github.com/gophercloud/gophercloud/pull/3296) build(deps): bump golang.org/x/crypto from 0.30.0 to 0.31.0
* [GH-3297](https://github.com/gophercloud/gophercloud/pull/3297) [v2] build(deps): bump golang.org/x/crypto from 0.31.0 to 0.32.0
* [GH-3298](https://github.com/gophercloud/gophercloud/pull/3298) [v2] build(deps): bump golang.org/x/crypto from 0.32.0 to 0.33.0
## v2.4.0 (2024-12-18)
* [GH-3270](https://github.com/gophercloud/gophercloud/pull/3270) [v2] SG rules: implement bulk create

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save