Merge branch 'main' into add-plugins-auto-update-feature

pull/104112/head
Hugo Oshiro 2 months ago
commit 777e0c6a98
  1. 23
      .betterer.eslint.config.js
  2. 150
      .betterer.results
  3. 1
      .github/CODEOWNERS
  4. 9
      .github/renovate.json5
  5. 30
      .github/workflows/pr-patch-check-event.yml
  6. 152
      CHANGELOG.md
  7. 29
      apps/advisor/pkg/app/app.go
  8. 3
      apps/advisor/pkg/app/checks/authchecks/check.go
  9. 3
      apps/advisor/pkg/app/checks/authchecks/list_format_validation.go
  10. 3
      apps/advisor/pkg/app/checks/authchecks/list_format_validation_test.go
  11. 20
      apps/advisor/pkg/app/checks/datasourcecheck/check.go
  12. 11
      apps/advisor/pkg/app/checks/datasourcecheck/check_test.go
  13. 3
      apps/advisor/pkg/app/checks/ifaces.go
  14. 7
      apps/advisor/pkg/app/checks/plugincheck/check.go
  15. 3
      apps/advisor/pkg/app/checks/plugincheck/check_test.go
  16. 34
      apps/advisor/pkg/app/checkscheduler/checkscheduler.go
  17. 30
      apps/advisor/pkg/app/checkscheduler/checkscheduler_test.go
  18. 33
      apps/advisor/pkg/app/checktyperegisterer/checktyperegisterer.go
  19. 6
      apps/advisor/pkg/app/checktyperegisterer/checktyperegisterer_test.go
  20. 14
      apps/advisor/pkg/app/utils.go
  21. 19
      apps/advisor/pkg/app/utils_test.go
  22. 186
      docs/sources/datasources/prometheus/_index.md
  23. 200
      docs/sources/datasources/prometheus/configure-prometheus-data-source.md
  24. 300
      docs/sources/datasources/prometheus/configure/_index.md
  25. 238
      docs/sources/datasources/prometheus/query-editor/_index.md
  26. 255
      docs/sources/datasources/prometheus/query-editor/index.md
  27. 64
      docs/sources/datasources/prometheus/template-variables/_index.md
  28. 73
      docs/sources/explore/simplified-exploration/metrics/index.md
  29. 57
      docs/sources/observability-as-code/_index.md
  30. 2
      docs/sources/observability-as-code/foundation-sdk/_index.md
  31. 74
      docs/sources/observability-as-code/get-started.md
  32. 5
      docs/sources/observability-as-code/grafana-cli/_index.md
  33. 5
      docs/sources/observability-as-code/grafana-cli/grafanacli-workflows.md
  34. 5
      docs/sources/observability-as-code/grafana-cli/install-grafana-cli.md
  35. 5
      docs/sources/observability-as-code/grafana-cli/set-up-grafana-cli.md
  36. 6
      docs/sources/observability-as-code/provision-resources/_index.md
  37. 9
      docs/sources/observability-as-code/provision-resources/file-path-setup.md
  38. 9
      docs/sources/observability-as-code/provision-resources/git-sync-setup.md
  39. 9
      docs/sources/observability-as-code/provision-resources/intro-git-sync.md
  40. 9
      docs/sources/observability-as-code/provision-resources/provisioned-dashboards.md
  41. 9
      docs/sources/observability-as-code/provision-resources/use-git-sync.md
  42. 4
      docs/sources/observability-as-code/schema-v2/_index.md
  43. 206
      docs/sources/panels-visualizations/query-transform-data/sql-expressions/index.md
  44. 232
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/_index.md
  45. 62
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/configure-saml-org-mapping/_index.md
  46. 77
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/configure-saml-signing-encryption/_index.md
  47. 20
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/configure-saml-single-logout/_index.md
  48. 104
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/configure-saml-team-role-mapping/_index.md
  49. 126
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/configure-saml-with-azuread/_index.md
  50. 51
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/configure-saml-with-okta/_index.md
  51. 797
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/index.md
  52. 113
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/saml-configuration-options/_index.md
  53. 58
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/saml-ui/_index.md
  54. 111
      docs/sources/setup-grafana/configure-security/configure-authentication/saml/troubleshoot-saml/_index.md
  55. 25
      docs/sources/shared/upgrade/intro_2.md
  56. 82
      docs/sources/upgrade-guide/upgrade-v12.0/index.md
  57. 4
      docs/sources/whatsnew/_index.md
  58. 95
      docs/sources/whatsnew/whats-new-in-v12-0.md
  59. 24
      e2e/dashboards-edit-v2-suite/dashboard-edit-flows.ts
  60. 60
      e2e/dashboards-edit-v2-suite/dashboards-edit-variables.spec.ts
  61. 7
      e2e/dashboards-suite/dashboard-templating.spec.ts
  62. 24
      go.mod
  63. 49
      go.sum
  64. 8
      go.work.sum
  65. 4
      packages/grafana-data/src/types/featureToggles.gen.ts
  66. 6
      packages/grafana-ui/src/components/Table/TableNG/Cells/HeaderCell.tsx
  67. 4
      packages/grafana-ui/src/components/Table/TableNG/Cells/SparklineCell.tsx
  68. 19
      packages/grafana-ui/src/components/Table/TableNG/Cells/TableCellNG.tsx
  69. 4
      packages/grafana-ui/src/components/Table/TableNG/Filter/utils.ts
  70. 101
      packages/grafana-ui/src/components/Table/TableNG/TableNG.tsx
  71. 2
      packages/grafana-ui/src/components/Table/TableNG/types.ts
  72. 50
      packages/grafana-ui/src/components/Table/TableNG/utils.ts
  73. 13
      packages/grafana-ui/src/utils/featureToggle.ts
  74. 6
      pkg/plugins/backendplugin/grpcplugin/client_proto.go
  75. 28
      pkg/registry/apis/secret/encryption/cipher/cipher.go
  76. 14
      pkg/registry/apis/secret/encryption/cipher/provider/aes256.go
  77. 44
      pkg/registry/apis/secret/encryption/cipher/provider/aes256_test.go
  78. 118
      pkg/registry/apis/secret/encryption/cipher/provider/cipher_aesgcm.go
  79. 144
      pkg/registry/apis/secret/encryption/cipher/provider/cipher_aesgcm_test.go
  80. 52
      pkg/registry/apis/secret/encryption/cipher/provider/decipher_aescfb.go
  81. 70
      pkg/registry/apis/secret/encryption/cipher/provider/decipher_aescfb_test.go
  82. 7
      pkg/registry/apis/secret/encryption/cipher/provider/errors.go
  83. 18
      pkg/registry/apis/secret/encryption/cipher/provider/provider.go
  84. 17
      pkg/registry/apis/secret/encryption/cipher/provider/provider_test.go
  85. 35
      pkg/registry/apis/secret/encryption/cipher/provider/test_fixtures/aescfb_encrypt_correct_output.rb
  86. 38
      pkg/registry/apis/secret/encryption/cipher/provider/test_fixtures/aesgcm_encrypt_correct_output.rb
  87. 199
      pkg/registry/apis/secret/encryption/cipher/service/service.go
  88. 78
      pkg/registry/apis/secret/encryption/cipher/service/service_test.go
  89. 3
      pkg/registry/apis/secret/encryption/secrets.go
  90. 9
      pkg/services/featuremgmt/registry.go
  91. 1
      pkg/services/featuremgmt/toggles_gen.csv
  92. 4
      pkg/services/featuremgmt/toggles_gen.go
  93. 636
      pkg/services/featuremgmt/toggles_gen.json
  94. 139
      pkg/services/folder/folderimpl/conversions.go
  95. 19
      pkg/services/folder/folderimpl/conversions_test.go
  96. 21
      pkg/services/folder/folderimpl/folder.go
  97. 11
      pkg/services/folder/folderimpl/folder_unifiedstorage.go
  98. 54
      pkg/services/folder/folderimpl/folder_unifiedstorage_test.go
  99. 4
      pkg/services/folder/folderimpl/unifiedstore.go
  100. 2
      pkg/services/live/runstream/manager.go
  101. Some files were not shown because too many files have changed in this diff Show More

@ -108,6 +108,29 @@ module.exports = [
'@typescript-eslint/consistent-type-assertions': ['error', { assertionStyle: 'never' }],
},
},
{
files: ['**/*.{js,jsx,ts,tsx}'],
ignores: [
'**/*.{test,spec}.{ts,tsx}',
'**/__mocks__/**',
'**/public/test/**',
'**/mocks.{ts,tsx}',
'**/spec/**/*.{ts,tsx}',
],
rules: {
'no-restricted-syntax': [
'error',
{
selector: 'Identifier[name=localStorage]',
message: 'Direct usage of localStorage is not allowed. Use `Store` from @grafana/data instead.',
},
{
selector: 'MemberExpression[object.name=localStorage]',
message: 'Direct usage of localStorage is not allowed. Use `Store` from @grafana/data instead.',
},
],
},
},
{
files: ['public/app/**/*.{ts,tsx}'],
rules: {

@ -11,9 +11,17 @@ exports[`better eslint`] = {
"apps/dashboard/tshack/v1alpha1_spec_gen.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"e2e/old-arch/utils/support/localStorage.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"]
],
"e2e/old-arch/utils/support/types.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"e2e/utils/support/localStorage.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"]
],
"e2e/utils/support/types.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
@ -360,6 +368,14 @@ exports[`better eslint`] = {
"packages/grafana-data/src/utils/location.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"packages/grafana-data/src/utils/store.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"]
],
"packages/grafana-data/src/utils/url.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"],
[0, 0, 0, "Do not use any type assertions.", "1"],
@ -404,6 +420,14 @@ exports[`better eslint`] = {
"packages/grafana-prometheus/src/components/PromQueryField.tsx:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"packages/grafana-prometheus/src/components/metrics-browser/useMetricsLabelsValues.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"]
],
"packages/grafana-prometheus/src/datasource.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"],
[0, 0, 0, "Unexpected any. Specify a different type.", "1"],
@ -452,11 +476,12 @@ exports[`better eslint`] = {
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"packages/grafana-runtime/src/config.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Do not use any type assertions.", "1"],
[0, 0, 0, "Do not use any type assertions.", "2"],
[0, 0, 0, "Unexpected any. Specify a different type.", "3"],
[0, 0, 0, "Unexpected any. Specify a different type.", "4"]
[0, 0, 0, "Do not use any type assertions.", "3"],
[0, 0, 0, "Unexpected any. Specify a different type.", "4"],
[0, 0, 0, "Unexpected any. Specify a different type.", "5"]
],
"packages/grafana-runtime/src/services/EchoSrv.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"],
@ -501,6 +526,20 @@ exports[`better eslint`] = {
"packages/grafana-runtime/src/utils/queryResponse.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"packages/grafana-runtime/src/utils/userStorage.tsx:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "6"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "7"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "8"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "9"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "10"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "11"]
],
"packages/grafana-schema/src/veneer/common.types.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
@ -783,6 +822,9 @@ exports[`better eslint`] = {
"packages/grafana-ui/src/utils/dom.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"packages/grafana-ui/src/utils/logger.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"]
],
"packages/grafana-ui/src/utils/useAsyncDependency.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
@ -838,6 +880,11 @@ exports[`better eslint`] = {
"public/app/core/navigation/types.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"public/app/core/reducers/appNotification.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"]
],
"public/app/core/services/ResponseQueue.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
@ -990,9 +1037,7 @@ exports[`better eslint`] = {
[0, 0, 0, "Do not use any type assertions.", "0"],
[0, 0, 0, "Unexpected any. Specify a different type.", "1"],
[0, 0, 0, "Unexpected any. Specify a different type.", "2"],
[0, 0, 0, "Unexpected any. Specify a different type.", "3"],
[0, 0, 0, "Unexpected any. Specify a different type.", "4"],
[0, 0, 0, "Unexpected any. Specify a different type.", "5"]
[0, 0, 0, "Unexpected any. Specify a different type.", "3"]
],
"public/app/features/alerting/unified/components/receivers/form/fields/SubformArrayField.tsx:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"],
@ -1017,12 +1062,28 @@ exports[`better eslint`] = {
"public/app/features/alerting/unified/components/rule-editor/RuleInspector.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/alerting/unified/components/rule-editor/alert-rule-form/AlertRuleForm.tsx:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "6"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "7"]
],
"public/app/features/alerting/unified/components/silences/SilencesEditor.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/alerting/unified/components/silences/SilencesFilter.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/alerting/unified/featureToggles.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"]
],
"public/app/features/alerting/unified/hooks/useAlertmanagerConfig.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
@ -1032,9 +1093,16 @@ exports[`better eslint`] = {
"public/app/features/alerting/unified/insights/InsightsMenuButton.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/alerting/unified/rule-editor/formDefaults.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"]
],
"public/app/features/alerting/unified/types/receiver-form.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"],
[0, 0, 0, "Unexpected any. Specify a different type.", "1"]
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"public/app/features/alerting/unified/utils/misc.test.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"],
@ -1077,6 +1145,10 @@ exports[`better eslint`] = {
"public/app/features/auth-config/utils/data.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/browse-dashboards/api/useRecentlyDeletedStateManager.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"]
],
"public/app/features/browse-dashboards/state/index.ts:5381": [
[0, 0, 0, "Do not use export all (\`export * from ...\`)", "0"],
[0, 0, 0, "Do not use export all (\`export * from ...\`)", "1"],
@ -1778,7 +1850,13 @@ exports[`better eslint`] = {
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/logs/utils.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"],
[0, 0, 0, "Do not use any type assertions.", "6"]
],
"public/app/features/manage-dashboards/components/ImportDashboardLibraryPanelsList.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
@ -1949,6 +2027,12 @@ exports[`better eslint`] = {
[0, 0, 0, "Unexpected any. Specify a different type.", "1"],
[0, 0, 0, "Unexpected any. Specify a different type.", "2"]
],
"public/app/features/scopes/selector/ScopesSelectorService.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"]
],
"public/app/features/search/page/components/columns.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
@ -1964,13 +2048,32 @@ exports[`better eslint`] = {
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/search/state/SearchStateManager.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "6"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "7"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "8"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "9"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "10"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "11"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "12"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "13"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "14"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "15"],
[0, 0, 0, "Do not use any type assertions.", "16"]
],
"public/app/features/search/types.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"public/app/features/search/utils.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Do not use any type assertions.", "3"]
],
"public/app/features/serviceaccounts/state/reducers.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
@ -2742,6 +2845,23 @@ exports[`better eslint`] = {
"public/app/plugins/datasource/loki/LanguageProvider.ts:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"public/app/plugins/datasource/loki/LogContextProvider.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"]
],
"public/app/plugins/datasource/loki/components/LokiContextUi.tsx:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "2"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "3"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "4"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "5"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "6"]
],
"public/app/plugins/datasource/loki/components/LokiQueryEditor.tsx:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"]
],
"public/app/plugins/datasource/loki/configuration/ConfigEditor.tsx:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
@ -2752,6 +2872,14 @@ exports[`better eslint`] = {
"public/app/plugins/datasource/loki/querybuilder/components/LokiQueryBuilder.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/plugins/datasource/loki/querybuilder/state.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"]
],
"public/app/plugins/datasource/loki/shardQuerySplitting.ts:5381": [
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "0"],
[0, 0, 0, "Direct usage of localStorage is not allowed. Use \`Store\` from @grafana/data instead.", "1"]
],
"public/app/plugins/datasource/loki/types.ts:5381": [
[0, 0, 0, "Do not re-export imported variable (\`LokiQueryDirection\`)", "0"],
[0, 0, 0, "Do not re-export imported variable (\`LokiQueryType\`)", "1"],

@ -49,6 +49,7 @@
/docs/sources/developers/plugins/ @grafana/plugins-platform-frontend @grafana/plugins-platform-backend
/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md @imatwawana @baldm0mma
/docs/sources/panels-visualizations/query-transform-data/sql-expressions/index.md @lwandz13 @irenerl24
# END Technical documentation
# Backend code

@ -2,6 +2,15 @@
extends: ["config:recommended"],
enabledManagers: ["npm"],
ignoreDeps: [
// ignoring these until we can upgrade to react 19
// see epic here: https://github.com/grafana/grafana/issues/98813
'@types/react',
'@types/react-dom',
'eslint-plugin-react-hooks',
'react',
'react-dom',
'react-refresh',
"@types/history", // this can be removed entirely when we upgrade history since v5 exposes types directly
"history", // we should bump this together with react-router-dom (see https://github.com/grafana/grafana/issues/76744)
"react-router", // we should bump this together with history and react-router-dom

@ -1,22 +1,14 @@
# Owned by grafana-delivery-squad
# Intended to be dropped into the base repo Ex: grafana/grafana
name: Dispatch check for patch conflicts
run-name: dispatch-check-patch-conflicts-${{ github.base_ref }}-${{ github.head_ref }}
on:
pull_request_target:
types:
- opened
- reopened
- synchronize
branches:
push:
branches-ignore:
- "main"
- "v*.*.*"
- "release-*"
- "release-*.*.*"
tags-ignore:
- "*"
permissions: {}
# Since this is run on a pull request, we want to apply the patches intended for the
# target branch onto the source branch, to verify compatibility before merging.
jobs:
dispatch-job:
permissions:
@ -24,13 +16,11 @@ jobs:
contents: read
actions: write
env:
HEAD_REF: ${{ github.head_ref }}
BASE_REF: ${{ github.base_ref }}
REPO: ${{ github.repository }}
SENDER: ${{ github.event.sender.login }}
SHA: ${{ github.sha }}
PR_COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
runs-on: ubuntu-latest
if: github.repository == 'grafana/grafana'
steps:
- name: "Get vault secrets"
id: vault-secrets
@ -51,7 +41,7 @@ jobs:
with:
github-token: ${{ steps.generate_token.outputs.token }}
script: |
const {HEAD_REF, BASE_REF, REPO, SENDER, SHA, PR_COMMIT_SHA} = process.env;
const {REPO, SENDER, SHA} = process.env;
await github.rest.actions.createWorkflowDispatch({
owner: 'grafana',
@ -60,11 +50,11 @@ jobs:
ref: 'main',
inputs: {
src_repo: REPO,
src_ref: HEAD_REF,
src_ref: 'main',
src_merge_sha: SHA,
src_pr_commit_sha: PR_COMMIT_SHA,
src_pr_commit_sha: SHA,
patch_repo: REPO + '-security-patches',
patch_ref: BASE_REF,
patch_ref: 'main',
triggering_github_handle: SENDER
}
})

@ -1,3 +1,155 @@
<!-- 12.0.0 START -->
# 12.0.0 (2025-05-05)
### Features and enhancements
- **Alerting:** API to convert submitted Prometheus rules to GMA [#102231](https://github.com/grafana/grafana/pull/102231), [@fayzal-g](https://github.com/fayzal-g)
- **Alerting:** Add HMAC signature config to the webhook integration [#100960](https://github.com/grafana/grafana/pull/100960), [@alexander-akhmetov](https://github.com/alexander-akhmetov)
- **Alerting:** Add MissingSeriesEvalsToResolve to the APIs [#102150](https://github.com/grafana/grafana/pull/102150), [@alexander-akhmetov](https://github.com/alexander-akhmetov)
- **Alerting:** Add UI migration feature toggle [#102217](https://github.com/grafana/grafana/pull/102217), [@tomratcliffe](https://github.com/tomratcliffe)
- **Alerting:** Add backend support for keep_firing_for [#100750](https://github.com/grafana/grafana/pull/100750), [@alexander-akhmetov](https://github.com/alexander-akhmetov)
- **Alerting:** Add details and edit pages for groups [#100884](https://github.com/grafana/grafana/pull/100884), [@konrad147](https://github.com/konrad147)
- **Alerting:** Add keep_firing_for and Recovering state [#103248](https://github.com/grafana/grafana/pull/103248), [@soniaAguilarPeiron](https://github.com/soniaAguilarPeiron)
- **Alerting:** Add migration to clean up rule versions table [#102484](https://github.com/grafana/grafana/pull/102484), [@yuri-tceretian](https://github.com/yuri-tceretian)
- **Alerting:** Add missing_series_evals_to_resolve option to alert rule form [#102808](https://github.com/grafana/grafana/pull/102808), [@tomratcliffe](https://github.com/tomratcliffe)
- **Alerting:** Delete permanently deleted alert rules. [#102960](https://github.com/grafana/grafana/pull/102960), [@soniaAguilarPeiron](https://github.com/soniaAguilarPeiron)
- **Alerting:** Detect target folder rules and show warning [#103673](https://github.com/grafana/grafana/pull/103673), [@soniaAguilarPeiron](https://github.com/soniaAguilarPeiron)
- **Alerting:** Migration UI [#102010](https://github.com/grafana/grafana/pull/102010), [@soniaAguilarPeiron](https://github.com/soniaAguilarPeiron)
- **Alerting:** Recover deleted alert rules [#101869](https://github.com/grafana/grafana/pull/101869), [@yuri-tceretian](https://github.com/yuri-tceretian)
- **Alerting:** Remove constraints for uniqueness of rule title [#102067](https://github.com/grafana/grafana/pull/102067), [@yuri-tceretian](https://github.com/yuri-tceretian)
- **Alerting:** Remove feature flag `alertingNoDataErrorExecution` [#102156](https://github.com/grafana/grafana/pull/102156), [@yuri-tceretian](https://github.com/yuri-tceretian)
- **Alerting:** Sequential evaluation of rules in group [#98829](https://github.com/grafana/grafana/pull/98829), [@yuri-tceretian](https://github.com/yuri-tceretian)
- **Alerting:** Skip rules that are managed by plugins when importing datasource-managed rules [#103573](https://github.com/grafana/grafana/pull/103573), [@soniaAguilarPeiron](https://github.com/soniaAguilarPeiron)
- **Alerting:** Stop allowing manual editing/restore of internal AM config via settings [#103884](https://github.com/grafana/grafana/pull/103884), [@tomratcliffe](https://github.com/tomratcliffe)
- **Alerting:** Template preview enhancements [#103817](https://github.com/grafana/grafana/pull/103817), [@JacobsonMT](https://github.com/JacobsonMT)
- **Alerting:** Update alerting module to 58ba6c617ff05eb1d6f65c59d369a6a16923dff6 [#102812](https://github.com/grafana/grafana/pull/102812), [@yuri-tceretian](https://github.com/yuri-tceretian)
- **Alerting:** Use 'Grafana IRM' wording in alerting contact point [#102014](https://github.com/grafana/grafana/pull/102014), [@brojd](https://github.com/brojd)
- **Alerting:** Webhook Improvements - Templateable Payloads [#103818](https://github.com/grafana/grafana/pull/103818), [@JacobsonMT](https://github.com/JacobsonMT)
- **AppChrome:** Move kiosk button into profile menu [#103600](https://github.com/grafana/grafana/pull/103600), [@torkelo](https://github.com/torkelo)
- **AppPlatform:** Introduce experimental Github integration for dashboard configuration management [#96329](https://github.com/grafana/grafana/pull/96329), [@MissingRoberto](https://github.com/MissingRoberto)
- **Authorization:** Add group to role DisplayName to make filtered list more clear [#102950](https://github.com/grafana/grafana/pull/102950), [@forsethc](https://github.com/forsethc)
- **Azure Monitor:** Add logs query builder [#99055](https://github.com/grafana/grafana/pull/99055), [@alyssabull](https://github.com/alyssabull)
- **Azure:** Mark Azure Prometheus exemplars as GA and enable by default [#100595](https://github.com/grafana/grafana/pull/100595), [@aangelisc](https://github.com/aangelisc)
- **AzureMonitor:** Improve selection of Basic Logs tables in the query builder [#103820](https://github.com/grafana/grafana/pull/103820), [@aangelisc](https://github.com/aangelisc)
- **BrowseDashboards:** Switch to list view if sort is set [#102196](https://github.com/grafana/grafana/pull/102196), [@Clarity-89](https://github.com/Clarity-89)
- **Checkbox:** Add z-index to description [#103847](https://github.com/grafana/grafana/pull/103847), [@Clarity-89](https://github.com/Clarity-89)
- **Chore:** Promoting feature toggle pluginsSriChecks GA [#102212](https://github.com/grafana/grafana/pull/102212), [@tolzhabayev](https://github.com/tolzhabayev)
- **CloudMigrations:** Add sorting and error filtering to Snapshot Results backend [#102753](https://github.com/grafana/grafana/pull/102753), [@mmandrus](https://github.com/mmandrus)
- **CloudMigrations:** Change onPremToCloudMigrations feature toggle to GA [#103212](https://github.com/grafana/grafana/pull/103212), [@dana-axinte](https://github.com/dana-axinte)
- **CloudMigrations:** Enable high-level resource type selection [#103011](https://github.com/grafana/grafana/pull/103011), [@macabu](https://github.com/macabu)
- **CloudMigrations:** Implement table sorting in the UI [#103061](https://github.com/grafana/grafana/pull/103061), [@mmandrus](https://github.com/mmandrus)
- **CloudWatch:** Migrate to aws-sdk-go-v2 [#103106](https://github.com/grafana/grafana/pull/103106), [@njvrzm](https://github.com/njvrzm)
- **Cloudwatch:** Do not parse log query grouping field to float [#102244](https://github.com/grafana/grafana/pull/102244), [@iwysiu](https://github.com/iwysiu)
- **Cloudwatch:** Migrate to aws-sdk-go-v2 [#99643](https://github.com/grafana/grafana/pull/99643), [@njvrzm](https://github.com/njvrzm)
- **Cloudwatch:** Revert aws sdk go v2 [#103644](https://github.com/grafana/grafana/pull/103644), [@iwysiu](https://github.com/iwysiu)
- **Config:** Removes setting `viewers_can_edit` [#102275](https://github.com/grafana/grafana/pull/102275), [@eleijonmarck](https://github.com/eleijonmarck)
- **Dashboard Restore:** Remove experimental functionality under feature flag `dashboardRestore` for now - this will be reworked [#103204](https://github.com/grafana/grafana/pull/103204), [@stephaniehingtgen](https://github.com/stephaniehingtgen)
- **Dashboards:** Add Dashboard Schema validation (1) [#103662](https://github.com/grafana/grafana/pull/103662), [@marcoabreu](https://github.com/marcoabreu)
- **Dashboards:** Add a config setting that limits the number of series that will be displayed in a panel. Users can opt in to render all series. [#103405](https://github.com/grafana/grafana/pull/103405), [@oscarkilhed](https://github.com/oscarkilhed)
- **Dashboards:** Prevent saving to a non-existent folder [#103503](https://github.com/grafana/grafana/pull/103503), [@stephaniehingtgen](https://github.com/stephaniehingtgen)
- **Dashboards:** Prevent version restore to same data [#102665](https://github.com/grafana/grafana/pull/102665), [@stephaniehingtgen](https://github.com/stephaniehingtgen)
- **Dependencies:** Bump github.com/redis/go-redis/v9 from 9.7.0 to 9.7.3 [#102555](https://github.com/grafana/grafana/pull/102555), [@dependabot\[bot\]](https://github.com/apps/dependabot)
- **Docs:** Standard Datetime units limited to millisecond precision [#103610](https://github.com/grafana/grafana/pull/103610), [@axelavargas](https://github.com/axelavargas)
- **ElasticSearch:** Improve index pattern error messaging and docs [#103899](https://github.com/grafana/grafana/pull/103899), [@idastambuk](https://github.com/idastambuk)
- **ElasticSearch:** Make script field input a text area [#103708](https://github.com/grafana/grafana/pull/103708), [@idastambuk](https://github.com/idastambuk)
- **Extensions:** Expose new observable APIs for accessing components and links [#103063](https://github.com/grafana/grafana/pull/103063), [@leventebalogh](https://github.com/leventebalogh)
- **Feat:** Make expressions work with plugins that set `alerting:false` but `backend:true` in their `plugin.json` files [#102232](https://github.com/grafana/grafana/pull/102232), [@tolzhabayev](https://github.com/tolzhabayev)
- **FlameGraphPanel:** Add units to standard options (#89815) [#102720](https://github.com/grafana/grafana/pull/102720), [@snyderdan](https://github.com/snyderdan)
- **Frontend:** Remove Angular [#99760](https://github.com/grafana/grafana/pull/99760), [@jackw](https://github.com/jackw)
- **Go:** Bump to 1.24.2 [#103521](https://github.com/grafana/grafana/pull/103521), [@Proximyst](https://github.com/Proximyst)
- **Go:** Bump to 1.24.2 (Enterprise)
- **I18n:** Add 13 new languages for translations [#102971](https://github.com/grafana/grafana/pull/102971), [@joshhunt](https://github.com/joshhunt)
- **Influx:** Support PDC for Influx SQL [#103032](https://github.com/grafana/grafana/pull/103032), [@aangelisc](https://github.com/aangelisc)
- **JWT:** Add org role mapping support to the JWT provider [#101584](https://github.com/grafana/grafana/pull/101584), [@QuentinBisson](https://github.com/QuentinBisson)
- **K8s:** Dashboards: Add fine grained access control checks to /apis [#104418](https://github.com/grafana/grafana/pull/104418), [@stephaniehingtgen](https://github.com/stephaniehingtgen)
- **K8s:** Enable kubernetesClientDashboardsFolders by default [#103843](https://github.com/grafana/grafana/pull/103843), [@stephaniehingtgen](https://github.com/stephaniehingtgen)
- **LBAC for data sources:** PublicPreview and self serve enablement [#102276](https://github.com/grafana/grafana/pull/102276), [@eleijonmarck](https://github.com/eleijonmarck)
- **Live:** Remove queryOverLive and live-service-web-worker experimental feature flags [#103518](https://github.com/grafana/grafana/pull/103518), [@ryantxu](https://github.com/ryantxu)
- **Logs Panel:** Add ISO8601 date to log download files [#102932](https://github.com/grafana/grafana/pull/102932), [@gtk-grafana](https://github.com/gtk-grafana)
- **Logs Table:** Add new Controls component to Explore [#103467](https://github.com/grafana/grafana/pull/103467), [@matyax](https://github.com/matyax)
- **Logs:** Add new Controls component to Explore [#103401](https://github.com/grafana/grafana/pull/103401), [@matyax](https://github.com/matyax)
- **Logs:** Always keep displayed fields with changed queries [#102493](https://github.com/grafana/grafana/pull/102493), [@svennergr](https://github.com/svennergr)
- **Logs:** Clean up Explore meta information [#103801](https://github.com/grafana/grafana/pull/103801), [@matyax](https://github.com/matyax)
- **Logs:** Prevent automatic scrolling on refresh after changing scroll position [#102463](https://github.com/grafana/grafana/pull/102463), [@matyax](https://github.com/matyax)
- **MetricsDrilldown:** Advance `exploreMetricsUseExternalAppPlugin` feature toggle stage [#102137](https://github.com/grafana/grafana/pull/102137), [@NWRichmond](https://github.com/NWRichmond)
- **MetricsDrilldown:** Advance `exploreMetricsUseExternalAppPlugin` to GA [#103653](https://github.com/grafana/grafana/pull/103653), [@NWRichmond](https://github.com/NWRichmond)
- **MetricsDrilldown:** Mark `exploreMetricsUseExternalAppPlugin` as not frontend-only [#102942](https://github.com/grafana/grafana/pull/102942), [@NWRichmond](https://github.com/NWRichmond)
- **MetricsDrilldown:** Remove legacy Metrics Drilldown code paths [#103845](https://github.com/grafana/grafana/pull/103845), [@NWRichmond](https://github.com/NWRichmond)
- **MetricsDrilldown:** Restore link to Metrics Drilldown from Explore [#104075](https://github.com/grafana/grafana/pull/104075), [@NWRichmond](https://github.com/NWRichmond)
- **NodeGraph:** Add node graph algorithm layout option [#102760](https://github.com/grafana/grafana/pull/102760), [@joey-grafana](https://github.com/joey-grafana)
- **Plugins:** Remove plugin dependency version (Enterprise)
- **Plugins:** Remove sort by options from plugins catalog [#102862](https://github.com/grafana/grafana/pull/102862), [@oshirohugo](https://github.com/oshirohugo)
- **Plugins:** Remove support for secrets manager plugins [#101467](https://github.com/grafana/grafana/pull/101467), [@wbrowne](https://github.com/wbrowne)
- **Plugins:** Remove support for secrets manager plugins (Enterprise)
- **Plugins:** Remove userStorageAPI feature toggle [#102915](https://github.com/grafana/grafana/pull/102915), [@oshirohugo](https://github.com/oshirohugo)
- **Prometheus:** Add back @lezer/highlight to dev dependency [#102632](https://github.com/grafana/grafana/pull/102632), [@idastambuk](https://github.com/idastambuk)
- **Prometheus:** Add support for cloud partners Prometheus data sources [#103482](https://github.com/grafana/grafana/pull/103482), [@kevinwcyu](https://github.com/kevinwcyu)
- **Prometheus:** Enable Combobox metric select by default [#101045](https://github.com/grafana/grafana/pull/101045), [@joshhunt](https://github.com/joshhunt)
- **Prometheus:** Enable prometheusRunQueriesInParallel feature toggle by default [#102127](https://github.com/grafana/grafana/pull/102127), [@itsmylife](https://github.com/itsmylife)
- **RecordedQueries:** Deprecate recorded queries UI messaging (Enterprise)
- **Security:** Update JWT library (CVE-2025-30204) [#102715](https://github.com/grafana/grafana/pull/102715), [@Proximyst](https://github.com/Proximyst)
- **Tempo:** Add support for ad-hoc filters [#102448](https://github.com/grafana/grafana/pull/102448), [@ifrost](https://github.com/ifrost)
- **Tempo:** Remove aggregate by [#98474](https://github.com/grafana/grafana/pull/98474), [@joey-grafana](https://github.com/joey-grafana)
- **TraceView:** Add scope attributes to span details [#103173](https://github.com/grafana/grafana/pull/103173), [@joey-grafana](https://github.com/joey-grafana)
- **TraceView:** Render all links in span details [#101881](https://github.com/grafana/grafana/pull/101881), [@ifrost](https://github.com/ifrost)
- **Traces:** Preinstall Traces Drilldown app with Grafana [#102986](https://github.com/grafana/grafana/pull/102986), [@ifrost](https://github.com/ifrost)
### Bug fixes
- **Alerting:** Fix Simple condition threshold inputs with negative values. [#102976](https://github.com/grafana/grafana/pull/102976), [@soniaAguilarPeiron](https://github.com/soniaAguilarPeiron)
- **Alerting:** Fix display of `Normal (Updated)` in alert history [#102476](https://github.com/grafana/grafana/pull/102476), [@tomratcliffe](https://github.com/tomratcliffe)
- **Alerting:** Fix rule instances table [#102290](https://github.com/grafana/grafana/pull/102290), [@konrad147](https://github.com/konrad147)
- **Alerting:** Make nested folders work in Alert List Panel [#103550](https://github.com/grafana/grafana/pull/103550), [@tomratcliffe](https://github.com/tomratcliffe)
- **Alerting:** Remove rule type switch for modified export mode [#102287](https://github.com/grafana/grafana/pull/102287), [@konrad147](https://github.com/konrad147)
- **Alerting:** Simplified alert rule toggle bug fixes [#102119](https://github.com/grafana/grafana/pull/102119), [@gillesdemey](https://github.com/gillesdemey)
- **Alertmanager:** Add Role-Based Access Control via reqAction Field [#101543](https://github.com/grafana/grafana/pull/101543), [@olegpixel](https://github.com/olegpixel)
- **App Platform:** Pin bleve to fix CVE-2022-31022 [#102513](https://github.com/grafana/grafana/pull/102513), [@Proximyst](https://github.com/Proximyst)
- **AppChrome/MegaMenu:** Fixes issue with default state being initialised to undocked [#103507](https://github.com/grafana/grafana/pull/103507), [@torkelo](https://github.com/torkelo)
- **AppTitle:** Fix overflowing text [#103583](https://github.com/grafana/grafana/pull/103583), [@tskarhed](https://github.com/tskarhed)
- **Azure:** Ensure basic logs queries are limited to a single resource [#103588](https://github.com/grafana/grafana/pull/103588), [@aangelisc](https://github.com/aangelisc)
- **CloudWatch:** Import new grafana-aws-sdk with PDC fix [#103249](https://github.com/grafana/grafana/pull/103249), [@njvrzm](https://github.com/njvrzm)
- **ColorPicker:** Fixed height when switching tabs [#103304](https://github.com/grafana/grafana/pull/103304), [@DanMPA](https://github.com/DanMPA)
- **Dashboard:** Fix Core Panel Migrations - table panel [#102146](https://github.com/grafana/grafana/pull/102146), [@axelavargas](https://github.com/axelavargas)
- **DashboardScenePage:** Correct slug in self referencing data links [#100048](https://github.com/grafana/grafana/pull/100048), [@Sergej-Vlasov](https://github.com/Sergej-Vlasov)
- **Dashboards:** Fix duplicate provisioning when errors occur on title-only based provisioning [#102249](https://github.com/grafana/grafana/pull/102249), [@stephaniehingtgen](https://github.com/stephaniehingtgen)
- **Dashboards:** Fix panel link to Grafana Metrics Drilldown [#103759](https://github.com/grafana/grafana/pull/103759), [@NWRichmond](https://github.com/NWRichmond)
- **Fix:** Change secure_json_data column data type to medium text only MYSQL [#102557](https://github.com/grafana/grafana/pull/102557), [@s4kh](https://github.com/s4kh)
- **GrafanaUI:** Prevent ToolbarButton from submitting form [#102228](https://github.com/grafana/grafana/pull/102228), [@kozhuhds](https://github.com/kozhuhds)
- **GrafanaUI:** Remove blurred background from overlay backdrops to improve performance [#103563](https://github.com/grafana/grafana/pull/103563), [@joshhunt](https://github.com/joshhunt)
- **LDAP test:** Fix page crash [#102587](https://github.com/grafana/grafana/pull/102587), [@ashharrison90](https://github.com/ashharrison90)
- **Navigation:** Fix bookmarks when Grafana is running under subpath [#102679](https://github.com/grafana/grafana/pull/102679), [@matejkubinec](https://github.com/matejkubinec)
- **PanelEdit:** Fixes suggestions not applying options or field config [#102675](https://github.com/grafana/grafana/pull/102675), [@torkelo](https://github.com/torkelo)
- **PluginProxy:** Fix nil pointer in OAuth forwarding [#103626](https://github.com/grafana/grafana/pull/103626), [@moustafab](https://github.com/moustafab)
- **Plugins:** Fix better UX for disabled Angular plugins [#101333](https://github.com/grafana/grafana/pull/101333), [@hugohaggmark](https://github.com/hugohaggmark)
- **Plugins:** Fix support for adhoc filters with raw queries in InfluxDB [#101966](https://github.com/grafana/grafana/pull/101966), [@beejeebus](https://github.com/beejeebus)
- **Renderer:** Fix regression on callback URL in plugin mode [#103787](https://github.com/grafana/grafana/pull/103787), [@AgnesToulet](https://github.com/AgnesToulet)
- **SQL:** Fix builder crashes when any in selected [#102871](https://github.com/grafana/grafana/pull/102871), [@zoltanbedi](https://github.com/zoltanbedi)
- **SSE:** Fix goroutine leak in math operation expression parsing [#102380](https://github.com/grafana/grafana/pull/102380), [@kylebrandt](https://github.com/kylebrandt)
- **Tempo:** Add fixes for broken exemplars [#103298](https://github.com/grafana/grafana/pull/103298), [@joey-grafana](https://github.com/joey-grafana)
### Breaking changes
- **Alerting:** Make $value return the query value in case when a single datasource is used [#102301](https://github.com/grafana/grafana/pull/102301), [@alexander-akhmetov](https://github.com/alexander-akhmetov)
- **Alerting:** Relax permissions for accessing a rule [#103664](https://github.com/grafana/grafana/pull/103664), [@moustafab](https://github.com/moustafab)
- **Alerting:** Remove feature toggles relating to Loki Alert State History [#103540](https://github.com/grafana/grafana/pull/103540), [@rwwiv](https://github.com/rwwiv)
- **Alerting:** Remove the POST endpoint for the internal Grafana Alertmanager config [#103819](https://github.com/grafana/grafana/pull/103819), [@rwwiv](https://github.com/rwwiv)
- **Anonymous:** Enforce org role Viewer setting [#102070](https://github.com/grafana/grafana/pull/102070), [@eleijonmarck](https://github.com/eleijonmarck)
- **Chore:** Enable Grafana version check when installing plugins [#103176](https://github.com/grafana/grafana/pull/103176), [@andresmgot](https://github.com/andresmgot)
- **Chore:** Enabling failWrongDSUID by default in Grafana 12 [#102192](https://github.com/grafana/grafana/pull/102192), [@tolzhabayev](https://github.com/tolzhabayev)
- **Config:** Removes setting `viewers_can_edit` [#101767](https://github.com/grafana/grafana/pull/101767), [@eleijonmarck](https://github.com/eleijonmarck)
- **Frontend:** Remove Angular (Enterprise)
- **Plugin Extensions:** Clean up the deprecated APIs [#102102](https://github.com/grafana/grafana/pull/102102), [@leventebalogh](https://github.com/leventebalogh)
- **Plugins:** Remove plugin dependency version [#103728](https://github.com/grafana/grafana/pull/103728), [@wbrowne](https://github.com/wbrowne)
- **Tempo:** Remove traceQLStreaming feature toggle [#103619](https://github.com/grafana/grafana/pull/103619), [@adrapereira](https://github.com/adrapereira)
### Plugin development fixes & changes
- **Combobox:** add grouping functionality [#100603](https://github.com/grafana/grafana/pull/100603), [@eledobleefe](https://github.com/eledobleefe)
- **Grafana UI:** Add `columnGap` + `rowGap` to `Stack`/`Grid` [#102883](https://github.com/grafana/grafana/pull/102883), [@ashharrison90](https://github.com/ashharrison90)
- **Grafana UI:** Clearly separate multiple warnings by using HTML tags [#97979](https://github.com/grafana/grafana/pull/97979), [@zenador](https://github.com/zenador)
<!-- 12.0.0 END -->
<!-- 11.6.1 START -->
# 11.6.1 (2025-04-23)

@ -6,6 +6,7 @@ import (
"github.com/grafana/grafana-app-sdk/app"
"github.com/grafana/grafana-app-sdk/k8s"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-app-sdk/resource"
"github.com/grafana/grafana-app-sdk/simple"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
@ -14,9 +15,7 @@ import (
"github.com/grafana/grafana/apps/advisor/pkg/app/checkscheduler"
"github.com/grafana/grafana/apps/advisor/pkg/app/checktyperegisterer"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/log"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
)
func New(cfg app.Config) (app.App, error) {
@ -26,7 +25,7 @@ func New(cfg app.Config) (app.App, error) {
return nil, fmt.Errorf("invalid config type")
}
checkRegistry := specificConfig.CheckRegistry
log := log.New("advisor.app")
log := logging.DefaultLogger.With("app", "advisor.app")
// Prepare storage client
clientGenerator := k8s.NewClientRegistry(cfg.KubeConfig, k8s.ClientConfig{})
@ -46,7 +45,7 @@ func New(cfg app.Config) (app.App, error) {
KubeConfig: cfg.KubeConfig,
InformerConfig: simple.AppInformerConfig{
ErrorHandler: func(ctx context.Context, err error) {
klog.ErrorS(err, "Informer processing error")
log.WithContext(ctx).Error("Informer processing error", "error", err)
},
},
ManagedKinds: []simple.AppManagedKind{
@ -61,31 +60,33 @@ func New(cfg app.Config) (app.App, error) {
}
if req.Action == resource.AdmissionActionCreate {
go func() {
log.Debug("Processing check", "namespace", req.Object.GetNamespace())
logger := log.WithContext(ctx).With("check", check.ID())
logger.Debug("Processing check", "namespace", req.Object.GetNamespace())
requester, err := identity.GetRequester(ctx)
if err != nil {
log.Error("Error getting requester", "error", err)
logger.Error("Error getting requester", "error", err)
return
}
ctx = identity.WithRequester(context.Background(), requester)
err = processCheck(ctx, client, req.Object, check)
err = processCheck(ctx, logger, client, req.Object, check)
if err != nil {
log.Error("Error processing check", "error", err)
logger.Error("Error processing check", "error", err)
}
}()
}
if req.Action == resource.AdmissionActionUpdate {
go func() {
log.Debug("Updating check", "namespace", req.Object.GetNamespace(), "name", req.Object.GetName())
logger := log.WithContext(ctx).With("check", check.ID())
logger.Debug("Updating check", "namespace", req.Object.GetNamespace(), "name", req.Object.GetName())
requester, err := identity.GetRequester(ctx)
if err != nil {
log.Error("Error getting requester", "error", err)
logger.Error("Error getting requester", "error", err)
return
}
ctx = identity.WithRequester(context.Background(), requester)
err = processCheckRetry(ctx, client, req.Object, check)
err = processCheckRetry(ctx, logger, client, req.Object, check)
if err != nil {
log.Error("Error processing check retry", "error", err)
logger.Error("Error processing check retry", "error", err)
}
}()
}
@ -111,14 +112,14 @@ func New(cfg app.Config) (app.App, error) {
}
// Save check types as resources
ctr, err := checktyperegisterer.New(cfg)
ctr, err := checktyperegisterer.New(cfg, log)
if err != nil {
return nil, err
}
a.AddRunnable(ctr)
// Start scheduler
csch, err := checkscheduler.New(cfg)
csch, err := checkscheduler.New(cfg, log)
if err != nil {
return nil, err
}

@ -5,7 +5,6 @@ import (
"fmt"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/ssosettings"
)
@ -17,13 +16,11 @@ var _ checks.Check = (*check)(nil)
type check struct {
ssoSettingsService ssosettings.Service
log log.Logger
}
func New(ssoSettingsService ssosettings.Service) checks.Check {
return &check{
ssoSettingsService: ssoSettingsService,
log: log.New("advisor.ssosettingcheck"),
}
}

@ -5,6 +5,7 @@ import (
"fmt"
"strings"
"github.com/grafana/grafana-app-sdk/logging"
advisor "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/services/login"
@ -47,7 +48,7 @@ func (s *listFormatValidation) Resolution() string {
return "Configure the relevant SSO setting using a valid format, like space-separated (\"opt1 opt2\"), comma-separated values (\"opt1, opt2\") or JSON array format ([\"opt1\", \"opt2\"])."
}
func (s *listFormatValidation) Run(ctx context.Context, _ *advisor.CheckSpec, objToCheck any) (*advisor.CheckReportFailure, error) {
func (s *listFormatValidation) Run(ctx context.Context, log logging.Logger, _ *advisor.CheckSpec, objToCheck any) (*advisor.CheckReportFailure, error) {
setting, ok := objToCheck.(*models.SSOSettings)
if !ok {
return nil, fmt.Errorf("invalid item type %T", objToCheck)

@ -6,6 +6,7 @@ import (
"strings"
"testing"
"github.com/grafana/grafana-app-sdk/logging"
advisor "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/services/login"
@ -195,7 +196,7 @@ func TestListFormatValidation_Run(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
failure, err := validator.Run(ctx, spec, tt.objToCheck)
failure, err := validator.Run(ctx, logging.DefaultLogger, spec, tt.objToCheck)
if tt.expectedError != "" {
require.Error(t, err)

@ -5,11 +5,11 @@ import (
"errors"
"fmt"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-plugin-sdk-go/backend"
advisor "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/plugins/repo"
"github.com/grafana/grafana/pkg/services/datasources"
@ -30,7 +30,6 @@ type check struct {
PluginContextProvider pluginContextProvider
PluginClient plugins.Client
PluginRepo repo.Service
log log.Logger
}
func New(
@ -46,7 +45,6 @@ func New(
PluginContextProvider: pluginContextProvider,
PluginClient: pluginClient,
PluginRepo: pluginRepo,
log: log.New("advisor.datasourcecheck"),
}
}
@ -83,12 +81,10 @@ func (c *check) Steps() []checks.Step {
&healthCheckStep{
PluginContextProvider: c.PluginContextProvider,
PluginClient: c.PluginClient,
log: c.log,
},
&missingPluginStep{
PluginStore: c.PluginStore,
PluginRepo: c.PluginRepo,
log: c.log,
},
}
}
@ -112,7 +108,7 @@ func (s *uidValidationStep) Resolution() string {
"target=_blank>documentation</a> for more information or delete the data source and create a new one."
}
func (s *uidValidationStep) Run(ctx context.Context, obj *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
func (s *uidValidationStep) Run(ctx context.Context, log logging.Logger, obj *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
ds, ok := i.(*datasources.DataSource)
if !ok {
return nil, fmt.Errorf("invalid item type %T", i)
@ -134,7 +130,6 @@ func (s *uidValidationStep) Run(ctx context.Context, obj *advisor.CheckSpec, i a
type healthCheckStep struct {
PluginContextProvider pluginContextProvider
PluginClient plugins.Client
log log.Logger
}
func (s *healthCheckStep) Title() string {
@ -153,7 +148,7 @@ func (s *healthCheckStep) ID() string {
return HealthCheckStepID
}
func (s *healthCheckStep) Run(ctx context.Context, obj *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
func (s *healthCheckStep) Run(ctx context.Context, log logging.Logger, obj *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
ds, ok := i.(*datasources.DataSource)
if !ok {
return nil, fmt.Errorf("invalid item type %T", i)
@ -171,7 +166,7 @@ func (s *healthCheckStep) Run(ctx context.Context, obj *advisor.CheckSpec, i any
return nil, nil
}
// Unable to check health check
s.log.Error("Failed to get plugin context", "datasource_uid", ds.UID, "error", err)
log.Error("Failed to get plugin context", "datasource_uid", ds.UID, "error", err)
return nil, nil
}
req := &backend.CheckHealthRequest{
@ -181,13 +176,13 @@ func (s *healthCheckStep) Run(ctx context.Context, obj *advisor.CheckSpec, i any
resp, err := s.PluginClient.CheckHealth(ctx, req)
if err != nil || resp.Status != backend.HealthStatusOk {
if err != nil {
s.log.Debug("Failed to check health", "datasource_uid", ds.UID, "error", err)
log.Debug("Failed to check health", "datasource_uid", ds.UID, "error", err)
if errors.Is(err, plugins.ErrMethodNotImplemented) || errors.Is(err, plugins.ErrPluginUnavailable) {
// The plugin does not support backend health checks
return nil, nil
}
} else {
s.log.Debug("Failed to check health", "datasource_uid", ds.UID, "status", resp.Status, "message", resp.Message)
log.Debug("Failed to check health", "datasource_uid", ds.UID, "status", resp.Status, "message", resp.Message)
}
return checks.NewCheckReportFailure(
advisor.CheckReportFailureSeverityHigh,
@ -208,7 +203,6 @@ func (s *healthCheckStep) Run(ctx context.Context, obj *advisor.CheckSpec, i any
type missingPluginStep struct {
PluginStore pluginstore.Store
PluginRepo repo.Service
log log.Logger
}
func (s *missingPluginStep) Title() string {
@ -227,7 +221,7 @@ func (s *missingPluginStep) ID() string {
return MissingPluginStepID
}
func (s *missingPluginStep) Run(ctx context.Context, obj *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
func (s *missingPluginStep) Run(ctx context.Context, log logging.Logger, obj *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
ds, ok := i.(*datasources.DataSource)
if !ok {
return nil, fmt.Errorf("invalid item type %T", i)

@ -5,10 +5,10 @@ import (
"errors"
"testing"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-plugin-sdk-go/backend"
advisor "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/plugins/repo"
"github.com/grafana/grafana/pkg/services/datasources"
@ -28,7 +28,7 @@ func runChecks(check *check) ([]advisor.CheckReportFailure, error) {
failures := []advisor.CheckReportFailure{}
for _, step := range check.Steps() {
for _, item := range items {
stepFailures, err := step.Run(ctx, &advisor.CheckSpec{}, item)
stepFailures, err := step.Run(ctx, logging.DefaultLogger, &advisor.CheckSpec{}, item)
if err != nil {
return nil, err
}
@ -60,7 +60,6 @@ func TestCheck_Run(t *testing.T) {
PluginClient: mockPluginClient,
PluginRepo: mockPluginRepo,
PluginStore: mockPluginStore,
log: log.New("advisor.datasourcecheck"),
}
failures, err := runChecks(check)
@ -85,7 +84,6 @@ func TestCheck_Run(t *testing.T) {
PluginClient: mockPluginClient,
PluginRepo: mockPluginRepo,
PluginStore: mockPluginStore,
log: log.New("advisor.datasourcecheck"),
}
failures, err := runChecks(check)
@ -111,7 +109,6 @@ func TestCheck_Run(t *testing.T) {
PluginClient: mockPluginClient,
PluginRepo: mockPluginRepo,
PluginStore: mockPluginStore,
log: log.New("advisor.datasourcecheck"),
}
failures, err := runChecks(check)
@ -136,7 +133,6 @@ func TestCheck_Run(t *testing.T) {
PluginClient: mockPluginClient,
PluginRepo: mockPluginRepo,
PluginStore: mockPluginStore,
log: log.New("advisor.datasourcecheck"),
}
failures, err := runChecks(check)
@ -160,7 +156,6 @@ func TestCheck_Run(t *testing.T) {
PluginClient: mockPluginClient,
PluginRepo: mockPluginRepo,
PluginStore: mockPluginStore,
log: log.New("advisor.datasourcecheck"),
}
failures, err := runChecks(check)
@ -185,7 +180,6 @@ func TestCheck_Run(t *testing.T) {
PluginClient: mockPluginClient,
PluginRepo: mockPluginRepo,
PluginStore: mockPluginStore,
log: log.New("advisor.datasourcecheck"),
}
failures, err := runChecks(check)
@ -211,7 +205,6 @@ func TestCheck_Run(t *testing.T) {
PluginClient: mockPluginClient,
PluginRepo: mockPluginRepo,
PluginStore: mockPluginStore,
log: log.New("advisor.datasourcecheck"),
}
failures, err := runChecks(check)

@ -3,6 +3,7 @@ package checks
import (
"context"
"github.com/grafana/grafana-app-sdk/logging"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
)
@ -29,5 +30,5 @@ type Step interface {
// Explains the action that needs to be taken to resolve the issue
Resolution() string
// Run executes the step for an item and returns a report
Run(ctx context.Context, obj *advisorv0alpha1.CheckSpec, item any) (*advisorv0alpha1.CheckReportFailure, error)
Run(ctx context.Context, log logging.Logger, obj *advisorv0alpha1.CheckSpec, item any) (*advisorv0alpha1.CheckReportFailure, error)
}

@ -3,13 +3,14 @@ package plugincheck
import (
"context"
"fmt"
"log"
sysruntime "runtime"
"github.com/Masterminds/semver/v3"
"github.com/grafana/grafana-app-sdk/logging"
advisor "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/cmd/grafana-cli/services"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/plugins/repo"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginupdatechecker"
@ -94,7 +95,7 @@ func (s *deprecationStep) ID() string {
return DeprecationStepID
}
func (s *deprecationStep) Run(ctx context.Context, _ *advisor.CheckSpec, it any) (*advisor.CheckReportFailure, error) {
func (s *deprecationStep) Run(ctx context.Context, log logging.Logger, _ *advisor.CheckSpec, it any) (*advisor.CheckReportFailure, error) {
p, ok := it.(pluginstore.Plugin)
if !ok {
return nil, fmt.Errorf("invalid item type %T", it)
@ -150,7 +151,7 @@ func (s *updateStep) ID() string {
return UpdateStepID
}
func (s *updateStep) Run(ctx context.Context, _ *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
func (s *updateStep) Run(ctx context.Context, log logging.Logger, _ *advisor.CheckSpec, i any) (*advisor.CheckReportFailure, error) {
p, ok := i.(pluginstore.Plugin)
if !ok {
return nil, fmt.Errorf("invalid item type %T", i)

@ -4,6 +4,7 @@ import (
"context"
"testing"
"github.com/grafana/grafana-app-sdk/logging"
advisor "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/plugins/repo"
@ -170,7 +171,7 @@ func TestRun(t *testing.T) {
failures := []advisor.CheckReportFailure{}
for _, step := range check.Steps() {
for _, item := range items {
stepFailures, err := step.Run(context.Background(), &advisor.CheckSpec{}, item)
stepFailures, err := step.Run(context.Background(), logging.DefaultLogger, &advisor.CheckSpec{}, item)
assert.NoError(t, err)
if stepFailures != nil {
failures = append(failures, *stepFailures)

@ -9,14 +9,13 @@ import (
"github.com/grafana/grafana-app-sdk/app"
"github.com/grafana/grafana-app-sdk/k8s"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-app-sdk/resource"
"github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checkregistry"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/infra/log"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
)
const defaultEvaluationInterval = 24 * time.Hour
@ -31,11 +30,11 @@ type Runner struct {
evaluationInterval time.Duration
maxHistory int
namespace string
log log.Logger
log logging.Logger
}
// NewRunner creates a new Runner.
func New(cfg app.Config) (app.Runnable, error) {
func New(cfg app.Config, log logging.Logger) (app.Runnable, error) {
// Read config
specificConfig, ok := cfg.SpecificConfig.(checkregistry.AdvisorAppConfig)
if !ok {
@ -68,14 +67,15 @@ func New(cfg app.Config) (app.Runnable, error) {
evaluationInterval: evalInterval,
maxHistory: maxHistory,
namespace: namespace,
log: log.New("advisor.checkscheduler"),
log: log.With("runner", "advisor.checkscheduler"),
}, nil
}
func (r *Runner) Run(ctx context.Context) error {
lastCreated, err := r.checkLastCreated(ctx)
logger := r.log.WithContext(ctx)
if err != nil {
r.log.Error("Error getting last check creation time", "error", err)
logger.Error("Error getting last check creation time", "error", err)
// Wait for interval to create the next scheduled check
lastCreated = time.Now()
} else {
@ -83,7 +83,7 @@ func (r *Runner) Run(ctx context.Context) error {
if lastCreated.IsZero() {
err = r.createChecks(ctx)
if err != nil {
klog.Error("Error creating new check reports", "error", err)
logger.Error("Error creating new check reports", "error", err)
} else {
lastCreated = time.Now()
}
@ -103,12 +103,12 @@ func (r *Runner) Run(ctx context.Context) error {
case <-ticker.C:
err = r.createChecks(ctx)
if err != nil {
klog.Error("Error creating new check reports", "error", err)
logger.Error("Error creating new check reports", "error", err)
}
err = r.cleanupChecks(ctx)
err = r.cleanupChecks(ctx, logger)
if err != nil {
klog.Error("Error cleaning up old check reports", "error", err)
logger.Error("Error cleaning up old check reports", "error", err)
}
if nextSendInterval != r.evaluationInterval {
@ -116,7 +116,7 @@ func (r *Runner) Run(ctx context.Context) error {
}
ticker.Reset(nextSendInterval)
case <-ctx.Done():
r.markUnprocessedChecksAsErrored(ctx)
r.markUnprocessedChecksAsErrored(ctx, logger)
return ctx.Err()
}
}
@ -163,7 +163,7 @@ func (r *Runner) createChecks(ctx context.Context) error {
}
// cleanupChecks deletes the olders checks if the number of checks exceeds the limit.
func (r *Runner) cleanupChecks(ctx context.Context) error {
func (r *Runner) cleanupChecks(ctx context.Context, logger logging.Logger) error {
list, err := r.client.List(ctx, r.namespace, resource.ListOptions{Limit: -1})
if err != nil {
return err
@ -175,7 +175,7 @@ func (r *Runner) cleanupChecks(ctx context.Context) error {
labels := check.GetLabels()
checkType, ok := labels[checks.TypeLabel]
if !ok {
klog.Error("Check type not found in labels", "check", check)
logger.Error("Check type not found in labels", "check", check)
continue
}
checksByType[checkType] = append(checksByType[checkType], check)
@ -230,19 +230,19 @@ func getMaxHistory(pluginConfig map[string]string) (int, error) {
return maxHistory, nil
}
func (r *Runner) markUnprocessedChecksAsErrored(ctx context.Context) {
func (r *Runner) markUnprocessedChecksAsErrored(ctx context.Context, log logging.Logger) {
list, err := r.client.List(ctx, r.namespace, resource.ListOptions{})
if err != nil {
r.log.Error("Error getting checks", "error", err)
log.Error("Error getting checks", "error", err)
return
}
for _, check := range list.GetItems() {
if checks.GetStatusAnnotation(check) == "" {
r.log.Error("Check is unprocessed", "check", check.GetStaticMetadata().Identifier())
log.Error("Check is unprocessed", "check", check.GetStaticMetadata().Identifier())
err := checks.SetStatusAnnotation(ctx, r.client, check, checks.StatusAnnotationError)
if err != nil {
r.log.Error("Error setting check status to error", "error", err)
log.Error("Error setting check status to error", "error", err)
}
}
}

@ -8,10 +8,10 @@ import (
"testing"
"time"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-app-sdk/resource"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -31,7 +31,7 @@ func TestRunner_Run(t *testing.T) {
runner := &Runner{
checkRegistry: mockCheckService,
client: mockClient,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
evaluationInterval: 1 * time.Hour,
}
@ -51,7 +51,7 @@ func TestRunner_checkLastCreated_ErrorOnList(t *testing.T) {
runner := &Runner{
client: mockClient,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
lastCreated, err := runner.checkLastCreated(context.Background())
@ -76,7 +76,7 @@ func TestRunner_createChecks_ErrorOnCreate(t *testing.T) {
runner := &Runner{
checkRegistry: mockCheckService,
client: mockClient,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
err := runner.createChecks(context.Background())
@ -100,7 +100,7 @@ func TestRunner_createChecks_Success(t *testing.T) {
runner := &Runner{
checkRegistry: mockCheckService,
client: mockClient,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
err := runner.createChecks(context.Background())
@ -116,10 +116,10 @@ func TestRunner_cleanupChecks_ErrorOnList(t *testing.T) {
runner := &Runner{
client: mockClient,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
err := runner.cleanupChecks(context.Background())
err := runner.cleanupChecks(context.Background(), logging.DefaultLogger)
assert.Error(t, err)
}
@ -137,10 +137,10 @@ func TestRunner_cleanupChecks_WithinMax(t *testing.T) {
runner := &Runner{
client: mockClient,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
err := runner.cleanupChecks(context.Background())
err := runner.cleanupChecks(context.Background(), logging.DefaultLogger)
assert.NoError(t, err)
}
@ -167,9 +167,9 @@ func TestRunner_cleanupChecks_ErrorOnDelete(t *testing.T) {
runner := &Runner{
client: mockClient,
maxHistory: defaultMaxHistory,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
err := runner.cleanupChecks(context.Background())
err := runner.cleanupChecks(context.Background(), logging.DefaultLogger)
assert.ErrorContains(t, err, "delete error")
}
@ -203,9 +203,9 @@ func TestRunner_cleanupChecks_Success(t *testing.T) {
runner := &Runner{
client: mockClient,
maxHistory: defaultMaxHistory,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
err := runner.cleanupChecks(context.Background())
err := runner.cleanupChecks(context.Background(), logging.DefaultLogger)
assert.NoError(t, err)
assert.Equal(t, []string{"check-0"}, itemsDeleted)
}
@ -274,9 +274,9 @@ func Test_markUnprocessedChecksAsErrored(t *testing.T) {
}
runner := &Runner{
client: mockClient,
log: log.NewNopLogger(),
log: logging.DefaultLogger,
}
runner.markUnprocessedChecksAsErrored(context.Background())
runner.markUnprocessedChecksAsErrored(context.Background(), logging.DefaultLogger)
assert.Equal(t, "check-1", identifier.Name)
assert.Equal(t, "/metadata/annotations", patchOperation.Path)
expectedAnnotations := map[string]string{

@ -7,11 +7,11 @@ import (
"github.com/grafana/grafana-app-sdk/app"
"github.com/grafana/grafana-app-sdk/k8s"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-app-sdk/resource"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checkregistry"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/infra/log"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -23,13 +23,13 @@ type Runner struct {
checkRegistry checkregistry.CheckService
client resource.Client
namespace string
log log.Logger
log logging.Logger
retryAttempts int
retryDelay time.Duration
}
// NewRunner creates a new Runner.
func New(cfg app.Config) (app.Runnable, error) {
func New(cfg app.Config, log logging.Logger) (app.Runnable, error) {
// Read config
specificConfig, ok := cfg.SpecificConfig.(checkregistry.AdvisorAppConfig)
if !ok {
@ -52,33 +52,34 @@ func New(cfg app.Config) (app.Runnable, error) {
checkRegistry: checkRegistry,
client: client,
namespace: namespace,
log: log.New("advisor.checktyperegisterer"),
retryAttempts: 3,
retryDelay: time.Second * 5,
log: log.With("runner", "advisor.checktyperegisterer"),
retryAttempts: 5,
retryDelay: time.Second * 10,
}, nil
}
func (r *Runner) createOrUpdate(ctx context.Context, obj resource.Object) error {
func (r *Runner) createOrUpdate(ctx context.Context, log logging.Logger, obj resource.Object) error {
id := obj.GetStaticMetadata().Identifier()
_, err := r.client.Create(ctx, id, obj, resource.CreateOptions{})
if err != nil {
if errors.IsAlreadyExists(err) {
// Already exists, update
r.log.Debug("Check type already exists, updating", "identifier", id)
log.Debug("Check type already exists, updating", "identifier", id)
_, err = r.client.Update(ctx, id, obj, resource.UpdateOptions{})
if err != nil {
// Ignore the error, it's probably due to a race condition
r.log.Error("Error updating check type", "error", err)
log.Error("Error updating check type", "error", err)
}
return nil
}
return err
}
r.log.Debug("Check type registered successfully", "identifier", id)
log.Debug("Check type registered successfully", "identifier", id)
return nil
}
func (r *Runner) Run(ctx context.Context) error {
logger := r.log.WithContext(ctx)
for _, t := range r.checkRegistry.Checks() {
steps := t.Steps()
stepTypes := make([]advisorv0alpha1.CheckTypeStep, len(steps))
@ -105,17 +106,19 @@ func (r *Runner) Run(ctx context.Context) error {
},
}
for i := 0; i < r.retryAttempts; i++ {
err := r.createOrUpdate(ctx, obj)
err := r.createOrUpdate(ctx, logger, obj)
if err != nil {
r.log.Error("Error creating check type, retrying", "error", err, "attempt", i+1)
logger.Error("Error creating check type, retrying", "error", err, "attempt", i+1)
if i == r.retryAttempts-1 {
r.log.Error("Unable to register check type")
logger.Error("Unable to register check type")
} else {
time.Sleep(r.retryDelay)
// Calculate exponential backoff delay: baseDelay * 2^attempt
delay := r.retryDelay * time.Duration(1<<i)
time.Sleep(delay)
}
continue
}
r.log.Debug("Check type registered successfully", "check_type", t.ID())
logger.Debug("Check type registered successfully", "check_type", t.ID())
break
}
}

@ -6,10 +6,10 @@ import (
"fmt"
"testing"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-app-sdk/resource"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
"github.com/grafana/grafana/pkg/infra/log"
k8sErrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@ -119,7 +119,7 @@ func TestCheckTypesRegisterer_Run(t *testing.T) {
updateFunc: tt.updateFunc,
},
namespace: "custom-namespace",
log: log.New("test"),
log: logging.DefaultLogger,
retryAttempts: 1,
retryDelay: 0,
}
@ -180,7 +180,7 @@ func (m *mockStep) Resolution() string {
return ""
}
func (m *mockStep) Run(ctx context.Context, obj *advisorv0alpha1.CheckSpec, item any) (*advisorv0alpha1.CheckReportFailure, error) {
func (m *mockStep) Run(ctx context.Context, log logging.Logger, obj *advisorv0alpha1.CheckSpec, item any) (*advisorv0alpha1.CheckReportFailure, error) {
return nil, nil
}

@ -7,6 +7,7 @@ import (
"slices"
"sync"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-app-sdk/resource"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
@ -30,7 +31,7 @@ func getCheck(obj resource.Object, checkMap map[string]checks.Check) (checks.Che
return c, nil
}
func processCheck(ctx context.Context, client resource.Client, obj resource.Object, check checks.Check) error {
func processCheck(ctx context.Context, log logging.Logger, client resource.Client, obj resource.Object, check checks.Check) error {
status := checks.GetStatusAnnotation(obj)
if status != "" {
// Check already processed
@ -51,7 +52,7 @@ func processCheck(ctx context.Context, client resource.Client, obj resource.Obje
}
// Run the steps
steps := check.Steps()
failures, err := runStepsInParallel(ctx, &c.Spec, steps, items)
failures, err := runStepsInParallel(ctx, log, &c.Spec, steps, items)
if err != nil {
setErr := checks.SetStatusAnnotation(ctx, client, obj, checks.StatusAnnotationError)
if setErr != nil {
@ -77,7 +78,7 @@ func processCheck(ctx context.Context, client resource.Client, obj resource.Obje
}, resource.PatchOptions{}, obj)
}
func processCheckRetry(ctx context.Context, client resource.Client, obj resource.Object, check checks.Check) error {
func processCheckRetry(ctx context.Context, log logging.Logger, client resource.Client, obj resource.Object, check checks.Check) error {
status := checks.GetStatusAnnotation(obj)
if status == "" || status == checks.StatusAnnotationError {
// Check not processed yet or errored
@ -104,7 +105,7 @@ func processCheckRetry(ctx context.Context, client resource.Client, obj resource
}
// Run the steps
steps := check.Steps()
failures, err := runStepsInParallel(ctx, &c.Spec, steps, []any{item})
failures, err := runStepsInParallel(ctx, log, &c.Spec, steps, []any{item})
if err != nil {
setErr := checks.SetStatusAnnotation(ctx, client, obj, checks.StatusAnnotationError)
if setErr != nil {
@ -143,7 +144,7 @@ func processCheckRetry(ctx context.Context, client resource.Client, obj resource
}, resource.PatchOptions{}, obj)
}
func runStepsInParallel(ctx context.Context, spec *advisorv0alpha1.CheckSpec, steps []checks.Step, items []any) ([]advisorv0alpha1.CheckReportFailure, error) {
func runStepsInParallel(ctx context.Context, log logging.Logger, spec *advisorv0alpha1.CheckSpec, steps []checks.Step, items []any) ([]advisorv0alpha1.CheckReportFailure, error) {
reportFailures := []advisorv0alpha1.CheckReportFailure{}
var internalErr error
var wg sync.WaitGroup
@ -166,7 +167,8 @@ func runStepsInParallel(ctx context.Context, spec *advisorv0alpha1.CheckSpec, st
err = fmt.Errorf("panic recovered in step %s: %v", step.ID(), r)
}
}()
stepErr, err = step.Run(ctx, spec, item)
logger := log.With("step", step.ID())
stepErr, err = step.Run(ctx, logger, spec, item)
}()
mu.Lock()
defer mu.Unlock()

@ -6,6 +6,7 @@ import (
"fmt"
"testing"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana-app-sdk/resource"
advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1"
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
@ -62,7 +63,7 @@ func TestProcessCheck(t *testing.T) {
items: []any{"item"},
}
err = processCheck(ctx, client, obj, check)
err = processCheck(ctx, logging.DefaultLogger, client, obj, check)
assert.NoError(t, err)
assert.Equal(t, "processed", obj.GetAnnotations()[checks.StatusAnnotation])
}
@ -89,7 +90,7 @@ func TestProcessMultipleCheckItems(t *testing.T) {
items: items,
}
err = processCheck(ctx, client, obj, check)
err = processCheck(ctx, logging.DefaultLogger, client, obj, check)
assert.NoError(t, err)
assert.Equal(t, "processed", obj.GetAnnotations()[checks.StatusAnnotation])
r := client.lastValue.(advisorv0alpha1.CheckV0alpha1StatusReport)
@ -104,7 +105,7 @@ func TestProcessCheck_AlreadyProcessed(t *testing.T) {
ctx := context.TODO()
check := &mockCheck{}
err := processCheck(ctx, client, obj, check)
err := processCheck(ctx, logging.DefaultLogger, client, obj, check)
assert.NoError(t, err)
}
@ -124,7 +125,7 @@ func TestProcessCheck_RunError(t *testing.T) {
err: errors.New("run error"),
}
err = processCheck(ctx, client, obj, check)
err = processCheck(ctx, logging.DefaultLogger, client, obj, check)
assert.Error(t, err)
assert.Equal(t, "error", obj.GetAnnotations()[checks.StatusAnnotation])
}
@ -145,7 +146,7 @@ func TestProcessCheck_RunRecoversFromPanic(t *testing.T) {
runPanics: true,
}
err = processCheck(ctx, client, obj, check)
err = processCheck(ctx, logging.DefaultLogger, client, obj, check)
assert.Error(t, err)
assert.Contains(t, err.Error(), "panic recovered in step")
assert.Equal(t, "error", obj.GetAnnotations()[checks.StatusAnnotation])
@ -164,7 +165,7 @@ func TestProcessCheckRetry_NoRetry(t *testing.T) {
check := &mockCheck{}
err = processCheckRetry(ctx, client, obj, check)
err = processCheckRetry(ctx, logging.DefaultLogger, client, obj, check)
assert.NoError(t, err)
}
@ -187,7 +188,7 @@ func TestProcessCheckRetry_RetryError(t *testing.T) {
err: errors.New("retry error"),
}
err = processCheckRetry(ctx, client, obj, check)
err = processCheckRetry(ctx, logging.DefaultLogger, client, obj, check)
assert.Error(t, err)
assert.Equal(t, "error", obj.GetAnnotations()[checks.StatusAnnotation])
}
@ -216,7 +217,7 @@ func TestProcessCheckRetry_Success(t *testing.T) {
items: []any{"item"},
}
err = processCheckRetry(ctx, client, obj, check)
err = processCheckRetry(ctx, logging.DefaultLogger, client, obj, check)
assert.NoError(t, err)
assert.Equal(t, "processed", obj.GetAnnotations()[checks.StatusAnnotation])
assert.Empty(t, obj.GetAnnotations()[checks.RetryAnnotation])
@ -262,7 +263,7 @@ type mockStep struct {
panics bool
}
func (m *mockStep) Run(ctx context.Context, obj *advisorv0alpha1.CheckSpec, items any) (*advisorv0alpha1.CheckReportFailure, error) {
func (m *mockStep) Run(ctx context.Context, log logging.Logger, obj *advisorv0alpha1.CheckSpec, items any) (*advisorv0alpha1.CheckReportFailure, error) {
if m.panics {
panic("panic")
}

@ -26,21 +26,11 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/getting-started/get-started-grafana-prometheus/#get-started-with-grafana-and-prometheus
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/getting-started/get-started-grafana-prometheus/#get-started-with-grafana-and-prometheus
configure-grafana-configuration-file-location:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
provisioning-data-sources:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
explore:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
set-up-grafana-monitoring:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/set-up-grafana-monitoring/
@ -73,145 +63,103 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
configure-prometheus-data-source:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/configure-prometheus-data-source/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/configure
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/configure
annotate-visualizations:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/dashboards/build-dashboards/annotate-visualizations/
recorded-queries:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/recorded-queries/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/recorded-queries/
transformations:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/configure-prometheus-data-source/
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/transform-data/
alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
visualizations:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/panels-visualizations/visualizations/
variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/dashboards/variables/
---
# Prometheus data source
Prometheus is an open source database that uses a telemetry collector agent to scrape and store metrics used for monitoring and alerting. Grafana provides native support for Prometheus. If you are just getting started with Prometheus, see [What is Prometheus?](ref:intro-to-prometheus).
Prometheus is an open source database that uses a telemetry collector agent to scrape and store metrics used for monitoring and alerting.
{{% admonition type="tip" %}}
For instructions on downloading Prometheus see [Get started with Grafana and Prometheus](ref:get-started-prometheus).
Grafana provides native support for Prometheus, so you don't need to install a plugin.
If you’re ready to start visualizing your metrics, check out our Prometheus Learning Journeys:
The following documentation will help you get started working with Prometheus and Grafana:
- [Connect to a Prometheus data source in Grafana Cloud](https://www.grafana.com/docs/learning-journeys/prometheus/) to visualize your metrics directly from where they are stored.
- [Send metrics to Grafana Cloud using Prometheus remote write](https://www.grafana.com/docs/learning-journeys/prom-remote-write/) to explore Grafana Cloud without making significant changes to your existing configuration.
{{% /admonition %}}
- [What is Prometheus?](ref:intro-to-prometheus)
- [Prometheus data model](https://prometheus.io/docs/concepts/data_model/)
- [Getting started](https://prometheus.io/docs/prometheus/latest/getting_started/)
- [Configure the Prometheus data source](ref:configure-prometheus-data-source)
- [Prometheus query editor](query-editor/)
- [Template variables](template-variables/)
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:administration-documentation).
Only users with the organization `administrator` role can add data sources and edit existing data sources.
Administrators can also [configure the data source via YAML](#provision-the-data-source) with Grafana's provisioning system.
## Exemplars
Once you've added the Prometheus data source, you can [configure it](ref:configure-prometheus-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards), use [Explore](ref:explore), and [annotate visualizations](ref:annotate-visualizations).
In Prometheus, an **exemplar** is a specific trace that represents a measurement taken within a given time interval. While metrics provide an aggregated view of your system, and traces offer a detailed view of individual requests, exemplars serve as a bridge between the two, linking high-level metrics to specific traces for deeper insights.
The following guides will help you get started with the Prometheus data source:
Exemplars associate higher-cardinality metadata from a specific event with traditional time series data. Refer to [Introduction to exemplars](ref:exemplars) in the Prometheus documentation for detailed information on how they work.
- [Configure the Prometheus data source](ref:configure-prometheus-data-source)
- [Prometheus query editor](query-editor/)
- [Template variables](template-variables/)
Grafana can show exemplar data alongside a metric both in Explore and in Dashboards.
{{< figure src="/static/img/docs/v74/exemplars.png" class="docs-image--no-shadow" caption="Exemplar window" >}}
You add exemplars when you configure the Prometheus data source.
{{< figure src="/static/img/docs/prometheus/exemplars-10-1.png" max-width="500px" class="docs-image--no-shadow" >}}
## Prometheus API
The Prometheus data source also works with other projects that implement the [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/).
For more information on how to query other Prometheus-compatible projects from Grafana, refer to the specific project's documentation:
For more information on how to query other Prometheus-compatible projects from Grafana, refer to the specific product's documentation:
- [Grafana Mimir](/docs/mimir/latest/)
- [Thanos](https://thanos.io/tip/components/query.md/)
## Provision the data source
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
{{% admonition type="note" %}}
Once you have provisioned a data source you cannot edit it.
{{% /admonition %}}
### Provisioning example
```yaml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
# Access mode - proxy (server in the UI) or direct (browser in the UI).
url: http://localhost:9090
jsonData:
httpMethod: POST
manageAlerts: true
prometheusType: Prometheus
prometheusVersion: 2.44.0
cacheLevel: 'High'
disableRecordingRules: false
incrementalQueryOverlapWindow: 10m
exemplarTraceIdDestinations:
# Field with internal link pointing to data source in Grafana.
# datasourceUid value can be anything, but it should be unique across all defined data source uids.
- datasourceUid: my_jaeger_uid
name: traceID
# Field with external link.
- name: traceID
url: 'http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Jaeger%22,%7B%22query%22:%22$${__value.raw}%22%7D%5D'
```
## View Grafana metrics with Prometheus
Grafana exposes metrics for Prometheus on the `/metrics` endpoint.
Grafana also includes a pre-built dashboard to help you start visualizing your metrics immediately.
Complete the following steps to import the pre-built dashboard:
1. Navigate to the Prometheus [configuration page](ref:configure-prometheus-data-source).
1. Click the **Dashboards** tab.
1. Locate the **Grafana metrics** dashboard in the list and click **Import**.
For details about these metrics, refer to [Internal Grafana metrics](ref:set-up-grafana-monitoring).
## Amazon Managed Service for Prometheus
The Prometheus data source with Amazon Managed Service for Prometheus is deprecated. Use the [Amazon Managed Service for Prometheus data source](https://grafana.com/grafana/plugins/grafana-amazonprometheus-datasource/) instead. Migration steps are detailed in the link.
## Azure authentication settings
The Prometheus data source works with Azure authentication. To configure Azure authentication see [Configure Azure Active Directory (AD) authentication](/docs/grafana/latest/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication).
In Grafana Enterprise, update the .ini configuration file: [Configure Grafana](ref:configure-grafana). Depending on your setup, the .ini file is located [here](ref:configure-grafana-configuration-file-location).
Add the following setting in the **[auth]** section :
```bash
[auth]
azure_auth_enabled = true
```
{{% admonition type="note" %}}
If you are using Azure authentication settings, do not enable `Forward OAuth identity`. Both use the same HTTP authorization headers, and the Azure settings will be overwritten by the OAuth token.
{{% /admonition %}}
## Exemplars
Exemplars associate higher-cardinality metadata from a specific event with traditional time series data. See [Introduction to exemplars](ref:exemplars) in Prometheus documentation for detailed information on how they work.
{{% admonition type="note" %}}
Available in Prometheus v2.26 and higher with Grafana v7.4 and higher.
{{% /admonition %}}
Grafana can show exemplars data alongside a metric both in Explore and in Dashboards.
{{< figure src="/static/img/docs/v74/exemplars.png" class="docs-image--no-shadow" caption="Screenshot showing the detail window of an Exemplar" >}}
See the Exemplars section in [Configure Prometheus data source](ref:configure-prometheus-data-source).
{{< figure src="/static/img/docs/prometheus/exemplars-10-1.png" max-width="500px" class="docs-image--no-shadow" caption="Exemplars" >}}
## Incremental dashboard queries (beta)
As of Grafana 10, the Prometheus data source can be configured to query live dashboards incrementally, instead of re-querying the entire duration on each dashboard refresh.
This can be toggled on or off in the data source configuration or provisioning file (under `incrementalQuerying` in jsonData).
Additionally, the amount of overlap between incremental queries can be configured using the `incrementalQueryOverlapWindow` jsonData field, the default value is `10m` (10 minutes).
Increasing the duration of the `incrementalQueryOverlapWindow` will increase the size of every incremental query, but might be helpful for instances that have inconsistent results for recent data.
## Get the most out of the Prometheus data source
## Recording Rules (beta)
After you install and configure Prometheus you can:
The Prometheus data source can be configured to disable recording rules under the data source configuration or provisioning file (under `disableRecordingRules` in jsonData).
- Create a wide variety of [visualizations](ref:visualizations)
- Configure and use [templates and variables](ref:variables)
- Add [transformations](ref:transformations)
- Add [annotations](ref:annotate-visualizations)
- Set up [alerting](ref:alerting)
- Create [recorded queries](ref:recorded-queries)

@ -1,200 +0,0 @@
---
aliases:
- ../data-sources/prometheus/
- ../features/datasources/prometheus/
description: Guide for configuring Prometheus in Grafana
keywords:
- grafana
- prometheus
- guide
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Configure Prometheus
title: Configure the Prometheus data source
weight: 200
refs:
intro-to-prometheus:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
exemplars:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
configure-data-links-value-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
alerting-alert-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/alert-rules/
---
# Configure Prometheus
Grafana ships with built-in support for Prometheus. If you are new to Prometheus the following documentation will help you get started working with Prometheus and Grafana:
- [What is Prometheus?](ref:intro-to-prometheus)
- [Prometheus data model](https://prometheus.io/docs/concepts/data_model/)
- [Getting started](https://prometheus.io/docs/prometheus/latest/getting_started/)
## Configure the data source
{{< shared id="add-prom-data-source" >}}
To add the Prometheus data source, complete the following steps:
1. Click **Connections** in the left-side menu.
1. Under **Connections**, click **Add new connection**.
1. Enter `Prometheus` in the search bar.
1. Click **Prometheus data source**.
1. Click **Add new data source** in the upper right.
1. Enter a name for the data source.
{{< /shared >}}
You will be taken to the **Settings** tab where you will set up your Prometheus configuration.
## Configuration options
The following is a list of configuration options for Prometheus.
The first option to configure is the name of your connection:
- **Name** - The data source name. This is how you refer to the data source in panels and queries. Examples: prometheus-1, prom-metrics.
- **Default** - Toggle to select as the default name in dashboard panels. When you go to a dashboard panel this will be the default selected data source.
### Connection section
- **Prometheus server URL** - The URL of your Prometheus server. {{< shared id="prom-data-source-url" >}}
If your Prometheus server is local, use `http://localhost:9090`. If it's on a server within a network, this is the URL with the port where you are running Prometheus. Example: `http://prometheus.example.orgname:9090`.
{{< admonition type="note" >}}
If you're running Grafana and Prometheus together in different container environments, each localhost refers to its own container - if the server URL is localhost:9090, that means port 9090 inside the Grafana container, not port 9090 on the host machine.
You should use the IP address of the Prometheus container, or the hostname if you are using Docker Compose. Alternatively, you can consider `http://host.docker.internal:9090`.
{{< /admonition >}}
{{< /shared >}}
### Authentication section
There are several authentication methods you can choose in the Authentication section.
{{% admonition type="note" %}}
Use TLS (Transport Layer Security) for an additional layer of security when working with Prometheus. For information on setting up TLS encryption with Prometheus see [Securing Prometheus API and UI Endpoints Using TLS Encryption](https://prometheus.io/docs/guides/tls-encryption/). You must add TLS settings to your Prometheus configuration file **prior** to setting these options in Grafana.
{{% /admonition %}}
- **Basic authentication** - The most common authentication method. Use your `data source` user name and `data source` password to connect.
- **With credentials** - Toggle on to enable credentials such as cookies or auth headers to be sent with cross-site requests.
- **TLS client authentication** - Toggle on to use client authentication. When enabled, add the `Server name`, `Client cert` and `Client key`. The client provides a certificate that is validated by the server to establish the client's trusted identity. The client key encrypts the data between client and server.
- **With CA cert** - Authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file.
- **Skip TLS verify** - Toggle on to bypass TLS certificate validation.
- **Forward OAuth identity** - Forward the OAuth access token (and also the OIDC ID token if available) of the user querying the data source.
### Custom HTTP headers
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Prometheus instance.
- **Value** - The value of the header.
## Advanced settings
Following are additional configuration options.
### Advanced HTTP settings
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
- **Timeout** - The HTTP request timeout. This must be in seconds. The default is 30 seconds.
### Alerting
- **Manage alerts via Alerting UI** - Toggle to enable [data source-managed rules in Grafana Alerting](ref:alerting-alert-rules) for this data source. For `Mimir`, it enables managing data source-managed rules and alerts. For `Prometheus`, it only supports viewing existing rules and alerts, which are displayed as data source-managed.
{{% admonition type="note" %}}
The **Manage alerts via Alerting UI** toggle is enabled by default. You can change this behavior by setting the [default_manage_alerts_ui_toggle](../../../setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle) option in the Grafana configuration file.
{{% /admonition %}}
### Interval behavior
- **Scrape interval** - Set to the typical scrape and evaluation interval configured in Prometheus. The default is `15s`.
- **Query timeout** - The default is `60s`.
### Query editor
- **Default editor** - Sets a default editor. Options are `Builder` or `Code`. For information on query editor types see [Prometheus query editor](../query-editor/).
- **Disable metrics lookup** - Toggle on to disable the metrics chooser and metric/label support in the query field's autocomplete. This helps if you have performance issues with large Prometheus instances.
### Performance
- **Prometheus type** - The type of your Prometheus server. There are four options: `Prometheus`, `Cortex`, `Mimir`, and `Thanos`.
- **Cache level** - The browser caching level for editor queries. There are four options: `Low`, `Medium`, `High`, or `None`.
- **Incremental querying (beta)** - Changes the default behavior of relative queries to always request fresh data from the Prometheus instance. Enable this option to decrease database and network load.
- **Disable recording rules (beta)** - Toggle on to disable the recording rules. Enable this option to improve dashboard performance.
### Other
- **Custom query parameters** - Add custom parameters to the Prometheus query URL. For example `timeout`, `partial_response`, `dedup`, or `max_source_resolution`. Multiple parameters should be concatenated together with an '&amp;'.
- **HTTP method** - Use either `POST` or `GET` HTTP method to query your data source. `POST` is the recommended and pre-selected method as it allows bigger queries. Change to `GET` if you have a Prometheus version older than 2.1 or if `POST` requests are restricted in your network.
### Exemplars
Support for exemplars is available only for the Prometheus data source. If this is your first time working with exemplars see [Introduction to exemplars](ref:exemplars). An exemplar is a specific trace representative of measurement taken in a given time interval.
- **Internal link** - Toggle on to enable an internal link. When enabled, reveals the data source selector. Select the backend tracing data store for your exemplar data.
- **URL** - _(Visible if you **disable** `Internal link`)_ Defines the external link's URL trace backend. You can interpolate the value from the field by using the [`${__value.raw}` macro](ref:configure-data-links-value-variables).
- **Data source** - _(Visible if you **enable** `Internal link`)_ The data source the exemplar will navigate to.
- **URL label** - Adds a custom display label to override the value of the `Label name` field.
- **Label name** - The name of the field in the `labels` object used to obtain the traceID property.
- **Remove exemplar link** - Click to remove existing links.
### Troubleshooting
Refer to the following troubleshooting information, as required.
#### Data doesn't appear in Explore metrics
<!-- vale Grafana.Spelling = NO -->
If metric data doesn't appear in Explore after you've successfully tested a connection to a Prometheus data source or sent
metrics to Grafana Cloud, ensure that you've selected the correct data source in the **Data source** drop-down menu. If
you've used remote_write to send metrics to Grafana Cloud, the data source name follows the convention
`grafanacloud-stackname-prom`.
<!-- vale Grafana.Spelling = YES -->
The following image shows the **Data source** field in Explore metrics.
![Image that shows Prometheus metrics in Explore](/media/docs/grafana/data-sources/prometheus/troubleshoot-connection-1.png)

@ -0,0 +1,300 @@
---
aliases:
- ../data-sources/prometheus/
- ../features/datasources/prometheus/
description: Guide for configuring Prometheus in Grafana
keywords:
- grafana
- prometheus
- guide
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Configure the Prometheus data source
title: Configure the Prometheus data source
weight: 200
refs:
intro-to-prometheus:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
exemplars:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
configure-data-links-value-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
alerting-alert-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/alert-rules/
add-a-data-source:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
prom-query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
default-manage-alerts-ui-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
provision-grafana:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
manage-alerts-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
private-data-source-connect:
- pattern: /docs/grafana/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
- pattern: /docs/grafana-cloud/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
configure-pdc:
- pattern: /docs/grafana/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
azure-active-directory:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
configure-grafana-configuration-file-location:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
---
# Configure the Prometheus data source
This document provides instructions for configuring the Prometheus data source and explains the available configuration options. Grafana includes built-in support for Prometheus, so you don't need to install a plugin. For general information on adding a data source to Grafana, refer to [Add a data source](ref:add-a-data-source).
## Before you begin
- You must have the `Organization administrator` role to add a data source. Administrators can also configure a data source via YAML files.
- Know which Prometheus-compatible database you are using.
- Familiarize yourself with your Prometheus security configuration and gather any necessary security certificates and client keys.
- Verify that data from Prometheus is being written to your Grafana instance.
## Configure the data source using the UI
{{< shared id="add-prom-data-source" >}}
To add the Prometheus data source, complete the following steps:
1. Click **Connections** in the left-side menu.
1. Under **Connections**, click **Add new connection**.
1. Enter `Prometheus` in the search bar.
1. Click **Prometheus data source**.
1. Click **Add new data source** in the upper right.
{{< /shared >}}
Grafana takes you to the **Settings** tab where you will set up your Prometheus configuration.
## Configuration options
Following is a list of configuration options for Prometheus.
- **Name** - The data source name. Sets the name you use to refer to the data source in panels and queries. Examples: prometheus-1, prom-metrics.
- **Default** - Toggle to select as the default name in dashboard panels. When you go to a dashboard panel this will be the default selected data source.
**Connection:**
- **Prometheus server URL** - The URL of your Prometheus server. {{< shared id="prom-data-source-url" >}}
If Prometheus is running locally, use `http://localhost:9090`. If it's hosted on a networked server, provide the server’s URL along with the port where Prometheus is running. Example: `http://prometheus.example.orgname:9090`.
{{< admonition type="note" >}}
When running Grafana and Prometheus in separate containers, localhost refers to each container’s own network namespace. This means that `localhost:9090` points to port 9090 inside the Grafana container, not on the host machine.
Use the IP address of the Prometheus container, or the hostname if you are using Docker Compose. Alternatively, you can use `http://host.docker.internal:9090` to reference the host machine.
{{< /admonition >}}
{{< /shared >}}
**Authentication:**
There are three authentication options for the Prometheus data source.
- **Basic authentication** - The most common authentication method.
- **User** - The username you use to connect to the data source.
- **Password** - The password you use to connect to the data source.
- **Forward OAuth identity** - Forward the OAuth access token (and also the OIDC ID token if available) of the user querying the data source.
- **No authentication** - Allows access to the data source without any authentication.
**TLS settings:**
{{< admonition type="note" >}}
Use TLS (Transport Layer Security) for an additional layer of security when working with Prometheus. For information on setting up TLS encryption with Prometheus refer to [Securing Prometheus API and UI Endpoints Using TLS Encryption](https://prometheus.io/docs/guides/tls-encryption/). You must add TLS settings to your Prometheus configuration file **prior** to setting these options in Grafana.
{{< /admonition >}}
- **Add self-signed certificate** - Check the box to authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file. Required for verifying self-signed TLS certificates.
- **CA certificate** - Add your certificate.
- **TLS client authentication** - Check the box to enable TLS client authentication.
- **Server name** - Add the server name, which is used to verify the hostname on the returned certificate.
  - **Client certificate** - The client certificate is generated from a Certificate Authority (CA) or is self-signed. Follow the instructions of the CA to download the certificate file.
- **Client key** - Add your client key, which can also be generated from a Certificate Authority (CA) or be self-signed. The client key encrypts data between the client and server.
- **Skip TLS verify** - Toggle on to bypass TLS certificate validation. Skipping TLS certificate validation is not recommended unless absolutely necessary or for testing purposes.
**HTTP headers:**
Pass along additional information and metadata about the request or response.
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Prometheus instance.
- **Value** - The value of the header.
**Advanced settings:**
Following are optional configuration settings you can configure for more control over your data source.
- **Advanced HTTP settings:**
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
- **Timeout** - The HTTP request timeout, must be in seconds.
**Alerting:**
- **Manage alerts via Alerting UI** - Toggled on by default. This enables [data source-managed rules in Grafana Alerting](ref:alerting-alert-rules) for this data source. For `Mimir`, it enables managing data source-managed rules and alerts. For `Prometheus`, it only supports viewing existing rules and alerts, which are displayed as data source-managed. Change this by setting the [`default_manage_alerts_ui_toggle`](ref:manage-alerts-toggle) option in the `grafana.ini` configuration file.
**Interval behavior:**
- **Scrape interval** - Sets the standard scrape and evaluation interval in Prometheus. The default is `15s`. This interval determines how often Prometheus scrapes targets. Set it to match the typical scrape and evaluation interval in your Prometheus configuration file. If you set a higher value than your Prometheus configuration, Grafana will evaluate data at this interval, resulting in fewer data points.
- **Query timeout** - Sets the Prometheus query timeout. The default is `60s`. Without a timeout, complex or inefficient queries can run indefinitely, consuming CPU and memory resources.
**Query editor:**
- **Default editor** - Sets the default query editor. Options are `Builder` or `Code`. `Builder` mode helps you build queries using a visual interface. `Code` mode is geared for the experienced Prometheus user with prior expertise in PromQL. For more details on editor types refer to [Prometheus query editor](ref:prom-query-editor). You can easily switch between editors in the query editor UI.
- **Disable metrics lookup** - Toggle on to disable the metrics chooser and metric and label support in the query field's autocomplete. This can improve performance for large Prometheus instances.
**Performance:**
- **Prometheus type** - Select the type of your Prometheus-compatible database, such as Prometheus, Cortex, Mimir, or Thanos. Changing this setting will save your current configuration. Different database types support different APIs. For example, some allow `regex` matching for label queries to improve performance, while others provide a metadata API. Setting this incorrectly may cause unexpected behavior when querying metrics and labels. Refer to your Prometheus documentation to ensure you select the correct type.
- **Cache level** - Sets the browser caching level for editor queries. There are four options: `Low`, `Medium`, `High`, or `None`. Higher cache settings are recommended for high cardinality data sources.
- **Incremental querying (beta)** - Toggle on to enable incremental querying. Enabling this feature changes the default behavior of relative queries. Instead of always requesting fresh data from the Prometheus instance, Grafana will cache query results and only fetch new records. This helps reduce database and network load.
- **Query overlap window** - If you are using incremental querying, specify a duration (e.g., 10m, 120s, or 0s). The default is `10m`. This is a buffer of time added to incremental queries and this value is added to the duration of each incremental request.
- **Disable recording rules (beta)** - Toggle on to disable the recording rules. When recording rules are disabled, Grafana won't fetch and parse recording rules from Prometheus, improving dashboard performance by reducing processing overhead.
**Other settings:**
- **Custom query parameters** - Add custom parameters to the Prometheus query URL, which allow for more control over how queries are executed. Examples: `timeout`, `partial_response`, `dedup`, or `max_source_resolution`. Multiple parameters should be joined using `&`.
- **HTTP method** - Select either the `POST` or `GET` HTTP method to query your data source. `POST` is recommended and selected by default, as it supports larger queries. Select `GET` if you're using Prometheus version 2.1 or older, or if your network restricts `POST` requests.
- **Use series endpoint** - Toggle on to make Grafana use the series endpoint (`/api/v1/series`) with the `match[]` parameter instead of the label values endpoint (`/api/v1/label/<label_name>/values`). While the label values endpoint is generally more performant, some users may prefer the series endpoint because it supports the `POST` method, whereas the label values endpoint only allows `GET` requests.
**Exemplars:**
Support for exemplars is available only for the Prometheus data source. For more information on exemplars refer to [Introduction to exemplars](ref:exemplars). An exemplar is a trace that represents a specific measurement taken within a given time interval.
Click the **+ sign** to add exemplars.
- **Internal link** - Toggle on to enable an internal link. This will display the data source selector, where you can choose the backend tracing data store for your exemplar data.
- **URL** - _(Visible if you **disable** `Internal link`)_ Defines the external link's URL trace backend. You can interpolate the value from the field by using the [`${__value.raw}` macro](ref:configure-data-links-value-variables).
- **Data source** - _(Visible when `Internal link` is enabled.)_ Select the data source that the exemplar will link to from the drop-down.
- **URL label** - Adds a custom display label to override the value of the `Label name` field.
- **Label name** - The name of the field in the `labels` object used to obtain the traceID property.
- **Remove exemplar link** - Click the **X** to remove existing links.
You can add multiple exemplars.
- **Private data source connect** - _Only for Grafana Cloud users._ Private data source connect, or PDC, allows you to establish a private, secured connection between a Grafana Cloud instance, or stack, and data sources secured within a private network. Click the drop-down to locate the URL for PDC. For more information regarding Grafana PDC refer to [Private data source connect (PDC)](ref:private-data-source-connect) and [Configure Grafana private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc) for steps on setting up a PDC connection.
Click **Manage private data source connect** to be taken to your PDC connection page, where you’ll find your PDC configuration details.
After you have configured your Prometheus data source options, click **Save & test** at the bottom to test out your data source connection.
You should see a confirmation dialog box that says:
**Successfully queried the Prometheus API.**
**Next, you can start to visualize data by building a dashboard, or by querying data in the Explore view.**
You can also remove a connection by clicking **Delete**.
## Provision the Prometheus data source
You can define and configure the data source in YAML files as part of the Grafana provisioning system. For more information about provisioning, and for available configuration options, refer to [Provision Grafana](ref:provision-grafana).
{{< admonition type="note" >}}
After you have provisioned a data source you cannot edit it.
{{< /admonition >}}
**Example of a Prometheus data source configuration:**
```yaml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://localhost:9090
jsonData:
httpMethod: POST
manageAlerts: true
prometheusType: Prometheus
prometheusVersion: 3.3.0
cacheLevel: 'High'
disableRecordingRules: false
incrementalQueryOverlapWindow: 10m
exemplarTraceIdDestinations:
# Field with internal link pointing to data source in Grafana.
# datasourceUid value can be anything, but it should be unique across all defined data source uids.
- datasourceUid: my_jaeger_uid
name: traceID
# Field with external link.
- name: traceID
url: 'http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Jaeger%22,%7B%22query%22:%22$${__value.raw}%22%7D%5D'
```
## Azure authentication settings
The Prometheus data source works with Azure authentication. To configure Azure authentication refer to [Configure Azure Active Directory (AD) authentication](ref:azure-active-directory).
In Grafana Enterprise, you need to update the .ini configuration file. Refer to [Configuration file location](ref:configure-grafana-configuration-file-location) to locate your .ini file.
Add the following setting in the **[auth]** section of the .ini configuration file:
```bash
[auth]
azure_auth_enabled = true
```
{{% admonition type="note" %}}
If you are using Azure authentication, don't enable `Forward OAuth identity`. Both methods use the same HTTP authorization headers, and the OAuth token will override your Azure credentials.
{{% /admonition %}}
## Recording rules (beta)
You can configure the Prometheus data source to disable recording rules in the data source configuration or provisioning file under `disableRecordingRules` in jsonData.
## Troubleshooting configuration issues
Refer to the following troubleshooting information as needed.
**Data doesn't appear in Metrics Drilldown:**
If you have successfully tested the connection to a Prometheus data source or are sending metrics to Grafana Cloud and there is no metric data appearing in Explore, make sure you've selected the correct data source from the data source drop-down menu. When using `remote_write` to send metrics to Grafana Cloud, the data source name follows the convention `grafanacloud-stackname-prom`.

@ -0,0 +1,238 @@
---
aliases:
- ../../data-sources/prometheus/query-editor/
description: Guide for using the Prometheus data source's query editor
keywords:
- grafana
- prometheus
- logs
- queries
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Prometheus query editor
title: Prometheus query editor
weight: 300
refs:
query-transform-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
table:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/table/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/table/
exemplars:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
heatmap:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/heatmap/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/heatmap/
time-series-transform:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/time-series/#transform
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/time-series/#transform
explore:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
---
# Prometheus query editor
Grafana provides a query editor for the Prometheus data source to create queries in PromQL. For more information about PromQL, see [Querying Prometheus](http://prometheus.io/docs/querying/basics/). The Prometheus query editor is located on the [Explore page](ref:explore). You can also access the Prometheus query editor from a dashboard panel. Click the ellipsis in the upper right of the panel and select **Edit**.
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data). For options and functions common to all query editors, refer to [Query editors](ref:query-transform-data).
The Prometheus query editor has two modes:
- [Builder mode](#builder-mode)
- [Code mode](#code-mode)
![Query editor mode](/media/docs/prometheus/builder-code-v11-mode.png)
Grafana synchronizes both modes, allowing you to switch between them. Grafana also displays a warning message if it detects an issue with the query while switching modes.
You can configure Prometheus-specific options in the query editor by setting several options regardless of mode.
{{< figure src="/static/img/docs/prometheus/options.png" max-width="500px" class="docs-image--no-shadow" caption="Options" >}}
## Builder mode
**Builder mode** helps you build queries using a visual interface. This option is best for users who have limited experience working with Prometheus and PromQL.
The following video demonstrates how to use the visual Prometheus query builder:
{{< vimeo 720004179 >}}
Builder mode contains the following components:
- **Kick start your query** - Click to view a list of predefined operation patterns that help you quickly build queries with multiple operations. These include:
- Rate query starters
- Histogram query starters
- Binary query starters
Click the arrow next to each to see the available options to add to your query.
- **Explain** - Toggle on to display a step-by-step explanation of all query components and operations.
{{< figure src="/static/img/docs/prometheus/explain-results.png" max-width="500px" class="docs-image--no-shadow" caption="Explain results" >}}
- **Builder/Code** - Click the corresponding **Builder** or **Code** tab on the toolbar to select an editor mode.
If you select Builder mode you will see the following options:
- **Metric** - Select a metric from the drop-down. Click the icon to open Metrics explorer, where you can search for metrics by name and filter by type if your instance has a large number of metrics. Refer to [Metrics explorer](#metrics-explorer) for more detail on using this feature.
- **Label filters** - Select label filters from the drop-down. Select an operator and a value.
Select desired labels and their values from the drop-down list.
When a metric is selected, the data source requests available labels and their values from the server.
Use the `+` button to add a label, and the `x` button to remove a label.
Click **+ Operations** to select from a list of operations including Aggregations, Range functions, Functions, Binary operations, Trigonometric and Time functions. You can select multiple operations. Refer to [Operations](#operations) for more detail.
**Options:**
- **Legend** - Lets you customize the name for the time series. You can use a predefined or custom format.
- **Auto** - Displays unique labels. Also displays all overlapping labels if a series has multiple labels.
- **Verbose** - Displays all label names.
- **Custom** - Lets you customize the legend using label templates. For example, `{{hostname}}` is replaced with the value of the `hostname` label. To switch to a different legend mode, clear the input and click outside the field.
- **Min step** - Sets the minimum interval between data points returned by the query. For example, setting this to `1h` suggests that data is collected or displayed at hourly intervals. This setting supports the `$__interval` and `$__rate_interval` macros. Note that the time range of the query is aligned to this step size, which may adjust the actual start and end times of the returned data.
- **Format** - Determines how the data from your Prometheus query is interpreted and visualized in a panel. Choose from the following format options:
- **Time series** - The default format. Refer to [Time series kind formats](https://grafana.com/developers/dataplane/timeseries/) for information on time series data frames and how time and value fields are structured.
- **Table** - Displays data in table format. This format works only in a [Table panel](ref:table).
- **Heatmap** - Displays Histogram-type metrics in a [Heatmap panel](ref:heatmap) by converting cumulative histograms to regular ones and sorting the series by the bucket bound. Converts cumulative histogram data into regular histogram format and sorts the series by bucket boundaries for proper display.
- **Type** - This setting determines the query type. These include:
- **Both** - The default option. Returns results for both a **Range** query and an **Instant** query.
- **Range** - Returns a range vector — a set of time series where each series includes multiple data points over a period of time. You can choose to visualize the data as lines, bars, points, stacked lines, or stacked bars.
- **Instant** - Returns a single data point per series — the most recent value within the selected time range. The results can be displayed in a table or as raw data. To visualize instant query results in a time series panel, start by adding field override, then add a property to the override called `Transform`, and set the Transform value to `Constant` in the drop-down. For more information, refer to the [Time Series Transform option documentation](ref:time-series-transform).
{{% admonition type="note" %}}
Grafana adjusts the query time range to align with the dynamically calculated step interval. This alignment ensures consistent metric visualization and supports Prometheus's result caching requirements. However, this alignment can cause minor visual differences, such as a slight gap at the graph's right edge or a shifted start time. For example, a `15s` step aligns timestamps to Unix times divisible by 15 seconds. A `1w` `minstep` aligns the range to the start of the week, which for Prometheus is Thursday at 00:00 UTC.
{{% /admonition %}}
- **Exemplars** - Toggle on to run a query that includes exemplars in the graph. Exemplars are unique to Prometheus. For more information see [Introduction to exemplars](ref:exemplars).
{{% admonition type="note" %}}
There is no option to add exemplars with an **Instant** query type.
{{% /admonition %}}
### Filter metrics
{{< figure src="/static/img/docs/prometheus/metrics-and-labels.png" max-width="500px" class="docs-image--no-shadow" caption="Metric and label filters" >}}
When you are ready to create a query, you can choose the specific metric name from the drop-down list under **Metric**.
The data source provides the list of available metrics based on the selected time range.
You can also enter text into the selector when the drop-down is open to search and filter the list.
#### Metrics explorer in Builder mode
{{< figure src="/static/img/docs/prometheus/screenshot-grafana-prometheus-metrics-explorer-2.png" max-width="500px" class="docs-image--no-shadow" caption="Metrics explorer" >}}
Click the **Open book icon** to open the Metrics explorer, where you can search for and filter all the metrics in your instance.
If you would like to explore your metrics in the query builder further, you can open the **Metrics explorer** by clicking the first option in the metric select component of the query builder.
The Metrics explorer displays all metrics in a paginated table list. The list shows the total number of metrics, as well as the name, type, and description for each metric. You can enter text into the search input to filter results.
You can also filter by type.
The following options are included under the **Additional Settings** drop-down:
- **Include description in search**: Toggle on to search by both name and description.
- **Include results with no metadata**: Toggle on to include metrics that lack type or description metadata.
- **Disable text wrap**: Toggle on to disable text wrapping.
- **Enable regex search**: Toggle on to filter metric names by regex search, which uses an additional call on the Prometheus API.
{{% admonition type="note" %}}
The Metrics explorer (Builder mode) and [Metrics browser (Code mode)](#metrics-browser-in-code-mode) are separate elements. The Metrics explorer does not have the ability to browse labels yet, but the Metrics browser can display all labels on a metric name.
{{% /admonition %}}
### Operations
{{< figure src="/static/img/docs/prometheus/operations.png" max-width="500px" class="docs-image--no-shadow" caption="Operations" >}}
Select the **+ Operations** button to add operations to your query.
The query editor groups operations into the following sections:
- Aggregations - for additional information see [Aggregation operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators).
- Range functions - for additional information see [Functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#functions).
- Functions - for additional information see [Functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#functions).
- Binary operations - for additional information see [Binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
- Trigonometric - for additional information see [Trigonometric functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#trigonometric-functions).
- Time functions - for additional information see [Functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#functions).
All operations have function parameters under the operation header. Click the `operator` to see a full list of supported functions. Some operations allow you to apply specific labels to functions.
{{< figure src="/static/img/docs/prometheus/use-function-by-label-9-5.png" max-width="500px" class="docs-image--no-shadow" caption="Functions and labels" >}}
Some operations are only valid when used in a specific order. If you add an operation in a way that would create an invalid or illogical query, the query editor automatically places it in the correct position to maintain a valid query structure.
### Hints
The query editor can detect which operations are most appropriate for certain selected metrics.
When it does, it displays a hint next to the **+ Operations** button.
To add the operation to your query, click the **Hint**.
{{< figure src="/static/img/docs/prometheus/hint-example.png" max-width="500px" class="docs-image--no-shadow" caption="Hint" >}}
When you are satisfied with your query, click **Run query**.
## Code mode
**Code mode** is for the experienced Prometheus user with prior expertise in PromQL, Prometheus' query language. The Code mode editor allows you to create queries just as you would in Prometheus. For more information about PromQL see [Querying Prometheus](http://prometheus.io/docs/querying/basics/).
{{< figure src="/static/img/docs/prometheus/code-mode.png" max-width="500px" class="docs-image--no-shadow" caption="Code mode" >}}
The user interface (UI) also lets you select metrics, labels, filters, and operations.
You can write complex queries using the text editor with autocompletion features and syntax highlighting. Code mode's autocomplete feature works automatically while typing. The query editor can autocomplete static functions, aggregations, keywords, and also dynamic items like metrics and labels. The autocompletion drop-down includes documentation for the suggested items where available.
It also contains a [Metrics browser](#metrics-browser-in-code-mode) to further help you write queries. To open the Metrics browser, click the arrow next to **Metrics browser**.
### Metrics browser in Code mode
The Metrics browser locates metrics and selects relevant labels to help you build basic queries.
When you click **Metrics browser** in `Code` mode, it displays all available metrics and labels.
If supported by your Prometheus instance, each metric also displays its `HELP` and `TYPE` as a tooltip.
{{< figure alt="Prometheus query editor metrics browser" src="/media/docs/prometheus/Metrics-browser-V10-prom-query-editor.png" caption="Metrics browser" >}}
When you select a metric under **Step 1**, the browser narrows down the available labels to show only the ones applicable to the metric.
You can then select one or more labels shown in **Step 2**.
Select one or more values in **Step 3** for each label to tighten your query scope.
In **Step 4**, you can select **Use query** to run the query, **Use as rate query** to add the rate operation to your query (`$__rate_interval`), **Validate selector** to verify the selector is valid and show the number of series found, or **Clear** to clear your selections and start over.
{{% admonition type="note" %}}
If you don't remember the exact metric name, you can start by selecting a few labels to filter the list. This helps you find relevant label values and narrow down your options.
{{% /admonition %}}
All lists in the Metrics browser include a search field to quickly filter metrics or labels by keyword.
In the **Values** section, there's a single search field that filters across all selected labels, making it easier to find matching values. For example, if you have labels like `app`, `job`, and `job_name`, only one of them might contain the value you're looking for.
When you are satisfied with your query, click **Run query**.
## Incremental dashboard queries (beta)
Starting with Grafana v10, the Prometheus data source supports incremental querying for live dashboards. Instead of re-querying the entire time range on each refresh, Grafana can fetch only new data since the last query.
You can enable or disable this feature in the data source configuration or provisioning file using the `incrementalQuerying` field in `jsonData`.
You can also control the overlap between consecutive incremental queries using the `incrementalQueryOverlapWindow` field in `jsonData`. By default, this is set to `10m` (10 minutes). Increasing the `incrementalQueryOverlapWindow` value increases the time range covered by each incremental query. This can help in environments where the most recent data may be delayed or incomplete.

@ -1,255 +0,0 @@
---
aliases:
- ../../data-sources/prometheus/query-editor/
description: Guide for using the Prometheus data source's query editor
keywords:
- grafana
- prometheus
- logs
- queries
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Query editor
title: Prometheus query editor
weight: 300
refs:
query-transform-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
table:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/table/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/table/
exemplars:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
heatmap:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/heatmap/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/heatmap/
time-series-transform:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/time-series/#transform
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/time-series/#transform
---
# Prometheus query editor
Grafana provides a query editor for the Prometheus data source to create queries in PromQL. For more information about PromQL, see [Querying Prometheus](http://prometheus.io/docs/querying/basics/).
For general documentation on querying data sources in Grafana, see [Query and transform data](ref:query-transform-data).
For options and functions common to all query editors, see [Query editors](ref:query-transform-data).
## Choose a query editing mode
The Prometheus query editor has two modes:
- [Builder mode](#builder-mode)
- [Code mode](#code-mode)
Each mode is explained in greater detail below.
{{< figure src="/static/img/docs/prometheus/editing-mode.png" max-width="500px" class="docs-image--no-shadow" caption="Query editor mode" >}}
Both modes are synchronized, so you can switch between them. However, if there is an issue with the query while switching modes, a warning message will appear.
## Toolbar elements
The query editor toolbar contains the following elements:
- **Kick start your query** - Click to see a list of operation patterns that help you quickly get started adding multiple operations to your query. These include:
- Rate query starters
- Histogram query starters
- Binary query starters
Click the arrow next to each to see available options to add to your query.
- **Explain** - Toggle to display a step-by-step explanation of all query components and operations.
{{< figure src="/static/img/docs/prometheus/explain-results.png" max-width="500px" class="docs-image--no-shadow" caption="Explain results" >}}
- **Builder/Code** - Click the corresponding **Builder** or **Code** tab on the toolbar to select an editor mode.
## Configure common options
You can configure Prometheus-specific options in the query editor by setting several options regardless of mode.
{{< figure src="/static/img/docs/prometheus/options.png" max-width="500px" class="docs-image--no-shadow" caption="Options" >}}
### Legend
The **Legend** setting defines the time series's name. You can use a predefined or custom format.
- **Auto** - Displays unique labels. Also displays all overlapping labels if a series has multiple labels.
- **Verbose** - Displays all label names.
- **Custom** - Uses templating to select which labels will be included. For example, `{{hostname}}` is replaced by the label value for the label `hostname`. Clear the input and click outside of it to select another mode.
### Min step
The **Min step** setting defines the lower bounds on the interval between data points.
For example, set this to `1h` to hint that measurements are taken hourly.
This setting supports the `$__interval` and `$__rate_interval` macros. Be aware that the query range dates are aligned to the step and this can change the start and end of the range.
### Format
Switch between the following format options:
- **Time series** - The default time series format. See [Time series kind formats](https://grafana.com/developers/dataplane/timeseries/) for information on time series data frames and how time and value fields are structured.
- **Table** - This works only in a [Table panel](ref:table).
- **Heatmap** - Displays metrics of the Histogram type on a [Heatmap panel](ref:heatmap) by converting cumulative histograms to regular ones and sorting the series by the bucket bound.
### Type
The **Type** setting sets the query type. These include:
- **Both** - The default option. Returns results for both a **Range** query and an **Instant** query.
- **Range** - Returns a range vector consisting of a set of time series data containing a range of data points over time for each time series. You can choose lines, bars, points, stacked lines, or stacked bars.
- **Instant** - Returns one data point per query and only the most recent point in the time range provided. The results can be shown in table format or as raw data. To depict instant query results in the time series panel, first add a field override, next add a property to the override named `Transform`, and finally select `Constant` from the **Transform** dropdown.
For more information, refer to the [Time Series Transform option documentation](ref:time-series-transform).
{{% admonition type="note" %}}
Grafana modifies the request dates for queries to align them with the dynamically calculated step.
This ensures a consistent display of metrics data and Prometheus requires this for caching results. But, aligning the range with the step can result in a small gap of data at the right edge of a graph or change the start date of the range. For example, a 15s step aligns the range to Unix time divisible by 15s and a 1w minstep aligns the range to the start of the week on a Thursday.
{{% /admonition %}}
### Exemplars
Toggle **Exemplars** to run a query that includes exemplars in the graph. Exemplars are unique to Prometheus. For more information see [Introduction to exemplars](ref:exemplars).
{{% admonition type="note" %}}
There is no option to add exemplars with an **Instant** query type.
{{% /admonition %}}
### Inspector
Click **Inspector** to get detailed statistics regarding your query. Inspector functions as a kind of debugging tool that "inspects" your query. It provides query statistics under **Stats**, request response time under **Query**, data frame details under **{} JSON**, and the shape of your data under **Data**.
{{< figure src="/static/img/docs/prometheus/insepctor-9-5.png" max-width="500px" class="docs-image--no-shadow" caption="Inspector" >}}
## Builder mode
**Builder mode** helps you build queries using a visual interface. This option is best for users who have limited or no previous experience working with Prometheus and PromQL.
This video demonstrates how to use the visual Prometheus query builder:
{{< vimeo 720004179 >}}
</br>
### Metrics
{{< figure src="/static/img/docs/prometheus/metrics-and-labels.png" max-width="500px" class="docs-image--no-shadow" caption="Metric and label filters" >}}
When you are ready to create a query, you can choose the specific metric name from the dropdown list under **Metric**.
The data source requests the list of available metrics from the Prometheus server based on the selected time range.
You can also enter text into the selector when the dropdown is open to search and filter the list.
#### Metrics explorer
{{< figure src="/static/img/docs/prometheus/screenshot-grafana-prometheus-metrics-explorer-2.png" max-width="500px" class="docs-image--no-shadow" caption="Metrics explorer" >}}
If you would like to explore your metrics in the query builder further, you can open the **Metrics Explorer** by clicking the first option in the metric select component of the query builder.
The metrics explorer is different than the metrics browser. The metrics explorer is only found in the query builder section. The metrics browser is only found in the code editor. The metrics explorer does not have the ability to browse labels yet, but the metrics browser can display all labels on a metric name.
The metrics explorer displays all metrics in a paginated table list. The list shows the total number of metrics, as well as the name, type and description for each metric. You can enter text into the search input to filter results.
You can also filter by type.
There are also additional settings for the following items:
- Include description in search. Search by name **and** description
- Include results with no metadata. Many Prometheus metrics have no metadata. This allows users to include metrics with undefined type and description.
- Disable text wrap.
- Enable regex search. This uses the Prometheus API to enable regex search for the metric name.
### Label filters
Select desired labels and their values from the dropdown list.
When a metric is selected, the data source requests available labels and their values from the server.
Use the `+` button to add a label, and the `x` button to remove a label.
### Operations
{{< figure src="/static/img/docs/prometheus/operations.png" max-width="500px" class="docs-image--no-shadow" caption="Operations" >}}
Select the **+ Operations** button to add operations to your query.
The query editor groups operations into the following sections:
- Aggregations - for additional information see [Aggregation operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators).
- Range functions - for additional information see [Functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#functions).
- Functions - for additional information see [Functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#functions).
- Binary operations - for additional information see [Binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
- Trigonometric - for additional information see [Trigonometric functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#trigonometric-functions).
- Time functions - for additional information see [Functions](https://prometheus.io/docs/prometheus/latest/querying/functions/#functions).
All operations have function parameters under the operation header. Click the `operator` to see a full list of supported functions. Some operations allow you to apply specific labels to functions.
{{< figure src="/static/img/docs/prometheus/use-function-by-label-9-5.png" max-width="500px" class="docs-image--no-shadow" caption="Functions and labels" >}}
Some operations make sense only when used in a specific order.
If adding an operation would result in a nonsensical query, the query editor adds the operation to the correct place.
#### Hints
{{< figure src="/static/img/docs/prometheus/hint-example.png" max-width="500px" class="docs-image--no-shadow" caption="Hint" >}}
The query editor can detect which operations are most appropriate for some selected metrics.
If it does, it displays a hint next to the **+ Operations** button.
To add the operation to your query, click the **Hint**.
Once you are satisfied with your query, click **Run query**.
## Code mode
**Code mode** is for the experienced Prometheus user with prior expertise in PromQL, Prometheus' query language. The Code mode editor allows you to create queries just as you would in Prometheus. For more information about PromQL see [Querying Prometheus](http://prometheus.io/docs/querying/basics/).
{{< figure src="/static/img/docs/prometheus/code-mode.png" max-width="500px" class="docs-image--no-shadow" caption="Code mode" >}}
The user interface (UI) also lets you select metrics, labels, filters and operations.
You can write complex queries using the text editor with autocompletion features and syntax highlighting.
It also contains a [Metrics browser](#metrics-browser) to further help you write queries.
### Use autocomplete
Code mode's autocomplete feature works automatically while typing. The query editor can autocomplete static functions, aggregations, keywords, and also dynamic items like metrics and labels.
The autocompletion dropdown includes documentation for the suggested items where available.
### Metrics browser
The metrics browser locates metrics and selects relevant labels to help you build basic queries.
When you click **Metrics browser** in `Code` mode, it displays all available metrics and labels.
If supported by your Prometheus instance, each metric also displays its `HELP` and `TYPE` as a tooltip.
{{< figure src="/static/img/docs/prometheus/metric-browser.png" max-width="500px" class="docs-image--no-shadow" caption="Metrics browser" >}}
When you select a metric under Step 1, the browser narrows down the available labels to show only the ones applicable to the metric.
You can then select one or more labels shown in Step 2.
Select one or more values in Step 3 for each label to tighten your query scope.
In Step 4, you can select **Use query** to run the query, **Use as rate query** to add the rate operation to your query (`$__rate_interval`), **Validate selector** to verify the selector is valid and show the number of series found, or **Clear** to clear your selections and start over.
{{% admonition type="note" %}}
If you do not remember a metric name, you can also select a few labels to narrow down the list, then find relevant label values.
{{% /admonition %}}
All lists in the metrics browser have a search field above them to quickly filter for metrics or labels that match a certain string.
The values section has only one search field, and its filtering applies to all labels to help you find values across labels once selected.
For example, among your labels `app`, `job`, `job_name` only one might have the value you are looking for.
Once you are satisfied with your query, click **Run query**.

@ -13,7 +13,7 @@ labels:
- cloud
- enterprise
- oss
menuTitle: Template variables
menuTitle: Prometheus template variables
title: Prometheus template variables
weight: 400
refs:
@ -42,13 +42,13 @@ refs:
# Prometheus template variables
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables. Grafana refers to such variables as **template** variables.
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the displayed data.
For an introduction to templating and template variables, see [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
For an introduction to templating and template variables, refer to [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
## Use query variables
You have the option to use several different variable types, but variables of the type `Query` will query Prometheus for a list of metrics, labels, label values, a query result or a series.
Grafana supports several types of variables, but Query variables are specifically used to query Prometheus. They can return a list of metrics, labels, label values, query results, or series.
Select a Prometheus data source query type and enter the required inputs:
@ -61,18 +61,18 @@ Select a Prometheus data source query type and enter the required inputs:
| `Series query` | `metric`, `label` or both | Returns a list of time series associated with the entered data. | /api/v1/series |
| `Classic query` | classic query string | Deprecated, classic version of variable query editor. Enter a string with the query type using a syntax like the following: `label_values(<metric>, <label>)` | all |
For details on _metric names_, _label names_, and _label values_, refer to the [Prometheus documentation](http://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
For details on `metric names`, `label names`, and `label values`, refer to the [Prometheus documentation](http://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
### Query options
Under the query variable type, you can set the following query options:
With the query variable type, you can set the following query options:
| Option | Description |
| --------------------- | ------------------------------------------------------------------------------------------------------- |
| **Data source** | Select your data source from the dropdown list. |
| **Data source** | Select your data source from the drop-down list. |
| **Select query type** | Options are `default`, `value` and `metric name`. Each query type hits a different Prometheus endpoint. |
| **Regex** | Optional, if you want to extract part of a series name or metric node segment. |
| **Sort** | Default is `disabled`. Options include `alphabetical`, `numerical` and `alphabetical case-sensitive`. |
| **Sort** | Default is `disabled`. Options include `alphabetical`, `numerical`, and `alphabetical case-sensitive`. |
| **Refresh** | When to update the values for the variable. Options are `On dashboard load` and `On time range change`. |
### Selection options
@ -85,34 +85,41 @@ The following selection options are available:
### Use interval and range variables
You can use some global built-in variables in query variables, for example, `$__interval`, `$__interval_ms`, `$__range`, `$__range_s` and `$__range_ms`.
For details, see [Global built-in variables](ref:add-template-variables-global-variables).
You can use global built-in variables in query variables, including the following:
- `$__interval`
- `$__interval_ms`
- `$__range`
- `$__range_s`
- `$__range_ms`
For details, refer to [Global built-in variables](ref:add-template-variables-global-variables).
The `label_values` function doesn't support queries, so you can use these variables in conjunction with the `query_result` function to filter variable queries.
Make sure to set the variable's `refresh` trigger to be `On Time Range Change` to get the correct instances when changing the time range on the dashboard.
Configure the variable’s `refresh` setting to `On Time Range Change` to ensure it dynamically queries and displays the correct instances when the dashboard time range is modified.
**Example:**
Populate a variable with the busiest 5 request instances based on average QPS over the time range shown in the dashboard:
Populate a variable with the top 5 busiest request instances ranked by average QPS over the dashboard's selected time range:
```
Query: query_result(topk(5, sum(rate(http_requests_total[$__range])) by (instance)))
query_result(topk(5, sum(rate(http_requests_total[$__range])) by (instance)))
Regex: /"([^"]+)"/
```
Populate a variable with the instances having a certain state over the time range shown in the dashboard, using `$__range_s`:
```
Query: query_result(max_over_time(<metric>[${__range_s}s]) != <state>)
query_result(max_over_time(<metric>[${__range_s}s]) != <state>)
Regex:
```
## Use `$__rate_interval`
We recommend using `$__rate_interval` in the `rate` and `increase` functions instead of `$__interval` or a fixed interval value.
Because `$__rate_interval` is always at least four times the value of the Scrape interval, it avoids problems specific to Prometheus.
Grafana recommends using `$__rate_interval` with the `rate` and `increase` functions instead of `$__interval` or a fixed interval value.
Since `$__rate_interval` is always at least four times the scrape interval, it helps avoid issues specific to Prometheus, such as gaps or inaccuracies in query results.
For example, instead of using:
For example, instead of using the following:
```
rate(http_requests_total[5m])
@ -124,20 +131,28 @@ or:
rate(http_requests_total[$__interval])
```
We recommend that you use:
Use the following:
```
rate(http_requests_total[$__rate_interval])
```
The value of `$__rate_interval` is defined as
<!-- The value of `$__rate_interval` is defined as
*max(`$__interval` + *Scrape interval*, 4 \* *Scrape interval*)*,
where _Scrape interval_ is the "Min step" setting (also known as `query_interval`, a setting per PromQL query) if any is set.
Otherwise, Grafana uses the Prometheus data source's "Scrape interval" setting.
Otherwise, Grafana uses the Prometheus data source's `scrape interval` setting. -->
The value of `$__rate_interval` is calculated as:
```
max($__interval + scrape_interval, 4 * scrape_interval)
```
Here, `scrape_interval` refers to the `min step` setting (also known as `query_interval`) specified per PromQL query, if set. If not, Grafana falls back to the Prometheus data source’s scrape interval setting.
The "Min interval" setting in the panel is modified by the resolution setting, and therefore doesn't have any effect on _Scrape interval_.
The `min interval` setting in the panel is modified by the resolution setting, and therefore doesn't have any effect on `scrape interval`.
For details, refer to the [Grafana blog](/blog/2020/09/28/new-in-grafana-7.2-__rate_interval-for-prometheus-rate-queries-that-just-work/).
For details, refer to the Grafana blog [$\_\_rate_interval for Prometheus rate queries that just work](https://grafana.com/blog/2020/09/28/new-in-grafana-7.2-__rate_interval-for-prometheus-rate-queries-that-just-work/).
## Choose a variable syntax
@ -146,9 +161,8 @@ The Prometheus data source supports two variable syntaxes for use in the **Query
- `$<varname>`, for example `rate(http_requests_total{job=~"$job"}[$__rate_interval])`, which is easier to read and write but does not allow you to use a variable in the middle of a word.
- `[[varname]]`, for example `rate(http_requests_total{job=~"[[job]]"}[$__rate_interval])`
If you've enabled the _Multi-value_ or _Include all value_ options, Grafana converts the labels from plain text to a regex-compatible string, which requires you to use `=~` instead of `=`.
If you've enabled the `Multi-value` or `Include all value` options, Grafana converts the labels from plain text to a regex-compatible string, which requires you to use `=~` instead of `=`.
## Use the ad hoc filters variable type
Prometheus supports the special [ad hoc filters](ref:add-template-variables-add-ad-hoc-filters) variable type, which you can use to specify any number of label/value filters on the fly.
These filters are automatically applied to all your Prometheus queries.
Prometheus supports the special [ad hoc filters](ref:add-template-variables-add-ad-hoc-filters) variable type, which allows you to dynamically apply label/value filters across your dashboards. These filters are automatically added to all Prometheus queries, allowing dynamic filtering without modifying individual queries.

@ -1,73 +0,0 @@
---
labels:
products:
- cloud
- enterprise
- oss
title: Metrics Drilldown
aliases:
- ../explore-metrics/ # /docs/grafana/latest/explore/explore-metrics/
canonical: https://grafana.com/docs/grafana/latest/explore/simplified-exploration/metrics/
description: Grafana Metrics Drilldown lets you browse Prometheus-compatible metrics using an intuitive, queryless experience.
weight: 200
---
# Grafana Metrics Drilldown
Grafana Metrics Drilldown is a query-less experience for browsing **Prometheus-compatible** metrics. Quickly find related metrics with just a few simple clicks, without needing to write PromQL queries to retrieve metrics.
{{< docs/shared source="grafana" lookup="plugins/rename-note.md" version="<GRAFANA_VERSION>" >}}
With Metrics Drilldown, you can:
- Easily segment metrics based on their labels, so you can immediately spot anomalies and identify issues.
- Automatically display the optimal visualization for each metric type (gauge vs. counter, for example) without manual setup.
- Uncover related metrics relevant to the one you're viewing.
- “Explore in a drawer” - overlay additional content on your dashboard without losing your current view.
- View a history of user steps when navigating through metrics and their filters.
- Seamlessly pivot to related telemetry, including log data.
{{< docs/play title="Metrics Drilldown" url="https://play.grafana.org/explore/metrics/trail?from=now-1h&to=now&var-ds=grafanacloud-demoinfra-prom&var-filters=&refresh=&metricPrefix=all" >}}
You can access Metrics Drilldown either as a standalone experience or as part of Grafana dashboards.
## Standalone experience
To access Metrics Drilldown as a standalone experience:
1. Click the arrow next to **Drilldown** in the Grafana left-side menu and click **Metrics**. You are taken to an overview page that shows recent metrics, bookmarks, and the option to select a new metric exploration.
1. To get started with a new exploration, click **Let's start!**.
1. Select **Prometheus** or any Prometheus-compatible data source available in the drop-down menu under **Data source**.
1. Click **+ Add label** to select a label-value pair from the drop-down menu. You can add multiple label-value pairs. A label type appears above the selected label with a drop-down list of options from which to choose. For example, if you select the label `container` a drop-down list of available containers appears.
1. You can also search for metrics using keywords under **Search metrics** in the search bar.
1. Use the time picker to select a date and time range from the drop-down menu or use an absolute time range.
1. Click the down arrow next to the **Refresh** icon to set a refresh rate from the drop-down menu. The default is `Off`.
The **History** button in the upper left corner tracks every step as you navigate through metric exploration.
![show metrics explore overview](/media/metrics-explore/metrics-drilldown-overview.png)
### Metrics exploration
To further explore a metric, click **Select** in the upper right corner of the metric visualization.
![show select box](/media/metrics-explore/select-metric.png)
- The **Overview** tab provides a description for each metric, as well as the metric `type` and `unit` associated with the metric. It also provides a list of labels associated with the metric. Click on any label to view drill-down visualizations.
- The **Breakdown** tab depicts time series visualizations for each of the label-value pairs for the selected metric. You can further drill down on each label and click **Add to filter** to add the label/value pair into your filters. You can also change the **View** from grid to rows.
- The **Related metrics** tab depicts related metrics with relevant key words. You can repeat the drill down process for any related metric. Toggle **Show previews** to preview visualizations.
After you have gathered your metrics exploration data you can:
- Click the **Open in Explore** icon on the right side to open the graph in Explore, where you can modify the query or add the graph to a dashboard or incident.
- Click the **Copy URL** icon on the right side to copy the metric drill down URL to the clipboard so it can be shared.
- Click the **Star** icon on the right side to bookmark and save the metrics exploration.
## Dashboard experience
To access Metrics Drilldown via a dashboard:
1. Navigate to your dashboard.
1. Select a time series panel.
1. Click the panel menu in the upper right and select **Metrics Drilldown**. If there are multiple metrics, click on the one you want to explore.
1. You see a slide-out drawer with the Metrics Experience, starting with the drill down. You can access the standalone experience by clicking **Open** in the upper right.

@ -1,9 +1,4 @@
---
_build:
list: false
noindex: true
cascade:
noindex: true
description: Overview of Observability as Code including description, key features, and explanation of benefits.
keywords:
- observability
@ -19,22 +14,60 @@ labels:
- oss
title: Observability as Code
weight: 100
cards:
items:
- title: Get started
height: 24
href: ./get-started/
description: Learn about how you can use Observability as Code.
- title: Grafana CLI
height: 24
href: ./grafana-cli/
description: Grafana CLI (`grafanactl`) is a command-line tool designed to simplify interaction with Grafana instances. You can authenticate, manage multiple environments, and perform administrative tasks through Grafana’s REST API, all from the terminal.
- title: JSON schema v2
height: 24
href: ./schema-v2/
description: Grafana dashboards are represented as JSON objects that store metadata, panels, variables, and settings. Observability as Code works with all versions of the JSON model, and it's fully compatible with version 2.
- title: Foundation SDK
height: 24
href: ./foundation-sdk/
description: The Grafana Foundation SDK is a set of tools, types, and libraries that let you define Grafana dashboards and resources using strongly typed code.
- title: Git Sync (experimental)
height: 24
href: ./provision-resources/intro-git-sync/
description: Git Sync is an experimental feature that lets you store your dashboard files in a GitHub repository and synchronize those changes with your Grafana instance.
- title: File provisioning (experimental)
height: 24
href: ./provision-resources/
description: File provisioning in Grafana lets you include resources, including folders and dashboard JSON files, that are stored in a local file system.
title_class: pt-0 lh-1
hero:
title: Observability as Code
description: Using Observability as Code, you can version, automate, and scale Grafana configurations, including dashboards and observability workflows.
height: 110
level: 1
width: 110
---
# Observability as Code
{{< docs/hero-simple key="hero" >}}
---
## Overview
Observability as Code lets you apply code management best practices to your observability resources.
Using Observability as Code, you can version, automate, and scale Grafana configurations, including dashboards and observability workflows.
By representing Grafana resources as code, you can integrate them into existing infrastructure-as-code workflows and apply standard development practices.
Observability as Code provides more control over configuration. Instead of manually configuring dashboards or settings through the Grafana UI, you can:
- **Write configurations in code:** Define dashboards in JSON or other supported formats.
- **Sync your Grafana setup to GitHub:** Track changes, collaborate, and roll back updates using Git and GitHub, or other remote sources.
- **Automate with CI/CD:** Integrate Grafana directly into your development and deployment pipelines.
- **Standardize workflows:** Ensure consistency across your teams by using repeatable, codified processes for managing Grafana resources.
- Write configurations in code: Define dashboards in JSON or other supported formats.
- Sync your Grafana setup to GitHub: Track changes, collaborate, and roll back updates using Git and GitHub, or other remote sources.
- Automate with CI/CD: Integrate Grafana directly into your development and deployment pipelines.
- Standardize workflows: Ensure consistency across your teams by using repeatable, codified processes for managing Grafana resources.
## Explore
{{< section depth=5 >}}
{{< card-grid key="cards" type="simple" >}}
<!-- Hiding this part of the doc because the rest of the docs aren't released yet

@ -14,7 +14,7 @@ weight: 250
# Get started with the Grafana Foundation SDK
The [Grafana Foundation SDK](https://github.com/grafana/grafana-foundation-sdk) is a set of tools, types, and libraries that enable you to define Grafana dashboards and resources using strongly typed code. By writing your dashboards as code, you can:
The [Grafana Foundation SDK](https://github.com/grafana/grafana-foundation-sdk) is a set of tools, types, and libraries that let you define Grafana dashboards and resources using strongly typed code. By writing your dashboards as code, you can:
- **Leverage strong typing:** Catch errors at compile time, ensuring more reliable configurations.
- **Enhance version control:** Track changes seamlessly using standard version control systems like Git.

@ -17,62 +17,72 @@ weight: 100
# Get started with Observability as Code
Simply put, with Observability as Code, you can manage Grafana resources.
You can write code that describes what you want the dashboard to do, rather than manipulate it via the UI.
Grafana provides a suite of tools for **Observability as Code** to help you manage your Grafana resources programmatically and at scale. This approach lets you define dashboards, data sources, and other configurations in code, enabling version control, automated testing, and reliable deployments through CI/CD pipelines.
Observability as Code lets you manage dashboards, resources, and configurations programmatically, leveraging powerful tools for automation and standardization.
Historically, managing Grafana as code involved various community and Grafana Labs tools, but lacked a single, cohesive story. Grafana 12 introduces foundational improvements, including new versioned APIs and official tooling, to provide a clearer path forward.
## Get started with Observability as Code
## Grafana CLI (`grafanactl`)
<!--
1. [**Understand the Dashboard Schemas**](json-models/)
Use the official command-line tool, `grafanactl`, to interact with your Grafana instances and manage resources via the new APIs.
- Learn about the Dashboard JSON models, which introduces clearer separation of properties, improved layouts, and metadata management.
- Review examples of JSON definitions for dashboards to get familiar with the structure and fields.
- It's the recommended tool for automation and direct API interaction, suitable for CI/CD pipelines and local development or free-form tasks. It supports pulling/pushing configurations from remote instances, validating configurations, and more.
- `grafanactl` works across all environments for Grafana OSS, Enterprise, and Cloud.
1. [**Understand the Foundation SDK**](foundation-sdk)
Refer to the [Grafana CLI (`grafanactl`)](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/grafana-cli) documentation for more information.
- Learn about a toolkit for programmatically creating and managing Grafana dashboards and resources with reusable components and streamlined workflows.
-->
## Git Sync
1. [**Set up Git Sync**](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/)
For an integrated, UI-driven Git workflow focused on dashboards, explore Git Sync.
- Configure Git repositories to store your dashboard JSON files.
- Understand best practices for version control, including collaboration through pull requests and rollbacks.
- Edit your JSON files in GitHub and then sync with Grafana.
- Connect folders or entire Grafana instances directly to a GitHub repository to synchronize dashboard definitions, enabling version control, branching, and pull requests directly from Grafana.
- Git Sync offers a simple, out-of-the-box approach for managing dashboards as code.
{{< admonition type="note" >}}
Git Sync is an **experimental feature** in Grafana 12, available in Grafana OSS and Enterprise [nightly releases](https://grafana.com/grafana/download/nightly). It is not yet available in Grafana Cloud.
{{< /admonition >}}
1. [**Manage dashboard deployments from GitHub**](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/)
Refer to the [Git Sync documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/) to learn more.
- Integrate dashboards into CI/CD pipelines using tools like GitHub Actions.
- Leverage provisioning features in Grafana to automate updates and deployment of dashboards.
<!--
## Direct API usage
1. **Explore additional tools and libraries for working with Observability as Code**
For maximum flexibility, advanced use cases, or building custom tooling, you can interact directly with the underlying versioned APIs.
- [**Grafanactl**](grafanactl)
- This approach requires handling HTTP requests and responses but provides complete control over resource management.
- `grafanactl`, Git Sync, and the Foundation SDK are all built on top of these APIs.
- To understand Dashboard Schemas accepted by the APIs, refer to the [JSON models documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/schema-v2/).
- Use a command-line tool for simplifying the management of Grafana resources.
Refer to the [Grafana APIs](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/developers/http_api/apis/) documentation for more information.
- [**Terraform**](infrastructure-as-code/terraform/)
## Foundation SDK
To programmatically define your Grafana resources (like dashboards or alerts) using familiar programming languages, use Foundation SDK.
- Define resources using strongly typed builders in languages like Go, TypeScript, Python, Java, and PHP.
- Avoid crafting complex JSON manually and integrate resource generation into your existing development workflows.
- Catch errors at compile time and easily integrate resource generation into your CI/CD pipelines.
- Use in conjunction with `grafanactl` to push your programmatically generated resources.
Refer to the [Foundation SDK](../foundation-sdk) documentation for more information.
## Additional Observability as Code tools
If you're already using established Infrastructure as Code or other configuration management tools, Grafana offers integrations to manage resources within your existing workflows.
- [Terraform](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/terraform/)
- Use the Grafana Terraform provider to manage dashboards, alerts, and more.
- Understand how to define and deploy resources using HCL/JSON configurations.
- [**Ansible**](infrastructure-as-code/ansible/)
- [Ansible](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/ansible/)
- Learn to use the Grafana Ansible collection to manage Grafana Cloud resources, including folders and cloud stacks.
- Write playbooks to automate resource provisioning through the Grafana API.
- [**Grafana Operator**](./infrastructure-as-code/grafana-operator/_index.md)
- [Grafana Operator](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/grafana-operator/)
- Utilize Kubernetes-native management with the Grafana Operator.
- Manage dashboards, folders, and data sources via Kubernetes Custom Resources.
- Integrate with GitOps workflows for seamless version control and deployment.
-->
## Explore additional Observability as Code tools
- [**Crossplane:**](https://github.com/grafana/crossplane-provider-grafana) Manage Grafana resources using Kubernetes manifests with the Grafana Crossplane provider.
- [**Grafonnet:**](https://github.com/grafana/grafonnet) Grafonnet is a Jsonnet library for generating Grafana dashboard JSON definitions programmatically. It is currently in the process of being deprecated.
- [**Grizzly:**](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/grizzly/dashboards-folders-datasources/) Grizzly is a command-line tool that simplifies managing Grafana resources using Kubernetes-inspired YAML syntax. It is currently in the process of being deprecated.
- [Crossplane](https://github.com/grafana/crossplane-provider-grafana) lets you manage Grafana resources using Kubernetes manifests with the Grafana Crossplane provider.
- [Grafonnet](https://github.com/grafana/grafonnet) is a Jsonnet library for generating Grafana dashboard JSON definitions programmatically. It is currently in the process of being deprecated.
- [Grizzly](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/grizzly/dashboards-folders-datasources/) is a deprecated command-line tool that simplifies managing Grafana resources using Kubernetes-inspired YAML syntax.

@ -1,9 +1,4 @@
---
_build:
list: false
noindex: true
cascade:
noindex: true
description: Overview of Grafana CLI, a command line tool for managing Grafana resources as code.
keywords:
- observability

@ -1,9 +1,4 @@
---
_build:
list: false
noindex: true
cascade:
noindex: true
description: Learn more about the supported workflows and use cases for Grafana CLI
keywords:
- workflows

@ -1,9 +1,4 @@
---
_build:
list: false
noindex: true
cascade:
noindex: true
description: Installation guide for Grafana CLI, a command line tool for managing Grafana Observability as Code
keywords:
- configuration

@ -1,9 +1,4 @@
---
_build:
list: false
noindex: true
cascade:
noindex: true
description: Configuration guide for Grafana CLI, a command line tool for managing Grafana resources as code.
keywords:
- configuration

@ -12,7 +12,7 @@ labels:
- enterprise
- oss
title: Provision resources and sync dashboards
weight: 100
weight: 300
---
# Provision resources and sync dashboards
@ -21,10 +21,6 @@ weight: 100
Provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. This feature isn't available in Grafana Cloud.
{{< /admonition >}}
{{< section depth="5" >}}
<hr />
Using Provisioning, you can configure how to store your dashboard JSON files in either GitHub repositories using Git Sync or a local path.
Of the two experimental options, Git Sync is the recommended method for provisioning your dashboards. You can synchronize any new dashboards and changes to existing dashboards to your configured GitHub repository.

@ -19,15 +19,6 @@ weight: 200
Local file provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature isn't available in Grafana Cloud.
{{< /admonition >}}
- [Provision resources and sync dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/)
- [Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/)
- [Set up Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/)
- [Set up file provisioning](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/)
- [Work with provisioned dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/provisioned-dashboards/)
- [Manage provisioned repositories with Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/)
<hr />
File provisioning in Grafana lets you include resources, including folders and dashboard JSON files, that are stored in a local file system.
This page explains how to set up local file provisioning.

@ -19,15 +19,6 @@ weight: 100
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. Git Sync isn't available in Grafana Cloud.
{{< /admonition >}}
- [Provision resources and sync dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/)
- [Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/)
- [Set up Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/)
- [Set up file provisioning](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/)
- [Work with provisioned dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/provisioned-dashboards/)
- [Manage provisioned repositories with Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/)
<hr />
Git Sync lets you manage Grafana dashboards as code by storing dashboard JSON files and folders in a remote GitHub repository.
Alternatively, you can configure a local file system instead of using GitHub.
Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for information.

@ -19,15 +19,6 @@ weight: 100
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. Git Sync isn't available in Grafana Cloud.
{{< /admonition >}}
- [Provision resources and sync dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/)
- [Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/)
- [Set up Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/)
- [Set up file provisioning](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/)
- [Work with provisioned dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/provisioned-dashboards/)
- [Manage provisioned repositories with Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/)
<hr />
Using Git Sync, you can:
- Introduce a review process for creating and modifying dashboards

@ -19,15 +19,6 @@ weight: 300
Git Sync and File path provisioning are [experimental features](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana. These features aren't available in Grafana Cloud.
{{< /admonition >}}
- [Provision resources and sync dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/)
- [Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/)
- [Set up Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/)
- [Set up file provisioning](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/)
- [Work with provisioned dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/provisioned-dashboards/)
- [Manage provisioned repositories with Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/)
<hr />
Using Provisioning, you can choose to store your dashboard JSON files in either GitHub repositories using Git Sync or a local file path.
For more information, refer to the [Dashboards](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/) documentation.

@ -18,15 +18,6 @@ weight: 400
# Manage provisioned repositories with Git Sync
- [Provision resources and sync dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/)
- [Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/)
- [Set up Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/)
- [Set up file provisioning](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/)
- [Work with provisioned dashboards](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/provisioned-dashboards/)
- [Manage provisioned repositories with Git Sync](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/)
<hr />
After you have set up Git Sync, you can synchronize dashboards and changes to existing dashboards to your configured GitHub repository.
If you push a change in the repository, those changes are mirrored in your Grafana instance.

@ -18,10 +18,10 @@ weight: 200
# Dashboard JSON schema v2
Grafana dashboards are represented as JSON objects that store metadata, panels, variables, and settings.
{{< docs/experimental product="Dashboard JSON schema v2" featureFlag="`dashboardNewLayouts`" >}}
Grafana dashboards are represented as JSON objects that store metadata, panels, variables, and settings.
Observability as Code works with all versions of the JSON model, and it's fully compatible with version 2.
## Before you begin

@ -0,0 +1,206 @@
---
aliases:
labels:
products:
- cloud
- enterprise
- oss
menuTitle: SQL expressions
title: SQL expressions
description: Manipulate and transform data in Grafana using SQL expressions.
weight: 45
refs:
expressions:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/expression-queries/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/expression-queries/
---
# SQL expressions
{{< docs/private-preview product="SQL expressions" >}}
SQL Expressions are server-side expressions that manipulate and transform the results of data source queries using MySQL-like syntax. They allow you to easily query and transform your data after it has been queried, using SQL, which provides a familiar and powerful syntax that can handle everything from simple filters to highly complex, multi-step transformations.
In Grafana, a server-side expression is a way to transform or calculate data after it has been retrieved from the data source, but before it is sent to the frontend for visualization. Grafana evaluates these expressions on the server, not in the browser or at the data source.
For general information on Grafana expressions, refer to [Write expression queries](ref:expressions).
![Example of a SQL expression](/media/docs/sql-expressions/sql-expressions-example-1.png)
## Before you begin
- Enable SQL expressions under the feature toggle `sqlExpressions`.
- If you self-host Grafana, you can find feature toggles in the configuration file `grafana.ini`.
```
[feature_toggles]
enable = sqlExpressions
```
- If you are using Grafana Cloud, contact [Support](https://grafana.com/help/) to enable this feature.
## Transform data with SQL expressions
SQL expressions allow you to:
- Shape, transform, and modify query results without changing the original query.
- JOIN data from multiple tables.
- Create alerts or recording rules based on transformed data.
- Perform final-stage modifications to datasets, including:
- Show, hide, or rename columns.
- Filter rows based on conditions.
- Aggregate data (for example: sum, average, count).
- Write subqueries and Common Table Expressions (CTEs) to support more complex logic:
- **Subqueries** are nested queries used for filtering, calculations, or transformations.
- **CTEs** are temporary named result sets that help make complex queries more readable and reusable.
A key capability of SQL expressions is the ability to JOIN data from multiple tables. This allows users to combine and transform data in a predictable, user-friendly way—even for complex use cases. You can JOIN data from an unlimited number of data source queries.
To work with SQL expressions, you must use data from a backend data source. In Grafana, a backend data source refers to a data source plugin or integration that communicates with a database, service, or API through the Grafana server, rather than directly from the browser (frontend).
## Compatible data sources
The following are compatible data sources:
**Full support:** All query types for each data source are supported.
- Elasticsearch
- MySQL
- Loki
- Graphite
- Google Sheets
- Amazon Athena
**Partial support:** The following data sources offer limited or conditional support. Some allow different types of queries, depending on the service being accessed. For example, Azure Monitor can query multiple services, each with its own query format. In some cases, you can also change the query type within a panel.
- InfluxDB
- Infinity
- Azure Monitor
- TestData
- Tempo
- Prometheus
- Cloudwatch
- GitHub
- BigQuery
## Create SQL expressions
To create a SQL expression, complete the following steps:
1. Navigate to **Dashboards** in the left-side menu.
1. Select a dashboard and open a dashboard panel.
1. Click the ellipsis in the upper right and select **Edit**.
1. Click **+ Expression**.
1. Select **SQL** from the drop-down.
After you have added a SQL expression, you can select from other data source queries by referencing the RefIDs of the queries in your SQL expression as if they were tables in a SQL database.
![Using the RefID](/media/docs/sql-expressions/using-the-RefID.png)
## Workflow to build SQL expressions
Use the following workflow to create a SQL expression:
1. **Build your base queries.** Create the individual query and give it a meaningful name. Create the queries (A, B, etc.) that provide the data you want to combine or transform using SQL Expressions.
1. **Hide your base queries.** Click the **👁 Eye icon** next to each base query to hide them from visualization. This keeps your panel clean while still making the data available to the SQL Expression.
1. **Switch to table view**. Set the panel visualization to **Table** to inspect and review the structure and output of your SQL expression as you build and refine it.
1. **Add a SQL Expression**. Add a new query and select SQL Expression as its type.
**Inspect inputs**. Start with simple test queries to understand the shape of your input frames.
```sql
SELECT * FROM A LIMIT 10
```
This lets you see the available columns and sample rows from `query A`. Repeat this for each input query you want to use (e.g., `SELECT * FROM B LIMIT 10`).
1. **Inspect your data**. Repeat this for each input query to understand the column structure and data types you're working with.
```sql
SELECT * FROM <B, C, D, etc> LIMIT 10
```
1. **Construct the SQL expression.** Once you understand your data, you can write your SQL expression to join, filter, or otherwise transform the data.
1. **Validate and iterate**. Click **Refresh** every time you update your SQL query to re-evaluate and see the updated result.
When selecting a visualization type, **ensure your SQL expression returns data in the required shape**. For example, time series panels require a column with a time field (e.g., timestamp) and a numeric value column (e.g., \_\_value\_\_). If the output is not shaped correctly, your visualization may appear empty or fail to render.
The SQL expression workflow in Grafana is designed with the following behaviors:
- **Unhidden queries are visualized automatically.** If an input query is not hidden, Grafana will attempt to render it alongside your SQL expression. This can clutter the output, especially in table visualizations.
- **SQL expression results may not be immediately visible.** You might need to use the data frame selector (dropdown at the bottom of the table panel) to switch between the raw query and the SQL expression result.
- **Non-tabular or incorrectly shaped data will not render in certain panels.** Visualizations such as graphs or gauges require properly structured data. Mismatched formats will result in rendering issues or missing data.
For data to be used in SQL expressions, it must be in a **tabular format**, specifically the **FullLong format**. This means all relevant data is contained within a single table, with values such as metric labels stored as columns and individual cells. Because not all data sources return results in this format by default, Grafana will automatically convert compatible query results to FullLong format when they are referenced in a SQL expression.
## SQL conversion rules
When a RefID is referenced within a SQL statement (e.g., `SELECT * FROM A`), the system invokes a distinct SQL conversion process.
The SQL conversion path:
- The query result is treated as a single data frame, without labels, and is mapped directly to a tabular format.
- If the frame type is present and is either numeric, wide time series, or multi-frame time series (for example, labeled formats), Grafana automatically converts the data into a table structure.
## Known limitations
- Currently, only one SQL expression is supported per panel or alert.
- Grafana supports certain data sources. Refer to [compatible data sources](#compatible-data-sources) for a current list.
## Supported data source formats
Grafana supports three types of data source response formats:
1. **Single Table-like Frame**:
This refers to data returned in a standard tabular structure, where all values are organized into rows and columns, similar to what you'd get from a SQL query.
- **Example**: Any query against a SQL data source (e.g., PostgreSQL, MySQL) with the format set to Table.
2. **Dataplane: Time Series Format**:
This format represents time series data with timestamps and associated values. It is typically returned from monitoring data sources.
- **Example**: Prometheus or Loki Range Queries (queries that return a set of values over time).
3. **Dataplane: Numeric Long Format**:
This format is used for point-in-time (instant) metric queries that return a single value (or a set of values) at a specific moment.
- **Example**: Prometheus or Loki Instant Queries (queries that return the current value of a metric).
For more information on Dataplane formats, refer to [Grafana Dataplane Documentation](https://grafana.com/developers/dataplane).
The following non-tabular formats are automatically converted to a tabular format (`FullLong`) when used in SQL expressions:
- **Time Series Wide**: Label keys become column names.
- **Time Series Multi**: Label values become the values in each row (or null if a label is missing).
- **Numeric Wide**: The `value` column contains the numeric metric value.
- **Numeric Multi**: If a display name exists, it will appear in the `display_name` column.
During conversion:
- Label keys become column names.
- Label values populate the corresponding rows (null if a label is missing).
- The `value` column contains the numeric metric.
- If available, the `display_name` column contains a human-readable name.
- The `metric_name` column stores the raw metric identifier.
- For time series data, Grafana includes a `time` column with timestamps.
## SQL expressions examples
1. Create the following Prometheus query:
```promql
sum(
rate(go_cpu_classes_gc_total_cpu_seconds_total{namespace=~".*(namespace).*5."}[$__rate_interval])
) by (namespace)
```
The panel displays the CPU usage by Go garbage collection (GC) over time, broken down by namespace.
![Example using a Prometheus query](/media/docs/sql-expressions/sql-expressions-prom-query-example.png)
2. Add the SQL expression `SELECT * from A`. After you add a SQL expression that selects from RefID A, Grafana converts it to a table response:
![Add the SQL expression](/media/docs/sql-expressions/add-the-sql-expression.png)

@ -0,0 +1,232 @@
---
aliases:
- ../../../auth/saml/
- ../../../enterprise/configure-saml/
- ../../../enterprise/saml/
- ../../../enterprise/saml/about-saml/
- ../../../enterprise/saml/configure-saml/
- ../../../enterprise/saml/enable-saml/
- ../../../enterprise/saml/set-up-saml-with-okta/
- ../../../enterprise/saml/troubleshoot-saml/
- ../../../saml/
description: Learn how to configure SAML authentication in Grafana's configuration
file.
labels:
products:
- cloud
- enterprise
menuTitle: SAML
title: Configure SAML authentication in Grafana
weight: 500
---
# SAML authentication in Grafana
{{< admonition type="note" >}}
Available in [Grafana Enterprise](/docs/grafana/<GRAFANA_VERSION>/introduction/grafana-enterprise/) and [Grafana Cloud](/docs/grafana-cloud).
{{< /admonition >}}
SAML authentication integration allows your Grafana users to log in by using an external SAML 2.0 Identity Provider (IdP). To enable this, Grafana becomes a Service Provider (SP) in the authentication flow, interacting with the IdP to exchange user information.
You can configure SAML authentication in Grafana through one of the following methods:
- [Configure SAML using Grafana configuration file](#configure-saml-using-the-grafana-config-file)
- Configure SAML using the [SSO Settings API](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/developers/http_api/sso-settings/)
- Configure SAML using the [SAML user interface](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/saml-ui/)
- Configure SAML using the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/<GRAFANA_VERSION>/docs/resources/sso_settings)
If you are using Okta or Azure AD as Identity Provider, see the following documentation for configuration:
- [Configure SAML with Azure AD](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-with-azuread/)
- [Configure SAML with Okta](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-with-okta/)
{{< admonition type="note" >}}
The API and Terraform support are available in Public Preview in Grafana v11.1 behind the `ssoSettingsSAML` feature toggle. You must also enable the `ssoSettingsApi` flag.
{{< /admonition >}}
All methods offer the same configuration options. However, if you want to keep all of Grafana authentication settings in one place, use the Grafana configuration file or the Terraform provider. If you are a Grafana Cloud user, you do not have access to the Grafana configuration file. Instead, configure SAML through the other methods.
{{< admonition type="note" >}}
Configuration in the API takes precedence over the configuration in the Grafana configuration file. SAML settings from the API will override any SAML configuration set in the Grafana configuration file.
{{< /admonition >}}
## SAML Bindings
Grafana supports the following SAML 2.0 bindings:
- From the Service Provider (SP) to the Identity Provider (IdP):
- `HTTP-POST` binding
- `HTTP-Redirect` binding
- From the Identity Provider (IdP) to the Service Provider (SP):
- `HTTP-POST` binding
## Request Initiation
Grafana supports:
- SP-initiated requests
- IdP-initiated requests
By default, SP-initiated requests are enabled. For instructions on how to enable IdP-initiated logins, see [IdP-initiated Single Sign-On (SSO)](#idp-initiated-single-sign-on-sso).
## Enable SAML authentication in Grafana
To use the SAML integration, in the `auth.saml` section of the Grafana custom configuration file, set `enabled` to `true`.
Refer to [Configuration](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/) for more information about configuring Grafana.
## Identity provider (IdP) registration
For the SAML integration to work correctly, you need to make the IdP aware of the SP.
The integration provides two key endpoints as part of Grafana:
- The `/saml/metadata` endpoint, which contains the SP metadata. You can either download and upload it manually, or you make the IdP request it directly from the endpoint. Some providers name it Identifier or Entity ID.
- The `/saml/acs` endpoint, which is intended to receive the ACS (Assertion Consumer Service) callback. Some providers name it SSO URL or Reply URL.
## Configure SAML using the Grafana configuration file
1. In the `[auth.saml]` section in the Grafana configuration file, set [`enabled`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#enabled-3) to `true`.
2. Configure SAML options:
- Review all [available configuration options](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/saml-configuration-options/)
- For IdP-specific configuration, refer to:
- [Configure SAML with Okta](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-with-okta/)
- [Configure SAML with Entra ID](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-with-azuread/)
3. Save the configuration file and then restart the Grafana server.
When you are finished, the Grafana configuration might look like this example:
```ini
[server]
root_url = https://grafana.example.com
[auth.saml]
enabled = true
name = My IdP
auto_login = false
private_key_path = "/path/to/private_key.pem"
certificate_path = "/path/to/certificate.cert"
idp_metadata_url = "https://my-org.okta.com/app/my-application/sso/saml/metadata"
assertion_attribute_name = DisplayName
assertion_attribute_login = Login
assertion_attribute_email = Email
assertion_attribute_groups = Group
```
## Assertion mapping
During the SAML SSO authentication flow, Grafana receives the ACS callback. The callback contains all the relevant information of the user under authentication embedded in the SAML response. Grafana parses the response to create (or update) the user within its internal database.
For Grafana to map the user information, it looks at the individual attributes within the assertion. You can think of these attributes as Key/Value pairs (although, they contain more information than that).
Grafana provides configuration options that let you modify which keys to look at for these values. The data we need to create the user in Grafana is Name, Login handle, and email.
### The `assertion_attribute_name` option
`assertion_attribute_name` is a special assertion mapping that can either be a simple key, indicating a mapping to a single assertion attribute on the SAML response, or a complex template with variables using the `$__saml{<attribute>}` syntax. If this property is misconfigured, Grafana will log an error message on startup and disallow SAML sign-ins. Grafana will also log errors after a login attempt if a variable in the template is missing from the SAML response.
**Examples**
```ini
#plain string mapping
assertion_attribute_name = displayName
```
```ini
#template mapping
assertion_attribute_name = $__saml{firstName} $__saml{lastName}
```
## SAML Name ID
The `name_id_format` configuration field specifies the requested format of the NameID element in the SAML assertion.
By default, this is set to `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` and does not need to be specified in the configuration file.
The following list includes valid configuration field values:
| `name_id_format` value in the configuration file or Terraform | `Name identifier format` on the UI |
| ------------------------------------------------------------- | ---------------------------------- |
| `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` | Default |
| `urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified` | Unspecified |
| `urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress` | Email address |
| `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` | Persistent |
| `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` | Transient |
## IdP metadata
You also need to define the public part of the IdP for message verification. The SAML IdP metadata XML defines where and how Grafana exchanges user information.
Grafana supports three ways of specifying the IdP metadata.
- Without a suffix `idp_metadata`, Grafana assumes base64-encoded XML file contents.
- With the `_path` suffix, Grafana assumes a path and attempts to read the file from the file system.
- With the `_url` suffix, Grafana assumes a URL and attempts to load the metadata from the given location.
## Maximum issue delay
Prevents SAML response replay attacks and internal clock skews between the SP (Grafana) and the IdP. You can set a maximum amount of time between the SP issuing the AuthnRequest and the SP (Grafana) processing it.
The configuration option is specified as a duration, such as `max_issue_delay = 90s` or `max_issue_delay = 1h`.
## Metadata valid duration
SP metadata is likely to expire at some point, perhaps due to a certificate rotation or change of location binding. Grafana allows you to specify for how long the metadata should be valid. Leveraging the `validUntil` field, you can tell consumers until when your metadata is going to be valid. The duration is computed by adding the duration to the current time.
The configuration option is specified as a duration, such as `metadata_valid_duration = 48h`.
## Allow new user sign up
By default, new Grafana users using SAML authentication will have an account created for them automatically. To decouple authentication and account creation and ensure only users with existing accounts can log in with SAML, set the `allow_sign_up` option to false.
## Configure automatic login
Set `auto_login` option to true to attempt login automatically, skipping the login screen.
This setting is ignored if multiple auth providers are configured to use auto login.
For more information about automatic login behavior and troubleshooting, see [Automatic login](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/#automatic-oauth-login).
```
auto_login = true
```
## Configure allowed organizations
With the [`allowed_organizations`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#allowed_organizations) option you can specify a list of organizations where the user must be a member of at least one of them to be able to log in to Grafana.
To get the list of user's organizations from SAML attributes, you must configure the `assertion_attribute_org` option. This option specifies which SAML attribute contains the list of organizations the user belongs to.
To put values containing spaces in the list, use the following JSON syntax:
```ini
allowed_organizations = ["org 1", "second org"]
```
## Configuring SAML with HTTP-Post binding
If multiple bindings are supported for SAML Single Sign-On (SSO) by the Identity Provider (IdP), Grafana will use the `HTTP-Redirect` binding by default. If the IdP only supports the `HTTP-Post binding` then updating the `content_security_policy_template` (in case `content_security_policy = true`) and `content_security_policy_report_only_template` (in case `content_security_policy_report_only = true`) might be required to allow Grafana to initiate a POST request to the IdP. These settings are used to define the [Content Security Policy (CSP)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy) headers that are sent by Grafana.
To allow Grafana to initiate a POST request to the IdP, update the `content_security_policy_template` and `content_security_policy_report_only_template` settings in the Grafana configuration file and add the identity provider domain to the `form-action` directive. By default, the `form-action` directive is set to `self` which only allows POST requests to the same domain as Grafana. To allow POST requests to the identity provider domain, update the `form-action` directive to include the identity provider domain, for example: `form-action 'self' https://idp.example.com`.
{{< admonition type="note" >}}
For Grafana Cloud instances, please contact Grafana Support to update the `content_security_policy_template` and `content_security_policy_report_only_template` settings of your Grafana instance. Please provide the metadata URL/file of your IdP.
{{< /admonition >}}
## IdP-initiated login
By default, Grafana allows only service provider (SP) initiated logins (when the user logs in with SAML via the login page in Grafana). If you want users to log in into Grafana directly from your identity provider (IdP), set the `allow_idp_initiated` configuration option to `true` and configure `relay_state` with the same value specified in the IdP configuration.
IdP-initiated SSO has some security risks, so make sure you understand the risks before enabling this feature. When using IdP-initiated login, Grafana receives unsolicited SAML responses and can't verify that the login flow was started by the user. This makes it hard to detect whether the SAML message has been stolen or replaced. Because of this, IdP-initiated login is vulnerable to login cross-site request forgery (CSRF) and man-in-the-middle (MITM) attacks. We do not recommend using IdP-initiated login, and suggest keeping it disabled whenever possible.
## Advanced configuration
For advanced configuration and troubleshooting, please refer to one of the following pages:
- [Configure SAML request signing](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-request-signing/)
- [Configure SAML single logout](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-single-logout/)
- [Configure Organization mapping](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-org-mapping/)
- [Configure Role and Team sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/configure-saml-team-role-mapping/)
- [SAML configuration options](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/saml-configuration-options/)
- [Troubleshooting](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/troubleshoot-saml/)

@ -0,0 +1,62 @@
---
aliases:
- ../../../../saml/
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
- cloud
- enterprise
menuTitle: Configure Organisation mapping for SAML
title: Configure Organisation mapping for SAML
weight: 550
---
# Configure organization mapping for SAML
Organization mapping allows you to assign users to a particular organization in Grafana depending on an attribute value obtained from the identity provider.
1. In the configuration file, set [`assertion_attribute_org`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_org) to the attribute name you store organization info in. This attribute can be an array if you want a user to be in multiple organizations.
1. Set [`org_mapping`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#org_mapping) option to the comma-separated list of `Organization:OrgId` pairs to map organization from IdP to Grafana organization specified by ID. If you want users to have different roles in multiple organizations, you can set this option to a comma-separated list of `Organization:OrgId:Role` mappings.
For example, use the following configuration to assign users from the `Engineering` organization to the Grafana organization with ID `2` as Editor and users from `Sales` - to the org with ID `3` as Admin, based on the `Org` assertion attribute value:
```ini
[auth.saml]
assertion_attribute_org = Org
org_mapping = Engineering:2:Editor, Sales:3:Admin
```
Starting from Grafana version 11.5, you can use the organization name instead of the organization ID in the `org_mapping` option. Ensure that the organization name you configure matches exactly with the organization name in Grafana, as it is case-sensitive. If the organization name is not found in Grafana, the mapping will be ignored. If the external organization or the organization name contains spaces, use the JSON syntax for the `org_mapping` option:
```ini
org_mapping = ["Org 1:2:Editor", "ExternalOrg:ACME Corp.:Admin"]
```
If one of the mappings contains a `:`, use the JSON syntax and escape the `:` with a backslash:
```ini
# Assign users from "External:Admin" to the organization with name "ACME Corp" as Admin
org_mapping = ["External\:Admin:ACME Corp:Admin"]
```
For example, to assign users from `Engineering` organization to the Grafana organization with name `ACME Corp` as Editor and users from `Sales` - to the org with id `3` as Admin, based on `Org` assertion attribute value:
```ini
[auth.saml]
assertion_attribute_org = Org
org_mapping = ["Engineering:ACME Corp:Editor", "Sales:3:Admin"]
```
You can specify multiple organizations both for the IdP and Grafana:
- `org_mapping = Engineering:2, Sales:2` to map users from `Engineering` and `Sales` to `2` in Grafana.
- `org_mapping = Engineering:2, Engineering:3` to assign `Engineering` to both `2` and `3` in Grafana.
You can use `*` as the SAML Organization if you want all your users to be in some Grafana organizations with a default role:
- `org_mapping = *:2:Editor` to map all users to the organization whose ID is `2` in Grafana as Editors.
You can use `*` as the Grafana organization in the mapping if you want all users from a given SAML Organization to be added to all existing Grafana organizations.
- `org_mapping = Engineering:*` to map users from `Engineering` to all existing Grafana organizations.
- `org_mapping = Administration:*:Admin` to map users from `Administration` to all existing Grafana organizations as Admins.

@ -0,0 +1,77 @@
---
aliases:
- ../../../../saml/
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
- cloud
- enterprise
menuTitle: Configure SAML signing and encryption
title: Configure SAML signing and encryption
weight: 530
---
# Configure SAML signing and encryption
Grafana supports signed and encrypted responses, and _only_ supports signed requests.
## Certificate and private key
Commonly, the certificate and key are embedded in the IdP metadata and refreshed as needed by Grafana automatically. However, if your IdP expects signed requests, you must supply a certificate and private key.
The SAML SSO standard uses asymmetric encryption to exchange information between the SP (Grafana) and the IdP. To perform such encryption, you need a public part and a private part. In this case, the X.509 certificate provides the public part, while the private key provides the private part. The private key needs to be issued in a [PKCS#8](https://en.wikipedia.org/wiki/PKCS_8) format.
If you are directly supplying the certificate and key, Grafana supports two ways of specifying both the `certificate` and `private_key`:
- Without a suffix (`certificate` or `private_key`), the configuration assumes you've supplied the base64-encoded file contents.
- With the `_path` suffix (`certificate_path` or `private_key_path`), then Grafana treats the value entered as a path and attempts to read the file from the file system.
{{< admonition type="note" >}}
You can only use one form of each configuration option. Using multiple forms, such as both `certificate` and `certificate_path`, results in an error.
{{< /admonition >}}
Always work with your company's security team on setting up certificates and private keys. If you need to generate them yourself (such as in the short term, for testing purposes, and so on), use the following example to generate your certificate and private key, including the step of ensuring that the key is generated with the [PKCS#8](https://en.wikipedia.org/wiki/PKCS_8) format.
## Signature algorithm
The SAML standard requires digital signatures for security-critical messages such as authentication and logout requests. When you configure the `signature_algorithm` option, Grafana automatically signs these SAML requests using your configured private key and certificate.
### Supported algorithms
- `rsa-sha1`: Legacy algorithm, not recommended for new deployments
- `rsa-sha256`: Recommended for most use cases
- `rsa-sha512`: Strongest security, but may impact performance
### Important considerations
- The signature algorithm must match your IdP configuration exactly
- Mismatched algorithms will cause signature validation failures
- Grafana uses the key and certificate specified in `private_key` and `certificate` options for signing
- We recommend using `rsa-sha256` for new SAML implementations
## Example of private key generation for SAML authentication
An example of how to generate a self-signed certificate and private key that's valid for one year:
```sh
$ openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes
```
Base64-encode the cert.pem and key.pem files:
(-w0 switch is not needed on Mac, only for Linux)
```sh
$ base64 -i key.pem -o key.pem.base64
$ base64 -i cert.pem -o cert.pem.base64
```
The base64-encoded values (`key.pem.base64, cert.pem.base64` files) are then used for the `certificate` and `private_key` configuration options.
The key you provide should look like:
```
-----BEGIN PRIVATE KEY-----
...
...
-----END PRIVATE KEY-----
```

@ -0,0 +1,20 @@
---
aliases:
- ../../../../saml/
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
- cloud
- enterprise
menuTitle: Configure SAML single logout
title: Configure SAML single logout
weight: 560
---
# Configure SAML Single Logout
The single logout feature allows users to log out from all applications associated with the current IdP session established via SAML SSO. If the `single_logout` option is set to `true` and a user logs out, Grafana requests the IdP to end the user session, which in turn triggers logout from all other applications the user is logged into using the same IdP session (the applications must support single logout). Conversely, if another application connected to the same IdP logs out using single logout, Grafana receives a logout request from the IdP and ends the user session.
{{< admonition type="note" >}}
The improved SLO features, including proper handling of the IdP's SessionIndex, are currently behind the `improvedExternalSessionHandlingSAML` feature toggle. When this feature toggle is enabled, Grafana will correctly handle session-specific logouts. If the feature toggle is not enabled, logging out will end all of the user's sessions.
{{< /admonition >}}

@ -0,0 +1,104 @@
---
aliases:
- ../../../../saml/
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
- cloud
- enterprise
menuTitle: Configure Role and Team sync for SAML
title: Configure Role and Team sync for SAML
weight: 540
---
# Configure team sync for SAML
To use SAML Team sync, set [`assertion_attribute_groups`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_groups) to the attribute name where you store user groups. Grafana will then use the attribute values extracted from the SAML assertion to add the user to the groups with the same names configured on the External group sync tab.
{{< admonition type="warning" >}}
Grafana requires the SAML groups attribute to be configured with distinct `AttributeValue` elements for each group. Do not include multiple groups within a single `AttributeValue` delimited by a comma or any other character. Failure to do so will prevent correct group parsing. Example:
```xml
<saml2:Attribute ...>
<saml2:AttributeValue ...>admins_group</saml2:AttributeValue>
<saml2:AttributeValue ...>division_1</saml2:AttributeValue>
</saml2:Attribute>
```
{{< /admonition >}}
{{< admonition type="note" >}}
Team Sync allows you to sync users from SAML to Grafana teams. It does not automatically create teams in Grafana. You must create the teams in Grafana before you can use this feature.
{{< /admonition >}}
Given the following partial SAML assertion:
```xml
<saml2:Attribute
Name="groups"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">
<saml2:AttributeValue
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="xs:string">admins_group
</saml2:AttributeValue>
<saml2:AttributeValue
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="xs:string">division_1
</saml2:AttributeValue>
</saml2:Attribute>
```
The configuration would look like this:
```ini
[auth.saml]
# ...
assertion_attribute_groups = groups
```
The following `External Group ID`s would be valid for input in the desired team's _External group sync_ tab:
- `admins_group`
- `division_1`
[Learn more about Team Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-team-sync/)
# Configure role sync for SAML
Role sync allows you to map user roles from an identity provider to Grafana. To enable role sync, configure role attribute and possible values for the Editor, Admin, and Grafana Admin roles. For more information about user roles, refer to [Roles and permissions](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
1. In the configuration file, set [`assertion_attribute_role`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_role) option to the attribute name where the role information will be extracted from.
1. Set the [`role_values_none`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_none) option to the values mapped to the `None` role.
1. Set the [`role_values_viewer`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_viewer) option to the values mapped to the `Viewer` role.
1. Set the [`role_values_editor`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_editor) option to the values mapped to the `Editor` role.
1. Set the [`role_values_admin`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_admin) option to the values mapped to the organization `Admin` role.
1. Set the [`role_values_grafana_admin`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_grafana_admin) option to the values mapped to the `Grafana Admin` role.
If a user role doesn't match any of the configured values, then the role specified by the `auto_assign_org_role` configuration option is assigned. If the `auto_assign_org_role` option is not set, the user role defaults to `Viewer`.
For more information about roles and permissions in Grafana, refer to [Roles and permissions](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
Example configuration:
```ini
[auth.saml]
assertion_attribute_role = role
role_values_none = none
role_values_viewer = external
role_values_editor = editor, developer
role_values_admin = admin, operator
role_values_grafana_admin = superadmin
```
**Important**: When role sync is configured, any changes of user roles and organization membership made manually in Grafana will be overwritten on next user login. Assign user organizations and roles in the IdP instead.
If you don't want user organizations and roles to be synchronized with the IdP, you can use the `skip_org_role_sync` configuration option.
Example configuration:
```ini
[auth.saml]
skip_org_role_sync = true
```

@ -0,0 +1,126 @@
---
aliases:
- ./saml/#set-up-saml-with-azure-ad
- ../saml/#set-up-saml-with-azure-ad
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
- cloud
- enterprise
menuTitle: Configure SAML with Entra ID
title: Configure SAML authentication with Entra ID
weight: 570
---
# Configure SAML with Microsoft Entra ID
Grafana supports user authentication through Microsoft Entra ID. This topic shows you how to configure SAML authentication in Grafana with [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id).
{{< admonition type="note" >}}
If an Entra ID user belongs to more than 150 groups, a Graph API endpoint is used instead.
Grafana versions 11.1 and below do not support fetching the groups from the Graph API endpoint. As a result, users with more than 150 groups will not be able to retrieve their groups. Instead, it is recommended that you use the Azure AD connector.
As of Grafana 11.2, the SAML integration offers a mechanism to retrieve user groups from the Graph API.
Related links:
- [Entra ID SAML limitations](https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference#groups-overage-claim)
- [Configure a Graph API application in Entra ID](#configure-a-graph-api-application-in-entra-id)
{{< /admonition >}}
## Before you begin
Ensure you have permission to administer SAML authentication. For more information about roles and permissions in Grafana, refer to [Roles and permissions](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
If you have users that belong to more than 150 groups, configure a registered application to provide an Entra ID Graph API to retrieve the groups. Refer to [Configure a Graph API application in Entra ID](#configure-a-graph-api-application-in-entra-id).
## Generate self-signed certificates
Entra ID requires a certificate to verify the SAML requests' signature. You can generate a private key and a self-signed certificate using the following command (the private key used to sign the requests and the certificate contains the public key for verification):
```sh
$ openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes
```
This will generate a `key.pem` and `cert.pem` file that you can use for the `private_key_path` and `certificate_path` configuration options.
## Add Microsoft Entra SAML Toolkit from the gallery
> Taken from https://learn.microsoft.com/en-us/entra/identity/saas-apps/saml-toolkit-tutorial#add-microsoft-entra-saml-toolkit-from-the-gallery
1. Go to the [Azure portal](https://portal.azure.com/#home) and sign in with your Entra ID account.
1. Search for **Enterprise Applications**.
1. In the **Enterprise applications** pane, select **New application**.
1. In the search box, enter **SAML Toolkit**, and then select the **Microsoft Entra SAML Toolkit** from the results panel.
1. Add a descriptive name and select **Create**.
## Configure the SAML Toolkit application endpoints
In order to validate Entra ID users with Grafana, you need to configure the SAML Toolkit application endpoints by creating a new SAML integration in the Entra ID organization.
> For the following configuration, we will use `https://localhost` as the Grafana URL. Replace it with your Grafana URL.
1. In the **SAML Toolkit application**, select **Set up single sign-on**.
1. In the **Single sign-on** pane, select **SAML**.
1. In the Set up **Single Sign-On with SAML** pane, select the pencil icon for **Basic SAML Configuration** to edit the settings.
1. In the **Basic SAML Configuration** pane, click on the **Edit** button and update the following fields:
- In the **Identifier (Entity ID)** field, enter `https://localhost/saml/metadata`.
- In the **Reply URL (Assertion Consumer Service URL)** field, enter `https://localhost/saml/acs`.
- In the **Sign on URL** field, enter `https://localhost`.
- In the **Relay State** field, enter `https://localhost`.
- In the **Logout URL** field, enter `https://localhost/saml/slo`.
1. Select **Save**.
1. At the **SAML Certificate** section, copy the **App Federation Metadata Url**.
- Use this URL in the `idp_metadata_url` field in the `custom.ini` file.
### Generate a client secret
1. In the **Overview** pane, select **Certificates & secrets**.
1. Select **New client secret**.
1. In the **Add a client secret** pane, enter a description for the secret.
1. Set the expiration date for the secret.
1. Select **Add**.
1. Copy the value of the secret. This value is used in the `client_secret` field in the [SAML configuration](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml/saml-configuration-options/).
## Configure a Graph API application in Entra ID
While an Entra ID tenant can be configured in Grafana via SAML, some additional information is only accessible via the Graph API. To retrieve this information, create a new application in Entra ID and grant it the necessary permissions.
> [Entra ID SAML limitations](https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference#groups-overage-claim)
> For the following configuration, the URL `https://localhost` will be used as the Grafana URL. Replace it with your Grafana instance URL.
### Create a new App registration
This app registration will be used as a Service Account to retrieve more information about the user from the Entra ID.
1. Go to the [Azure portal](https://portal.azure.com/#home) and sign in with your Entra ID account.
1. In the left-hand navigation pane, select the Microsoft Entra ID service, and then select **App registrations**.
1. Click the **New registration** button.
1. In the **Register an application** pane, enter a name for the application.
1. In the **Supported account types** section, select the account types that can use the application.
1. In the **Redirect URI** section, select Web and enter `https://localhost/login/azuread`.
1. Click the **Register** button.
### Set up permissions for the application
1. In the overview pane, look for **API permissions** section and select **Add a permission**.
1. In the **Request API permissions** pane, select **Microsoft Graph**, and click **Application permissions**.
1. In the **Select permissions** pane, under the **GroupMember** section, select **GroupMember.Read.All**.
1. In the **Select permissions** pane, under the **User** section, select **User.Read.All**.
1. Click the **Add permissions** button at the bottom of the page.
1. In the **Request API permissions** pane, select **Microsoft Graph**, and click **Delegated permissions**.
1. In the **Select permissions** pane, under the **User** section, select **User.Read**.
1. Click the **Add permissions** button at the bottom of the page.
1. In the **API permissions** section, select **Grant admin consent for <your-organization>**.
The following table shows what the permissions look like from the Entra ID portal:
| Permissions name | Type | Admin consent required | Status |
| ---------------- | ----------- | ---------------------- | ------- |
| `GroupMember.Read.All` | Application | Yes | Granted |
| `User.Read` | Delegated | No | Granted |
| `User.Read.All` | Application | Yes | Granted |
{{< figure src="/media/docs/grafana/saml/graph-api-app-permissions.png" caption="Screen shot of the permissions listed in Entra ID for the App registration" >}}

@ -0,0 +1,51 @@
---
aliases:
- ../../../../saml/index/
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
- cloud
- enterprise
menuTitle: Configure SAML with Okta
title: Configure SAML authentication with Okta
weight: 580
---
# Configure SAML with Okta
Grafana supports user authentication through Okta, which is useful when you want your users to access Grafana using single sign on. This guide walks you through the steps of configuring SAML authentication in Grafana with [Okta](https://okta.com/). You need to be an admin in your Okta organization to access the Admin Console and create a SAML integration. You also need permissions to edit the Grafana configuration file and restart the Grafana server.
## Before you begin
- To configure SAML integration with Okta, create an app integration inside the Okta organization first. [Add app integration in Okta](https://help.okta.com/en/prod/Content/Topics/Apps/apps-overview-add-apps.htm)
- Ensure you have permission to administer SAML authentication. For more information about roles and permissions in Grafana, refer to [Roles and permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
## Set up SAML with Okta
1. Log in to the [Okta portal](https://login.okta.com/).
1. Go to the Admin Console in your Okta organization by clicking **Admin** in the upper-right corner. If you are in the Developer Console, then click **Developer Console** in the upper-left corner and then click **Classic UI** to switch over to the Admin Console.
1. In the Admin Console, navigate to **Applications** > **Applications**.
1. Click **Create App Integration** to start the Application Integration Wizard.
1. Choose **SAML 2.0** as the **Sign-in method**.
1. Click **Create**.
1. On the **General Settings** tab, enter a name for your Grafana integration. You can also upload a logo.
1. On the **Configure SAML** tab, enter the SAML information related to your Grafana instance:
- In the **Single sign on URL** field, use the `/saml/acs` endpoint URL of your Grafana instance, for example, `https://grafana.example.com/saml/acs`.
- In the **Audience URI (SP Entity ID)** field, use the `/saml/metadata` endpoint URL, by default it is the `/saml/metadata` endpoint of your Grafana instance (for example `https://example.grafana.com/saml/metadata`). This could be configured differently, but the value here must match the `entity_id` setting of the SAML settings of Grafana.
- Leave the default values for **Name ID format** and **Application username**.
{{< admonition type="note" >}}
If you plan to enable SAML Single Logout, consider setting the **Name ID format** to `EmailAddress` or `Persistent`. This must match the `name_id_format` setting of the Grafana instance.
{{< /admonition >}}
- In the **ATTRIBUTE STATEMENTS (OPTIONAL)** section, enter the SAML attributes to be shared with Grafana. The attribute names in Okta need to match exactly what is defined within Grafana, for example:
| Attribute name (in Grafana) | Name and value (in Okta profile) | Grafana configuration (under `auth.saml`) |
| --------------------------- | ---------------------------------------------------- | ----------------------------------------- |
| Login | Login - `user.login` | `assertion_attribute_login = Login` |
| Email | Email - `user.email` | `assertion_attribute_email = Email` |
| DisplayName | DisplayName - `user.firstName + " " + user.lastName` | `assertion_attribute_name = DisplayName` |
- In the **GROUP ATTRIBUTE STATEMENTS (OPTIONAL)** section, enter a group attribute name (for example, `Group`; ensure it matches the `assertion_attribute_groups` setting in Grafana) and set the filter to `Matches regex .*` to return all user groups.
1. Click **Next**.
1. On the final Feedback tab, fill out the form and then click **Finish**.

@ -1,797 +0,0 @@
---
aliases:
- ../../../auth/saml/
- ../../../enterprise/configure-saml/
- ../../../enterprise/saml/
- ../../../enterprise/saml/about-saml/
- ../../../enterprise/saml/configure-saml/
- ../../../enterprise/saml/enable-saml/
- ../../../enterprise/saml/set-up-saml-with-okta/
- ../../../enterprise/saml/troubleshoot-saml/
description: Learn how to configure SAML authentication in Grafana's configuration
file.
labels:
products:
- cloud
- enterprise
menuTitle: SAML
title: Configure SAML authentication using the configuration file
weight: 500
---
# Configure SAML authentication using the configuration file
{{< admonition type="note" >}}
Available in [Grafana Enterprise](/docs/grafana/<GRAFANA_VERSION>/introduction/grafana-enterprise/) and [Grafana Cloud](/docs/grafana-cloud).
{{< /admonition >}}
SAML authentication integration allows your Grafana users to log in by using an external SAML 2.0 Identity Provider (IdP). To enable this, Grafana becomes a Service Provider (SP) in the authentication flow, interacting with the IdP to exchange user information.
You can configure SAML authentication in Grafana through one of the following methods:
- The Grafana configuration file
- The API (refer to [SSO Settings API](/docs/grafana/<GRAFANA_VERSION>/developers/http_api/sso-settings/))
- The user interface (refer to [Configure SAML authentication using the Grafana user interface](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/saml-ui/))
- The Terraform provider (refer to [Terraform docs](https://registry.terraform.io/providers/grafana/grafana/<GRAFANA_VERSION>/docs/resources/sso_settings))
{{< admonition type="note" >}}
The API and Terraform support are available in Public Preview in Grafana v11.1 behind the `ssoSettingsSAML` feature toggle. You must also enable the `ssoSettingsApi` flag.
{{< /admonition >}}
All methods offer the same configuration options. However, if you want to keep all of Grafana authentication settings in one place, use the Grafana configuration file or the Terraform provider. If you are a Grafana Cloud user, you do not have access to Grafana configuration file. Instead, configure SAML through the other methods.
{{< admonition type="note" >}}
Configuration in the API takes precedence over the configuration in the Grafana configuration file. SAML settings from the API will override any SAML configuration set in the Grafana configuration file.
{{< /admonition >}}
## Supported SAML
The following indicate what Grafana supports.
### Bindings
Grafana supports the following SAML 2.0 bindings:
- From the Service Provider (SP) to the Identity Provider (IdP):
- `HTTP-POST` binding
- `HTTP-Redirect` binding
- From the Identity Provider (IdP) to the Service Provider (SP):
- `HTTP-POST` binding
### Security
Grafana supports signed and encrypted assertions, and does _not_ support encrypted requests.
### Initiation
Grafana supports:
- SP-initiated requests
- IdP-initiated requests
By default, SP-initiated requests are enabled. For instructions on how to enable IdP-initiated logins, refer to [IdP-initiated Single Sign-On (SSO)](#idp-initiated-single-sign-on-sso).
{{< admonition type="note" >}}
It's possible to set up Grafana with SAML authentication using Azure AD. However, if an Azure AD user belongs to more than 150 groups, a Graph API endpoint is used instead.
Grafana versions 11.1 and below do not support fetching the groups from the Graph API endpoint. As a result, users with more than 150 groups will not be able to retrieve their groups. Instead, it's recommended that you use OIDC/OAuth workflows.
As of Grafana 11.2, the SAML integration offers a mechanism to retrieve user groups from the Graph API.
Related links:
- [Azure AD SAML limitations](https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference#groups-overage-claim)
- [Set up SAML with Azure AD](#set-up-saml-with-azure-ad)
- [Configure a Graph API application in Azure AD](#configure-a-graph-api-application-in-azure-ad)
{{< /admonition >}}
### Edit SAML options in the Grafana config file
1. In the `[auth.saml]` section in the Grafana configuration file, set [`enabled`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#enabled-3) to `true`.
1. Optionally, configure the [certificate and private key](#certificate-and-private-key).
1. On the Okta application page where you have been redirected after application created, navigate to the **Sign On** tab and find **Identity Provider metadata** link in the **Settings** section.
1. Set the [`idp_metadata_url`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#idp_metadata_url) to the URL obtained from the previous step. The URL should look like `https://<your-org-id>.okta.com/app/<application-id>/sso/saml/metadata`.
1. Set the following options to the attribute names configured at **step 10** of the SAML integration setup. You can find these attributes on the **General** tab of the application page (**ATTRIBUTE STATEMENTS** and **GROUP ATTRIBUTE STATEMENTS** in the **SAML Settings** section).
- [`assertion_attribute_login`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_login)
- [`assertion_attribute_email`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_email)
- [`assertion_attribute_name`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_name)
- [`assertion_attribute_groups`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_groups)
1. Optionally, set the `name` parameter in the `[auth.saml]` section in the Grafana configuration file. This parameter replaces SAML in the Grafana user interface in locations such as the sign-in button.
1. Save the configuration file and then restart the Grafana server.
When you're finished, the Grafana configuration might look like this example:
```ini
[server]
root_url = https://grafana.example.com
[auth.saml]
enabled = true
name = My IdP
auto_login = false
private_key_path = "/path/to/private_key.pem"
certificate_path = "/path/to/certificate.cert"
idp_metadata_url = "https://my-org.okta.com/app/my-application/sso/saml/metadata"
assertion_attribute_name = DisplayName
assertion_attribute_login = Login
assertion_attribute_email = Email
assertion_attribute_groups = Group
```
## Enable SAML authentication in Grafana
To use the SAML integration, in the `auth.saml` section of in the `grafana.ini` or `custom.ini` file, set `enabled` to `true`.
Refer to [Configuration](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/) for more information about configuring Grafana.
## Additional configuration for HTTP-Post binding
If multiple bindings are supported for SAML Single Sign-On (SSO) by the Identity Provider (IdP), Grafana will use the `HTTP-Redirect` binding by default. If the IdP only supports the `HTTP-Post binding` then updating the `content_security_policy_template` (in case `content_security_policy = true`) and `content_security_policy_report_only_template` (in case `content_security_policy_report_only = true`) might be required to allow Grafana to initiate a POST request to the IdP. These settings are used to define the [Content Security Policy (CSP)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy) headers that are sent by Grafana.
To allow Grafana to initiate a POST request to the IdP, update the `content_security_policy_template` and `content_security_policy_report_only_template` settings in the Grafana configuration file and add the IdP's domain to the `form-action` directive. By default, the `form-action` directive is set to `self` which only allows POST requests to the same domain as Grafana. To allow POST requests to the IdP's domain, update the `form-action` directive to include the IdP's domain, for example: `form-action 'self' https://idp.example.com`.
{{< admonition type="note" >}}
For Grafana Cloud instances, please contact Grafana Support to update the `content_security_policy_template` and `content_security_policy_report_only_template` settings of your Grafana instance. Please provide the metadata URL/file of your IdP.
{{< /admonition >}}
## Certificate and private key
Commonly, the certificate and key are embedded in the [IDP metadata](#configure-the-saml-toolkit-application-endpoints) and refreshed as needed by Grafana automatically. However, if your IdP expects signed requests, you must supply a certificate and private key.
The SAML SSO standard uses asymmetric encryption to exchange information between the SP (Grafana) and the IdP. To perform such encryption, you need a public part and a private part. In this case, the X.509 certificate provides the public part, while the private key provides the private part. The private key needs to be issued in a [PKCS#8](https://en.wikipedia.org/wiki/PKCS_8) format.
If you are directly supplying the certificate and key, Grafana supports two ways of specifying both the `certificate` and `private_key`:
- Without a suffix (`certificate` or `private_key`), the configuration assumes you've supplied the base64-encoded file contents.
- With the `_path` suffix (`certificate_path` or `private_key_path`), then Grafana treats the value entered as a file path and attempts to read the file from the file system.
{{< admonition type="note" >}}
You can only use one form of each configuration option. Using multiple forms, such as both `certificate` and `certificate_path`, results in an error.
{{< /admonition >}}
Always work with your company's security team on setting up certificates and private keys. If you need to generate them yourself (such as in the short term, for testing purposes, and so on), use the following example to generate your certificate and private key, including the step of ensuring that the key is generated with the [PKCS#8](https://en.wikipedia.org/wiki/PKCS_8) format.
### Example of private key generation for SAML authentication
An example of how to generate a self-signed certificate and private key that's valid for one year:
```sh
$ openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes
```
Base64-encode the cert.pem and key.pem files:
(-w0 switch is not needed on Mac, only for Linux)
```sh
$ base64 -i key.pem -o key.pem.base64
$ base64 -i cert.pem -o cert.pem.base64
```
The base64-encoded values (`key.pem.base64, cert.pem.base64` files) are then used for certificate and private key.
The key you provide should look like:
```
-----BEGIN PRIVATE KEY-----
...
...
-----END PRIVATE KEY-----
```
## Set up SAML with Azure AD
Grafana supports user authentication through Azure AD, which is useful when you want users to access Grafana using single sign-on. This topic shows you how to configure SAML authentication in Grafana with [Azure AD](https://azure.microsoft.com/en-us/services/active-directory/).
**Before you begin**
Ensure you have permission to administer SAML authentication. For more information about roles and permissions in Grafana, refer to [Roles and permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
Learn about the [limitations of Azure AD SAML](https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference#groups-overage-claim) integration.
Configure SAML integration with Azure AD, [creating an Enterprise Application](#add-microsoft-entra-saml-toolkit-from-the-gallery) inside the Azure AD organization first and then [enable single sign-on](#configure-the-saml-toolkit-application-endpoints).
If you have users that belong to more than 150 groups, configure a registered application to provide an Azure Graph API to retrieve the groups. Refer to [Setup Azure AD Graph API applications](#configure-a-graph-api-application-in-azure-ad).
### Generate self-signed certificates
Azure AD requires a certificate to sign the SAML requests. You can generate a self-signed certificate using the following command:
```sh
$ openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes
```
This will generate a `key.pem` and `cert.pem` file that you can use for the `private_key_path` and `certificate_path` configuration options.
### Add Microsoft Entra SAML Toolkit from the gallery
> Taken from https://learn.microsoft.com/en-us/entra/identity/saas-apps/saml-toolkit-tutorial#add-microsoft-entra-saml-toolkit-from-the-gallery
1. Go to the [Azure portal](https://portal.azure.com/#home) and sign in with your Azure AD account.
1. Search for **Enterprise Applications**.
1. In the **Enterprise applications** pane, select **New application**.
1. In the search box, enter **SAML Toolkit**, and then select the **Microsoft Entra SAML Toolkit** from the results panel.
1. Add a descriptive name and select **Create**.
### Configure the SAML Toolkit application endpoints
In order to validate Azure AD users with Grafana, you need to configure the SAML Toolkit application endpoints by creating a new SAML integration in the Azure AD organization.
> For the following configuration, we will use `https://localhost` as the Grafana URL. Replace it with your Grafana URL.
1. In the **SAML Toolkit application**, select **Set up single sign-on**.
1. In the **Single sign-on** pane, select **SAML**.
1. In the Set up **Single Sign-On with SAML** pane, select the pencil icon for **Basic SAML Configuration** to edit the settings.
1. In the **Basic SAML Configuration** pane, click on the **Edit** button and update the following fields:
- In the **Identifier (Entity ID)** field, enter `https://localhost/saml/metadata`.
- In the **Reply URL (Assertion Consumer Service URL)** field, enter `https://localhost/saml/acs`.
- In the **Sign on URL** field, enter `https://localhost`.
- In the **Relay State** field, enter `https://localhost`.
- In the **Logout URL** field, enter `https://localhost/saml/slo`.
1. Select **Save**.
1. At the **SAML Certificate** section, copy the **App Federation Metadata Url**.
- Use this URL in the `idp_metadata_url` field in the `custom.ini` file.
### Configure a Graph API application in Azure AD
While an Azure AD tenant can be configured in Grafana via SAML, some additional information is only accessible via the Graph API. To retrieve this information, create a new application in Azure AD and grant it the necessary permissions.
> [Azure AD SAML limitations](https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference#groups-overage-claim)
> For the following configuration, the URL `https://localhost` will be used as the Grafana URL. Replace it with your Grafana instance URL.
#### Create a new Application registration
This app registration will be used as a Service Account to retrieve more information about the user from the Azure AD.
1. Go to the [Azure portal](https://portal.azure.com/#home) and sign in with your Azure AD account.
1. In the left-hand navigation pane, select the Azure Active Directory service, and then select **App registrations**.
1. Click the **New registration** button.
1. In the **Register an application** pane, enter a name for the application.
1. In the **Supported account types** section, select the account types that can use the application.
1. In the **Redirect URI** section, select Web and enter `https://localhost/login/azuread`.
1. Click the **Register** button.
#### Set up permissions for the application
1. In the overview pane, look for **API permissions** section and select **Add a permission**.
1. In the **Request API permissions** pane, select **Microsoft Graph**, and click **Application permissions**.
1. In the **Select permissions** pane, under the **GroupMember** section, select **GroupMember.Read.All**.
1. In the **Select permissions** pane, under the **User** section, select **User.Read.All**.
1. Click the **Add permissions** button at the bottom of the page.
1. In the **Request API permissions** pane, select **Microsoft Graph**, and click **Delegated permissions**.
1. In the **Select permissions** pane, under the **User** section, select **User.Read**.
1. Click the **Add permissions** button at the bottom of the page.
1. In the **API permissions** section, select **Grant admin consent for <your-organization>**.
The following table shows what the permissions look like from the Azure AD portal:
| Permissions name | Type | Admin consent required | Status |
| ---------------- | ----------- | ---------------------- | ------- |
| `GroupMember.Read.All` | Application | Yes | Granted |
| `User.Read` | Delegated | No | Granted |
| `User.Read.All` | Application | Yes | Granted |
{{< figure src="/media/docs/grafana/saml/graph-api-app-permissions.png" caption="Screen shot of the permissions listed in Azure AD for the App registration" >}}
#### Generate a client secret
1. In the **Overview** pane, select **Certificates & secrets**.
1. Select **New client secret**.
1. In the **Add a client secret** pane, enter a description for the secret.
1. Set the expiration date for the secret.
1. Select **Add**.
1. Copy the value of the secret. This value is used in the `client_secret` field in the `custom.ini` file.
## Set up SAML with Okta
Grafana supports user authentication through Okta, which is useful when you want your users to access Grafana using single sign-on. This guide walks you through the steps of configuring SAML authentication in Grafana with [Okta](https://okta.com/). You need to be an admin in your Okta organization to access the Admin Console and create a SAML integration. You also need permissions to edit the Grafana configuration file and restart the Grafana server.
**Before you begin:**
- To configure SAML integration with Okta, create an app integration inside the Okta organization first. [Add app integration in Okta](https://help.okta.com/en/prod/Content/Topics/Apps/apps-overview-add-apps.htm)
- Ensure you have permission to administer SAML authentication. For more information about roles and permissions in Grafana, refer to [Roles and permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
**To set up SAML with Okta:**
1. Log in to the [Okta portal](https://login.okta.com/).
1. Go to the Admin Console in your Okta organization by clicking **Admin** in the upper-right corner. If you are in the Developer Console, then click **Developer Console** in the upper-left corner and then click **Classic UI** to switch over to the Admin Console.
1. In the Admin Console, navigate to **Applications** > **Applications**.
1. Click **Create App Integration** to start the Application Integration Wizard.
1. Choose **SAML 2.0** as the **Sign-in method**.
1. Click **Create**.
1. On the **General Settings** tab, enter a name for your Grafana integration. You can also upload a logo.
1. On the **Configure SAML** tab, enter the SAML information related to your Grafana instance:
- In the **Single sign on URL** field, use the `/saml/acs` endpoint URL of your Grafana instance, for example, `https://grafana.example.com/saml/acs`.
- In the **Audience URI (SP Entity ID)** field, use the `/saml/metadata` endpoint URL, by default it is the `/saml/metadata` endpoint of your Grafana instance (for example `https://example.grafana.com/saml/metadata`). This could be configured differently, but the value here must match the `entity_id` setting of the SAML settings of Grafana.
- Leave the default values for **Name ID format** and **Application username**.
{{% admonition type="note" %}}
If you plan to enable SAML Single Logout, consider setting the **Name ID format** to `EmailAddress` or `Persistent`. This must match the `name_id_format` setting of the Grafana instance.
{{% /admonition %}}
- In the **ATTRIBUTE STATEMENTS (OPTIONAL)** section, enter the SAML attributes to be shared with Grafana. The attribute names in Okta need to match exactly what is defined within Grafana, for example:
| Attribute name (in Grafana) | Name and value (in Okta profile) | Grafana configuration (under `auth.saml`) |
| --------------------------- | ---------------------------------------------------- | ----------------------------------------- |
| Login | Login - `user.login` | `assertion_attribute_login = Login` |
| Email | Email - `user.email` | `assertion_attribute_email = Email` |
| DisplayName | DisplayName - `user.firstName + " " + user.lastName` | `assertion_attribute_name = DisplayName` |
- In the **GROUP ATTRIBUTE STATEMENTS (OPTIONAL)** section, enter a group attribute name (for example, `Group`, ensure it matches the `assertion_attribute_groups` setting in Grafana) and set filter to `Matches regex .*` to return all user groups.
1. Click **Next**.
1. On the final Feedback tab, fill out the form and then click **Finish**.
### Signature algorithm
The SAML standard recommends using a digital signature for some types of messages, like authentication or logout requests. If the `signature_algorithm` option is configured, Grafana will put a digital signature into SAML requests. Supported signature types are `rsa-sha1`, `rsa-sha256`, `rsa-sha512`. This option should match your IdP configuration, otherwise, signature validation will fail. Grafana uses key and certificate configured with `private_key` and `certificate` options for signing SAML requests.
### Specify user's Name ID
The `name_id_format` configuration field specifies the format of the NameID element in the SAML assertion.
By default, this is set to `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` and does not need to be specified in the configuration file.
The following list includes valid configuration field values:
| `name_id_format` value in the configuration file or Terraform | `Name identifier format` on the UI |
| ------------------------------------------------------------- | ---------------------------------- |
| `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` | Default |
| `urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified` | Unspecified |
| `urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress` | Email address |
| `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent` | Persistent |
| `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` | Transient |
### IdP metadata
You also need to define the public part of the IdP for message verification. The SAML IdP metadata XML defines where and how Grafana exchanges user information.
Grafana supports three ways of specifying the IdP metadata.
- Without a suffix `idp_metadata`, Grafana assumes base64-encoded XML file contents.
- With the `_path` suffix, Grafana assumes a file path and attempts to read the file from the file system.
- With the `_url` suffix, Grafana assumes a URL and attempts to load the metadata from the given location.
### Maximum issue delay
Prevents SAML response replay attacks and internal clock skews between the SP (Grafana) and the IdP. You can set a maximum amount of time between the IdP issuing a response and the SP (Grafana) processing it.
The configuration option is specified as a duration, such as `max_issue_delay = 90s` or `max_issue_delay = 1h`.
### Metadata valid duration
SP metadata is likely to expire at some point, perhaps due to a certificate rotation or change of location binding. Grafana allows you to specify for how long the metadata should be valid. Leveraging the `validUntil` field, you can tell consumers until when your metadata is going to be valid. The duration is computed by adding the duration to the current time.
The configuration option is specified as a duration, such as `metadata_valid_duration = 48h`.
### Identity provider (IdP) registration
For the SAML integration to work correctly, you need to make the IdP aware of the SP.
The integration provides two key endpoints as part of Grafana:
- The `/saml/metadata` endpoint, which contains the SP metadata. You can either download and upload it manually, or you make the IdP request it directly from the endpoint. Some providers name it Identifier or Entity ID.
- The `/saml/acs` endpoint, which is intended to receive the ACS (Assertion Consumer Service) callback. Some providers name it SSO URL or Reply URL.
### IdP-initiated Single Sign-On (SSO)
By default, Grafana allows only service provider (SP) initiated logins (when the user logs in with SAML via Grafana’s login page). If you want users to log in to Grafana directly from your identity provider (IdP), set the `allow_idp_initiated` configuration option to `true` and configure `relay_state` with the same value specified in the IdP configuration.
IdP-initiated SSO has some security risks, so make sure you understand the risks before enabling this feature. When using IdP-initiated SSO, Grafana receives unsolicited SAML requests and can't verify that the login flow was started by the user. This makes it hard to detect whether the SAML message has been stolen or replaced. Because of this, IdP-initiated SSO is vulnerable to login cross-site request forgery (CSRF) and man-in-the-middle (MITM) attacks. We do not recommend using IdP-initiated SSO; keep it disabled whenever possible.
### Single logout
SAML's single logout feature allows users to log out from all applications associated with the current IdP session established via SAML SSO. If the `single_logout` option is set to `true` and a user logs out, Grafana requests IdP to end the user session which in turn triggers logout from all other applications the user is logged into using the same IdP session (applications should support single logout). Conversely, if another application connected to the same IdP logs out using single logout, Grafana receives a logout request from IdP and ends the user session.
`HTTP-Redirect` and `HTTP-POST` bindings are supported for single logout.
When using `HTTP-Redirect` bindings the query should include a request signature.
#### Configure single logout
To configure single logout in Grafana:
1. Enable the `single_logout` option in your configuration.
2. Ensure the `name_id_format` matches the format your IdP expects (e.g., `urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress`).
3. Enable the `improvedExternalSessionHandlingSAML` feature toggle for complete NameID and SessionIndex support (Grafana v11.5+).
4. After enabling the feature, users may need to log in again to establish a new session.
#### `NameID` and `SessionIndex` changes in Grafana v11.5
Before Grafana version 11.5, the `Login` attribute value (extracted from the SAML assertion using the `assertion_attribute_login` configuration) was used as the `NameID` in the logout request. This could cause issues with single logout if the `assertion_attribute_login` value differed from what the Identity Provider (IdP) expected.
Additionally, Grafana did not support IdP sessions and could not include the `SessionIndex` (a unique identifier for the user session on the IdP side) value in the logout request. This could result in issues such as the user being logged out from all of their applications/IdP sessions when logging out from Grafana.
Starting from Grafana version 11.5, Grafana uses the `NameID` from the SAML assertion to create the logout request. If the `NameID` is not present in the assertion, Grafana defaults to using the user's `Login` attribute. Additionally, Grafana supports including the `SessionIndex` in the logout request if it is provided in the SAML assertion by the IdP.
{{% admonition type="note" %}}
These improvements are available in public preview behind the `improvedExternalSessionHandlingSAML` feature toggle, starting from Grafana v11.5. To enable it, refer to the [Configure feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/)
{{% /admonition %}}
### Assertion mapping
During the SAML SSO authentication flow, Grafana receives the ACS callback. The callback contains all the relevant information of the user under authentication embedded in the SAML response. Grafana parses the response to create (or update) the user within its internal database.
For Grafana to map the user information, it looks at the individual attributes within the assertion. You can think of these attributes as Key/Value pairs (although, they contain more information than that).
Grafana provides configuration options that let you modify which keys to look at for these values. The data we need to create the user in Grafana is Name, Login handle, and email.
#### The `assertion_attribute_name` option
`assertion_attribute_name` is a special assertion mapping that can either be a simple key, indicating a mapping to a single assertion attribute on the SAML response, or a complex template with variables using the `$__saml{<attribute>}` syntax. If this property is misconfigured, Grafana will log an error message on startup and disallow SAML sign-ins. Grafana will also log errors after a login attempt if a variable in the template is missing from the SAML response.
**Examples**
```ini
#plain string mapping
assertion_attribute_name = displayName
```
```ini
#template mapping
assertion_attribute_name = $__saml{firstName} $__saml{lastName}
```
### Allow new user signups
By default, new Grafana users using SAML authentication will have an account created for them automatically. To decouple authentication and account creation and ensure only users with existing accounts can log in with SAML, set the `allow_sign_up` option to false.
### Configure automatic login
Set the `auto_login` option to true to attempt login automatically, skipping the login screen.
This setting is ignored if multiple auth providers are configured to use auto login.
```ini
auto_login = true
```
### Configure team sync
To use SAML Team sync, set [`assertion_attribute_groups`](../../../configure-grafana/enterprise-configuration#assertion_attribute_groups) to the attribute name where you store user groups. Then Grafana will use attribute values extracted from SAML assertion to add user into the groups with the same name configured on the External group sync tab.
{{% admonition type="warning" %}}
Grafana requires the SAML groups attribute to be configured with distinct `AttributeValue` elements for each group. Do not include multiple groups within a single `AttributeValue` delimited by a comma or any other character. Failure to do so will prevent correct group parsing. Example:
```xml
<saml2:Attribute ...>
<saml2:AttributeValue ...>admins_group</saml2:AttributeValue>
<saml2:AttributeValue ...>division_1</saml2:AttributeValue>
</saml2:Attribute>
```
{{% /admonition %}}
{{% admonition type="note" %}}
Team sync allows you to sync users from SAML to Grafana teams. It does not automatically create teams in Grafana. You must create the teams in Grafana before you can use this feature.
{{% /admonition %}}
Given the following partial SAML assertion:
```xml
<saml2:Attribute
Name="groups"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">
<saml2:AttributeValue
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="xs:string">admins_group
</saml2:AttributeValue>
<saml2:AttributeValue
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="xs:string">division_1
</saml2:AttributeValue>
</saml2:Attribute>
```
The configuration would look like this:
```ini
[auth.saml]
# ...
assertion_attribute_groups = groups
```
The following `External Group ID`s would be valid for input in the desired team's _External group sync_ tab:
- `admins_group`
- `division_1`
[Learn more about Team Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-team-sync/)
### Configure role sync
Role sync allows you to map user roles from an identity provider to Grafana. To enable role sync, configure role attribute and possible values for the Editor, Admin, and Grafana Admin roles. For more information about user roles, refer to [Roles and permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
1. In the configuration file, set [`assertion_attribute_role`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_role) option to the attribute name where the role information will be extracted from.
1. Set the [`role_values_none`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_none) option to the values mapped to the `None` role.
1. Set the [`role_values_viewer`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_viewer) option to the values mapped to the `Viewer` role.
1. Set the [`role_values_editor`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_editor) option to the values mapped to the `Editor` role.
1. Set the [`role_values_admin`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_admin) option to the values mapped to the organization `Admin` role.
1. Set the [`role_values_grafana_admin`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#role_values_grafana_admin) option to the values mapped to the `Grafana Admin` role.
If a user role doesn't match any of configured values, then the role specified by the `auto_assign_org_role` configuration option will be assigned. If the `auto_assign_org_role` field isn't set then the user role will default to `Viewer`.
For more information about roles and permissions in Grafana, refer to [Roles and permissions](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/).
Example configuration:
```ini
[auth.saml]
assertion_attribute_role = role
role_values_none = none
role_values_viewer = external
role_values_editor = editor, developer
role_values_admin = admin, operator
role_values_grafana_admin = superadmin
```
**Important**: When role sync is configured, any changes of user roles and organization membership made manually in Grafana will be overwritten on next user login. Assign user organizations and roles in the IdP instead.
If you don't want user organizations and roles to be synchronized with the IdP, you can use the `skip_org_role_sync` configuration option.
Example configuration:
```ini
[auth.saml]
skip_org_role_sync = true
```
### Configure organization mapping
Organization mapping allows you to assign users to a particular organization in Grafana depending on an attribute value obtained from the identity provider.
1. In configuration file, set [`assertion_attribute_org`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#assertion_attribute_org) to the attribute name you store organization info in. This attribute can be an array if you want a user to be in multiple organizations.
1. Set [`org_mapping`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#org_mapping) option to the comma-separated list of `Organization:OrgId` pairs to map organization from IdP to Grafana organization specified by ID. If you want users to have different roles in multiple organizations, you can set this option to a comma-separated list of `Organization:OrgId:Role` mappings.
For example, the following configuration assigns users from the `Engineering` organization to the Grafana organization with ID `2` as Editor and users from `Sales` to the org with ID `3` as Admin, based on the `Org` assertion attribute value:
```ini
[auth.saml]
assertion_attribute_org = Org
org_mapping = Engineering:2:Editor, Sales:3:Admin
```
Starting from Grafana version 11.5, you can use the organization name instead of the organization ID in the `org_mapping` option. Ensure that the organization name you configure matches exactly with the organization name in Grafana, as it is case-sensitive. If the organization name is not found in Grafana, the mapping will be ignored. If the external organization or the organization name contains spaces, use the JSON syntax for the `org_mapping` option:
```ini
org_mapping = ["Org 1:2:Editor", "ExternalOrg:ACME Corp.:Admin"]
```
If one of the mappings contains a `:`, use the JSON syntax and escape the `:` with a backslash:
```ini
# Assign users from "External:Admin" to the organization with name "ACME Corp" as Admin
org_mapping = ["External\:Admin:ACME Corp:Admin"]
```
For example, to assign users from `Engineering` organization to the Grafana organization with name `ACME Corp` as Editor and users from `Sales` to the org with id `3` as Admin, based on `Org` assertion attribute value:
```ini
[auth.saml]
assertion_attribute_org = Org
org_mapping = ["Engineering:ACME Corp:Editor", "Sales:3:Admin"]
```
You can specify multiple organizations both for the IdP and Grafana:
- `org_mapping = Engineering:2, Sales:2` to map users from `Engineering` and `Sales` to `2` in Grafana.
- `org_mapping = Engineering:2, Engineering:3` to assign `Engineering` to both `2` and `3` in Grafana.
You can use `*` as the SAML Organization if you want all your users to be in some Grafana organizations with a default role:
- `org_mapping = *:2:Editor` to map all users to `2` in Grafana as Editors.
You can use `*` as the Grafana organization in the mapping if you want all users from a given SAML Organization to be added to all existing Grafana organizations.
- `org_mapping = Engineering:*` to map users from `Engineering` to all existing Grafana organizations.
- `org_mapping = Administration:*:Admin` to map users from `Administration` to all existing Grafana organizations as Admins.
### Configure allowed organizations
With the [`allowed_organizations`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/enterprise-configuration/#allowed_organizations) option you can specify a list of organizations where the user must be a member of at least one of them to be able to log in to Grafana.
To put values containing spaces in the list, use the following JSON syntax:
```ini
allowed_organizations = ["org 1", "second org"]
```
### Example SAML configuration
```ini
[auth.saml]
enabled = true
auto_login = false
certificate_path = "/path/to/certificate.cert"
private_key_path = "/path/to/private_key.pem"
idp_metadata_path = "/my/metadata.xml"
max_issue_delay = 90s
metadata_valid_duration = 48h
assertion_attribute_name = displayName
assertion_attribute_login = mail
assertion_attribute_email = mail
assertion_attribute_groups = Group
assertion_attribute_role = Role
assertion_attribute_org = Org
role_values_viewer = external
role_values_editor = editor, developer
role_values_admin = admin, operator
role_values_grafana_admin = superadmin
org_mapping = Engineering:2:Editor, Engineering:3:Viewer, Sales:3:Editor, *:1:Editor
allowed_organizations = Engineering, Sales
```
### Example SAML configuration in Terraform
{{< admonition type="note" >}}
Available in Public Preview in Grafana v11.1 behind the `ssoSettingsSAML` feature toggle. Supported in the Terraform provider since v2.17.0.
{{< /admonition >}}
```terraform
resource "grafana_sso_settings" "saml_sso_settings" {
provider_name = "saml"
saml_settings {
name = "SAML"
auto_login = false
certificate_path = "/path/to/certificate.cert"
private_key_path = "/path/to/private_key.pem"
idp_metadata_path = "/my/metadata.xml"
max_issue_delay = "90s"
metadata_valid_duration = "48h"
assertion_attribute_name = "displayName"
assertion_attribute_login = "mail"
assertion_attribute_email = "mail"
assertion_attribute_groups = "Group"
assertion_attribute_role = "Role"
assertion_attribute_org = "Org"
role_values_editor = "editor, developer"
role_values_admin = "admin, operator"
role_values_grafana_admin = "superadmin"
org_mapping = "Engineering:2:Editor, Engineering:3:Viewer, Sales:3:Editor, *:1:Editor"
allowed_organizations = "Engineering, Sales"
}
}
```
Go to [Terraform Registry](https://registry.terraform.io/providers/grafana/grafana/<GRAFANA_VERSION>/docs/resources/sso_settings) for a complete reference on using the `grafana_sso_settings` resource.
## Troubleshoot SAML authentication in Grafana
To troubleshoot and get more log information, enable SAML debug logging in the configuration file. Refer to [Configuration](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#filters) for more information.
```ini
[log]
filters = saml.auth:debug
```
## Troubleshooting
Following are common issues found in configuring SAML authentication in Grafana and how to resolve them.
### Infinite redirect loop / User gets redirected to the login page after successful login on the IdP side
If you experience an infinite redirect loop when `auto_login = true` or redirected to the login page after successful login, it is likely that the `grafana_session` cookie's SameSite setting is set to `Strict`. This setting prevents the `grafana_session` cookie from being sent to Grafana during cross-site requests. To resolve this issue, set the `security.cookie_samesite` option to `Lax` in the Grafana configuration file.
### SAML authentication fails with error:
- `asn1: structure error: tags don't match`
We only support one private key format: PKCS#8.
The keys may be in a different format (PKCS#1 or PKCS#12); in that case, it may be necessary to convert the private key format.
The following command creates a pkcs8 key file.
```bash
openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes
```
#### **Convert** the private key format to base64
The following command converts keys to base64 format.
Base64-encode the `cert.pem` and `key.pem` files:
(The `-w0` switch is not needed on Mac, only for Linux)
```sh
$ base64 -w0 key.pem > key.pem.base64
$ base64 -w0 cert.pem > cert.pem.base64
```
The base64-encoded values (`key.pem.base64, cert.pem.base64` files) are then used for certificate and private_key.
The keys you provide should look like:
```
-----BEGIN PRIVATE KEY-----
...
...
-----END PRIVATE KEY-----
```
### SAML login attempts fail with request response "origin not allowed"
When the user logs in using SAML and gets presented with "origin not allowed", the user might be issuing the login from an IdP (identity provider) service or the user is behind a reverse proxy. This potentially happens as Grafana's CSRF checks deem the requests to be invalid. For more information [CSRF](https://owasp.org/www-community/attacks/csrf).
To solve this issue, you can configure either the [`csrf_trusted_origins`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#csrf_trusted_origins) or [`csrf_additional_headers`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#csrf_additional_headers) option in the SAML configuration.
Example of a configuration file:
```ini
# config.ini
...
[security]
csrf_trusted_origins = https://grafana.example.com
csrf_additional_headers = X-Forwarded-Host
...
```
### SAML login attempts fail with request response "login session has expired"
Accessing the Grafana login page from a URL that is not the root URL of the
Grafana server can cause the instance to return the following error: "login session has expired".
If you are accessing Grafana through a proxy server, ensure that cookies are correctly
rewritten to the root URL of Grafana.
Cookies must be set on the same URL as the `root_url` of Grafana. This is normally the reverse proxy's domain/address.
Review the cookie settings in your proxy server configuration to ensure that cookies are
not being discarded.
Review the following settings in your Grafana configuration:
```ini
[security]
cookie_samesite = none
```
This setting should be set to `none` to allow Grafana session cookies to work correctly with redirects.
```ini
[security]
cookie_secure = true
```
Ensure `cookie_secure` is set to `true` so that cookies are only sent over HTTPS.
## Configure SAML authentication in Grafana
The table below describes all SAML configuration options. Continue reading below for details on specific options. Like any other Grafana configuration, you can apply these options as [environment variables](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#override-configuration-with-environment-variables).
| Setting | Required | Description | Default |
| ---------------------------------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `enabled` | No | Whether SAML authentication is allowed. | `false` |
| `name` | No | Name used to refer to the SAML authentication in the Grafana user interface. | `SAML` |
| `entity_id` | No | The entity ID of the service provider. This is the unique identifier of the service provider. | `https://{Grafana URL}/saml/metadata` |
| `single_logout` | No | Whether SAML Single Logout is enabled. | `false` |
| `allow_sign_up` | No | Whether to allow new Grafana user creation through SAML login. If set to `false`, then only existing Grafana users can log in with SAML. | `true` |
| `auto_login` | No | Whether SAML auto login is enabled. | `false` |
| `allow_idp_initiated` | No | Whether SAML IdP-initiated login is allowed. | `false` |
| `certificate` or `certificate_path` | Yes | Base64-encoded string or Path for the SP X.509 certificate. | |
| `private_key` or `private_key_path` | Yes | Base64-encoded string or Path for the SP private key. | |
| `signature_algorithm` | No | Signature algorithm used for signing requests to the IdP. Supported values are rsa-sha1, rsa-sha256, rsa-sha512. | |
| `idp_metadata`, `idp_metadata_path`, or `idp_metadata_url` | Yes | Base64-encoded string, Path or URL for the IdP SAML metadata XML. | |
| `max_issue_delay` | No | Maximum time allowed between the issuance of an AuthnRequest by the SP and the processing of the Response. | `90s` |
| `metadata_valid_duration` | No | Duration for which the SP metadata remains valid. | `48h` |
| `relay_state` | No | Relay state for IdP-initiated login. This should match the relay state configured in the IdP. | |
| `assertion_attribute_name` | No | Friendly name or name of the attribute within the SAML assertion to use as the user name. Alternatively, this can be a template with variables that match the names of attributes within the SAML assertion. | `displayName` |
| `assertion_attribute_login` | No | Friendly name or name of the attribute within the SAML assertion to use as the user login handle. | `mail` |
| `assertion_attribute_email` | No | Friendly name or name of the attribute within the SAML assertion to use as the user email. | `mail` |
| `assertion_attribute_groups` | No | Friendly name or name of the attribute within the SAML assertion to use as the user groups. | |
| `assertion_attribute_role` | No | Friendly name or name of the attribute within the SAML assertion to use as the user roles. | |
| `assertion_attribute_org`                                  | No       | Friendly name or name of the attribute within the SAML assertion to use as the user organization.                                                                                                            |                                                       |
| `allowed_organizations` | No | List of comma- or space-separated organizations. User should be a member of at least one organization to log in. | |
| `org_mapping` | No | List of comma- or space-separated Organization:OrgId:Role mappings. Organization can be `*` meaning "All users". Role is optional and can have the following values: `None`, `Viewer`, `Editor` or `Admin`. | |
| `role_values_none` | No | List of comma- or space-separated roles which will be mapped into the None role. | |
| `role_values_viewer` | No | List of comma- or space-separated roles which will be mapped into the Viewer role. | |
| `role_values_editor` | No | List of comma- or space-separated roles which will be mapped into the Editor role. | |
| `role_values_admin` | No | List of comma- or space-separated roles which will be mapped into the Admin role. | |
| `role_values_grafana_admin` | No | List of comma- or space-separated roles which will be mapped into the Grafana Admin (Super Admin) role. | |
| `skip_org_role_sync` | No | Whether to skip organization role synchronization. | `false` |
| `name_id_format` | No | Specifies the format of the requested NameID element in the SAML AuthnRequest. | `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` |
| `client_id` | No | Client ID of the IdP service application used to retrieve more information about the user from the IdP. (Microsoft Entra ID only) | |
| `client_secret` | No | Client secret of the IdP service application used to retrieve more information about the user from the IdP. (Microsoft Entra ID only) | |
| `token_url` | No | URL to retrieve the access token from the IdP. (Microsoft Entra ID only) | |
| `force_use_graph_api`                                      | No       | Whether to use the IdP service application to retrieve more information about the user from the IdP. (Microsoft Entra ID only)                                                                               | `false`                                               |

@ -0,0 +1,113 @@
---
aliases:
- ../../../../saml/
labels:
products:
- cloud
- enterprise
menuTitle: SAML configuration options
title: SAML configuration options
weight: 520
---
# SAML configuration options
This page provides a comprehensive guide to configuring SAML authentication in Grafana. You'll find detailed configuration examples, available settings, and their descriptions to help you set up and customize SAML authentication for your Grafana instance.
The table below describes all SAML configuration options. Continue reading below for details on specific options. Like any other Grafana configuration, you can apply these options as [environment variables](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#override-configuration-with-environment-variables).
| Setting | Required | Description | Default |
| ---------------------------------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
| `enabled` | No | Whether SAML authentication is allowed. | `false` |
| `name` | No | Name used to refer to the SAML authentication in the Grafana user interface. | `SAML` |
| `entity_id` | No | The entity ID of the service provider. This is the unique identifier of the service provider. | `https://{Grafana URL}/saml/metadata` |
| `single_logout` | No | Whether SAML Single Logout is enabled. | `false` |
| `allow_sign_up` | No | Whether to allow new Grafana user creation through SAML login. If set to `false`, then only existing Grafana users can log in with SAML. | `true` |
| `auto_login` | No | Whether SAML auto login is enabled. | `false` |
| `allow_idp_initiated` | No | Whether SAML IdP-initiated login is allowed. | `false` |
| `certificate` or `certificate_path` | Yes | Base64-encoded string or Path for the SP X.509 certificate. | |
| `private_key` or `private_key_path` | Yes | Base64-encoded string or Path for the SP private key. | |
| `signature_algorithm` | No | Signature algorithm used for signing requests to the IdP. Supported values are rsa-sha1, rsa-sha256, rsa-sha512. | |
| `idp_metadata`, `idp_metadata_path`, or `idp_metadata_url` | Yes | Base64-encoded string, Path or URL for the IdP SAML metadata XML. | |
| `max_issue_delay` | No | Maximum time allowed between the issuance of an AuthnRequest by the SP and the processing of the Response. | `90s` |
| `metadata_valid_duration` | No | Duration for which the SP metadata remains valid. | `48h` |
| `relay_state` | No | Relay state for IdP-initiated login. This should match the relay state configured in the IdP. | |
| `assertion_attribute_name` | No | Friendly name or name of the attribute within the SAML assertion to use as the user name. Alternatively, this can be a template with variables that match the names of attributes within the SAML assertion. | `displayName` |
| `assertion_attribute_login` | No | Friendly name or name of the attribute within the SAML assertion to use as the user login handle. | `mail` |
| `assertion_attribute_email` | No | Friendly name or name of the attribute within the SAML assertion to use as the user email. | `mail` |
| `assertion_attribute_groups` | No | Friendly name or name of the attribute within the SAML assertion to use as the user groups. | |
| `assertion_attribute_role` | No | Friendly name or name of the attribute within the SAML assertion to use as the user roles. | |
| `assertion_attribute_org`                                  | No       | Friendly name or name of the attribute within the SAML assertion to use as the user organization.                                                                                                            |                                                       |
| `allowed_organizations` | No | List of comma- or space-separated organizations. User should be a member of at least one organization to log in. | |
| `org_mapping` | No | List of comma- or space-separated Organization:OrgId:Role mappings. Organization can be `*` meaning "All users". Role is optional and can have the following values: `None`, `Viewer`, `Editor` or `Admin`. | |
| `role_values_none` | No | List of comma- or space-separated roles which will be mapped into the None role. | |
| `role_values_viewer` | No | List of comma- or space-separated roles which will be mapped into the Viewer role. | |
| `role_values_editor` | No | List of comma- or space-separated roles which will be mapped into the Editor role. | |
| `role_values_admin` | No | List of comma- or space-separated roles which will be mapped into the Admin role. | |
| `role_values_grafana_admin` | No | List of comma- or space-separated roles which will be mapped into the Grafana Admin (Super Admin) role. | |
| `skip_org_role_sync` | No | Whether to skip organization role synchronization. | `false` |
| `name_id_format` | No | Specifies the format of the requested NameID element in the SAML AuthnRequest. | `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` |
| `client_id` | No | Client ID of the IdP service application used to retrieve more information about the user from the IdP. (Microsoft Entra ID only) | |
| `client_secret` | No | Client secret of the IdP service application used to retrieve more information about the user from the IdP. (Microsoft Entra ID only) | |
| `token_url` | No | URL to retrieve the access token from the IdP. (Microsoft Entra ID only) | |
| `force_use_graph_api`                                      | No       | Whether to use the IdP service application to retrieve more information about the user from the IdP. (Microsoft Entra ID only)                                                                               | `false`                                               |
## Example SAML configuration
```ini
[auth.saml]
enabled = true
auto_login = false
certificate_path = "/path/to/certificate.cert"
private_key_path = "/path/to/private_key.pem"
idp_metadata_path = "/my/metadata.xml"
max_issue_delay = 90s
metadata_valid_duration = 48h
assertion_attribute_name = displayName
assertion_attribute_login = mail
assertion_attribute_email = mail
assertion_attribute_groups = Group
assertion_attribute_role = Role
assertion_attribute_org = Org
role_values_viewer = external
role_values_editor = editor, developer
role_values_admin = admin, operator
role_values_grafana_admin = superadmin
org_mapping = Engineering:2:Editor, Engineering:3:Viewer, Sales:3:Editor, *:1:Editor
allowed_organizations = Engineering, Sales
```
## Example SAML configuration in Terraform
{{< admonition type="note" >}}
Available in Public Preview in Grafana v11.1 behind the `ssoSettingsSAML` feature toggle. Supported in the Terraform provider since v2.17.0.
{{< /admonition >}}
```terraform
resource "grafana_sso_settings" "saml_sso_settings" {
provider_name = "saml"
saml_settings {
name = "SAML"
auto_login = false
certificate_path = "/path/to/certificate.cert"
private_key_path = "/path/to/private_key.pem"
idp_metadata_path = "/my/metadata.xml"
max_issue_delay = "90s"
metadata_valid_duration = "48h"
assertion_attribute_name = "displayName"
assertion_attribute_login = "mail"
assertion_attribute_email = "mail"
assertion_attribute_groups = "Group"
assertion_attribute_role = "Role"
assertion_attribute_org = "Org"
role_values_editor = "editor, developer"
role_values_admin = "admin, operator"
role_values_grafana_admin = "superadmin"
org_mapping = "Engineering:2:Editor, Engineering:3:Viewer, Sales:3:Editor, *:1:Editor"
allowed_organizations = "Engineering, Sales"
}
}
```
Go to [Terraform Registry](https://registry.terraform.io/providers/grafana/grafana/<GRAFANA_VERSION>/docs/resources/sso_settings) for a complete reference on using the `grafana_sso_settings` resource.

@ -1,4 +1,6 @@
---
aliases:
- ../../../../saml/saml-ui/
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
@ -6,16 +8,16 @@ labels:
- enterprise
menuTitle: SAML user interface
title: Configure SAML authentication using the Grafana user interface
weight: 600
weight: 510
---
# Configure SAML authentication using the Grafana user interface
{{% admonition type="note" %}}
Available in [Grafana Enterprise](../../../../introduction/grafana-enterprise/) version 10.0 and later, and [Grafana Cloud Pro and Advanced](/docs/grafana-cloud/).
{{% /admonition %}}
{{< admonition type="note" >}}
Available in [Grafana Enterprise](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/introduction/grafana-enterprise/) version 10.0 and later, and [Grafana Cloud Pro or Advanced](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/introduction/grafana-cloud/).
{{< /admonition >}}
You can configure SAML authentication in Grafana through the user interface (UI) or the Grafana configuration file. For instructions on how to set up SAML using the Grafana configuration file, refer to [Configure SAML authentication using the configuration file](../saml/).
You can configure SAML authentication in Grafana through the user interface (UI) or the Grafana configuration file. For instructions on how to set up SAML using the Grafana configuration file, refer to [Configure SAML authentication using the configuration file](../#configure-saml-using-the-grafana-config-file).
The Grafana SAML UI provides the following advantages over configuring SAML in the Grafana configuration file:
@ -24,39 +26,21 @@ The Grafana SAML UI provides the following advantages over configuring SAML in t
- It doesn't require Grafana to be restarted after a configuration update
- Access to the SAML UI only requires access to authentication settings, so it can be used by users with limited access to Grafana's configuration
{{% admonition type="note" %}}
Any configuration changes made through the Grafana user interface (UI) will take precedence over settings specified in the Grafana configuration file or through environment variables. This means that if you modify any configuration settings in the UI, they will override any corresponding settings set via environment variables or defined in the configuration file. For more information on how Grafana determines the order of precedence for its settings, please refer to the [Settings update at runtime](../../../configure-grafana/settings-updates-at-runtime/).
{{% /admonition %}}
{{% admonition type="note" %}}
Disabling the UI does not affect any configuration settings that were previously set up through the UI. Those settings will continue to function as intended even with the UI disabled.
{{% /admonition %}}
{{< admonition type="note" >}}
Any configuration changes made through the Grafana user interface (UI) will take precedence over settings specified in the Grafana configuration file or through environment variables. This means that if you modify any configuration settings in the UI, they will override any corresponding settings set via environment variables or defined in the configuration file. For more information on how Grafana determines the order of precedence for its settings, please refer to the [SSO Settings API](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/developers/http_api/sso-settings/).
{{< /admonition >}}
## Before you begin
To follow this guide, you need:
- Knowledge of SAML authentication. Refer to [SAML authentication in Grafana](../saml/) for an overview of Grafana's SAML integration.
- Knowledge of SAML authentication. Refer to [SAML authentication in Grafana](../) for an overview of the SAML integration in Grafana.
- Permissions `settings:read` and `settings:write` with scope `settings:auth.saml:*` that allow you to read and update SAML authentication settings.
These permissions are granted by `fixed:authentication.config:writer` role.
By default, this role is granted to Grafana server administrator in self-hosted instances and to Organization admins in Grafana Cloud instances.
- Grafana instance running Grafana version 10.0 or later with [Grafana Enterprise](../../../../introduction/grafana-enterprise/) or [Grafana Cloud Pro or Advanced](/docs/grafana-cloud/) license.
{{% admonition type="note" %}}
It is possible to set up Grafana with SAML authentication using Azure AD. However, if an Azure AD user belongs to more than 150 groups, a Graph API endpoint is shared instead.
Grafana versions 11.1 and below do not support fetching the groups from the Graph API endpoint. As a result, users with more than 150 groups will not be able to retrieve their groups. Instead, it is recommended that you use OIDC/OAuth workflows.
As of Grafana 11.2, the SAML integration offers a mechanism to retrieve user groups from the Graph API.
Related links:
- [Azure AD SAML limitations](https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference#groups-overage-claim)
- [Set up SAML with Azure AD](../saml/#set-up-saml-with-azure-ad)
- [Configure a Graph API application in Azure AD](../saml/#configure-a-graph-api-application-in-azure-ad)
{{% /admonition %}}
- Grafana instance running Grafana version 10.0 or later with [Grafana Enterprise](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/introduction/grafana-enterprise/) or [Grafana Cloud Pro or Advanced](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/introduction/grafana-cloud/) license.
## Steps To Configure SAML Authentication
@ -69,11 +53,11 @@ Sign in to Grafana and navigate to **Administration > Authentication > Configure
For assistance, consult the following table for additional guidance about certain fields:
| Field | Description |
| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Allow signup** | If enabled, you can create new users through the SAML login. If disabled, then only existing Grafana users can log in with SAML. |
| **Auto login** | If enabled, Grafana will attempt to automatically log in with SAML skipping the login screen. |
| **Single logout** | The SAML single logout feature enables users to log out from all applications associated with the current IdP session established using SAML SSO. For more information, refer to [SAML single logout documentation](../saml/#single-logout). |
| **Identity provider initiated login** | Enables users to log in to Grafana directly from the SAML IdP. For more information, refer to [IdP initiated login documentation](../saml/#idp-initiated-single-sign-on-sso). |
| **Single logout** | The SAML single logout feature enables users to log out from all applications associated with the current IdP session established using SAML SSO. For more information, refer to [SAML single logout documentation](../configure-saml-single-logout). |
| **Identity provider initiated login** | Enables users to log in to Grafana directly from the SAML IdP. For more information, refer to [IdP initiated login documentation](../#idp-initiated-single-sign-on-sso). |
1. Click **Next: Sign requests**.
@ -85,7 +69,7 @@ Sign in to Grafana and navigate to **Administration > Authentication > Configure
Use the [PKCS #8](https://en.wikipedia.org/wiki/PKCS_8) format to issue the private key.
For more information, refer to an [example on how to generate SAML credentials](../saml/#generate-private-key-for-saml-authentication).
For more information, refer to an [example on how to generate SAML credentials](../configure-saml-request-signing/#example-of-private-key-generation-for-saml-authentication).
Alternatively, you can generate a new private key and certificate pair directly from the UI. Click on the `Generate key and certificate` button to open a form where you enter some information you want to be embedded into the new certificate.
@ -111,7 +95,7 @@ Sign in to Grafana and navigate to **Administration > Authentication > Configure
### 4. User Mapping Section
1. If you wish to [map user information from SAML assertions](../saml/#assertion-mapping), complete the **Assertion attributes mappings** section.
1. If you wish to [map user information from SAML assertions](../#assertion-mapping), complete the **Assertion attributes mappings** section.
If Azure is the Identity Provider over SAML there are caveats for the assertion attribute mappings. Due to how Azure interprets these attributes the full URL will need to be entered in the corresponding fields within the UI, which should match the URLs from the metadata XML. There are differences depending on whether it's a Role or Group claim vs other assertions which Microsoft has [documented](https://learn.microsoft.com/en-us/entra/identity-platform/reference-claims-customization#table-2-saml-restricted-claim-set).
@ -132,15 +116,15 @@ http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress
![image](https://github.com/user-attachments/assets/23910ab8-20ec-4dfd-8ef6-7dbaec51ac90)
You also need to configure the **Groups attribute** field if you want to use team sync. Team sync automatically maps users to Grafana teams based on their SAML group membership.
Learn more about [team sync](../../configure-team-sync) and [configuring team sync for SAML](../saml#configure-team-sync).
Learn more about [team sync](../../../configure-team-sync) and [configuring team sync for SAML](../configure-saml-team-role-mapping/#configure-team-sync).
1. If you want to automatically assign users' roles based on their SAML roles, complete the **Role mapping** section.
First, you need to configure the **Role attribute** field to specify which SAML attribute should be used to retrieve SAML role information.
Then enter the SAML roles that you want to map to Grafana roles in **Role mapping** section. If you want to map multiple SAML roles to a Grafana role, separate them by a comma and a space. For example, `Editor: editor, developer`.
Role mapping will automatically update user's [basic role](../../../../administration/roles-and-permissions/access-control/#basic-roles) based on their SAML roles every time the user logs in to Grafana.
Learn more about [SAML role synchronization](../saml/#configure-role-sync).
Role mapping will automatically update user's [basic role](../../../../../administration/roles-and-permissions/access-control/#basic-roles) based on their SAML roles every time the user logs in to Grafana.
Learn more about [SAML role synchronization](../configure-saml-team-role-mapping/#configure-role-sync).
1. If you're setting up Grafana with Azure AD using the SAML protocol and want to fetch user groups from the Graph API, complete the **Azure AD Service Account Configuration** subsection.
1. Set up a service account in Azure AD and provide the necessary details in the **Azure AD Service Account Configuration** section.
@ -155,7 +139,7 @@ Learn more about [team sync](../../configure-team-sync) and [configuring team sy
If you want users to have different roles in different organizations, you can additionally specify a role. For example, `Org mapping: Engineering:2:Editor` will map users who belong to `Engineering` organizations in SAML to Grafana organization with ID 2 and assign them Editor role.
Organization mapping will automatically update user's organization memberships (and roles, if they have been configured) based on their SAML organization every time the user logs in to Grafana.
Learn more about [SAML organization mapping](../saml/#configure-organization-mapping).
Learn more about [SAML organization mapping](../configure-saml-org-mapping/).
1. If you want to limit the access to Grafana based on user's SAML organization membership, fill in the **Allowed organizations** field.
1. Click **Next: Test and enable**.

@ -0,0 +1,111 @@
---
aliases:
- ../../../../saml/
description: Learn how to configure SAML authentication in Grafana's UI.
labels:
products:
- cloud
- enterprise
menuTitle: Troubleshooting
title: Troubleshoot SAML configuration
weight: 590
---
# Troubleshoot SAML configuration
Following are common issues found in configuring SAML authentication in Grafana and how to resolve them.
### Troubleshoot SAML authentication in Grafana
To troubleshoot and get more log information, enable SAML debug logging in the configuration file. Refer to [Configuration](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#filters) for more information.
```ini
[log]
filters = saml.auth:debug
```
### Infinite redirect loop / User gets redirected to the login page after successful login on the IdP side
If you experience an infinite redirect loop when `auto_login = true` or redirected to the login page after successful login, it is likely that the `grafana_session` cookie's SameSite setting is set to `Strict`. This setting prevents the `grafana_session` cookie from being sent to Grafana during cross-site requests. To resolve this issue, set the `security.cookie_samesite` option to `Lax` in the Grafana configuration file.
### SAML authentication fails with error:
- `asn1: structure error: tags don't match`
We only support one private key format: PKCS#8.
The keys may be in a different format (PKCS#1 or PKCS#12); in that case, it may be necessary to convert the private key format.
The following command creates a PKCS#8 key file.
```bash
openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes
```
#### **Convert** the private key format to base64
The following command converts keys to base64 format.
Base64-encode the `cert.pem` and `key.pem` files:
(The `-w0` switch is not needed on Mac, only for Linux)
```sh
$ base64 -w0 key.pem > key.pem.base64
$ base64 -w0 cert.pem > cert.pem.base64
```
The base64-encoded values (`key.pem.base64, cert.pem.base64` files) are then used for certificate and `private_key`.
The keys you provide should look like:
```
-----BEGIN PRIVATE KEY-----
...
...
-----END PRIVATE KEY-----
```
### SAML login attempts fail with request response `origin not allowed`
When the user logs in using SAML and gets presented with `origin not allowed`, the user might be issuing the login from an IdP (identity provider) service or the user is behind a reverse proxy. This potentially happens as the CSRF checks in Grafana deem the requests to be invalid. For more information, refer to [CSRF](https://owasp.org/www-community/attacks/csrf).
To solve this issue, you can configure either the [`csrf_trusted_origins`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#csrf_trusted_origins) or [`csrf_additional_headers`](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#csrf_additional_headers) option in the SAML configuration.
Example of a configuration file:
```ini
# config.ini
...
[security]
csrf_trusted_origins = https://grafana.example.com
csrf_additional_headers = X-Forwarded-Host
...
```
### SAML login attempts fail with request response "login session has expired"
Accessing the Grafana login page from a URL that is not the root URL of the
Grafana server can cause the instance to return the following error: "login session has expired".
If you are accessing Grafana through a proxy server, ensure that cookies are correctly
rewritten to the root URL of Grafana.
Cookies must be set on the same URL as the `root_url` of Grafana. This is normally the reverse proxy's domain/address.
Review the cookie settings in your proxy server configuration to ensure that cookies are
not being discarded.
Review the following settings in your Grafana configuration:
```ini
[security]
cookie_samesite = none
```
This setting should be set to `none` to allow Grafana session cookies to work correctly with redirects.
```ini
[security]
cookie_secure = true
```
Ensure `cookie_secure` is set to `true` so that cookies are only sent over HTTPS.

@ -0,0 +1,25 @@
---
headless: true
labels:
products:
- enterprise
- oss
title: Upgrade guide introduction
---
We recommend that you upgrade Grafana often to stay current with the latest fixes and enhancements.
Because Grafana upgrades are backward compatible, the upgrade process is straightforward, and dashboards and graphs will not change.
In addition to common tasks you should complete for all versions of Grafana, there might be additional upgrade tasks to complete for a version.
{{% admonition type="note" %}}
There might be breaking changes in some releases. We outline all these changes in the [What's New](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/whatsnew/) document.
{{% /admonition %}}
For versions of Grafana prior to v9.2, we published additional information in the [Release Notes](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/release-notes/).
When available, we list all changes with links to pull requests or issues in the [Changelog](https://github.com/grafana/grafana/blob/main/CHANGELOG.md).
{{% admonition type="note" %}}
When possible, we recommend that you test the Grafana upgrade process in a test or development environment.
{{% /admonition %}}

@ -0,0 +1,82 @@
---
description: Guide for upgrading to Grafana v12.0
keywords:
- grafana
- configuration
- documentation
- upgrade
- '12.0'
title: Upgrade to Grafana v12.0
menuTitle: Upgrade to v12.0
weight: 600
---
# Upgrade to Grafana v12.0
{{< docs/shared lookup="upgrade/intro_2.md" source="grafana" version="<GRAFANA_VERSION>" >}}
{{< docs/shared lookup="back-up/back-up-grafana.md" source="grafana" version="<GRAFANA_VERSION>" leveloffset="+1" >}}
{{< docs/shared lookup="upgrade/upgrade-common-tasks.md" source="grafana" version="<GRAFANA_VERSION>" >}}
## Technical notes
### Grafana data source UID format enforcement
**Ensure that your data source UIDs follow the correct standard**
We've had standard ways to define UIDs for Grafana objects for years (at least [since Grafana v5](https://github.com/grafana/grafana/issues/7883)). While all of our internal code complies with this format, we haven't strictly enforced this format in REST APIs and provisioning paths that allow the creation and update of data sources.
In Grafana v11.1, we [introduced](https://github.com/grafana/grafana/pull/86598) a warning that is sent to Grafana server logs every time a data source instance is created or updated using an invalid UID format.
In Grafana v11.2, we [added](https://github.com/grafana/grafana/pull/89363/files) a new feature flag called `failWrongDSUID` that is turned off by default. When enabled, the REST APIs and provisioning reject any requests to create or update data source instances that have an incorrect UID.
In Grafana v12.0, we're turning the feature flag `failWrongDSUID` on by default.
#### Correct UID format
You can find the exact regex definition [in the `grafana/grafana` repository](https://github.com/grafana/grafana/blob/c92f5169d1c83508beb777f71a93336179fe426e/pkg/util/shortid_generator.go#L32-L45).
A data source UID can only contain:
- Latin characters (`a-z`, `A-Z`)
- Numbers (`0-9`)
- Dash and underscore symbols (`-`, `_`)
#### How do I know if I'm affected?
- You can fetch all your data sources using the `/api/datasources` API. Review the `uid` fields, comparing them to the correct format, as shown [in the docs](https://grafana.com/docs/grafana/latest/developers/http_api/data_source/#get-all-data-sources). The following script can help, but note that it's missing authentication that you [have to add yourself](https://grafana.com/docs/grafana/latest/developers/http_api/#authenticating-api-requests):
```
curl http://localhost:3000/api/datasources | jq '.[] | select((.uid | test("^[a-zA-Z0-9\\-_]+$") | not) or (.uid | length > 40)) | {id, uid, name, type}'
```
- Alternatively, you can check the server logs for the `Invalid datasource uid` [error](https://github.com/grafana/grafana/blob/68751ed3107c4d15d33f34b15183ee276611785c/pkg/services/datasources/service/store.go#L429).
#### What do I do if I'm affected?
You'll need to create a new data source with the correct UID and update your dashboards and alert rules to use it.
#### How do I update my dashboards to use the new or updated data source?
- Go to the dashboard using the data source and update it by selecting the new or updated data source from the picker below your panel.
OR
- Update the dashboard's JSON model directly using search and replace.
Navigate to [dashboard json model](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/view-dashboard-json-model/) and carefully replace all the instances of the old `uid` with the newly created `uid`.
{{< figure src="/media/docs/grafana/screenshot-grafana-11-datasource-uid-enforcement.png" alt="Updating JSON Model of a Dashboard">}}
#### How do I update my alert rules to use the new or updated data source?
Open the alert rule you want to adjust and search for the data source that is being used for the query/alert condition. From there, select the new data source from the drop-down list and save the alert rule.
### Enforcing stricter version compatibility checks in plugin CLI install commands
Since Grafana 10.2, the endpoint to check compatible versions when installing a plugin using `grafana cli plugins install` changed, which led to Grafana dependency version no longer being taken into account. This might have led to some behavior where the CLI would install plugins that are not fully compatible based on the plugins definition of compatibility via `grafanaDependency` property in the `plugin.json` file.
#### What if I want to ignore the compatibility check?
We _do not_ recommend installing plugins declared as incompatible. However, if you need to force install a plugin despite it being declared as incompatible, refer to the [Installing a plugin from a ZIP](https://grafana.com/docs/grafana/latest/administration/plugin-management/#install-a-plugin-from-a-zip-file) guidance.

@ -74,6 +74,10 @@ For Grafana versions prior to v9.2, additional information might also be availab
For a complete list of every change, with links to pull requests and related issues when available, see the [Changelog](https://github.com/grafana/grafana/blob/main/CHANGELOG.md).
## Grafana 12
- [What's new in 12.0](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/whatsnew/whats-new-in-v12-0)
## Grafana 11
- [What's new in 11.6](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/whatsnew/whats-new-in-v11-6)

@ -0,0 +1,95 @@
---
description: Feature and improvement highlights for Grafana v12.0
keywords:
- grafana
- new
- documentation
- '12.0'
- release notes
labels:
products:
- cloud
- enterprise
- oss
title: What's new in Grafana v12.0
posts:
- title: Observability as Code
items:
- docs/grafana-cloud/whats-new/2025-04-14-git-sync-for-grafana-dashboards.md
- docs/grafana-cloud/whats-new/2025-04-11-new-dashboards-schema.md
- docs/grafana-cloud/whats-new/2025-05-05-new-dashboard-apis-released-as-experimental.md
- title: Drilldown apps
items:
- docs/grafana-cloud/whats-new/2025-04-28-metrics-drilldown-improvements.md
- docs/grafana-cloud/whats-new/2025-04-28-logs-drilldown-improvements.md
- docs/grafana-cloud/whats-new/2025-04-17-ga-release-of-grafana-traces-drilldown.md
- docs/grafana-cloud/whats-new/2025-04-28-introducing-investigations
- title: Cloud Migration Assistant
items:
- docs/grafana-cloud/whats-new/2025-04-11-grafana-cloud-migration-assistant-now-generally-available.md
- title: Dashboards and visualizations
items:
- docs/grafana-cloud/whats-new/2025-04-11-dynamic-dashboards.md
- docs/grafana-cloud/whats-new/2025-04-11-blazing-fast-table-panel.md
- docs/grafana-cloud/whats-new/2025-04-07-sql-expressions.md
- title: Authentication and authorization
items:
- docs/grafana-cloud/whats-new/2025-04-14-scim-user-and-team-provisioning.md
- title: Alerting
items:
- docs/grafana-cloud/whats-new/2025-04-10-alert-rule-migration-tool.md
- docs/grafana-cloud/whats-new/2025-04-10-grafana-managed-alert-rule-recovering-state.md
- docs/grafana-cloud/whats-new/2025-04-11-grafana-managed-alert-rule-improvements.md
- title: Experimental themes
items:
- docs/grafana-cloud/whats-new/2025-04-10-experimental-themes.md
- title: Explore
items:
- docs/grafana-cloud/whats-new/2025-04-15-new-controls-for-logs-in-explore.md
- title: Traces
items:
- docs/grafana-cloud/whats-new/2025-04-30-trace-correlations-instant-context-hops-from-any-trace.md
- title: Breaking Changes
items:
- docs/grafana-cloud/whats-new/2025-04-28-removal-of-editors_can_admin-configuration.md
- docs/grafana-cloud/whats-new/2025-04-28-dashboard-v2-schema-and-next-gen-dashboards.md
- docs/grafana-cloud/whats-new/2025-04-29-deduplication-and-renaming-of-metric-cache_size.md
- docs/grafana-cloud/whats-new/2025-04-28-removal-of-optional-actions-property-from-datalinkscontextmenu-component.md
- docs/grafana-cloud/whats-new/2025-04-29-enforcing-stricter-data-source-uid-format.md
- docs/grafana-cloud/whats-new/2025-04-28-removal-of-angular.md
- docs/grafana-cloud/whats-new/2025-04-29-deprecated-apis-for-ui-extensions-will-be-removed.md
- docs/grafana-cloud/whats-new/2025-04-29-enforcing-stricter-version-compatibility-checks-in-plugin-cli-install-commands.md
- docs/grafana-cloud/whats-new/2025-04-28-removal-of-‘aggregate-by’-in-tempo.md
- docs/grafana-cloud/whats-new/2025-04-28-removing-the-feature-toggle-ui-from-grafana-cloud.md
whats_new_grafana_version: 12.0
weight: -49
---
# What’s new in Grafana v12.0
Welcome to Grafana 12.0! We have a _lot_ to share. This release marks general availability for Grafana Drilldown (previously Explore Metrics, Logs, and Traces), Grafana-managed alerts and recording rules, Cloud migration, and plugin management tooling. You can also try new [preview and experimental](https://grafana.com/docs/release-life-cycle/) tools: Sync your dashboards directly to a GitHub repository with Git Sync, and try our new Terraform provider and CLI. Add tabs, new layouts and conditional logic to your dashboards, and load tables and geomaps far faster. Join and transform data limitlessly from multiple sources with SQL Expressions. In Grafana Cloud and Enterprise, sync your users and teams instantly from your SAML identity provider using SCIM (the System for Cross-Domain Identity Management). Lastly, don't forget to try on one of several new color themes for the user interface.
Read on to learn about these and more improvements to Grafana!
{{< youtube id=mHSzaVYBh38 >}}
For even more detail about all the changes in this release, refer to the [changelog](https://github.com/grafana/grafana/blob/main/CHANGELOG.md). For the specific steps we recommend when you upgrade to v12.0, check out our [Upgrade Guide](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/upgrade-guide/upgrade-v12.0/).
## Breaking changes in Grafana v12.0
For Grafana v12.0, we've also provided a list of [breaking changes](https://grafana.com/docs/grafana/latest/whatsnew/whats-new-in-v12-0/#breaking-changes) to help you upgrade with greater confidence. For our purposes, a breaking change is any change that requires users or operators to do something. This includes:
- Changes in one part of the system that could cause other components to fail
- Deprecations or removal of a feature
- Changes to an API that could break automation
- Changes that affect some plugins or functions of Grafana
- Migrations that can’t be rolled back
For each change, the provided information:
- Helps you determine if you’re affected
- Describes the change or relevant background information
- Guides you in how to mitigate for the change or migrate
- Provides more learning resources
{{< docs/whats-new >}}

@ -0,0 +1,24 @@
import { e2e } from '../utils';
// Common flows for adding/editing variables on the new edit pane
// Common flows for adding/editing variables on the new edit pane
export const flows = {
  /** Opens the dashboard edit pane and clicks through to the "add variable" button. */
  newEditPaneVariableClick() {
    e2e.components.NavToolbar.editDashboard.editButton().should('be.visible').click();
    e2e.components.PanelEditor.Outline.section().should('be.visible').click();
    e2e.components.PanelEditor.Outline.item('Variables').should('be.visible').click();
    e2e.components.PanelEditor.ElementEditPane.addVariableButton().should('be.visible').click();
  },
  /** Fills in the inputs shared by all variable types (type, name, optional label). */
  newEditPanelCommonVariableInputs(variable: Variable) {
    e2e.components.PanelEditor.ElementEditPane.variableType(variable.type).should('be.visible').click();
    e2e.components.PanelEditor.ElementEditPane.variableNameInput().clear().type(variable.name).blur();
    // `label` is optional on Variable and Cypress `.type()` throws on undefined/empty
    // input, so only touch the label field when a label was provided.
    if (variable.label) {
      e2e.components.PanelEditor.ElementEditPane.variableLabelInput().clear().type(variable.label).blur();
    }
  },
};
/** Shape of a dashboard variable under test in the edit-pane e2e flows. */
export type Variable = {
  // Variable type as shown in the type picker (e.g. 'constant').
  type: string;
  // Variable name entered in the name input.
  name: string;
  // Optional display label; flows should skip the label input when omitted.
  label?: string;
  // Optional description (not currently filled in by the common flows).
  description?: string;
  // Value assigned to the variable in type-specific steps.
  value: string;
};

@ -1,9 +1,11 @@
import { e2e } from '../utils';
import { flows, Variable } from './dashboard-edit-flows';
const PAGE_UNDER_TEST = 'kVi2Gex7z/test-variable-output';
const DASHBOARD_NAME = 'Test variable output';
describe('Dashboard edit variables', () => {
describe('Dashboard edit - variables', () => {
beforeEach(() => {
e2e.flows.login(Cypress.env('USERNAME'), Cypress.env('PASSWORD'));
});
@ -33,28 +35,36 @@ describe('Dashboard edit variables', () => {
const values = variable.value.split(',');
e2e.pages.Dashboard.SubMenu.submenuItemValueDropDownValueLinkTexts(values[0]).should('be.visible');
});
});
// Common flows for adding/editing variables
// TODO: maybe move to e2e flows
const flows = {
newEditPaneVariableClick() {
e2e.components.NavToolbar.editDashboard.editButton().should('be.visible').click();
e2e.components.PanelEditor.Outline.section().should('be.visible').click();
e2e.components.PanelEditor.Outline.item('Variables').should('be.visible').click();
e2e.components.PanelEditor.ElementEditPane.addVariableButton().should('be.visible').click();
},
newEditPanelCommonVariableInputs(variable: Variable) {
e2e.components.PanelEditor.ElementEditPane.variableType(variable.type).should('be.visible').click();
e2e.components.PanelEditor.ElementEditPane.variableNameInput().clear().type(variable.name).blur();
e2e.components.PanelEditor.ElementEditPane.variableLabelInput().clear().type(variable.label).blur();
},
};
type Variable = {
type: string;
name: string;
label: string;
description?: string;
value: string;
};
it('can add a new constant variable', () => {
  e2e.pages.Dashboards.visit();
  // Open the fixture dashboard and wait until its title confirms it loaded.
  e2e.flows.openDashboard({ uid: `${PAGE_UNDER_TEST}?orgId=1` });
  cy.contains(DASHBOARD_NAME).should('be.visible');
  const variable: Variable = {
    type: 'constant',
    name: 'VariableUnderTest',
    value: 'foo',
    label: 'VariableUnderTest', // constant doesn't really need a label
  };
  // common steps to add a new variable
  flows.newEditPaneVariableClick();
  flows.newEditPanelCommonVariableInputs(variable);
  // set the constant variable value
  const type = 'variable-type Value';
  const field = e2e.components.PanelEditor.OptionsPane.fieldLabel(type);
  field.should('be.visible');
  field.find('input').should('be.visible').clear().type(variable.value).blur();
  // assert the panel is visible and has the correct value
  e2e.components.Panels.Panel.content()
    .should('be.visible')
    .first()
    .within(() => {
      // The panel renders markdown that interpolates the variable; check the output text.
      cy.get('.markdown-html').should('include.text', `VariableUnderTest: ${variable.value}`);
    });
});
});

@ -3,6 +3,13 @@ import { e2e } from '../utils';
describe('Dashboard templating', () => {
beforeEach(() => {
  e2e.flows.login(Cypress.env('USERNAME'), Cypress.env('PASSWORD'));
  // Pin the browser timezone via CDP so time-dependent rendering is deterministic
  // regardless of the CI machine's local timezone.
  // Note: Only works in Chrome/Chromium-based browsers
  Cypress.automation('remote:debugger:protocol', {
    command: 'Emulation.setTimezoneOverride',
    params: {
      timezoneId: 'Pacific/Easter', // OR 'UTC'
    },
  });
});
it('Verify variable interpolation works', () => {

@ -206,24 +206,24 @@ require (
)
require (
github.com/grafana/grafana/apps/advisor v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/plugins-platform-backend
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/alerting-backend
github.com/grafana/grafana/apps/dashboard v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-app-platform-squad @grafana/dashboards-squad
github.com/grafana/grafana/apps/folder v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-search-and-storage
github.com/grafana/grafana/apps/investigations v0.0.0-20250422074709-7c8433fbb2c2 // @fcjack @matryer
github.com/grafana/grafana/apps/playlist v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/apis/secret v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-operator-experience-squad
github.com/grafana/grafana/pkg/apiserver v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/apps/advisor v0.0.0-20250506052906-7a2fc797fb4a // @grafana/plugins-platform-backend
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250506052906-7a2fc797fb4a // @grafana/alerting-backend
github.com/grafana/grafana/apps/dashboard v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-app-platform-squad @grafana/dashboards-squad
github.com/grafana/grafana/apps/folder v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-search-and-storage
github.com/grafana/grafana/apps/investigations v0.0.0-20250506052906-7a2fc797fb4a // @fcjack @matryer
github.com/grafana/grafana/apps/playlist v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/apis/secret v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-operator-experience-squad
github.com/grafana/grafana/pkg/apiserver v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-app-platform-squad
// This needs to be here for other projects that import grafana/grafana
// For local development grafana/grafana will always use the local files
// Check go.work file for details
github.com/grafana/grafana/pkg/promlib v0.0.8 // @grafana/oss-big-tent
github.com/grafana/grafana/pkg/semconv v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-search-and-storage
github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20250422074709-7c8433fbb2c2 // @grafana/grafana-search-and-storage
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-search-and-storage
github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20250506052906-7a2fc797fb4a // @grafana/grafana-search-and-storage
)
require (

@ -1590,6 +1590,7 @@ github.com/grafana/grafana-app-sdk v0.35.1/go.mod h1:Zx5MkVppYK+ElSDUAR6+fjzOVo6
github.com/grafana/grafana-app-sdk/logging v0.35.1 h1:taVpl+RoixTYl0JBJGhH+fPVmwA9wvdwdzJTZsv9buM=
github.com/grafana/grafana-app-sdk/logging v0.35.1/go.mod h1:Y/bvbDhBiV/tkIle9RW49pgfSPIPSON8Q4qjx3pyqDk=
github.com/grafana/grafana-aws-sdk v0.38.1 h1:4fU28F/UIs3YYuS52bBzTOKpFIIYGJZmgM6PO7IEj90=
github.com/grafana/grafana-aws-sdk v0.38.1/go.mod h1:j3vi+cXYHEFqjhBGrI6/lw1TNM+dl0Y3f0cSnDOPy+s=
github.com/grafana/grafana-azure-sdk-go/v2 v2.1.6 h1:OfCkitCuomzZKW1WYHrG8MxKwtMhALb7jqoj+487eTg=
github.com/grafana/grafana-azure-sdk-go/v2 v2.1.6/go.mod h1:V7y2BmsWxS3A9Ohebwn4OiSfJJqi//4JQydQ8fHTduo=
github.com/grafana/grafana-cloud-migration-snapshot v1.6.0 h1:S4kHwr//AqhtL9xHBtz1gqVgZQeCRGTxjgsRBAkpjKY=
@ -1600,34 +1601,34 @@ github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79
github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79/go.mod h1:wc6Hbh3K2TgCUSfBC/BOzabItujtHMESZeFk5ZhdxhQ=
github.com/grafana/grafana-plugin-sdk-go v0.277.0 h1:VDU2F4Y5NeRS//ejctdZtsAshrGaEdbtW33FsK0EQss=
github.com/grafana/grafana-plugin-sdk-go v0.277.0/go.mod h1:mAUWg68w5+1f5TLDqagIr8sWr1RT9h7ufJl5NMcWJAU=
github.com/grafana/grafana/apps/advisor v0.0.0-20250422074709-7c8433fbb2c2 h1:IoXfNDcVQLh4/9pjKqm2MUz1oo5mJnUCtb3tst/GIHA=
github.com/grafana/grafana/apps/advisor v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:xOL9buMMbQg+3m0jPfrza4/5iwe4EBrnur/aJGAA1pM=
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250422074709-7c8433fbb2c2 h1:5MQ9mLe/3t2oExmvhnUgqhj9N1+3swjFjVhd/rbKaEs=
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:hfz29ggGyj8XNPNzvkz9jaMms5a6/LZkZQWfvaQGPK0=
github.com/grafana/grafana/apps/dashboard v0.0.0-20250422074709-7c8433fbb2c2 h1:AaDJAh7JnBXmMAagob+ewlIUiWPSxQuCESM34EBK5wM=
github.com/grafana/grafana/apps/dashboard v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:LBRNpF2LR2ktSk44HcT9l0SOXKZOn4svTYKxBHCBhtc=
github.com/grafana/grafana/apps/folder v0.0.0-20250422074709-7c8433fbb2c2 h1:XGyEA0CHFb7noYqN/E6hW5Hvtvw/agLEkXV0SdgIwNg=
github.com/grafana/grafana/apps/folder v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:lpi6D5h/zQMUI5VqiV6lDomXyTQA1IZSYyuqyn7xFK4=
github.com/grafana/grafana/apps/investigations v0.0.0-20250422074709-7c8433fbb2c2 h1:GIcowS7OuHaTiZEwLscPPPCGc9Qv3+UtscJZIXHcVLo=
github.com/grafana/grafana/apps/investigations v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:pI3xLwHRhaMTYO5pgA34/ros5suc5J1H+HAdRfwlbx4=
github.com/grafana/grafana/apps/playlist v0.0.0-20250422074709-7c8433fbb2c2 h1:J0cA0yYx/6MB5qa9VIMlsT5rlZAgGtJe2URt2I4GHT0=
github.com/grafana/grafana/apps/playlist v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:9U44mptAJW8bkvgPgCxsnki58/nz3wKPgDayeyeFWJs=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250422074709-7c8433fbb2c2 h1:a7OgvUdcGHBTwVjejQXqH1q1C5kBz3ZRYiiHHZdRjeM=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:GHOXRpm34Y827pUToxwKgM2ZEf2tyuFHdtHYOgAOxrI=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250422074709-7c8433fbb2c2 h1:kvG92f3XbJlQPUcZfXlTNLziI4e8LYeA9Jv2ixmM5Ic=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:ll14OJrUGYgXApz3YX6zmxYjRMZHL+pgQjoKBuRzaRs=
github.com/grafana/grafana/pkg/apis/secret v0.0.0-20250422074709-7c8433fbb2c2 h1:fwsq+3uDUcmmV91ly4fESt3U4gDlGXbB6389S5ueljA=
github.com/grafana/grafana/pkg/apis/secret v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:bszKnjm4DxPx96DEYduncSVDxfFz14NwW0+bntRdjY0=
github.com/grafana/grafana/pkg/apiserver v0.0.0-20250422074709-7c8433fbb2c2 h1:ZreXete9lRBJmBe49OHeYh8yo+MyXDs5q/96mlRnr0s=
github.com/grafana/grafana/pkg/apiserver v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:Xaz4wiMdzfuMqzxZ5ZhNwzBCGGJVn/IQfzDSY5aosQY=
github.com/grafana/grafana/apps/advisor v0.0.0-20250506052906-7a2fc797fb4a h1:pcrnyCVvShv7XXdOKpLswEqigk0sYkBeEskYpb3Xnf8=
github.com/grafana/grafana/apps/advisor v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:xOL9buMMbQg+3m0jPfrza4/5iwe4EBrnur/aJGAA1pM=
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250506052906-7a2fc797fb4a h1:2wLw1n/lTM8woHPBwLzJtyh2M/74GDF8vx/sEzQB2qM=
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:VkX53kBiqIMHBoGgeEDJnzm5Nwcmv/726tuZuT5SvJY=
github.com/grafana/grafana/apps/dashboard v0.0.0-20250506052906-7a2fc797fb4a h1:Op8IaMLc8GyltYqTHbBJqEJE53K7u/kgeRRKldnbIoU=
github.com/grafana/grafana/apps/dashboard v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:Jm+oN15Mz5zLomueCvg7JTzc0bU5TDf8FVEsVYRbivU=
github.com/grafana/grafana/apps/folder v0.0.0-20250506052906-7a2fc797fb4a h1:/P+/CCZQ76reMtSvUdyTKSXBQjX+DDNBa1MXgXRRHy8=
github.com/grafana/grafana/apps/folder v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:hYtCxd1P5H0oBU42yf3iy8usA/0amreOaOaETRKqG/4=
github.com/grafana/grafana/apps/investigations v0.0.0-20250506052906-7a2fc797fb4a h1:x8Bmzx0QuG50c8pbkIGQMtZNY2mVRY0XCtUApleKBnA=
github.com/grafana/grafana/apps/investigations v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:pI3xLwHRhaMTYO5pgA34/ros5suc5J1H+HAdRfwlbx4=
github.com/grafana/grafana/apps/playlist v0.0.0-20250506052906-7a2fc797fb4a h1:6DXQ84Hen7lY73uf2iB8gEEokAKJQWMi0NUhLA0Dqzg=
github.com/grafana/grafana/apps/playlist v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:9U44mptAJW8bkvgPgCxsnki58/nz3wKPgDayeyeFWJs=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250506052906-7a2fc797fb4a h1:BuI54sGZ50y2bJzLUIfVTyVADoT15j00vJ4UE8lXiS4=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:lR9iC/cP9HjGEuuGdmY12srQWxnJ57apBR3wjs8Nu24=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250506052906-7a2fc797fb4a h1:fifgkG+6ZtI+S7NRrtTCA1+Ue3AApN3mxyAJEORrtDE=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:3MwgP0ISxGviTy3ZUJZsNz/56NNtHztMlH+gcxDt6Tw=
github.com/grafana/grafana/pkg/apis/secret v0.0.0-20250506052906-7a2fc797fb4a h1:yiirQj9r2M+imIwsexRV+uk5ZYaEZ1eJiQ+PFHbWVg4=
github.com/grafana/grafana/pkg/apis/secret v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:L6lSC6q+tugol2mX0So7X1p4r8Z1+/j3yJd3dANpQiM=
github.com/grafana/grafana/pkg/apiserver v0.0.0-20250506052906-7a2fc797fb4a h1:HCOrXGvdPolbZ/Cw9AoT1p1de1PInZTB3ri0Y9K0E18=
github.com/grafana/grafana/pkg/apiserver v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:Ex2YDQIZ4MQ2btNuCGs8c8aDb6CG3oJ3X0vLtejAfPM=
github.com/grafana/grafana/pkg/promlib v0.0.8 h1:VUWsqttdf0wMI4j9OX9oNrykguQpZcruudDAFpJJVw0=
github.com/grafana/grafana/pkg/promlib v0.0.8/go.mod h1:U1ezG/MGaEPoThqsr3lymMPN5yIPdVTJnDZ+wcXT+ao=
github.com/grafana/grafana/pkg/semconv v0.0.0-20250422074709-7c8433fbb2c2 h1:uKOBkqzjMwimPJvTOjlo0bFrrR17w8U5l3HtDETPacQ=
github.com/grafana/grafana/pkg/semconv v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:w5oIOh8JhAEY/GwiIrLGBBRv2w0D7Ngv+dznv4k8Tek=
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250422074709-7c8433fbb2c2 h1:LtuJWMxi64Zm43AVirn1uNu6SYMmnwkRAfigvm3tpB8=
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:LivE9S7HU1uU0cZ99wG77ZgPmOPZYuFWfZ68Lh59gPU=
github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20250422074709-7c8433fbb2c2 h1:1f8d/Jy/9kv4bqtI5dQjxhpzFBWFrmtPXAPjOd8e6WA=
github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20250422074709-7c8433fbb2c2/go.mod h1:c1wMG6p6/zlMsi1KoOGYNMdFW2f8xM690CSZcl2i4eI=
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250506052906-7a2fc797fb4a h1:2g/anKApzZYtClnHryXpURF3gPyGHbJs1pyU4Y6u55A=
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:/eXW4g4cROVmDxH0OhCoXmp9n+xULrmnncMUyFHXxP0=
github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20250506052906-7a2fc797fb4a h1:7QIwgb9nZmVPRjSi+UQDhjxrAWmEVExXLh4+TfoN/MU=
github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:fs8tmn4kliX1914EZfGOL+A6BUqejjbdL+7Dj9ZmZPA=
github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 h1:NznuPwItog+rwdVg8hAuGKP29ndRSzJAwhxKldkP8oQ=
github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY=
github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 h1:ZYk42718kSXOiIKdjZKljWLgBpzL5z1yutKABksQCMg=

@ -1116,24 +1116,30 @@ github.com/grafana/cog v0.0.23 h1:/0CCJ24Z8XXM2DnboSd2FzoIswUroqIZzVr8oJWmMQs=
github.com/grafana/cog v0.0.23/go.mod h1:jrS9indvWuDs60RHEZpLaAkmZdgyoLKMOEUT0jiB1t0=
github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak=
github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90=
github.com/grafana/grafana-aws-sdk v0.38.1 h1:4fU28F/UIs3YYuS52bBzTOKpFIIYGJZmgM6PO7IEj90=
github.com/grafana/grafana-aws-sdk v0.38.1/go.mod h1:j3vi+cXYHEFqjhBGrI6/lw1TNM+dl0Y3f0cSnDOPy+s=
github.com/grafana/grafana-plugin-sdk-go v0.263.0/go.mod h1:U43Cnrj/9DNYyvFcNdeUWNjMXTKNB0jcTcQGpWKd2gw=
github.com/grafana/grafana-plugin-sdk-go v0.267.0/go.mod h1:OuwS4c/JYgn0rr/w5zhJBpLo4gKm/vw15RsfpYAvK9Q=
github.com/grafana/grafana-plugin-sdk-go v0.269.1/go.mod h1:yv2KbO4mlr9WuDK2f+2gHAMTwwLmLuqaEnrPXTRU+OI=
github.com/grafana/grafana/apps/advisor v0.0.0-20250123151950-b066a6313173/go.mod h1:goSDiy3jtC2cp8wjpPZdUHRENcoSUHae1/Px/MDfddA=
github.com/grafana/grafana/apps/advisor v0.0.0-20250220154326-6e5de80ef295/go.mod h1:9I1dKV3Dqr0NPR9Af0WJGxOytp5/6W3JLiNChOz8r+c=
github.com/grafana/grafana/apps/advisor v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:xOL9buMMbQg+3m0jPfrza4/5iwe4EBrnur/aJGAA1pM=
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250121113133-e747350fee2d/go.mod h1:AvleS6icyPmcBjihtx5jYEvdzLmHGBp66NuE0AMR57A=
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250416173722-ec17e0e4ce03/go.mod h1:oemrhKvFxxc5m32xKHPxInEHAObH0/hPPyHUiBUZ1Cc=
github.com/grafana/grafana/apps/alerting/notifications v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:VkX53kBiqIMHBoGgeEDJnzm5Nwcmv/726tuZuT5SvJY=
github.com/grafana/grafana/apps/investigation v0.0.0-20250121113133-e747350fee2d/go.mod h1:HQprw3MmiYj5OUV9CZnkwA1FKDZBmYACuAB3oDvUOmI=
github.com/grafana/grafana/apps/investigations v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:pI3xLwHRhaMTYO5pgA34/ros5suc5J1H+HAdRfwlbx4=
github.com/grafana/grafana/apps/playlist v0.0.0-20250121113133-e747350fee2d/go.mod h1:DjJe5osrW/BKrzN9hAAOSElNWutj1bcriExa7iDP7kA=
github.com/grafana/grafana/apps/playlist v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:9U44mptAJW8bkvgPgCxsnki58/nz3wKPgDayeyeFWJs=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250121113133-e747350fee2d/go.mod h1:1sq0guad+G4SUTlBgx7SXfhnzy7D86K/LcVOtiQCiMA=
github.com/grafana/grafana/pkg/aggregator v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:lR9iC/cP9HjGEuuGdmY12srQWxnJ57apBR3wjs8Nu24=
github.com/grafana/grafana/pkg/apis/secret v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:L6lSC6q+tugol2mX0So7X1p4r8Z1+/j3yJd3dANpQiM=
github.com/grafana/grafana/pkg/build v0.0.0-20250220114259-be81314e2118/go.mod h1:STVpVboMYeBAfyn6Zw6XHhTHqUxzMy7pzRiVgk1l0W0=
github.com/grafana/grafana/pkg/build v0.0.0-20250227105625-8f465f124924/go.mod h1:Vw0LdoMma64VgIMVpRY3i0D156jddgUGjTQBOcyeF3k=
github.com/grafana/grafana/pkg/build v0.0.0-20250227163402-d78c646f93bb/go.mod h1:Vw0LdoMma64VgIMVpRY3i0D156jddgUGjTQBOcyeF3k=
github.com/grafana/grafana/pkg/build v0.0.0-20250403075254-4918d8720c61/go.mod h1:LGVnSwdrS0ZnJ2WXEl5acgDoYPm74EUSFavca1NKHI8=
github.com/grafana/grafana/pkg/semconv v0.0.0-20250121113133-e747350fee2d/go.mod h1:tfLnBpPYgwrBMRz4EXqPCZJyCjEG4Ev37FSlXnocJ2c=
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250121113133-e747350fee2d/go.mod h1:CXpwZ3Mkw6xVlGKc0SqUxqXCP3Uv182q6qAQnLaLxRg=
github.com/grafana/grafana/pkg/storage/unified/apistore v0.0.0-20250506052906-7a2fc797fb4a/go.mod h1:/eXW4g4cROVmDxH0OhCoXmp9n+xULrmnncMUyFHXxP0=
github.com/grafana/prometheus-alertmanager v0.25.1-0.20240930132144-b5e64e81e8d3 h1:6D2gGAwyQBElSrp3E+9lSr7k8gLuP3Aiy20rweLWeBw=
github.com/grafana/prometheus-alertmanager v0.25.1-0.20240930132144-b5e64e81e8d3/go.mod h1:YeND+6FDA7OuFgDzYODN8kfPhXLCehcpxe4T9mdnpCY=
github.com/grafana/prometheus-alertmanager v0.25.1-0.20250331083058-4563aec7a975 h1:4/BZkGObFWZf4cLbE2Vqg/1VTz67Q0AJ7LHspWLKJoQ=

@ -1026,4 +1026,8 @@ export interface FeatureToggles {
* @default true
*/
alertingBulkActionsInUI?: boolean;
/**
* Use proxy-based read-only objects for plugin extensions instead of deep cloning
*/
extensionsReadOnlyProxy?: boolean;
}

@ -10,6 +10,7 @@ import { getFieldTypeIcon } from '../../../../types';
import { Icon } from '../../../Icon/Icon';
import { Filter } from '../Filter/Filter';
import { TableColumnResizeActionCallback, FilterType, TableRow, TableSummaryRow } from '../types';
import { getDisplayName } from '../utils';
interface HeaderCellProps {
column: Column<TableRow, TableSummaryRow>;
@ -46,16 +47,17 @@ const HeaderCell: React.FC<HeaderCellProps> = ({
const headerRef = useRef<HTMLDivElement>(null);
const filterable = field.config?.custom?.filterable ?? false;
const displayName = getDisplayName(field);
let isColumnFilterable = filterable;
if (field.config.custom?.filterable !== filterable) {
isColumnFilterable = field.config.custom?.filterable || false;
}
// we have to remove/reset the filter if the column is not filterable
if (!isColumnFilterable && filter[field.name]) {
if (!isColumnFilterable && filter[displayName]) {
setFilter((filter: FilterType) => {
const newFilter = { ...filter };
delete newFilter[field.name];
delete newFilter[displayName];
return newFilter;
});
}

@ -101,6 +101,7 @@ export const SparklineCell = (props: SparklineCellProps) => {
textAlign: 'right',
marginRight: theme.spacing(1),
}}
className={styles.valueContainer}
value={displayValue}
/>
);
@ -157,4 +158,7 @@ const getStyles = (theme: GrafanaTheme2, justifyContent: Property.JustifyContent
alignItems: 'center',
justifyContent,
}),
valueContainer: css({
div: { width: 'inherit' },
}),
});

@ -18,7 +18,7 @@ import {
FILTER_OUT_OPERATOR,
TableCellNGProps,
} from '../types';
import { getCellColors, getTextAlign } from '../utils';
import { getCellColors, getDisplayName, getTextAlign } from '../utils';
import { ActionsCell } from './ActionsCell';
import AutoCell from './AutoCell';
@ -49,6 +49,7 @@ export function TableCellNG(props: TableCellNGProps) {
} = props;
const cellInspect = field.config?.custom?.inspect ?? false;
const displayName = getDisplayName(field);
const { config: fieldConfig } = field;
const defaultCellOptions: TableAutoCellOptions = { type: TableCellDisplayMode.Auto };
@ -180,15 +181,23 @@ export function TableCellNG(props: TableCellNGProps) {
const onFilterFor = useCallback(() => {
if (onCellFilterAdded) {
onCellFilterAdded({ key: field.name, operator: FILTER_FOR_OPERATOR, value: String(value ?? '') });
onCellFilterAdded({
key: displayName,
operator: FILTER_FOR_OPERATOR,
value: String(value ?? ''),
});
}
}, [field.name, onCellFilterAdded, value]);
}, [displayName, onCellFilterAdded, value]);
const onFilterOut = useCallback(() => {
if (onCellFilterAdded) {
onCellFilterAdded({ key: field.name, operator: FILTER_OUT_OPERATOR, value: String(value ?? '') });
onCellFilterAdded({
key: displayName,
operator: FILTER_OUT_OPERATOR,
value: String(value ?? ''),
});
}
}, [field.name, onCellFilterAdded, value]);
}, [displayName, onCellFilterAdded, value]);
return (
<div ref={divWidthRef} onMouseEnter={handleMouseEnter} onMouseLeave={handleMouseLeave} className={styles.cell}>

@ -1,5 +1,7 @@
import { Field, formattedValueToString, SelectableValue } from '@grafana/data';
import { getDisplayName } from '../utils';
export function calculateUniqueFieldValues(rows: any[], field?: Field) {
if (!field || rows.length === 0) {
return {};
@ -9,7 +11,7 @@ export function calculateUniqueFieldValues(rows: any[], field?: Field) {
for (let index = 0; index < rows.length; index++) {
const row = rows[index];
const fieldValue = row[field.name];
const fieldValue = row[getDisplayName(field)];
const displayValue = field.display ? field.display(fieldValue) : fieldValue;
const value = field.display ? formattedValueToString(displayValue) : displayValue;

@ -48,6 +48,7 @@ import {
getCellHeightCalculator,
getComparator,
getDefaultRowHeight,
getDisplayName,
getFooterItemNG,
getFooterStyles,
getIsNestedTable,
@ -55,6 +56,7 @@ import {
getTextAlign,
handleSort,
MapFrameToGridOptions,
processNestedTableRows,
shouldTextOverflow,
} from './utils';
@ -196,7 +198,7 @@ export function TableNG(props: TableNGProps) {
// Create a map of column key to column type
const columnTypes = useMemo(
() => props.data.fields.reduce<ColumnTypes>((acc, { name, type }) => ({ ...acc, [name]: type }), {}),
() => props.data.fields.reduce<ColumnTypes>((acc, field) => ({ ...acc, [getDisplayName(field)]: field.type }), {}),
[props.data.fields]
);
@ -204,7 +206,10 @@ export function TableNG(props: TableNGProps) {
const textWraps = useMemo(
() =>
props.data.fields.reduce<{ [key: string]: boolean }>(
(acc, { name, config }) => ({ ...acc, [name]: config?.custom?.cellOptions?.wrapText ?? false }),
(acc, field) => ({
...acc,
[getDisplayName(field)]: field.config?.custom?.cellOptions?.wrapText ?? false,
}),
{}
),
[props.data.fields]
@ -218,12 +223,13 @@ export function TableNG(props: TableNGProps) {
const widths: Record<string, number> = {};
// Set default widths from field config if they exist
props.data.fields.forEach(({ name, config }) => {
const configWidth = config?.custom?.width;
props.data.fields.forEach((field) => {
const displayName = getDisplayName(field);
const configWidth = field.config?.custom?.width;
const totalWidth = typeof configWidth === 'number' ? configWidth : COLUMN.DEFAULT_WIDTH;
// subtract out padding and 1px right border
const contentWidth = totalWidth - 2 * TABLE.CELL_PADDING - 1;
widths[name] = contentWidth;
widths[displayName] = contentWidth;
});
// Measure actual widths if available
@ -243,9 +249,9 @@ export function TableNG(props: TableNGProps) {
}, [props.data.fields]);
const fieldDisplayType = useMemo(() => {
return props.data.fields.reduce<Record<string, TableCellDisplayMode>>((acc, { config, name }) => {
if (config?.custom?.cellOptions?.type) {
acc[name] = config.custom.cellOptions.type;
return props.data.fields.reduce<Record<string, TableCellDisplayMode>>((acc, field) => {
if (field.config?.custom?.cellOptions?.type) {
acc[getDisplayName(field)] = field.config.custom.cellOptions.type;
}
return acc;
}, {});
@ -263,12 +269,6 @@ export function TableNG(props: TableNGProps) {
[textWraps, columnTypes, getColumnWidths, headersLength, fieldDisplayType]
);
const getDisplayedValue = (row: TableRow, key: string) => {
const field = props.data.fields.find((field) => field.name === key)!;
const displayedValue = formattedValueToString(field.display!(row[key]));
return displayedValue;
};
// Filter rows
const filteredRows = useMemo(() => {
const filterValues = Object.entries(filter);
@ -278,6 +278,13 @@ export function TableNG(props: TableNGProps) {
return rows;
}
// Helper function to get displayed value
const getDisplayedValue = (row: TableRow, key: string) => {
const field = props.data.fields.find((field) => field.name === key)!;
const displayedValue = formattedValueToString(field.display!(row[key]));
return displayedValue;
};
// Update crossFilterOrder
const filterKeys = new Set(filterValues.map(([key]) => key));
filterKeys.forEach((key) => {
@ -293,6 +300,28 @@ export function TableNG(props: TableNGProps) {
// reset crossFilterRows
crossFilterRows.current = {};
// For nested tables, only filter parent rows and keep their children
if (isNestedTable) {
return processNestedTableRows(rows, (parents) =>
parents.filter((row) => {
for (const [key, value] of filterValues) {
const displayedValue = getDisplayedValue(row, key);
if (!value.filteredSet.has(displayedValue)) {
return false;
}
// collect rows for crossFilter
if (!crossFilterRows.current[key]) {
crossFilterRows.current[key] = [row];
} else {
crossFilterRows.current[key].push(row);
}
}
return true;
})
);
}
// Regular filtering for non-nested tables
return rows.filter((row) => {
for (const [key, value] of filterValues) {
const displayedValue = getDisplayedValue(row, key);
@ -308,35 +337,38 @@ export function TableNG(props: TableNGProps) {
}
return true;
});
}, [rows, filter, props.data.fields]); // eslint-disable-line react-hooks/exhaustive-deps
}, [rows, filter, isNestedTable, props.data.fields]);
// Sort rows
const sortedRows = useMemo(() => {
const comparators = sortColumns.map(({ columnKey }) => getComparator(columnTypes[columnKey]));
const sortDirs = sortColumns.map(({ direction }) => (direction === 'ASC' ? 1 : -1));
if (sortColumns.length === 0) {
return filteredRows;
}
return filteredRows.slice().sort((a, b) => {
// Common sort comparator function
const compareRows = (a: TableRow, b: TableRow): number => {
let result = 0;
let sortIndex = 0;
for (const { columnKey } of sortColumns) {
const compare = comparators[sortIndex];
result = sortDirs[sortIndex] * compare(a[columnKey], b[columnKey]);
for (let i = 0; i < sortColumns.length; i++) {
const { columnKey, direction } = sortColumns[i];
const compare = getComparator(columnTypes[columnKey]);
const sortDir = direction === 'ASC' ? 1 : -1;
result = sortDir * compare(a[columnKey], b[columnKey]);
if (result !== 0) {
break;
}
}
return result;
};
sortIndex += 1;
// Handle nested tables
if (isNestedTable) {
return processNestedTableRows(filteredRows, (parents) => [...parents].sort(compareRows));
}
return result;
});
}, [filteredRows, sortColumns, columnTypes]);
// Regular sort for tables without nesting
return filteredRows.slice().sort((a, b) => compareRows(a, b));
}, [filteredRows, sortColumns, columnTypes, isNestedTable]);
// Paginated rows
// TODO consolidate calculations into pagination wrapper component and only use when needed
@ -444,8 +476,6 @@ export function TableNG(props: TableNGProps) {
ctx,
onSortByChange,
rows,
// INFO: sortedRows is for correct row indexing for cell background coloring
sortedRows,
setContextMenuProps,
setFilter,
setIsInspecting,
@ -655,7 +685,6 @@ export function mapFrameToDataGrid({
ctx,
onSortByChange,
rows,
sortedRows,
setContextMenuProps,
setFilter,
setIsInspecting,
@ -762,7 +791,7 @@ export function mapFrameToDataGrid({
return;
}
const fieldTableOptions: TableFieldOptionsType = field.config.custom || {};
const key = field.name;
const key = getDisplayName(field);
const justifyColumnContent = getTextAlign(field);
const footerStyles = getFooterStyles(justifyColumnContent);
@ -778,9 +807,9 @@ export function mapFrameToDataGrid({
key,
name: field.name,
field,
cellClass: textWraps[field.name] ? styles.cellWrapped : styles.cell,
cellClass: textWraps[getDisplayName(field)] ? styles.cellWrapped : styles.cell,
renderCell: (props: RenderCellProps<TableRow, TableSummaryRow>): JSX.Element => {
const { row, rowIdx } = props;
const { row } = props;
const cellType = field.config?.custom?.cellOptions?.type ?? TableCellDisplayMode.Auto;
const value = row[key];
// Cell level rendering here
@ -794,7 +823,7 @@ export function mapFrameToDataGrid({
timeRange={timeRange ?? getDefaultTimeRange()}
height={defaultRowHeight}
justifyContent={justifyColumnContent}
rowIdx={sortedRows[rowIdx].__index}
rowIdx={row.__index}
shouldTextOverflow={() =>
shouldTextOverflow(
key,
@ -805,7 +834,7 @@ export function mapFrameToDataGrid({
defaultLineHeight,
defaultRowHeight,
TABLE.CELL_PADDING,
textWraps[field.name],
textWraps[getDisplayName(field)],
field,
cellType
)

@ -11,6 +11,7 @@ import {
ActionModel,
InterpolateFunction,
FieldType,
DataFrameWithValue,
} from '@grafana/data';
import { TableCellOptions, TableCellHeight, TableFieldOptions } from '@grafana/schema';
@ -66,6 +67,7 @@ export type TableCellValue =
| Date // FieldType.time
| DataFrame // For nested data
| DataFrame[] // For nested frames
| DataFrameWithValue // For sparklines
| undefined; // For undefined values
export interface TableRow {

@ -313,7 +313,7 @@ export function getFooterItemNG(rows: TableRow[], field: Field, options: TableFo
const value = reduceField({
field: {
...field,
values: rows.map((row) => row[field.name]),
values: rows.map((row) => row[getDisplayName(field)]),
},
reducers: options.reducer,
})[calc];
@ -470,7 +470,7 @@ export const frameToRecords = (frame: DataFrame): TableRow[] => {
rows[rowCount] = {
__depth: 0,
__index: i,
${frame.fields.map((field, fieldIdx) => `${JSON.stringify(field.name)}: values[${fieldIdx}][i]`).join(',')}
${frame.fields.map((field, fieldIdx) => `${JSON.stringify(getDisplayName(field))}: values[${fieldIdx}][i]`).join(',')}
};
rowCount += 1;
if (rows[rowCount-1]['Nested frames']){
@ -502,7 +502,6 @@ export interface MapFrameToGridOptions extends TableNGProps {
ctx: CanvasRenderingContext2D;
onSortByChange?: (sortBy: TableSortByFieldState[]) => void;
rows: TableRow[];
sortedRows: TableRow[];
setContextMenuProps: (props: { value: string; top?: number; left?: number; mode?: TableCellInspectorMode }) => void;
setFilter: React.Dispatch<React.SetStateAction<FilterType>>;
setIsInspecting: (isInspecting: boolean) => void;
@ -520,6 +519,12 @@ export interface MapFrameToGridOptions extends TableNGProps {
const compare = new Intl.Collator('en', { sensitivity: 'base', numeric: true }).compare;
export function getComparator(sortColumnType: FieldType): Comparator {
switch (sortColumnType) {
// Handle sorting for frame type fields (sparklines)
case FieldType.frame:
return (a, b) => {
// @ts-ignore The values are DataFrameWithValue
return (a?.value ?? 0) - (b?.value ?? 0);
};
case FieldType.time:
case FieldType.number:
case FieldType.boolean:
@ -594,3 +599,42 @@ export function migrateTableDisplayModeToCellOptions(displayMode: TableCellDispl
/** Returns true if the DataFrame contains nested frames */
export const getIsNestedTable = (dataFrame: DataFrame): boolean =>
dataFrame.fields.some(({ type }) => type === FieldType.nestedFrames);
/**
 * Applies a filter/sort operation to the parent rows of a nested table while
 * keeping each child row attached directly after its parent.
 */
export const processNestedTableRows = (
  rows: TableRow[],
  processParents: (parents: TableRow[]) => TableRow[]
): TableRow[] => {
  // Split the flat list into parents (depth 0) and children keyed by their
  // parent's __index: the array keeps parent order for processing, the map
  // gives O(1) child lookup when reassembling.
  const parentRows: TableRow[] = [];
  const childRows = new Map<number, TableRow>();

  for (const row of rows) {
    if (Number(row.__depth) === 0) {
      parentRows.push(row);
    } else {
      childRows.set(Number(row.__index), row);
    }
  }

  // Let the caller filter or sort the parent rows only.
  const processedParents = processParents(parentRows);

  // Re-interleave: every surviving parent is followed by its child, if any.
  const result: TableRow[] = [];
  for (const parent of processedParents) {
    result.push(parent);
    const child = childRows.get(Number(parent.__index));
    if (child) {
      result.push(child);
    }
  }

  return result;
};
/** Returns the field's computed display name, falling back to its raw name. */
export const getDisplayName = (field: Field): string => {
  const { state, name } = field;
  return state?.displayName ?? name;
};

@ -0,0 +1,13 @@
import { FeatureToggles } from '@grafana/data';
type FeatureToggleName = keyof FeatureToggles;

/**
 * Reads the value of a feature toggle from the bootstrap settings.
 * @param featureName featureToggle name
 * @param def default value if featureToggles aren't defined, false if not provided
 * @returns featureToggle value or def.
 */
export function getFeatureToggle(featureName: FeatureToggleName, def = false) {
  const value = window.grafanaBootData?.settings.featureToggles[featureName];
  return value ?? def;
}

@ -20,13 +20,17 @@ var (
var _ ProtoClient = (*protoClient)(nil)
type ProtoClient interface {
type PluginV2 interface {
pluginv2.DataClient
pluginv2.ResourceClient
pluginv2.DiagnosticsClient
pluginv2.StreamClient
pluginv2.AdmissionControlClient
pluginv2.ResourceConversionClient
}
type ProtoClient interface {
PluginV2
PID(context.Context) (string, error)
PluginID() string

@ -0,0 +1,28 @@
package cipher
import (
"context"
)
const (
	// AesCfb names the legacy AES-CFB algorithm. The provider package only
	// registers it as a Decrypter (backwards compatibility), not an Encrypter.
	AesCfb = "aes-cfb"
	// AesGcm names the AES-GCM AEAD algorithm used for new encryptions.
	AesGcm = "aes-gcm"
)

// Cipher can both encrypt and decrypt payloads.
type Cipher interface {
	Encrypter
	Decrypter
}

// Encrypter encrypts a payload using the given secret.
type Encrypter interface {
	Encrypt(ctx context.Context, payload []byte, secret string) ([]byte, error)
}

// Decrypter decrypts a payload using the given secret.
type Decrypter interface {
	Decrypt(ctx context.Context, payload []byte, secret string) ([]byte, error)
}

// Provider exposes the available ciphers and deciphers keyed by algorithm name.
type Provider interface {
	ProvideCiphers() map[string]Encrypter
	ProvideDeciphers() map[string]Decrypter
}

@ -0,0 +1,14 @@
package provider
import (
"crypto/pbkdf2"
"crypto/sha256"
)
// aes256CipherKey derives the AES key for the given password and salt via
// PBKDF2-HMAC-SHA256. It always returns 32 bytes, which makes aes.NewCipher
// select AES-256.
// The parameters intentionally match the legacy secrets system; changing
// them means either rotating all encrypted secrets or keeping this exact
// implementation around as a fallback.
func aes256CipherKey(password string, salt []byte) ([]byte, error) {
	const (
		iterations = 10000 // must match the legacy secrets system
		keyLength  = 32    // 32 bytes => AES-256 in aes.NewCipher
	)
	return pbkdf2.Key(sha256.New, password, salt, iterations, keyLength)
}

@ -0,0 +1,44 @@
package provider
import (
"crypto/rand"
"encoding/hex"
"testing"
"github.com/stretchr/testify/require"
)
// TestAes256CipherKey verifies the KDF always yields a 32-byte key (the
// length that makes aes.NewCipher pick AES-256) regardless of how long or
// random the password and salt are.
func TestAes256CipherKey(t *testing.T) {
	t.Parallel()

	t.Run("with regular password", func(t *testing.T) {
		t.Parallel()

		key, err := aes256CipherKey("password", []byte("salt"))
		require.NoError(t, err)
		require.Len(t, key, 32)
	})

	t.Run("with very long password", func(t *testing.T) {
		t.Parallel()

		// Inputs longer than the key length must still reduce to 32 bytes.
		key, err := aes256CipherKey("a very long secret key that is much larger than 32 bytes", []byte("salt"))
		require.NoError(t, err)
		require.Len(t, key, 32)
	})

	t.Run("withstands randomness", func(t *testing.T) {
		t.Parallel()

		// Large random password/salt to shake out any length assumptions.
		password := make([]byte, 512)
		salt := make([]byte, 512)
		_, err := rand.Read(password)
		require.NoError(t, err, "failed to generate random password")
		_, err = rand.Read(salt)
		require.NoError(t, err, "failed to generate random salt")

		key, err := aes256CipherKey(hex.EncodeToString(password), salt)
		require.NoError(t, err, "failed to generate key")
		require.Len(t, key, 32, "key should be 32 bytes long")
	})
}

@ -0,0 +1,118 @@
package provider
import (
"context"
"crypto/aes"
cpr "crypto/cipher"
"crypto/rand"
"io"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher"
)
// gcmSaltLength is the number of salt bytes prepended to every GCM blob.
const gcmSaltLength = 8

// Compile-time checks that aesGcmCipher supports both directions.
var (
	_ cipher.Encrypter = (*aesGcmCipher)(nil)
	_ cipher.Decrypter = (*aesGcmCipher)(nil)
)

// aesGcmCipher encrypts and decrypts payloads with AES-256-GCM.
type aesGcmCipher struct {
	// randReader is used to generate random bytes for the salt and nonce.
	// This allows us to change out the entropy source for testing.
	randReader io.Reader
}

// newAesGcmCipher returns an aesGcmCipher backed by crypto/rand entropy.
func newAesGcmCipher() aesGcmCipher {
	return aesGcmCipher{
		randReader: rand.Reader,
	}
}
// Encrypt seals payload with AES-256-GCM using a key derived from secret
// and a fresh salt, drawing both the salt and the nonce from c.randReader.
//
// The returned blob is laid out as:
//
//	Salt    Nonce       Encrypted
//	 |        |          Payload
//	 |        |             |
//	 | +------v-------------+-+
//	 +->SSSSSSSNNNNNNNEEEEEEEEE
//	   +---------------------+
func (c aesGcmCipher) Encrypt(_ context.Context, payload []byte, secret string) ([]byte, error) {
	salt, err := c.readEntropy(gcmSaltLength)
	if err != nil {
		return nil, err
	}

	key, err := aes256CipherKey(secret, salt)
	if err != nil {
		return nil, err
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	gcm, err := cpr.NewGCM(block)
	if err != nil {
		return nil, err
	}

	nonce, err := c.readEntropy(gcm.NonceSize())
	if err != nil {
		return nil, err
	}

	// Assemble the output in one correctly-sized buffer and let Seal append
	// the ciphertext to it. The previous `append(salt, nonce...)` form only
	// avoided aliasing salt's backing array because readEntropy allocates
	// with exact capacity; allocating once here is both safer and avoids the
	// extra copies of the chained appends.
	out := make([]byte, 0, len(salt)+len(nonce)+len(payload)+gcm.Overhead())
	out = append(out, salt...)
	out = append(out, nonce...)
	return gcm.Seal(out, nonce, payload, nil), nil
}
// Decrypt opens a blob previously produced by Encrypt. The input is framed as:
//
//	Salt    Nonce       Encrypted
//	 |        |          Payload
//	 |        |             |
//	 | +------v-------------+-+
//	 +->SSSSSSSNNNNNNNEEEEEEEEE
//	   +---------------------+
func (c aesGcmCipher) Decrypt(_ context.Context, payload []byte, secret string) ([]byte, error) {
	// Guard before slicing off the salt; slicing a shorter payload would panic.
	if len(payload) < gcmSaltLength {
		return nil, ErrPayloadTooShort
	}
	salt := payload[:gcmSaltLength]
	rest := payload[gcmSaltLength:]

	// The nonce length is only known once the AEAD exists, so build the
	// cipher from the derived key before splitting off the nonce.
	key, err := aes256CipherKey(secret, salt)
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cpr.NewGCM(block)
	if err != nil {
		return nil, err
	}

	// Same panic guard for the nonce portion.
	if len(rest) < gcm.NonceSize() {
		return nil, ErrPayloadTooShort
	}
	nonce := rest[:gcm.NonceSize()]
	ciphertext := rest[gcm.NonceSize():]

	return gcm.Open(nil, nonce, ciphertext, nil)
}
// readEntropy returns exactly n random bytes from the cipher's entropy
// source, or an error if the source cannot supply them all.
func (c aesGcmCipher) readEntropy(n int) ([]byte, error) {
	buf := make([]byte, n)
	_, err := io.ReadFull(c.randReader, buf)
	if err != nil {
		return nil, err
	}
	return buf, nil
}

@ -0,0 +1,144 @@
package provider
import (
"bytes"
"encoding/hex"
"io"
"testing"
"github.com/stretchr/testify/require"
)
// TestGcmEncryption pins Encrypt's output against a fixture generated by
// test_fixtures/aesgcm_encrypt_correct_output.rb, and checks that failures
// of the entropy source surface as errors.
func TestGcmEncryption(t *testing.T) {
	t.Parallel()

	t.Run("encrypts correctly", func(t *testing.T) {
		t.Parallel()

		// The expected values are generated by test_fixtures/aesgcm_encrypt_correct_output.rb
		salt := []byte("abcdefgh")
		nonce := []byte("123456789012")
		cipher := newAesGcmCipher()
		// Deterministic "randomness": Encrypt reads the salt first, then the nonce.
		cipher.randReader = bytes.NewReader(append(salt, nonce...))

		payload := []byte("grafana unit test")
		secret := "secret here"

		encrypted, err := cipher.Encrypt(t.Context(), payload, secret)
		require.NoError(t, err, "failed to encrypt with GCM")
		require.NotEmpty(t, encrypted, "encrypted payload should not be empty")
		require.Equal(t, "61626364656667683132333435363738393031328123655291d1f5eebe34c54ba55900f68a2700818a8fda9e2921190b67271d97ce",
			hex.EncodeToString(encrypted), "encrypted payload should match expected value")

		// Sanity check that all our pre-provided random data is used.
		_, err = cipher.randReader.Read([]byte{0})
		require.ErrorIs(t, err, io.EOF, "expected us to have read the entire random source")
	})

	t.Run("fails if random source is empty", func(t *testing.T) {
		t.Parallel()

		cipher := newAesGcmCipher()
		cipher.randReader = bytes.NewReader([]byte{})

		payload := []byte("grafana unit test")
		secret := "secret here"

		_, err := cipher.Encrypt(t.Context(), payload, secret)
		require.Error(t, err, "expected error when random source is empty")
	})

	t.Run("fails if random source does not provide nonce", func(t *testing.T) {
		t.Parallel()

		// Scenario: the random source has enough entropy for the salt, but not for the nonce.
		// In this case, we should fail with an error.
		cipher := newAesGcmCipher()
		cipher.randReader = bytes.NewReader([]byte("abcdefgh")) // 8 bytes for salt, but not enough for nonce

		payload := []byte("grafana unit test")
		secret := "secret here"

		_, err := cipher.Encrypt(t.Context(), payload, secret)
		require.Error(t, err, "expected error when random source does not provide nonce")
	})
}
// TestGcmDecryption checks Decrypt against the same Ruby-generated fixture
// as TestGcmEncryption, plus the failure paths: truncated payloads, a
// corrupted authentication tag, and a wrong secret.
func TestGcmDecryption(t *testing.T) {
	t.Parallel()

	t.Run("decrypts correctly", func(t *testing.T) {
		t.Parallel()

		// The expected values are generated by test_fixtures/aesgcm_encrypt_correct_output.rb
		cipher := newAesGcmCipher()
		cipher.randReader = bytes.NewReader([]byte{}) // should not be used

		payload, err := hex.DecodeString("61626364656667683132333435363738393031328123655291d1f5eebe34c54ba55900f68a2700818a8fda9e2921190b67271d97ce")
		require.NoError(t, err, "failed to decode pre-computed encrypted payload")
		secret := "secret here"

		decrypted, err := cipher.Decrypt(t.Context(), payload, secret)
		require.NoError(t, err, "failed to decrypt with GCM")
		require.Equal(t, "grafana unit test", string(decrypted), "decrypted payload should match expected value")
	})

	t.Run("fails if payload is shorter than salt", func(t *testing.T) {
		t.Parallel()

		cipher := newAesGcmCipher()
		cipher.randReader = bytes.NewReader([]byte{}) // should not be used

		payload := []byte{1, 2, 3, 4}
		secret := "secret here"

		_, err := cipher.Decrypt(t.Context(), payload, secret)
		require.Error(t, err, "expected error when payload is shorter than salt")
	})

	t.Run("fails if payload has length of salt but no nonce", func(t *testing.T) {
		t.Parallel()

		cipher := newAesGcmCipher()
		cipher.randReader = bytes.NewReader([]byte{}) // should not be used

		payload := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} // salt and a little more
		secret := "secret here"

		_, err := cipher.Decrypt(t.Context(), payload, secret)
		require.Error(t, err, "expected error when payload has length of salt but no nonce")
	})

	t.Run("fails when authentication tag is wrong", func(t *testing.T) {
		t.Parallel()

		cipher := newAesGcmCipher()
		cipher.randReader = bytes.NewReader([]byte{}) // should not be used

		// Removed 2 bytes from the end of the payload to simulate a wrong authentication tag.
		payload, err := hex.DecodeString("61626364656667683132333435363738393031328123655291d1f5eebe34c54ba55900f68a2700818a8fda9e2921190b67271d")
		require.NoError(t, err, "failed to decode pre-computed encrypted payload")
		secret := "secret here"

		_, err = cipher.Decrypt(t.Context(), payload, secret)
		require.Error(t, err, "expected to fail validation")
	})

	t.Run("fails if secret does not match", func(t *testing.T) {
		t.Parallel()

		cipher := newAesGcmCipher()
		cipher.randReader = bytes.NewReader([]byte{}) // should not be used

		payload, err := hex.DecodeString("61626364656667683132333435363738393031328123655291d1f5eebe34c54ba55900f68a2700818a8fda9e2921190b67271d97ce")
		require.NoError(t, err, "failed to decode pre-computed encrypted payload")
		secret := "should have been 'secret here'"

		_, err = cipher.Decrypt(t.Context(), payload, secret)
		require.Error(t, err, "expected to fail decryption")
	})
}

@ -0,0 +1,52 @@
package provider
import (
"context"
"crypto/aes"
cpr "crypto/cipher"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher"
)
// cfbSaltLength is the number of salt bytes prepended to every legacy CFB blob.
const cfbSaltLength = 8

// Compile-time check: CFB is decrypt-only. There is deliberately no
// cipher.Encrypter implementation (see the no-CFB-encryption test).
var _ cipher.Decrypter = aesCfbDecipher{}

// aesCfbDecipher decrypts payloads produced by the legacy AES-CFB scheme.
type aesCfbDecipher struct{}
// Decrypt decodes a legacy AES-CFB blob, which is framed as:
//
//	Salt    IV          Encrypted
//	 |        |          Payload
//	 |        |             |
//	 | +------v-------------+-+
//	 +->SSSSSSSNNNNNNNEEEEEEEEE
//	   +---------------------+
func (aesCfbDecipher) Decrypt(_ context.Context, payload []byte, secret string) ([]byte, error) {
	// Need at least the salt plus one IV block; slicing below would panic otherwise.
	if len(payload) < cfbSaltLength+aes.BlockSize {
		return nil, ErrPayloadTooShort
	}

	key, err := aes256CipherKey(secret, payload[:cfbSaltLength])
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	iv := payload[cfbSaltLength : cfbSaltLength+aes.BlockSize]
	ciphertext := payload[cfbSaltLength+aes.BlockSize:]
	plaintext := make([]byte, len(ciphertext))

	//nolint:staticcheck // We need to support CFB _decryption_, though we don't support it for future encryption.
	cpr.NewCFBDecrypter(block, iv).XORKeyStream(plaintext, ciphertext)

	return plaintext, nil
}

@ -0,0 +1,70 @@
package provider
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/require"
)
// TestCfbDecryption checks legacy CFB decryption against a Ruby-generated
// fixture. CFB carries no authentication tag, so a wrong key or corrupted
// ciphertext yields garbage rather than an error — the tests only assert
// the real plaintext is not exposed in those cases.
func TestCfbDecryption(t *testing.T) {
	t.Parallel()

	t.Run("decrypts correctly", func(t *testing.T) {
		t.Parallel()

		// The expected values are generated by test_fixtures/aescfb_encrypt_correct_output.rb
		cipher := aesCfbDecipher{}
		payload, err := hex.DecodeString("616263646566676831323334353637383930313234353637f1114227cb6af678cad6ee35f67f25f40b")
		require.NoError(t, err, "failed to decode hex string")
		secret := "secret here"

		decrypted, err := cipher.Decrypt(t.Context(), payload, secret)
		require.NoError(t, err, "failed to decrypt with CFB")
		require.Equal(t, "grafana unit test", string(decrypted), "decrypted payload should match expected value")
	})

	t.Run("fails if payload is too short", func(t *testing.T) {
		t.Parallel()

		cipher := aesCfbDecipher{}
		payload := []byte{1, 2, 3, 4}
		secret := "secret here"

		_, err := cipher.Decrypt(t.Context(), payload, secret)
		require.Error(t, err, "expected error when payload is shorter than salt")
	})

	t.Run("fails if payload is not an AES-encrypted value", func(t *testing.T) {
		t.Parallel()

		cipher := aesCfbDecipher{}
		// Truncated ciphertext; still long enough to pass the length guard.
		payload, err := hex.DecodeString("616263646566676831323334353637383930313234353637f1114227cb")
		require.NoError(t, err, "failed to decode hex string")
		secret := "secret here"

		// We don't have any authentication tag, so we can't return an error in this case.
		decrypted, err := cipher.Decrypt(t.Context(), payload, secret)
		require.NoError(t, err, "expected no error")
		require.NotEqual(t, "grafana unit test", string(decrypted), "decrypted payload should not match real exposed secret")
	})

	t.Run("fails if secret is wrong", func(t *testing.T) {
		t.Parallel()

		cipher := aesCfbDecipher{}
		payload, err := hex.DecodeString("616263646566676831323334353637383930313234353637f1114227cb6af678cad6ee35f67f25f40b")
		require.NoError(t, err, "failed to decode hex string")
		secret := "should've been 'secret here'"

		// We don't have any authentication tag, so we can't return an error in this case.
		decrypted, err := cipher.Decrypt(t.Context(), payload, secret)
		require.NoError(t, err, "expected no error")
		require.NotEqual(t, "grafana unit test", string(decrypted), "decrypted payload should not match real exposed secret")
	})
}

@ -0,0 +1,7 @@
package provider
import "errors"
// ErrPayloadTooShort is returned when the payload is too short to contain
// the expected salt/nonce/IV framing and so cannot be decrypted.
// In some situations, the error may instead be io.ErrUnexpectedEOF or a cipher-specific error.
var ErrPayloadTooShort = errors.New("payload too short")

@ -0,0 +1,18 @@
package provider
import (
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher"
)
// ProvideCiphers returns the algorithms available for encrypting new
// payloads. AES-CFB is intentionally absent: it is supported for
// decryption only.
func ProvideCiphers() map[string]cipher.Encrypter {
	return map[string]cipher.Encrypter{
		cipher.AesGcm: newAesGcmCipher(),
	}
}
// ProvideDeciphers returns the algorithms available for decryption,
// including legacy AES-CFB so previously-encrypted payloads stay readable.
func ProvideDeciphers() map[string]cipher.Decrypter {
	return map[string]cipher.Decrypter{
		cipher.AesGcm: newAesGcmCipher(),
		cipher.AesCfb: aesCfbDecipher{},
	}
}

@ -0,0 +1,17 @@
package provider_test
import (
"testing"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher/provider"
"github.com/stretchr/testify/require"
)
// TestNoCfbEncryptionCipher guards against AES-CFB ever being registered as
// an encryption cipher.
// CFB encryption is insecure, and as such we should not permit any cipher for encryption to be added.
// Changing/removing this test MUST be accompanied with an approval from the app security team.
func TestNoCfbEncryptionCipher(t *testing.T) {
	ciphers := provider.ProvideCiphers()
	require.NotContains(t, ciphers, cipher.AesCfb, "CFB cipher should not be used for encryption")
}

@ -0,0 +1,35 @@
#!/usr/bin/env ruby
# Used by ../decipher_aescfb_test.go
# Why Ruby? It has a mostly available OpenSSL library that can be easily fetched (and most who have Ruby already have it!). And it is easy to read for this purpose.

require 'openssl'

salt = "abcdefgh"
nonce = "1234567890124567"
secret = "secret here"
plaintext = "grafana unit test"

# reimpl of aes256CipherKey
# the key is always the same value given the inputs
iterations = 10_000
len = 32
hash = OpenSSL::Digest::SHA256.new
key = OpenSSL::KDF.pbkdf2_hmac(secret, salt: salt, iterations: iterations, length: len, hash: hash)

cipher = OpenSSL::Cipher::AES256.new(:CFB).encrypt
cipher.iv = nonce
cipher.key = key
# Fold `final` into the ciphertext: the previous version printed it raw
# (not hex-encoded) and before the encrypted payload. CFB is a stream mode,
# so `final` is empty and the printed output is unchanged — but the framing
# is now correct should the mode ever change.
encrypted = cipher.update(plaintext) + cipher.final

def to_hex(s)
  s.unpack('H*').first
end

# Salt  Nonce  Encrypted
#  |      |     Payload
#  |      |        |
#  | +----v--------+-------+ |
#  +-->SSSSSSSNNNNNNNEEEEEEEEE<--+
#      +-----------------------+
printf("%s%s%s\n", to_hex(salt), to_hex(nonce), to_hex(encrypted))

@ -0,0 +1,38 @@
#!/usr/bin/env ruby
# Used by ../cipher_aesgcm_test.go
# Why Ruby? It has a mostly available OpenSSL library that can be easily fetched (and most who have Ruby already have it!). And it is easy to read for this purpose.

require 'openssl'

# randReader field
salt = "abcdefgh"
nonce = "123456789012"

# inputs to Encrypt
secret = "secret here"
plaintext = "grafana unit test"

# reimpl of aes256CipherKey
# the key is always the same value given the inputs
iterations = 10_000
len = 32
hash = OpenSSL::Digest::SHA256.new
key = OpenSSL::KDF.pbkdf2_hmac(secret, salt: salt, iterations: iterations, length: len, hash: hash)

cipher = OpenSSL::Cipher::AES256.new(:GCM).encrypt
cipher.iv = nonce
cipher.key = key
cipher.auth_data = ""
# Fold `final` into the ciphertext: the previous version printed it raw
# (not hex-encoded) and before the encrypted payload. GCM is a stream mode,
# so `final` is empty and the printed output is unchanged. Note `final`
# must run before `auth_tag` is read.
encrypted = cipher.update(plaintext) + cipher.final

def to_hex(s)
  s.unpack('H*').first
end

# Salt  Nonce  Encrypted
#  |      |     Payload
#  |      |        |
#  | +----v--------+-------+ |
#  +-->SSSSSSSNNNNNNNEEEEEEEEE<--+
#      +-----------------------+
printf("%s%s%s%s\n", to_hex(salt), to_hex(nonce), to_hex(encrypted), to_hex(cipher.auth_tag))

@ -0,0 +1,199 @@
package service
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"go.opentelemetry.io/otel/attribute"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/infra/usagestats"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher"
encryptionprovider "github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher/provider"
"github.com/grafana/grafana/pkg/setting"
)
const (
	// encryptionAlgorithmDelimiter frames the base64-encoded algorithm name
	// prefixed to tagged payloads: "*<base64 algorithm>*<ciphertext>"
	// (see deriveEncryptionAlgorithm).
	encryptionAlgorithmDelimiter = '*'
)

// Service must not be used for cipher.
// Use secrets.Service implementing envelope encryption instead.
type Service struct {
	tracer       tracing.Tracer
	log          log.Logger
	cfg          *setting.Cfg
	usageMetrics usagestats.Service

	// ciphers/deciphers map algorithm names to their implementations,
	// populated from the cipher provider package.
	ciphers   map[string]cipher.Encrypter
	deciphers map[string]cipher.Decrypter
}
// NewEncryptionService builds the cipher Service from configuration.
// It requires `[secrets_manager]secret_key` and
// `[secrets_manager.encryption]algorithm` to be set, and fails fast when
// the configured algorithm has no registered cipher and decipher.
func NewEncryptionService(
	tracer tracing.Tracer,
	usageMetrics usagestats.Service,
	cfg *setting.Cfg,
) (*Service, error) {
	if cfg.SecretsManagement.SecretKey == "" {
		return nil, fmt.Errorf("`[secrets_manager]secret_key` is not set")
	}

	if cfg.SecretsManagement.Encryption.Algorithm == "" {
		return nil, fmt.Errorf("`[secrets_manager.encryption]algorithm` is not set")
	}

	s := &Service{
		tracer:       tracer,
		log:          log.New("encryption"),
		ciphers:      encryptionprovider.ProvideCiphers(),
		deciphers:    encryptionprovider.ProvideDeciphers(),
		usageMetrics: usageMetrics,
		cfg:          cfg,
	}

	// Validate the configured algorithm up front so misconfiguration is
	// surfaced at startup rather than on first use.
	algorithm := s.cfg.SecretsManagement.Encryption.Algorithm
	if err := s.checkEncryptionAlgorithm(algorithm); err != nil {
		return nil, err
	}

	s.registerUsageMetrics()

	return s, nil
}
// checkEncryptionAlgorithm verifies that both an encrypter and a decrypter
// are registered for the given algorithm name.
func (s *Service) checkEncryptionAlgorithm(algorithm string) error {
	var err error
	// Single logging point for every failure path below.
	defer func() {
		if err != nil {
			s.log.Error("Wrong security encryption configuration", "algorithm", algorithm, "error", err)
		}
	}()

	if _, ok := s.ciphers[algorithm]; !ok {
		err = fmt.Errorf("no cipher registered for encryption algorithm '%s'", algorithm)
		return err
	}

	if _, ok := s.deciphers[algorithm]; !ok {
		err = fmt.Errorf("no decipher registered for encryption algorithm '%s'", algorithm)
		return err
	}

	return nil
}
// registerUsageMetrics reports which encryption algorithm is configured
// through the usage-stats service.
func (s *Service) registerUsageMetrics() {
	s.usageMetrics.RegisterMetricsFunc(func(context.Context) (map[string]any, error) {
		// The algorithm is read at collection time, not captured at registration.
		algorithm := s.cfg.SecretsManagement.Encryption.Algorithm

		return map[string]any{
			fmt.Sprintf("stats.%s.encryption.cipher.%s.count", encryption.UsageInsightsPrefix, algorithm): 1,
		}, nil
	})
}
// Decrypt derives the algorithm from the payload prefix (falling back to
// AES-CFB for legacy ciphertexts without metadata), picks the matching
// decipher and returns the plaintext. Failures are logged before being
// returned to the caller.
func (s *Service) Decrypt(ctx context.Context, payload []byte, secret string) ([]byte, error) {
	ctx, span := s.tracer.Start(ctx, "cipher.service.Decrypt")
	defer span.End()

	logFailure := func(err error) {
		s.log.FromContext(ctx).Error("Decryption failed", "error", err)
	}

	algorithm, toDecrypt, err := s.deriveEncryptionAlgorithm(payload)
	if err != nil {
		logFailure(err)
		return nil, err
	}

	decipher, ok := s.deciphers[algorithm]
	if !ok {
		err := fmt.Errorf("no decipher available for algorithm '%s'", algorithm)
		logFailure(err)
		return nil, err
	}

	span.SetAttributes(attribute.String("cipher.algorithm", algorithm))

	decrypted, err := decipher.Decrypt(ctx, toDecrypt, secret)
	if err != nil {
		logFailure(err)
	}
	return decrypted, err
}
// deriveEncryptionAlgorithm splits a ciphertext of the form
// '*' + base64(algorithm) + '*' + payload into the algorithm name and the
// raw payload. Ciphertexts without that prefix are treated as legacy
// AES-CFB for backwards compatibility.
func (s *Service) deriveEncryptionAlgorithm(payload []byte) (string, []byte, error) {
	if len(payload) == 0 {
		return "", nil, fmt.Errorf("unable to derive encryption algorithm")
	}

	if payload[0] != encryptionAlgorithmDelimiter {
		// No leading delimiter: ciphertext predates algorithm metadata.
		return cipher.AesCfb, payload, nil
	}

	algorithmB64, rest, found := bytes.Cut(payload[1:], []byte{encryptionAlgorithmDelimiter})
	if !found {
		// Leading delimiter but no closing one: also treat as legacy.
		return cipher.AesCfb, payload[1:], nil
	}

	algorithm := make([]byte, base64.RawStdEncoding.DecodedLen(len(algorithmB64)))
	if _, err := base64.RawStdEncoding.Decode(algorithm, algorithmB64); err != nil {
		return "", nil, err
	}

	return string(algorithm), rest, nil
}
// Encrypt encrypts the payload with the configured algorithm and prepends
// a '*' + base64(algorithm) + '*' prefix so Decrypt can later derive the
// algorithm. Failures are logged before being returned.
func (s *Service) Encrypt(ctx context.Context, payload []byte, secret string) ([]byte, error) {
	ctx, span := s.tracer.Start(ctx, "cipher.service.Encrypt")
	defer span.End()

	var err error
	defer func() {
		if err != nil {
			s.log.Error("Encryption failed", "error", err)
		}
	}()

	algorithm := s.cfg.SecretsManagement.Encryption.Algorithm

	// Named encrypter (not cipher) to avoid shadowing the imported cipher package.
	encrypter, ok := s.ciphers[algorithm]
	if !ok {
		err = fmt.Errorf("no cipher available for algorithm '%s'", algorithm)
		return nil, err
	}

	span.SetAttributes(attribute.String("cipher.algorithm", algorithm))

	var encrypted []byte
	encrypted, err = encrypter.Encrypt(ctx, payload, secret)
	if err != nil {
		// BUG FIX: the error was previously only logged (via the deferred
		// closure) while the function went on to return a prefixed
		// ciphertext built from the failed output with a nil error,
		// silently handing callers unusable data.
		return nil, err
	}

	// Prefix layout: '*' + base64(algorithm) + '*'.
	prefix := make([]byte, base64.RawStdEncoding.EncodedLen(len(algorithm))+2)
	prefix[0] = encryptionAlgorithmDelimiter
	base64.RawStdEncoding.Encode(prefix[1:len(prefix)-1], []byte(algorithm))
	prefix[len(prefix)-1] = encryptionAlgorithmDelimiter

	ciphertext := make([]byte, 0, len(prefix)+len(encrypted))
	ciphertext = append(ciphertext, prefix...)
	ciphertext = append(ciphertext, encrypted...)

	return ciphertext, nil
}

@ -0,0 +1,78 @@
package service
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/infra/usagestats"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher"
"github.com/grafana/grafana/pkg/setting"
)
// newGcmService builds an encryption Service configured for AES-GCM,
// failing the test immediately if construction does not succeed.
func newGcmService(t *testing.T) *Service {
	t.Helper()

	cfg := &setting.Cfg{
		SecretsManagement: setting.SecretsManagerSettings{
			SecretKey:          "SdlklWklckeLS",
			EncryptionProvider: "secretKey.v1",
			Encryption: setting.EncryptionSettings{
				DataKeysCacheTTL:        5 * time.Minute,
				DataKeysCleanupInterval: 1 * time.Nanosecond,
				Algorithm:               cipher.AesGcm,
			},
		},
	}

	service, err := NewEncryptionService(tracing.InitializeTracerForTest(), &usagestats.UsageStatsMock{}, cfg)
	require.NoError(t, err, "failed to set up encryption service")

	return service
}
// TestService exercises the cipher service end to end: error handling for
// empty payloads, an AES-GCM round trip, and decryption of a legacy
// ciphertext that carries no algorithm metadata.
func TestService(t *testing.T) {
	t.Parallel()

	t.Run("decrypt empty payload should return error", func(t *testing.T) {
		t.Parallel()

		service := newGcmService(t)

		_, err := service.Decrypt(t.Context(), []byte(""), "1234")
		require.Error(t, err)
		assert.Equal(t, "unable to derive encryption algorithm", err.Error())
	})

	t.Run("encrypt and decrypt with GCM should work", func(t *testing.T) {
		t.Parallel()

		service := newGcmService(t)

		encrypted, err := service.Encrypt(t.Context(), []byte("grafana"), "1234")
		require.NoError(t, err)

		decrypted, err := service.Decrypt(t.Context(), encrypted, "1234")
		require.NoError(t, err)

		// We'll let the provider deal with testing details.
		assert.Equal(t, []byte("grafana"), decrypted)
	})

	t.Run("decrypting legacy ciphertext should work", func(t *testing.T) {
		t.Parallel()

		// Raw slice of bytes that corresponds to the following ciphertext:
		// - 'grafana' as payload
		// - '1234' as secret
		// - no encryption algorithm metadata
		ciphertext := []byte{73, 71, 50, 57, 121, 110, 90, 109, 115, 23, 237, 13, 130, 188, 151, 118, 98, 103, 80, 209, 79, 143, 22, 122, 44, 40, 102, 41, 136, 16, 27}

		service := newGcmService(t)

		decrypted, err := service.Decrypt(t.Context(), ciphertext, "1234")
		require.NoError(t, err)
		assert.Equal(t, []byte("grafana"), decrypted)
	})
}

@ -0,0 +1,3 @@
package encryption

// UsageInsightsPrefix namespaces usage-stats metric keys emitted by the
// secrets manager, e.g. "stats.secrets_manager.encryption.cipher.<alg>.count".
const UsageInsightsPrefix = "secrets_manager"

@ -1765,6 +1765,15 @@ var (
HideFromDocs: true,
Expression: "true", // enabled by default
},
{
Name: "extensionsReadOnlyProxy",
Description: "Use proxy-based read-only objects for plugin extensions instead of deep cloning",
Stage: FeatureStageExperimental,
Owner: grafanaPluginsPlatformSquad,
HideFromAdminPage: true,
HideFromDocs: true,
FrontendOnly: true,
},
}
)

@ -231,3 +231,4 @@ multiTenantFrontend,experimental,@grafana/grafana-frontend-platform,false,false,
alertingListViewV2PreviewToggle,privatePreview,@grafana/alerting-squad,false,false,true
alertRuleUseFiredAtForStartsAt,experimental,@grafana/alerting-squad,false,false,false
alertingBulkActionsInUI,GA,@grafana/alerting-squad,false,false,true
extensionsReadOnlyProxy,experimental,@grafana/plugins-platform-backend,false,false,true

1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
231 alertingListViewV2PreviewToggle privatePreview @grafana/alerting-squad false false true
232 alertRuleUseFiredAtForStartsAt experimental @grafana/alerting-squad false false false
233 alertingBulkActionsInUI GA @grafana/alerting-squad false false true
234 extensionsReadOnlyProxy experimental @grafana/plugins-platform-backend false false true

@ -934,4 +934,8 @@ const (
// FlagAlertingBulkActionsInUI
// Enables the alerting bulk actions in the UI
FlagAlertingBulkActionsInUI = "alertingBulkActionsInUI"
// FlagExtensionsReadOnlyProxy
// Use proxy-based read-only objects for plugin extensions instead of deep cloning
FlagExtensionsReadOnlyProxy = "extensionsReadOnlyProxy"
)

File diff suppressed because it is too large Load Diff

@ -13,12 +13,13 @@ import (
"github.com/grafana/grafana/pkg/infra/slugify"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/services/user"
)
func parseUnstructuredToLegacyFolder(item *unstructured.Unstructured) (*folder.Folder, string, string, error) {
func convertUnstructuredToFolder(item *unstructured.Unstructured, identifiers map[string]*user.User) (*folder.Folder, error) {
meta, err := utils.MetaAccessor(item)
if err != nil {
return nil, "", "", err
return nil, err
}
info, _ := authlib.ParseNamespace(meta.GetNamespace())
@ -45,10 +46,20 @@ func parseUnstructuredToLegacyFolder(item *unstructured.Unstructured) (*folder.F
updated = &tmp
}
creator := meta.GetCreatedBy()
updater := meta.GetUpdatedBy()
if updater == "" {
updater = creator
createdBy, updatedBy := int64(0), int64(0)
createdByUID, updatedByUID := "", ""
if len(identifiers) > 0 {
user, ok := identifiers[meta.GetCreatedBy()]
if ok {
createdBy = user.ID
createdByUID = user.UID
}
user, ok = identifiers[meta.GetUpdatedBy()]
if ok {
updatedBy = user.ID
updatedByUID = user.UID
}
}
manager, _ := meta.GetManagerProperties()
@ -67,90 +78,91 @@ func parseUnstructuredToLegacyFolder(item *unstructured.Unstructured) (*folder.F
Created: created,
Updated: *updated,
OrgID: info.OrgID,
}, creator, updater, nil
CreatedBy: createdBy,
CreatedByUID: createdByUID,
UpdatedBy: updatedBy,
UpdatedByUID: updatedByUID,
}, nil
}
func (ss *FolderUnifiedStoreImpl) UnstructuredToLegacyFolder(ctx context.Context, item *unstructured.Unstructured) (*folder.Folder, error) {
folder, creatorRaw, updaterRaw, err := parseUnstructuredToLegacyFolder(item)
meta, err := utils.MetaAccessor(item)
if err != nil {
return nil, err
}
userUIDtoIDmapping, err := ss.getUserUIDtoIDmappingFromIdentifiers(ctx, []string{creatorRaw, updaterRaw})
identifiers := make(map[string]struct{}, 0)
identifiers[meta.GetCreatedBy()] = struct{}{}
identifiers[meta.GetUpdatedBy()] = struct{}{}
folderUserIdentifiers, err := ss.getFolderIdentifiers(ctx, identifiers)
if err != nil {
return nil, err
}
creatorId := getIdFromMapping(creatorRaw, userUIDtoIDmapping)
updaterId := getIdFromMapping(updaterRaw, userUIDtoIDmapping)
if updaterId == 0 {
updaterId = creatorId
folder, err := convertUnstructuredToFolder(item, folderUserIdentifiers)
if err != nil {
return nil, err
}
folder.Version = int(item.GetGeneration())
folder.CreatedBy = creatorId
folder.UpdatedBy = updaterId
return folder, nil
}
func (ss *FolderUnifiedStoreImpl) UnstructuredToLegacyFolderList(ctx context.Context, unstructuredList *unstructured.UnstructuredList) ([]*folder.Folder, error) {
folders := make([]*folder.Folder, 0)
identifiers := make([]string, 0)
identifiers := make(map[string]struct{}, 0)
for _, item := range unstructuredList.Items {
meta, err := utils.MetaAccessor(&item)
if err != nil {
return nil, fmt.Errorf("unable to convert unstructured item to legacy folder %w", err)
}
identifiers = append(identifiers, meta.GetCreatedBy(), meta.GetUpdatedBy())
identifiers[meta.GetCreatedBy()] = struct{}{}
identifiers[meta.GetUpdatedBy()] = struct{}{}
}
userUIDtoIDmapping, err := ss.getUserUIDtoIDmappingFromIdentifiers(ctx, identifiers)
folderUserIdentifiers, err := ss.getFolderIdentifiers(ctx, identifiers)
if err != nil {
return nil, err
}
folders := make([]*folder.Folder, 0)
for _, item := range unstructuredList.Items {
folder, creatorRaw, updaterRaw, err := parseUnstructuredToLegacyFolder(&item)
folder, err := convertUnstructuredToFolder(&item, folderUserIdentifiers)
if err != nil {
return nil, err
}
creatorId := getIdFromMapping(creatorRaw, userUIDtoIDmapping)
updaterId := getIdFromMapping(updaterRaw, userUIDtoIDmapping)
if updaterId == 0 {
updaterId = creatorId
}
folder.Version = int(item.GetGeneration())
folder.CreatedBy = creatorId
folder.UpdatedBy = updaterId
folders = append(folders, folder)
}
return folders, nil
}
func (ss *FolderUnifiedStoreImpl) getUserUIDtoIDmappingFromIdentifiers(ctx context.Context, rawIdentifiers []string) (map[string]int64, error) {
userUIDs, userIds := parseIdentifiers(rawIdentifiers)
allUsers, err := ss.userService.ListByIdOrUID(ctx, userUIDs, userIds)
func (ss *FolderUnifiedStoreImpl) getFolderIdentifiers(ctx context.Context, identifiers map[string]struct{}) (map[string]*user.User, error) {
identifierMap, userUIDs, userIds := separateUIDsAndIDs(identifiers)
if len(userUIDs) == 0 && len(userIds) == 0 {
return nil, nil
}
users, err := ss.userService.ListByIdOrUID(ctx, userUIDs, userIds)
if err != nil {
return nil, err
}
mapping := make(map[string]int64)
for _, user := range allUsers {
mapping[user.UID] = user.ID
userMap := make(map[string]*user.User, len(users))
for _, u := range users {
if _, ok := identifierMap[fmt.Sprintf("user:%d", u.ID)]; ok {
userMap[fmt.Sprintf("user:%d", u.ID)] = u
}
return mapping, nil
if _, ok := identifierMap[fmt.Sprintf("user:%s", u.UID)]; ok {
userMap[fmt.Sprintf("user:%s", u.UID)] = u
}
}
return userMap, nil
}
func getIdentifier(rawIdentifier string) string {
parts := strings.Split(rawIdentifier, ":")
func parseIdentifier(identifier string) string {
parts := strings.Split(identifier, ":")
if len(parts) < 2 {
return ""
}
@ -160,41 +172,26 @@ func getIdentifier(rawIdentifier string) string {
return parts[1]
}
func parseIdentifiers(rawIdentifiers []string) ([]string, []int64) {
func separateUIDsAndIDs(identifiers map[string]struct{}) (map[string]string, []string, []int64) {
uids := make([]string, 0)
ids := make([]int64, 0)
for _, rawIdentifier := range rawIdentifiers {
identifier := getIdentifier(rawIdentifier)
if identifier == "" {
continue
}
identifierMap := make(map[string]string, 0)
id, err := strconv.ParseInt(identifier, 10, 64)
if err == nil {
ids = append(ids, id)
} else if identifier != "" {
uids = append(uids, identifier)
}
for identifier := range identifiers {
value := parseIdentifier(identifier)
if value == "" {
continue
}
return uids, ids
}
func getIdFromMapping(rawIdentifier string, mapping map[string]int64) int64 {
identifier := getIdentifier(rawIdentifier)
if identifier == "" {
return 0
}
identifierMap[identifier] = value
id, err := strconv.ParseInt(identifier, 10, 64)
id, err := strconv.ParseInt(value, 10, 64)
if err == nil {
return id
ids = append(ids, id)
} else {
uids = append(uids, value)
}
uid, ok := mapping[identifier]
if ok {
return uid
}
return 0
return identifierMap, uids, ids
}

@ -80,6 +80,8 @@ func TestFolderConversions(t *testing.T) {
Updated: created.Add(time.Hour * 5),
CreatedBy: 1,
UpdatedBy: 2,
CreatedByUID: "useruid",
UpdatedByUID: "useruid2",
}, *converted)
}
@ -117,7 +119,8 @@ func TestFolderListConversions(t *testing.T) {
"kind": "Folder",
"metadata": {
"annotations": {
"grafana.app/createdBy": "user:uuuuuuuuuuuuuu"
"grafana.app/createdBy": "user:uuuuuuuuuuuuuu",
"grafana.app/updatedBy": "user:uuuuuuuuuuuuuu"
},
"creationTimestamp": "2022-12-02T02:02:02Z",
"generation": 1,
@ -162,7 +165,8 @@ func TestFolderListConversions(t *testing.T) {
"kind": "Folder",
"metadata": {
"annotations": {
"grafana.app/createdBy": "user:1"
"grafana.app/createdBy": "user:1",
"grafana.app/updatedBy": "user:1"
},
"creationTimestamp": "2022-12-02T02:02:02Z",
"generation": 1,
@ -268,7 +272,8 @@ func TestFolderListConversions(t *testing.T) {
converted, err := fs.UnstructuredToLegacyFolderList(context.Background(), input)
require.NoError(t, err)
require.Equal(t, 1, len(fake.ListUsersByIdOrUidCalls)) // only one call to the user service
require.Equal(t, usertest.ListUsersByIdOrUidCall{Uids: []string{"uuuuuuuuuuuuuu", "iiiiiiiiiiiiii", "jjjjjjjjjjjjjj"}, Ids: []int64{1, 2, 3}}, fake.ListUsersByIdOrUidCalls[0])
require.ElementsMatch(t, []string{"uuuuuuuuuuuuuu", "iiiiiiiiiiiiii", "jjjjjjjjjjjjjj"}, fake.ListUsersByIdOrUidCalls[0].Uids)
require.ElementsMatch(t, []int64{1, 2, 3}, fake.ListUsersByIdOrUidCalls[0].Ids)
require.Equal(t, 6, len(converted))
require.Equal(t, []*folder.Folder{
{
@ -300,6 +305,8 @@ func TestFolderListConversions(t *testing.T) {
Updated: created,
CreatedBy: 4,
UpdatedBy: 4,
CreatedByUID: "uuuuuuuuuuuuuu",
UpdatedByUID: "uuuuuuuuuuuuuu",
},
{
ID: 145,
@ -315,6 +322,8 @@ func TestFolderListConversions(t *testing.T) {
Updated: created,
CreatedBy: 5,
UpdatedBy: 6,
CreatedByUID: "iiiiiiiiiiiiii",
UpdatedByUID: "jjjjjjjjjjjjjj",
},
{
ID: 146,
@ -330,6 +339,8 @@ func TestFolderListConversions(t *testing.T) {
Updated: created,
CreatedBy: 1,
UpdatedBy: 1,
CreatedByUID: "aaaaaaaaaaaaaa",
UpdatedByUID: "aaaaaaaaaaaaaa",
},
{
ID: 147,
@ -345,6 +356,8 @@ func TestFolderListConversions(t *testing.T) {
Updated: created,
CreatedBy: 2,
UpdatedBy: 3,
CreatedByUID: "oooooooooooooo",
UpdatedByUID: "eeeeeeeeeeeeee",
},
{
ID: 148,

@ -353,14 +353,9 @@ func (s *Service) GetLegacy(ctx context.Context, q *folder.GetFolderQuery) (*fol
return f, err
}
func (s *Service) setFullpath(ctx context.Context, f *folder.Folder, user identity.Requester, forceLegacy bool) (*folder.Folder, error) {
func (s *Service) setFullpath(ctx context.Context, f *folder.Folder, forceLegacy bool) (*folder.Folder, error) {
ctx, span := s.tracer.Start(ctx, "folder.setFullpath")
defer span.End()
// #TODO is some kind of intermediate conversion required as is the case with user id where
// it gets parsed using UserIdentifier(). Also is there some kind of validation taking place as
// part of the parsing?
f.CreatedByUID = user.GetUID()
f.UpdatedByUID = user.GetUID()
if f.ParentUID == "" {
return f, nil
@ -386,15 +381,7 @@ func (s *Service) setFullpath(ctx context.Context, f *folder.Folder, user identi
}
// #TODO revisit setting permissions so that we can centralise the logic for escaping slashes in titles
// Escape forward slashes in the title
escapedSlash := "\\/"
title := strings.ReplaceAll(f.Title, "/", escapedSlash)
f.Fullpath = title
f.FullpathUIDs = f.UID
for _, p := range parents {
pt := strings.ReplaceAll(p.Title, "/", escapedSlash)
f.Fullpath = f.Fullpath + "/" + pt
f.FullpathUIDs = f.FullpathUIDs + "/" + p.UID
}
f.Fullpath, f.FullpathUIDs = computeFullPath(append(parents, f))
return f, nil
}
@ -573,7 +560,7 @@ func (s *Service) GetSharedWithMe(ctx context.Context, q *folder.GetChildrenQuer
return nil, folder.ErrInternal.Errorf("failed to fetch root folders to which the user has access: %w", err)
}
dedupAvailableNonRootFolders := s.deduplicateAvailableFolders(ctx, availableNonRootFolders, rootFolders, q.OrgID)
dedupAvailableNonRootFolders := s.deduplicateAvailableFolders(ctx, availableNonRootFolders, rootFolders)
s.metrics.sharedWithMeFetchFoldersRequestsDuration.WithLabelValues("success").Observe(time.Since(start).Seconds())
return dedupAvailableNonRootFolders, nil
}
@ -641,7 +628,7 @@ func (s *Service) getAvailableNonRootFolders(ctx context.Context, q *folder.GetC
return nonRootFolders, nil
}
func (s *Service) deduplicateAvailableFolders(ctx context.Context, folders []*folder.Folder, rootFolders []*folder.FolderReference, orgID int64) []*folder.FolderReference {
func (s *Service) deduplicateAvailableFolders(ctx context.Context, folders []*folder.Folder, rootFolders []*folder.FolderReference) []*folder.FolderReference {
foldersRef := make([]*folder.FolderReference, len(folders))
for i, f := range folders {
foldersRef[i] = f.ToFolderReference()

@ -151,10 +151,12 @@ func (s *Service) getFromApiServer(ctx context.Context, q *folder.GetFolderQuery
f.ID = dashFolder.ID
f.Version = dashFolder.Version
f, err = s.setFullpath(ctx, f, q.SignedInUser, false)
if q.WithFullpath || q.WithFullpathUIDs {
f, err = s.setFullpath(ctx, f, false)
if err != nil {
return nil, err
}
}
return f, err
}
@ -512,8 +514,6 @@ func (s *Service) createOnApiServer(ctx context.Context, cmd *folder.CreateFolde
return nil, dashboards.ErrFolderInvalidUID
}
user := cmd.SignedInUser
cmd = &folder.CreateFolderCommand{
// TODO: Today, if a UID isn't specified, the dashboard store
// generates a new UID. The new folder store will need to do this as
@ -531,11 +531,6 @@ func (s *Service) createOnApiServer(ctx context.Context, cmd *folder.CreateFolde
return nil, err
}
f, err = s.setFullpath(ctx, f, user, false)
if err != nil {
return nil, err
}
return f, nil
}

@ -15,7 +15,7 @@ import (
"k8s.io/apimachinery/pkg/selection"
clientrest "k8s.io/client-go/rest"
foldersv1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
folderv1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/bus"
@ -77,9 +77,9 @@ func TestIntegrationFolderServiceViaUnifiedStorage(t *testing.T) {
t.Skip("skipping integration test")
}
m := map[string]foldersv1.Folder{}
m := map[string]folderv1.Folder{}
unifiedStorageFolder := &foldersv1.Folder{}
unifiedStorageFolder := &folderv1.Folder{}
unifiedStorageFolder.Kind = "folder"
fooFolder := &folder.Folder{
@ -88,8 +88,8 @@ func TestIntegrationFolderServiceViaUnifiedStorage(t *testing.T) {
OrgID: orgID,
UID: "foo",
URL: "/dashboards/f/foo/foo-folder",
CreatedByUID: "user:1",
UpdatedByUID: "user:1",
CreatedBy: 1,
UpdatedBy: 1,
}
updateFolder := &folder.Folder{
@ -106,7 +106,7 @@ func TestIntegrationFolderServiceViaUnifiedStorage(t *testing.T) {
mux.HandleFunc("GET /apis/folder.grafana.app/v1beta1/namespaces/default/folders", func(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "application/json")
l := &foldersv1.FolderList{}
l := &folderv1.FolderList{}
l.Kind = "Folder"
err := json.NewEncoder(w).Encode(l)
require.NoError(t, err)
@ -137,7 +137,7 @@ func TestIntegrationFolderServiceViaUnifiedStorage(t *testing.T) {
buf, err := io.ReadAll(req.Body)
require.NoError(t, err)
var foldr foldersv1.Folder
var foldr folderv1.Folder
err = json.Unmarshal(buf, &foldr)
require.NoError(t, err)
@ -166,7 +166,7 @@ func TestIntegrationFolderServiceViaUnifiedStorage(t *testing.T) {
buf, err := io.ReadAll(req.Body)
require.NoError(t, err)
var folder foldersv1.Folder
var folder folderv1.Folder
err = json.Unmarshal(buf, &folder)
require.NoError(t, err)
@ -202,7 +202,7 @@ func TestIntegrationFolderServiceViaUnifiedStorage(t *testing.T) {
features := featuremgmt.WithFeatures(featuresArr...)
dashboardStore := dashboards.NewFakeDashboardStore(t)
k8sCli := client.NewK8sHandler(dualwrite.ProvideTestService(), request.GetNamespaceMapper(cfg), foldersv1.FolderResourceInfo.GroupVersionResource(), restCfgProvider.GetRestConfig, dashboardStore, userService, nil, sort.ProvideService())
k8sCli := client.NewK8sHandler(dualwrite.ProvideTestService(), request.GetNamespaceMapper(cfg), folderv1.FolderResourceInfo.GroupVersionResource(), restCfgProvider.GetRestConfig, dashboardStore, userService, nil, sort.ProvideService())
unifiedStore := ProvideUnifiedStore(k8sCli, userService)
ctx := context.Background()
@ -319,8 +319,8 @@ func TestIntegrationFolderServiceViaUnifiedStorage(t *testing.T) {
Title: "Test-Folder",
UID: "testfolder",
URL: "/dashboards/f/testfolder/test-folder",
CreatedByUID: "user:1",
UpdatedByUID: "user:1",
CreatedBy: 1,
UpdatedBy: 1,
}
t.Run("When creating folder should not return access denied error", func(t *testing.T) {
@ -559,8 +559,8 @@ func TestSearchFoldersFromApiServer(t *testing.T) {
Options: &resource.ListOptions{
Key: &resource.ResourceKey{
Namespace: "default",
Group: foldersv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: foldersv1.FolderResourceInfo.GroupVersionResource().Resource,
Group: folderv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: folderv1.FolderResourceInfo.GroupVersionResource().Resource,
},
Fields: []*resource.Requirement{
{
@ -649,8 +649,8 @@ func TestSearchFoldersFromApiServer(t *testing.T) {
Options: &resource.ListOptions{
Key: &resource.ResourceKey{
Namespace: "default",
Group: foldersv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: foldersv1.FolderResourceInfo.GroupVersionResource().Resource,
Group: folderv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: folderv1.FolderResourceInfo.GroupVersionResource().Resource,
},
Fields: []*resource.Requirement{},
Labels: []*resource.Requirement{
@ -718,8 +718,8 @@ func TestSearchFoldersFromApiServer(t *testing.T) {
Options: &resource.ListOptions{
Key: &resource.ResourceKey{
Namespace: "default",
Group: foldersv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: foldersv1.FolderResourceInfo.GroupVersionResource().Resource,
Group: folderv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: folderv1.FolderResourceInfo.GroupVersionResource().Resource,
},
Fields: []*resource.Requirement{},
Labels: []*resource.Requirement{},
@ -798,8 +798,13 @@ func TestGetFoldersFromApiServer(t *testing.T) {
user := &user.SignedInUser{OrgID: 1}
ctx := identity.WithRequester(context.Background(), user)
fakeK8sClient.On("GetNamespace", mock.Anything, mock.Anything).Return("default")
folderkey := &resource.ResourceKey{
Namespace: "default",
Group: folderv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: folderv1.FolderResourceInfo.GroupVersionResource().Resource,
}
t.Run("Get folder by title)", func(t *testing.T) {
t.Run("Get folder by title", func(t *testing.T) {
// the search here will return a parent, this will be the parent folder returned when we query for it to add to the hit info
fakeFolderStore := folder.NewFakeStore()
fakeFolderStore.ExpectedFolder = &folder.Folder{
@ -813,16 +818,13 @@ func TestGetFoldersFromApiServer(t *testing.T) {
service.unifiedStore = fakeFolderStore
fakeK8sClient.On("Search", mock.Anything, int64(1), &resource.ResourceSearchRequest{
Options: &resource.ListOptions{
Key: &resource.ResourceKey{
Namespace: "default",
Group: foldersv1.FolderResourceInfo.GroupVersionResource().Group,
Resource: foldersv1.FolderResourceInfo.GroupVersionResource().Resource,
},
Key: folderkey,
Fields: []*resource.Requirement{},
Labels: []*resource.Requirement{},
},
Query: "foo title",
Limit: folderSearchLimit}).Return(&resource.ResourceSearchResponse{
Limit: folderSearchLimit}).
Return(&resource.ResourceSearchResponse{
Results: &resource.ResourceTable{
Columns: []*resource.ResourceTableColumnDefinition{
{
@ -860,10 +862,6 @@ func TestGetFoldersFromApiServer(t *testing.T) {
Title: "foo title",
OrgID: 1,
URL: "/dashboards/f/foouid/foo-title",
Fullpath: "foo title",
FullpathUIDs: "foouid",
CreatedByUID: ":0",
UpdatedByUID: ":0",
}
compareFoldersNormalizeTime(t, expectedResult, result)
fakeK8sClient.AssertExpectations(t)

@ -321,7 +321,9 @@ func (ss *FolderUnifiedStoreImpl) GetHeight(ctx context.Context, foldrUID string
// The full path UIDs of B is "uid1/uid2".
// The full path UIDs of A is "uid1".
func (ss *FolderUnifiedStoreImpl) GetFolders(ctx context.Context, q folder.GetFoldersFromStoreQuery) ([]*folder.Folder, error) {
opts := v1.ListOptions{}
opts := v1.ListOptions{
Limit: folderSearchLimit,
}
if q.WithFullpath || q.WithFullpathUIDs {
// only supported in modes 0-2, to keep the alerting queries from causing tons of get folder requests
// to retrieve the parent for all folders in grafana

@ -244,6 +244,8 @@ func getDelay(numErrors int) time.Duration {
// run stream until context canceled or stream finished without an error.
func (s *Manager) runStream(ctx context.Context, cancelFn func(), sr streamRequest) {
ctx = identity.WithRequester(ctx, sr.user)
defer func() { s.stopStream(sr, cancelFn) }()
var numFastErrors int
var delay time.Duration

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save