Provisioning: Define large parts of our infrastructure (#101029)

* Provisioning: Define secrets service

* Provisioning: Create and store secrets service

* Provisioning: Define safepath

* Provisioning: Define the repository

* Identity: Support a provisioning service

* Provisioning: Define a job queue

* Chore: Regen code

* Provisioning: Show progress more often

Co-Authored-By: Ryan McKinley <ryantxu@gmail.com>

* Provisioning: Rename hash field to lastRef

Co-Authored-By: Roberto Jiménez Sánchez <roberto.jimenez@grafana.com>

* Provisioning: Workflows as write access

Co-Authored-By: Ryan McKinley <ryantxu@gmail.com>

* Provisioning: Regen OpenAPI snapshot

* Provisioning: Update tests to match new fields

---------

Co-authored-by: Ryan McKinley <ryantxu@gmail.com>
Co-authored-by: Roberto Jiménez Sánchez <roberto.jimenez@grafana.com>
pull/100479/head^2
Mariell Hoversholm 3 months ago committed by GitHub
parent 8f9972a509
commit 279b641469
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 2
      pkg/apis/provisioning/v0alpha1/jobs.go
  2. 26
      pkg/apis/provisioning/v0alpha1/types.go
  3. 18
      pkg/apis/provisioning/v0alpha1/zz_generated.deepcopy.go
  4. 54
      pkg/apis/provisioning/v0alpha1/zz_generated.openapi.go
  5. 2
      pkg/apis/provisioning/v0alpha1/zz_generated.openapi_violation_exceptions.list
  6. 34
      pkg/generated/applyconfiguration/provisioning/v0alpha1/githubrepositoryconfig.go
  7. 14
      pkg/generated/applyconfiguration/provisioning/v0alpha1/repositoryspec.go
  8. 10
      pkg/generated/applyconfiguration/provisioning/v0alpha1/syncstatus.go
  9. 203
      pkg/registry/apis/provisioning/jobs/progress.go
  10. 47
      pkg/registry/apis/provisioning/jobs/queue.go
  11. 367
      pkg/registry/apis/provisioning/jobs/store.go
  12. 379
      pkg/registry/apis/provisioning/jobs/watchset.go
  13. 45
      pkg/registry/apis/provisioning/register.go
  14. 128
      pkg/registry/apis/provisioning/repository/repository.go
  15. 59
      pkg/registry/apis/provisioning/safepath/path.go
  16. 74
      pkg/registry/apis/provisioning/safepath/path_test.go
  17. 31
      pkg/registry/apis/provisioning/safepath/walk.go
  18. 35
      pkg/registry/apis/provisioning/secrets/secret.go
  19. 735
      pkg/tests/apis/openapi_snapshots/provisioning.grafana.app-v0alpha1.json
  20. 3
      pkg/tests/apis/provisioning/testdata/github-example.json
  21. 2
      pkg/tests/apis/provisioning/testdata/local-devenv.json

@ -119,7 +119,7 @@ type JobStatus struct {
Progress float64 `json:"progress,omitempty"`
// Summary of processed actions
Summary []JobResourceSummary `json:"summary,omitempty"`
Summary []*JobResourceSummary `json:"summary,omitempty"`
}
type JobResourceSummary struct {

@ -28,10 +28,10 @@ type LocalRepositoryConfig struct {
type Workflow string
const (
// WriteWorkflow allows a user to write directly to the repository
WriteWorkflow Workflow = "write"
// BranchWorkflow creates a branch for changes
BranchWorkflow Workflow = "branch"
// PushWorkflow pushes changes directly the configured branch
PushWorkflow Workflow = "push"
)
type GitHubRepositoryConfig struct {
@ -39,23 +39,13 @@ type GitHubRepositoryConfig struct {
URL string `json:"url,omitempty"`
// The branch to use in the repository.
// By default, this is the main branch.
Branch string `json:"branch,omitempty"`
Branch string `json:"branch"`
// Token for accessing the repository. If set, it will be encrypted into encryptedToken, then set to an empty string again.
Token string `json:"token,omitempty"`
// Token for accessing the repository, but encrypted. This is not possible to read back to a user decrypted.
// +listType=atomic
EncryptedToken []byte `json:"encryptedToken,omitempty"`
// Workflow allowed for changes to the repository.
// The order is relevant for defining the precedence of the workflows.
// Possible values: pull-request, branch, push.
Workflows []Workflow `json:"workflows,omitempty"`
// Whether we should commit to change branches and use a Pull Request flow to achieve this.
// By default, this is false (i.e. we will commit straight to the main branch).
BranchWorkflow bool `json:"branchWorkflow,omitempty"`
// Whether we should show dashboard previews for pull requests.
// By default, this is false (i.e. we will not create previews).
GenerateDashboardPreviews bool `json:"generateDashboardPreviews,omitempty"`
@ -78,8 +68,10 @@ type RepositorySpec struct {
// Repository description
Description string `json:"description,omitempty"`
// ReadOnly repository does not allow any write commands
ReadOnly bool `json:"readOnly"`
// UI driven Workflow that allow changes to the contends of the repository.
// The order is relevant for defining the precedence of the workflows.
// When empty, the repository does not support any edits (eg, readonly)
Workflows []Workflow `json:"workflows"`
// Sync settings -- how values are pulled from the repository into grafana
Sync SyncOptions `json:"sync"`
@ -183,8 +175,8 @@ type SyncStatus struct {
// +listType=atomic
Message []string `json:"message"`
// The repository hash when the last sync ran
Hash string `json:"hash,omitempty"`
// The repository ref when the last successful sync ran
LastRef string `json:"lastRef,omitempty"`
// Incremental synchronization for versioned repositories
Incremental bool `json:"incremental,omitempty"`

@ -98,11 +98,6 @@ func (in *GitHubRepositoryConfig) DeepCopyInto(out *GitHubRepositoryConfig) {
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.Workflows != nil {
in, out := &in.Workflows, &out.Workflows
*out = make([]Workflow, len(*in))
copy(*out, *in)
}
return
}
@ -314,9 +309,13 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
}
if in.Summary != nil {
in, out := &in.Summary, &out.Summary
*out = make([]JobResourceSummary, len(*in))
*out = make([]*JobResourceSummary, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(JobResourceSummary)
(*in).DeepCopyInto(*out)
}
}
}
return
@ -444,6 +443,11 @@ func (in *RepositoryList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RepositorySpec) DeepCopyInto(out *RepositorySpec) {
*out = *in
if in.Workflows != nil {
in, out := &in.Workflows, &out.Workflows
*out = make([]Workflow, len(*in))
copy(*out, *in)
}
out.Sync = in.Sync
if in.Local != nil {
in, out := &in.Local, &out.Local

@ -246,7 +246,8 @@ func schema_pkg_apis_provisioning_v0alpha1_GitHubRepositoryConfig(ref common.Ref
},
"branch": {
SchemaProps: spec.SchemaProps{
Description: "The branch to use in the repository. By default, this is the main branch.",
Description: "The branch to use in the repository.",
Default: "",
Type: []string{"string"},
Format: "",
},
@ -270,29 +271,6 @@ func schema_pkg_apis_provisioning_v0alpha1_GitHubRepositoryConfig(ref common.Ref
Format: "byte",
},
},
"workflows": {
SchemaProps: spec.SchemaProps{
Description: "Workflow allowed for changes to the repository. The order is relevant for defining the precedence of the workflows. Possible values: pull-request, branch, push.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
Enum: []interface{}{"branch", "push"},
},
},
},
},
},
"branchWorkflow": {
SchemaProps: spec.SchemaProps{
Description: "Whether we should commit to change branches and use a Pull Request flow to achieve this. By default, this is false (i.e. we will commit straight to the main branch).",
Type: []string{"boolean"},
Format: "",
},
},
"generateDashboardPreviews": {
SchemaProps: spec.SchemaProps{
Description: "Whether we should show dashboard previews for pull requests. By default, this is false (i.e. we will not create previews).",
@ -301,6 +279,7 @@ func schema_pkg_apis_provisioning_v0alpha1_GitHubRepositoryConfig(ref common.Ref
},
},
},
Required: []string{"branch"},
},
},
}
@ -749,8 +728,7 @@ func schema_pkg_apis_provisioning_v0alpha1_JobStatus(ref common.ReferenceCallbac
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1.JobResourceSummary"),
Ref: ref("github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1.JobResourceSummary"),
},
},
},
@ -977,12 +955,20 @@ func schema_pkg_apis_provisioning_v0alpha1_RepositorySpec(ref common.ReferenceCa
Format: "",
},
},
"readOnly": {
"workflows": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly repository does not allow any write commands",
Default: false,
Type: []string{"boolean"},
Format: "",
Description: "UI driven Workflow that allow changes to the contends of the repository. The order is relevant for defining the precedence of the workflows. When empty, the repository does not support any edits (eg, readonly)",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
Enum: []interface{}{"branch", "write"},
},
},
},
},
},
"sync": {
@ -1014,7 +1000,7 @@ func schema_pkg_apis_provisioning_v0alpha1_RepositorySpec(ref common.ReferenceCa
},
},
},
Required: []string{"title", "readOnly", "sync", "type"},
Required: []string{"title", "workflows", "sync", "type"},
},
},
Dependencies: []string{
@ -1717,9 +1703,9 @@ func schema_pkg_apis_provisioning_v0alpha1_SyncStatus(ref common.ReferenceCallba
},
},
},
"hash": {
"lastRef": {
SchemaProps: spec.SchemaProps{
Description: "The repository hash when the last sync ran",
Description: "The repository ref when the last successful sync ran",
Type: []string{"string"},
Format: "",
},

@ -1,10 +1,10 @@
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,FileList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,GitHubRepositoryConfig,Workflows
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,HistoryList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Errors
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,JobStatus,Errors
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,JobStatus,Summary
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,RepositoryList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,RepositorySpec,Workflows
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,RepositoryViewList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,ResourceList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1,ResourceStats,Items

@ -4,20 +4,14 @@
package v0alpha1
import (
provisioningv0alpha1 "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
)
// GitHubRepositoryConfigApplyConfiguration represents a declarative configuration of the GitHubRepositoryConfig type for use
// with apply.
type GitHubRepositoryConfigApplyConfiguration struct {
URL *string `json:"url,omitempty"`
Branch *string `json:"branch,omitempty"`
Token *string `json:"token,omitempty"`
EncryptedToken []byte `json:"encryptedToken,omitempty"`
Workflows []provisioningv0alpha1.Workflow `json:"workflows,omitempty"`
BranchWorkflow *bool `json:"branchWorkflow,omitempty"`
GenerateDashboardPreviews *bool `json:"generateDashboardPreviews,omitempty"`
URL *string `json:"url,omitempty"`
Branch *string `json:"branch,omitempty"`
Token *string `json:"token,omitempty"`
EncryptedToken []byte `json:"encryptedToken,omitempty"`
GenerateDashboardPreviews *bool `json:"generateDashboardPreviews,omitempty"`
}
// GitHubRepositoryConfigApplyConfiguration constructs a declarative configuration of the GitHubRepositoryConfig type for use with
@ -60,24 +54,6 @@ func (b *GitHubRepositoryConfigApplyConfiguration) WithEncryptedToken(values ...
return b
}
// WithWorkflows adds the given value to the Workflows field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Workflows field.
func (b *GitHubRepositoryConfigApplyConfiguration) WithWorkflows(values ...provisioningv0alpha1.Workflow) *GitHubRepositoryConfigApplyConfiguration {
for i := range values {
b.Workflows = append(b.Workflows, values[i])
}
return b
}
// WithBranchWorkflow sets the BranchWorkflow field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the BranchWorkflow field is set to the value of the last call.
func (b *GitHubRepositoryConfigApplyConfiguration) WithBranchWorkflow(value bool) *GitHubRepositoryConfigApplyConfiguration {
b.BranchWorkflow = &value
return b
}
// WithGenerateDashboardPreviews sets the GenerateDashboardPreviews field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateDashboardPreviews field is set to the value of the last call.

@ -13,7 +13,7 @@ import (
type RepositorySpecApplyConfiguration struct {
Title *string `json:"title,omitempty"`
Description *string `json:"description,omitempty"`
ReadOnly *bool `json:"readOnly,omitempty"`
Workflows []provisioningv0alpha1.Workflow `json:"workflows,omitempty"`
Sync *SyncOptionsApplyConfiguration `json:"sync,omitempty"`
Type *provisioningv0alpha1.RepositoryType `json:"type,omitempty"`
Local *LocalRepositoryConfigApplyConfiguration `json:"local,omitempty"`
@ -42,11 +42,13 @@ func (b *RepositorySpecApplyConfiguration) WithDescription(value string) *Reposi
return b
}
// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ReadOnly field is set to the value of the last call.
func (b *RepositorySpecApplyConfiguration) WithReadOnly(value bool) *RepositorySpecApplyConfiguration {
b.ReadOnly = &value
// WithWorkflows adds the given value to the Workflows field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Workflows field.
func (b *RepositorySpecApplyConfiguration) WithWorkflows(values ...provisioningv0alpha1.Workflow) *RepositorySpecApplyConfiguration {
for i := range values {
b.Workflows = append(b.Workflows, values[i])
}
return b
}

@ -17,7 +17,7 @@ type SyncStatusApplyConfiguration struct {
Finished *int64 `json:"finished,omitempty"`
Scheduled *int64 `json:"scheduled,omitempty"`
Message []string `json:"message,omitempty"`
Hash *string `json:"hash,omitempty"`
LastRef *string `json:"lastRef,omitempty"`
Incremental *bool `json:"incremental,omitempty"`
}
@ -77,11 +77,11 @@ func (b *SyncStatusApplyConfiguration) WithMessage(values ...string) *SyncStatus
return b
}
// WithHash sets the Hash field in the declarative configuration to the given value
// WithLastRef sets the LastRef field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Hash field is set to the value of the last call.
func (b *SyncStatusApplyConfiguration) WithHash(value string) *SyncStatusApplyConfiguration {
b.Hash = &value
// If called multiple times, the LastRef field is set to the value of the last call.
func (b *SyncStatusApplyConfiguration) WithLastRef(value string) *SyncStatusApplyConfiguration {
b.LastRef = &value
return b
}

@ -0,0 +1,203 @@
package jobs
import (
"context"
"fmt"
"time"
"github.com/grafana/grafana-app-sdk/logging"
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
)
// maybeNotifyProgress wraps fn with rate limiting: an intermediate update is
// forwarded only when the job has finished, when nothing has been sent yet,
// or when more than threshold has elapsed since the previous notification.
func maybeNotifyProgress(threshold time.Duration, fn ProgressFn) ProgressFn {
	var lastSent time.Time
	return func(ctx context.Context, status provisioning.JobStatus) error {
		stillFresh := !lastSent.IsZero() && time.Since(lastSent) <= threshold
		if status.Finished == 0 && stillFresh {
			// Drop this update; the previous one is recent enough.
			return nil
		}
		lastSent = time.Now()
		return fn(ctx, status)
	}
}
// FIXME: ProgressRecorder should be initialized in the queue

// JobResourceResult describes the outcome of a single resource operation
// performed while running a job.
type JobResourceResult struct {
	Name     string
	Resource string
	Group    string
	Path     string
	Action   repository.FileAction
	// Error is non-nil when the operation failed.
	Error error
}
// jobProgressRecorder is the concrete JobProgressRecorder used while a job
// runs. NOTE(review): it has no internal locking — appears to assume a
// single goroutine records results; confirm with callers.
type jobProgressRecorder struct {
	started     time.Time
	total       int
	ref         string
	message     string
	resultCount int
	errorCount  int
	// errors keeps at most the first 20 error messages (see Record).
	errors     []string
	progressFn ProgressFn
	// summaries is keyed by "resource:group".
	summaries map[string]*provisioning.JobResourceSummary
}
// newJobProgressRecorder creates a recorder that forwards rate-limited
// status updates through fn (at most one every 5 seconds, except for the
// final/finished update).
//
// The parameter was renamed from "ProgressFn", which shadowed the type of
// the same name and broke Go's lowerCamel naming convention for parameters.
func newJobProgressRecorder(fn ProgressFn) JobProgressRecorder {
	return &jobProgressRecorder{
		started:    time.Now(),
		progressFn: maybeNotifyProgress(5*time.Second, fn),
		summaries:  make(map[string]*provisioning.JobResourceSummary),
	}
}
// Record registers the outcome of one resource operation: it logs the
// result, stores at most the first 20 error messages, folds the result into
// the per-resource-kind summary, and emits a (rate limited) progress update.
func (r *jobProgressRecorder) Record(ctx context.Context, result JobResourceResult) {
	r.resultCount++

	logger := logging.FromContext(ctx).With("path", result.Path, "resource", result.Resource, "group", result.Group, "action", result.Action, "name", result.Name)
	if result.Error != nil {
		logger.Error("job resource operation failed", "err", result.Error)
		// Cap the stored messages; errorCount still counts every error.
		if len(r.errors) < 20 {
			r.errors = append(r.errors, result.Error.Error())
		}
		r.errorCount++
	} else {
		logger.Info("job resource operation succeeded")
	}

	r.updateSummary(result)
	r.notify(ctx)
}
// SetMessage sets the human-readable message included in status updates.
func (r *jobProgressRecorder) SetMessage(msg string) {
	r.message = msg
}

// GetMessage returns the current progress message.
func (r *jobProgressRecorder) GetMessage() string {
	return r.message
}

// SetRef records the ref associated with this job's work.
func (r *jobProgressRecorder) SetRef(ref string) {
	r.ref = ref
}

// GetRef returns the ref set with SetRef.
func (r *jobProgressRecorder) GetRef() string {
	return r.ref
}

// SetTotal sets the expected number of results, enabling percent progress.
func (r *jobProgressRecorder) SetTotal(total int) {
	r.total = total
}
// TooManyErrors reports a terminal error once more than 20 resource errors
// have been recorded; it returns nil otherwise.
func (r *jobProgressRecorder) TooManyErrors() error {
	if r.errorCount <= 20 {
		return nil
	}
	return fmt.Errorf("too many errors: %d", r.errorCount)
}
// summary flattens the per-resource summaries into a slice, or nil when no
// results have been recorded. Order is unspecified (map iteration).
func (r *jobProgressRecorder) summary() []*provisioning.JobResourceSummary {
	if len(r.summaries) == 0 {
		return nil
	}

	out := make([]*provisioning.JobResourceSummary, 0, len(r.summaries))
	for _, item := range r.summaries {
		out = append(out, item)
	}
	return out
}
// updateSummary folds result into the summary bucket keyed by
// "resource:group", creating the bucket on first use.
func (r *jobProgressRecorder) updateSummary(result JobResourceResult) {
	key := result.Resource + ":" + result.Group
	summary, exists := r.summaries[key]
	if !exists {
		summary = &provisioning.JobResourceSummary{
			Resource: result.Resource,
			Group:    result.Group,
		}
		r.summaries[key] = summary
	}

	if result.Error != nil {
		summary.Errors = append(summary.Errors, result.Error.Error())
		summary.Error++
	} else {
		switch result.Action {
		case repository.FileActionDeleted:
			summary.Delete++
		case repository.FileActionUpdated:
			summary.Update++
		case repository.FileActionCreated:
			summary.Create++
		case repository.FileActionIgnored:
			summary.Noop++
		case repository.FileActionRenamed:
			// A rename is accounted as a delete plus a create.
			summary.Delete++
			summary.Create++
		}
		// Write is the derived total of creations and updates.
		summary.Write = summary.Create + summary.Update
	}
}
// progress returns the percent of expected results seen so far (0-100).
// When no total has been set it reports 0 rather than dividing by zero.
func (r *jobProgressRecorder) progress() float64 {
	if r.total == 0 {
		return 0
	}

	done := float64(r.resultCount)
	return done / float64(r.total) * 100
}
// notify pushes the current in-flight (working) status through progressFn.
// Notification failures are logged but never interrupt the job.
func (r *jobProgressRecorder) notify(ctx context.Context) {
	jobStatus := provisioning.JobStatus{
		State:    provisioning.JobStateWorking,
		Message:  r.message,
		Errors:   r.errors,
		Progress: r.progress(),
		Summary:  r.summary(),
	}

	logger := logging.FromContext(ctx)
	if err := r.progressFn(ctx, jobStatus); err != nil {
		logger.Warn("error notifying progress", "err", err)
	}
}
// Complete builds the terminal JobStatus for this run. State is success
// unless err is non-nil or any resource errors were recorded, in which case
// it becomes error. A caller-set message overrides the default on success.
func (r *jobProgressRecorder) Complete(ctx context.Context, err error) provisioning.JobStatus {
	// Initialize base job status
	jobStatus := provisioning.JobStatus{
		Started: r.started.UnixMilli(),
		// FIXME: if we call this method twice, the state will be different
		// This results in sync status to be different from job status
		Finished: time.Now().UnixMilli(),
		State:    provisioning.JobStateSuccess,
		Message:  "completed successfully",
	}

	// An explicit execution error takes precedence.
	if err != nil {
		jobStatus.State = provisioning.JobStateError
		jobStatus.Message = err.Error()
	}

	jobStatus.Summary = r.summary()
	jobStatus.Errors = r.errors

	// Check for errors during execution
	if len(jobStatus.Errors) > 0 && jobStatus.State != provisioning.JobStateError {
		jobStatus.State = provisioning.JobStateError
		jobStatus.Message = "completed with errors"
	}

	// Override message if progress has a more explicit message
	if r.message != "" && jobStatus.State != provisioning.JobStateError {
		jobStatus.Message = r.message
	}

	return jobStatus
}

@ -0,0 +1,47 @@
package jobs
import (
"context"
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
)
// RepoGetter resolves a repository by name (namespace comes from ctx).
type RepoGetter interface {
	GetRepository(ctx context.Context, name string) (repository.Repository, error)
}
// JobQueue is the basic job queue infrastructure: producers Add jobs,
// workers are Registered, and the queue hands out and updates jobs.
type JobQueue interface {
	// Add a new Job to the Queue. The status must be empty
	Add(ctx context.Context, job *provisioning.Job) (*provisioning.Job, error)

	// Get the next job we should process
	Next(ctx context.Context) *provisioning.Job

	// Update the status on a given job
	// This is only valid if current job is not finished
	Update(ctx context.Context, namespace string, name string, status provisioning.JobStatus) error

	// Register a worker (inline for now)
	Register(worker Worker)
}
// JobProgressRecorder accumulates per-resource results while a job runs and
// produces the final JobStatus via Complete.
type JobProgressRecorder interface {
	// Record one resource operation result (success or failure).
	Record(ctx context.Context, result JobResourceResult)
	// SetMessage sets the human-readable progress message.
	SetMessage(msg string)
	// GetMessage returns the current progress message.
	GetMessage() string
	// SetRef records the ref associated with this job's work.
	SetRef(ref string)
	// GetRef returns the ref set with SetRef.
	GetRef() string
	// SetTotal sets the expected number of results (enables percent progress).
	SetTotal(total int)
	// TooManyErrors returns a terminal error once too many results failed.
	TooManyErrors() error
	// Complete builds the final job status, folding in err when non-nil.
	Complete(ctx context.Context, err error) provisioning.JobStatus
}
// Worker processes the job kinds it declares support for.
type Worker interface {
	// IsSupported reports whether this worker can process the given job.
	IsSupported(ctx context.Context, job provisioning.Job) bool

	// Process runs the job against repo, reporting progress via the recorder.
	Process(ctx context.Context, repo repository.Repository, job provisioning.Job, progress JobProgressRecorder) error
}

// ProgressFn is a function that can be called to update the progress of a job
type ProgressFn func(ctx context.Context, status provisioning.JobStatus) error

@ -0,0 +1,367 @@
package jobs
import (
"context"
"errors"
"fmt"
"net/http"
"strconv"
"sync"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/storage"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana/pkg/apimachinery/identity"
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
"github.com/grafana/grafana/pkg/util"
)
// Compile-time assertions: jobStore must implement the queue interface and
// the kubernetes rest.* storage interfaces it is registered for.
var (
	_ JobQueue                  = (*jobStore)(nil)
	_ rest.Scoper               = (*jobStore)(nil)
	_ rest.SingularNameProvider = (*jobStore)(nil)
	_ rest.Getter               = (*jobStore)(nil)
	_ rest.Lister               = (*jobStore)(nil)
	_ rest.Storage              = (*jobStore)(nil)
	_ rest.Watcher              = (*jobStore)(nil)
)
// NewJobStore creates an in-memory job queue/store that keeps at most
// capacity previous jobs and resolves repositories through getter.
func NewJobStore(capacity int, getter RepoGetter) *jobStore {
	return &jobStore{
		workers:   make([]Worker, 0),
		getter:    getter,
		rv:        1,
		capacity:  capacity,
		jobs:      []provisioning.Job{},
		watchSet:  NewWatchSet(),
		versioner: &storage.APIObjectVersioner{},
	}
}
// jobStore is an in-memory, mutex-guarded JobQueue that also serves the
// kubernetes REST storage interfaces for the Job resource.
type jobStore struct {
	getter   RepoGetter
	capacity int
	workers  []Worker

	// All jobs
	// Newest first; Add prepends and trims entries beyond capacity.
	jobs []provisioning.Job
	rv   int64 // updates whenever changed

	watchSet  *WatchSet
	versioner storage.Versioner

	// mutex guards jobs and rv.
	mutex sync.RWMutex
}
// Implementing Kube interfaces

// New returns an empty Job object.
func (s *jobStore) New() runtime.Object {
	return provisioning.JobResourceInfo.NewFunc()
}

// Destroy is a no-op; the in-memory store holds no external resources.
func (s *jobStore) Destroy() {}

// NamespaceScoped reports that jobs are namespaced.
func (s *jobStore) NamespaceScoped() bool {
	return true // namespace == org
}

// GetSingularName returns the singular resource name for discovery.
func (s *jobStore) GetSingularName() string {
	return provisioning.JobResourceInfo.GetSingularName()
}

// NewList returns an empty JobList object.
func (s *jobStore) NewList() runtime.Object {
	return provisioning.JobResourceInfo.NewListFunc()
}

// ConvertToTable renders jobs for table (kubectl get) output.
func (s *jobStore) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
	return provisioning.JobResourceInfo.TableConverter().ConvertToTable(ctx, object, tableOptions)
}
// List returns deep copies of all jobs in the request namespace, optionally
// filtered by the label selector in options.
//
// Fixes: s.rv was read before acquiring the read lock (a data race with
// writers that mutate rv under the write lock), and a local variable named
// `copy` shadowed the builtin of the same name.
func (s *jobStore) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
	ns, ok := request.NamespaceFrom(ctx)
	if !ok {
		return nil, fmt.Errorf("missing namespace")
	}

	query := options.LabelSelector

	s.mutex.RLock()
	defer s.mutex.RUnlock()

	// Read rv under the lock so the list carries a consistent version.
	queue := &provisioning.JobList{
		ListMeta: metav1.ListMeta{
			ResourceVersion: strconv.FormatInt(s.rv, 10),
		},
	}

	for _, job := range s.jobs {
		if job.Namespace != ns {
			continue
		}

		// maybe filter
		if query != nil && !query.Matches(labels.Set(job.Labels)) {
			continue
		}

		queue.Items = append(queue.Items, *job.DeepCopy())
	}
	return queue, nil
}
// Get returns a deep copy of the named job in the request namespace, or a
// kubernetes NotFound error when no such job exists.
func (s *jobStore) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	ns, ok := request.NamespaceFrom(ctx)
	if !ok {
		return nil, fmt.Errorf("missing namespace")
	}

	s.mutex.RLock()
	defer s.mutex.RUnlock()

	for i := range s.jobs {
		candidate := &s.jobs[i]
		if candidate.Name == name && candidate.Namespace == ns {
			return candidate.DeepCopy(), nil
		}
	}
	return nil, apierrors.NewNotFound(provisioning.JobResourceInfo.GroupResource(), name)
}
// Watch starts a watch scoped to the request namespace. The selection
// predicate currently matches everything (label filtering is a TODO).
func (s *jobStore) Watch(ctx context.Context, opts *internalversion.ListOptions) (watch.Interface, error) {
	ns, ok := request.NamespaceFrom(ctx)
	if !ok {
		return nil, fmt.Errorf("missing namespace")
	}
	p := storage.SelectionPredicate{
		Label: labels.Everything(), // TODO... limit
		Field: fields.Everything(),
	}

	// Can watch by label selection
	// requestedRV of 0 means "from now on".
	jw := s.watchSet.newWatch(ctx, 0, p, s.versioner, &ns)
	jw.Start()
	return jw, nil
}
// Implementing JobQueue

// Register a worker (inline for now)
// NOTE(review): the workers slice is not guarded by the mutex — this is
// only safe if all workers are registered before the first Add; confirm
// that callers respect that ordering.
func (s *jobStore) Register(worker Worker) {
	s.workers = append(s.workers, worker)
}
// Add validates and enqueues a new job. The name and status must be empty;
// both are generated here. Old jobs beyond capacity are dropped (with a
// Deleted watch event), an Added event is sent for the new job, and a
// background drain goroutine is kicked off to process the queue.
func (s *jobStore) Add(ctx context.Context, job *provisioning.Job) (*provisioning.Job, error) {
	if job.Namespace == "" {
		return nil, apierrors.NewBadRequest("missing metadata.namespace")
	}
	if job.Name != "" {
		return nil, apierrors.NewBadRequest("name will always be generated")
	}
	if job.Spec.Repository == "" {
		return nil, apierrors.NewBadRequest("missing spec.repository")
	}
	if job.Spec.Action == "" {
		return nil, apierrors.NewBadRequest("missing spec.action")
	}
	if job.Spec.Action == provisioning.JobActionExport && job.Spec.Export == nil {
		return nil, apierrors.NewBadRequest("missing spec.export")
	}
	if job.Spec.Action == provisioning.JobActionSync && job.Spec.Sync == nil {
		return nil, apierrors.NewBadRequest("missing spec.sync")
	}

	// Only for add
	if job.Status.State != "" {
		return nil, apierrors.NewBadRequest("must add jobs with empty status")
	}

	if job.Labels == nil {
		job.Labels = make(map[string]string)
	}
	job.Labels["repository"] = job.Spec.Repository // for now, make sure we can search Multi-tenant

	job.Name = fmt.Sprintf("%s:%s:%s", job.Spec.Repository, job.Spec.Action, util.GenerateShortUID())

	s.mutex.Lock()
	defer s.mutex.Unlock()

	s.rv++
	job.ResourceVersion = strconv.FormatInt(s.rv, 10)
	job.Status.State = provisioning.JobStatePending
	job.CreationTimestamp = metav1.NewTime(time.Now())

	// Rebuild the slice with the new job first; entries at or past
	// capacity are dropped and announced as deletions.
	jobs := make([]provisioning.Job, 0, len(s.jobs)+2)
	jobs = append(jobs, *job)
	for i, j := range s.jobs {
		if i >= s.capacity {
			// Remove the old jobs
			s.watchSet.notifyWatchers(watch.Event{
				Object: j.DeepCopyObject(),
				Type:   watch.Deleted,
			}, nil)
			continue
		}
		jobs = append(jobs, j)
	}

	// Send add event
	s.watchSet.notifyWatchers(watch.Event{
		Object: job.DeepCopyObject(),
		Type:   watch.Added,
	}, nil)

	// For now, start a thread processing each job
	// (Next locks the same mutex, so it blocks until Add releases it.)
	go s.drainPending()

	s.jobs = jobs // replace existing list
	return job, nil
}
// drainPending reads the queue until no pending jobs remain. It is launched
// from Add; concurrent instances are safe because Next claims each job
// (flipping it to "working") under the store mutex.
func (s *jobStore) drainPending() {
	logger := logging.DefaultLogger.With("logger", "job-store")
	ctx := logging.Context(context.Background(), logger)

	var err error
	for {
		// NOTE(review): this sleeps 200 *microseconds* between jobs — if a
		// longer backoff was intended (milliseconds?), confirm and adjust.
		time.Sleep(time.Microsecond * 200)

		job := s.Next(ctx)
		if job == nil {
			return // done
		}
		logger := logger.With("job", job.GetName(), "namespace", job.GetNamespace())
		ctx := logging.Context(ctx, logger)

		var foundWorker bool
		// Progress updates are persisted back through Update.
		recorder := newJobProgressRecorder(func(ctx context.Context, j provisioning.JobStatus) error {
			return s.Update(ctx, job.Namespace, job.Name, j)
		})
		for _, worker := range s.workers {
			if !worker.IsSupported(ctx, *job) {
				continue
			}

			// Already found a worker, no need to continue
			foundWorker = true
			err = s.processByWorker(ctx, worker, *job, recorder)
			break
		}
		if !foundWorker {
			err = errors.New("no registered worker supports this job")
		}

		// Persist the terminal status regardless of success or failure.
		status := recorder.Complete(ctx, err)
		err = s.Update(ctx, job.Namespace, job.Name, status)
		if err != nil {
			logger.Error("error running job", "error", err)
		}

		logger.Debug("job has been fully completed")
	}
}
// processByWorker sets up a provisioning identity and namespace on the
// context, resolves the job's repository, and hands the job to the worker.
func (s *jobStore) processByWorker(ctx context.Context, worker Worker, job provisioning.Job, recorder JobProgressRecorder) error {
	ctx = request.WithNamespace(ctx, job.Namespace)
	ctx, _, err := identity.WithProvisioningIdentitiy(ctx, job.Namespace)
	if err != nil {
		return fmt.Errorf("get worker identity: %w", err)
	}

	repoName := job.Spec.Repository
	logger := logging.FromContext(ctx)
	logger = logger.With("repository", repoName)
	ctx = logging.Context(ctx, logger)

	repo, err := s.getter.GetRepository(ctx, repoName)
	if err != nil {
		return fmt.Errorf("get repository: %w", err)
	}
	// TODO: does this really happen?
	if repo == nil {
		return errors.New("unknown repository")
	}

	return worker.Process(ctx, repo, job, recorder)
}
// Next checks out the next "pending" job: it marks it working, stamps the
// start time, bumps the resource version, and notifies watchers. Returns a
// copy of the claimed job, or nil when nothing is pending.
func (s *jobStore) Next(ctx context.Context) *provisioning.Job {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	// The oldest jobs should be checked out first
	// (Add prepends, so the oldest entries sit at the end of the slice.)
	for i := len(s.jobs) - 1; i >= 0; i-- {
		if s.jobs[i].Status.State == provisioning.JobStatePending {
			oldObj := s.jobs[i].DeepCopyObject()
			s.rv++
			s.jobs[i].ResourceVersion = strconv.FormatInt(s.rv, 10)
			s.jobs[i].Status.State = provisioning.JobStateWorking
			s.jobs[i].Status.Started = time.Now().UnixMilli()
			job := s.jobs[i]
			s.watchSet.notifyWatchers(watch.Event{
				Object: job.DeepCopyObject(),
				Type:   watch.Modified,
			}, oldObj)
			return &job
		}
	}
	return nil
}
// Update replaces the status of the named job and notifies watchers. It
// fails when the new state is empty, progress is out of range, the job is
// already finished, or the job does not exist.
//
// Fix: s.rv was incremented before validation and even when the job was
// not found, consuming resource versions for requests that change nothing.
// The bump now happens only when a job is actually updated.
func (s *jobStore) Update(ctx context.Context, namespace string, name string, status provisioning.JobStatus) error {
	// Validate before touching shared state.
	if status.State == "" {
		return apierrors.NewBadRequest("The state must be set")
	}
	if status.Progress > 100 || status.Progress < 0 {
		return apierrors.NewBadRequest("progress must be between 0 and 100")
	}

	s.mutex.Lock()
	defer s.mutex.Unlock()

	for idx, job := range s.jobs {
		if job.Name == name && job.Namespace == namespace {
			// Finished jobs are immutable.
			if job.Status.State.Finished() {
				return &apierrors.StatusError{ErrStatus: metav1.Status{
					Code:    http.StatusPreconditionFailed,
					Message: "The job is already finished and can not be updated",
				}}
			}
			if status.State.Finished() {
				status.Finished = time.Now().UnixMilli()
			}

			// Bump the version only for a real change.
			s.rv++
			oldObj := job.DeepCopyObject()
			job.ResourceVersion = strconv.FormatInt(s.rv, 10)
			job.Status = status
			s.jobs[idx] = job
			s.watchSet.notifyWatchers(watch.Event{
				Object: job.DeepCopyObject(),
				Type:   watch.Modified,
			}, oldObj)
			return nil
		}
	}
	return apierrors.NewNotFound(provisioning.JobResourceInfo.GroupResource(), name)
}

@ -0,0 +1,379 @@
// SPDX-License-Identifier: AGPL-3.0-only
// Provenance-includes-location: https://github.com/tilt-dev/tilt-apiserver/blob/main/pkg/storage/filepath/watchset.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: The Kubernetes Authors.
// See also
// https://github.com/grafana/grafana/blob/v11.1.9/pkg/apiserver/storage/file/watchset.go
package jobs
import (
"context"
"fmt"
"sync"
"sync/atomic"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
"k8s.io/klog/v2"
)
const (
	// UpdateChannelSize is the buffer size of each watcher's update channel;
	// it is > 1 so slower clients do not immediately block event fan-out.
	UpdateChannelSize = 25
	// InitialWatchNodesSize is the initial capacity of the watcher map.
	InitialWatchNodesSize = 20
	// InitialBufferedEventsSize is the initial capacity of the replay buffer.
	InitialBufferedEventsSize = 25
)
// eventWrapper pairs a watch event with the object's state prior to the
// event, which is required to classify MODIFIED events under predicate
// filtering (added-to-view vs removed-from-view).
type eventWrapper struct {
	ev watch.Event
	// optional: oldObject is only set for modifications for determining their type as necessary (when using predicate filtering)
	oldObject runtime.Object
}
// watchNode is one registered watcher: it receives raw events from its parent
// WatchSet on updateCh and forwards those that pass RV, namespace and
// predicate checks to outCh.
type watchNode struct {
	ctx context.Context
	s   *WatchSet
	// id is unique within the parent WatchSet and keys its nodes map.
	id       uint64
	updateCh chan eventWrapper
	outCh    chan watch.Event
	// requestedRV is the resource version the client started watching from;
	// events with a strictly lower RV are filtered out.
	requestedRV uint64
	// the watch may or may not be namespaced for a namespaced resource. This is always nil for cluster-scoped kinds
	watchNamespace *string
	predicate      storage.SelectionPredicate
	versioner      storage.Versioner
}
// Keeps track of which watches need to be notified
type WatchSet struct {
	// mu guards nodes; counter is atomic and does not need the lock.
	mu    sync.RWMutex
	nodes map[uint64]*watchNode
	// counter issues unique watchNode ids.
	counter atomic.Uint64
	// buffered retains every notified event so watchers that start later can
	// replay the ones they missed; guarded by bufferedMutex.
	buffered      []eventWrapper
	bufferedMutex sync.RWMutex
}
// NewWatchSet constructs an empty WatchSet, ready to register watchers.
func NewWatchSet() *WatchSet {
	ws := &WatchSet{
		nodes:    make(map[uint64]*watchNode, InitialWatchNodesSize),
		buffered: make([]eventWrapper, 0, InitialBufferedEventsSize),
	}
	return ws
}
// Creates a new watch with a unique id, but
// does not start sending events to it until start() is called.
func (s *WatchSet) newWatch(ctx context.Context, requestedRV uint64, p storage.SelectionPredicate, versioner storage.Versioner, namespace *string) *watchNode {
	// Use the value returned by Add directly: a separate Add followed by a
	// Load is racy — two concurrent callers could both observe the same final
	// counter value and hand out duplicate watcher ids.
	id := s.counter.Add(1)
	node := &watchNode{
		ctx:         ctx,
		requestedRV: requestedRV,
		id:          id,
		s:           s,
		// updateCh size needs to be > 1 to allow slower clients to not block passing new events
		updateCh: make(chan eventWrapper, UpdateChannelSize),
		// outCh size needs to be > 1 for single process use-cases such as tests where watch and event seeding from CUD
		// events is happening on the same thread
		outCh:          make(chan watch.Event, UpdateChannelSize),
		predicate:      p,
		watchNamespace: namespace,
		versioner:      versioner,
	}
	return node
}
// CleanupWatchers stops every registered watcher and removes it from the set.
func (s *WatchSet) CleanupWatchers() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, node := range s.nodes {
		node.stop()
	}
}
// oldObject is only passed in the event of a modification
// in case a predicate filtered watch is impacted as a result of modification
// NOTE: this function gives one the misperception that a newly added node will never
// get a double event, one from buffered and one from the update channel
// That perception is not true. Even though this function maintains the lock throughout the function body
// it is not true of the Start function. So basically, the Start function running after this function
// fully stands the chance of another future notifyWatchers double sending it the event through the two means mentioned
// (watch nodes deduplicate by resource version in Start, so duplicates are dropped there)
func (s *WatchSet) notifyWatchers(ev watch.Event, oldObject runtime.Object) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	updateEv := eventWrapper{
		ev: ev,
	}
	if oldObject != nil {
		updateEv.oldObject = oldObject
	}
	// Events are always buffered.
	// this is because of an inadvertent delay which is built into the watch process
	// Watch() from storage returns Watch.Interface with a async start func.
	// The only way to guarantee that we can interpret the passed RV correctly is to play it against missed events
	// (notice the loop below over s.nodes isn't exactly going to work on a new node
	// unless start is called on it)
	s.bufferedMutex.Lock()
	s.buffered = append(s.buffered, updateEv)
	s.bufferedMutex.Unlock()
	// Fan out to every registered node; updateCh is buffered, so a slow
	// consumer only blocks this loop once its buffer fills up.
	for _, w := range s.nodes {
		w.updateCh <- updateEv
	}
}
// isValid is not necessary to be called on oldObject in UpdateEvents - assuming the Watch pushes correctly setup eventWrapper our way
// first bool is whether the event is valid for current watcher
// second bool is whether checking the old value against the predicate may be valuable to the caller
// second bool may be a helpful aid to establish context around MODIFIED events
// (note that this second bool is only marked true if we pass other checks first, namely RV and namespace)
func (w *watchNode) isValid(e eventWrapper) (bool, bool, error) {
	obj, err := meta.Accessor(e.ev.Object)
	if err != nil {
		// Deliberately best-effort: an inaccessible object is skipped, not fatal.
		klog.Error("Could not get accessor to object in event")
		return false, false, nil
	}
	eventRV, err := w.getResourceVersionAsInt(e.ev.Object)
	if err != nil {
		return false, false, err
	}
	// Drop events older than the RV the client started watching from.
	if eventRV < w.requestedRV {
		return false, false, nil
	}
	if w.watchNamespace != nil && *w.watchNamespace != obj.GetNamespace() {
		// Return nil explicitly: err is always nil at this point, and
		// returning the stale variable obscured that this is simply an
		// out-of-namespace event, not a failure.
		return false, false, nil
	}
	valid, err := w.predicate.Matches(e.ev.Object)
	if err != nil {
		return false, false, err
	}
	return valid, e.ev.Type == watch.Modified, nil
}
// handleAddedForFilteredList translates a MODIFIED event into an ADDED event
// when the object newly entered this watcher's filtered view (the old object
// did not match the predicate but the current one does).
// Only call this method if the current object matches the predicate.
// Returns nil when the object was already visible and no translation applies.
func (w *watchNode) handleAddedForFilteredList(e eventWrapper) (*watch.Event, error) {
	if e.oldObject == nil {
		return nil, fmt.Errorf("oldObject should be set for modified events")
	}
	matched, err := w.predicate.Matches(e.oldObject)
	if err != nil {
		return nil, err
	}
	if matched {
		// The watcher already saw this object; nothing to re-type.
		return nil, nil
	}
	// The object just appeared in the filtered view: surface it as ADDED.
	e.ev.Type = watch.Added
	return &e.ev, nil
}
// handleDeletedForFilteredList translates a MODIFIED event into a DELETED
// event when the object used to match this watcher's predicate but the
// modified object no longer does. Returns nil (no event) when the old object
// never matched either, i.e. the watcher never saw it.
// Only call this method when the current object does not match the predicate.
func (w *watchNode) handleDeletedForFilteredList(e eventWrapper) (*watch.Event, error) {
	if e.oldObject == nil {
		return nil, fmt.Errorf("oldObject should be set for modified events")
	}
	ok, err := w.predicate.Matches(e.oldObject)
	if err != nil {
		return nil, err
	}
	if !ok {
		// Old object was never in the filtered view; nothing to report.
		return nil, nil
	}
	// isn't a match but used to be
	e.ev.Type = watch.Deleted
	oldObjectAccessor, err := meta.Accessor(e.oldObject)
	if err != nil {
		klog.Errorf("Could not get accessor to correct the old RV of filtered out object")
		return nil, err
	}
	currentRV, err := getResourceVersion(e.ev.Object)
	if err != nil {
		klog.Errorf("Could not get accessor to object in event")
		return nil, err
	}
	// Stamp the old object with the current RV so downstream RV-based
	// deduplication treats this synthesized DELETED event as new.
	oldObjectAccessor.SetResourceVersion(currentRV)
	e.ev.Object = e.oldObject
	return &e.ev, nil
}
// processEvent applies this watcher's filters to a single event and forwards
// the result onto outCh when appropriate. Init events bypass all checks since
// they were vetted when listed. For predicate-filtered watches a MODIFIED
// event may be re-typed: ADDED when the object just entered the filtered
// view, DELETED when it just left it.
func (w *watchNode) processEvent(e eventWrapper, isInitEvent bool) error {
	if isInitEvent {
		// Init events have already been vetted against the predicate and other RV behavior
		// Let them pass through
		w.outCh <- e.ev
		return nil
	}
	valid, runDeleteFromFilteredListHandler, err := w.isValid(e)
	if err != nil {
		klog.Errorf("Could not determine validity of the event: %v", err)
		return err
	}
	if valid {
		if e.ev.Type == watch.Modified {
			// Current object matches the predicate; check whether it should
			// actually be surfaced as an ADDED event.
			ev, err := w.handleAddedForFilteredList(e)
			if err != nil {
				return err
			}
			if ev != nil {
				w.outCh <- *ev
			} else {
				// forward the original event if add handling didn't signal any impact
				w.outCh <- e.ev
			}
		} else {
			w.outCh <- e.ev
		}
		return nil
	}
	if runDeleteFromFilteredListHandler {
		if e.ev.Type == watch.Modified {
			// Current object no longer matches; maybe synthesize a DELETED event.
			ev, err := w.handleDeletedForFilteredList(e)
			if err != nil {
				return err
			}
			if ev != nil {
				w.outCh <- *ev
			}
		} // explicitly doesn't have an event forward for the else case here
		return nil
	}
	return nil
}
// Start sending events to this watch.
// It registers the node with its parent set and then, on a dedicated
// goroutine, runs three phases:
//  1. forward the pre-vetted init events,
//  2. replay buffered events with an RV above the highest seen so far,
//  3. stream live events from updateCh until it closes or ctx is done.
// maxRV tracks the highest delivered RV to drop duplicates across phases.
func (w *watchNode) Start(initEvents ...watch.Event) {
	w.s.mu.Lock()
	w.s.nodes[w.id] = w
	w.s.mu.Unlock()
	go func() {
		maxRV := uint64(0)
		for _, ev := range initEvents {
			currentRV, err := w.getResourceVersionAsInt(ev.Object)
			if err != nil {
				klog.Errorf("Could not determine init event RV for deduplication of buffered events: %v", err)
				continue
			}
			if maxRV < currentRV {
				maxRV = currentRV
			}
			if err := w.processEvent(eventWrapper{ev: ev}, true); err != nil {
				klog.Errorf("Could not process event: %v", err)
			}
		}
		// If we had no init events, simply rely on the passed RV
		if maxRV == 0 {
			maxRV = w.requestedRV
		}
		// Phase 2: replay events buffered before this node was registered.
		w.s.bufferedMutex.RLock()
		for _, e := range w.s.buffered {
			eventRV, err := w.getResourceVersionAsInt(e.ev.Object)
			if err != nil {
				klog.Errorf("Could not determine RV for deduplication of buffered events: %v", err)
				continue
			}
			// Skip anything at or below what was already delivered.
			if maxRV >= eventRV {
				continue
			} else {
				maxRV = eventRV
			}
			if err := w.processEvent(e, false); err != nil {
				klog.Errorf("Could not process event: %v", err)
			}
		}
		w.s.bufferedMutex.RUnlock()
		// Phase 3: live events until stop() closes updateCh or ctx ends.
		for {
			select {
			case e, ok := <-w.updateCh:
				if !ok {
					close(w.outCh)
					return
				}
				eventRV, err := w.getResourceVersionAsInt(e.ev.Object)
				if err != nil {
					klog.Errorf("Could not determine RV for deduplication of channel events: %v", err)
					continue
				}
				if maxRV >= eventRV {
					continue
				} else {
					maxRV = eventRV
				}
				if err := w.processEvent(e, false); err != nil {
					klog.Errorf("Could not process event: %v", err)
				}
			case <-w.ctx.Done():
				close(w.outCh)
				return
			}
		}
	}()
}
// Stop detaches this watcher from its parent set under the set's write lock.
func (w *watchNode) Stop() {
	w.s.mu.Lock()
	w.stop()
	w.s.mu.Unlock()
}
// Unprotected func: ensure mutex on the parent watch set is locked before calling
func (w *watchNode) stop() {
	_, registered := w.s.nodes[w.id]
	if !registered {
		// Already stopped; closing updateCh twice would panic.
		return
	}
	delete(w.s.nodes, w.id)
	close(w.updateCh)
}
// ResultChan returns the channel on which filtered watch events are delivered.
func (w *watchNode) ResultChan() <-chan watch.Event {
	return w.outCh
}
// getResourceVersion extracts the resource version string from any object
// that supports the standard object-meta accessor.
func getResourceVersion(obj runtime.Object) (string, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		klog.Error("Could not get accessor to object in event")
		return "", err
	}
	rv := accessor.GetResourceVersion()
	return rv, nil
}
// getResourceVersionAsInt extracts the object's resource version and parses
// it into an integer using the watcher's versioner.
func (w *watchNode) getResourceVersionAsInt(obj runtime.Object) (uint64, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		klog.Error("Could not get accessor to object in event")
		return 0, err
	}
	rv := accessor.GetResourceVersion()
	return w.versioner.ParseResourceVersion(rv)
}

@ -6,8 +6,12 @@ import (
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
"github.com/grafana/grafana/pkg/registry/apis/provisioning/secrets"
"github.com/grafana/grafana/pkg/services/apiserver/builder"
"github.com/grafana/grafana/pkg/services/featuremgmt"
grafanasecrets "github.com/grafana/grafana/pkg/services/secrets"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -22,16 +26,28 @@ import (
)
var (
_ builder.APIGroupBuilder = (*APIBuilder)(nil)
_ builder.APIGroupBuilder = (*APIBuilder)(nil)
_ builder.APIGroupMutation = (*APIBuilder)(nil)
_ builder.APIGroupValidation = (*APIBuilder)(nil)
_ builder.APIGroupPostStartHookProvider = (*APIBuilder)(nil)
_ builder.OpenAPIPostProcessor = (*APIBuilder)(nil)
)
type APIBuilder struct{}
// APIBuilder builds the provisioning API group and holds its dependencies.
type APIBuilder struct {
	secrets secrets.Service // encrypts/decrypts stored repository credentials
	jobs    jobs.JobQueue   // queue of background provisioning jobs
	getter  rest.Getter     // fetches Repository objects from storage
}
// NewAPIBuilder creates an API builder.
// It avoids anything that is core to Grafana, such that it can be used in a multi-tenant service down the line.
// This means there are no hidden dependencies, and no use of e.g. *settings.Cfg.
func NewAPIBuilder() *APIBuilder {
return &APIBuilder{}
func NewAPIBuilder(
secrets secrets.Service,
) *APIBuilder {
return &APIBuilder{
secrets: secrets,
}
}
// RegisterAPIService returns an API builder, from [NewAPIBuilder]. It is called by Wire.
@ -39,13 +55,14 @@ func NewAPIBuilder() *APIBuilder {
func RegisterAPIService(
	features featuremgmt.FeatureToggles,
	apiregistration builder.APIRegistrar,
	secretsSvc grafanasecrets.Service,
) (*APIBuilder, error) {
	if !features.IsEnabledGlobally(featuremgmt.FlagProvisioning) &&
		!features.IsEnabledGlobally(featuremgmt.FlagGrafanaAPIServerWithExperimentalAPIs) {
		return nil, nil // skip registration unless opting into experimental apis OR the feature specifically
	}
	// Wrap the core single-tenant secrets service in the provisioning-local
	// abstraction so the builder itself has no dependency on core Grafana.
	builder := NewAPIBuilder(secrets.NewSingleTenant(secretsSvc))
	apiregistration.RegisterAPI(builder)
	return builder, nil
}
@ -87,9 +104,14 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI
return fmt.Errorf("failed to create repository storage: %w", err)
}
// FIXME: Make job queue store the jobs somewhere persistent.
jobStore := jobs.NewJobStore(50, b) // in memory, for now...
b.jobs = jobStore
repositoryStatusStorage := grafanaregistry.NewRegistryStatusStore(opts.Scheme, repositoryStorage)
storage := map[string]rest.Storage{}
storage[provisioning.JobResourceInfo.StoragePath()] = jobStore
storage[provisioning.RepositoryResourceInfo.StoragePath()] = repositoryStorage
storage[provisioning.RepositoryResourceInfo.StoragePath("status")] = repositoryStatusStorage
apiGroupInfo.VersionedResourcesStorageMap[provisioning.VERSION] = storage
@ -158,3 +180,16 @@ func (b *APIBuilder) PostProcessOpenAPI(oas *spec3.OpenAPI) (*spec3.OpenAPI, err
return oas, nil
}
// Helpers for fetching valid Repository objects
// GetRepository looks up the named repository object via the storage getter.
// NOTE(review): the conversion from the raw object to a concrete
// repository.Repository is not implemented yet; this panics after a
// successful lookup.
func (b *APIBuilder) GetRepository(ctx context.Context, name string) (repository.Repository, error) {
	obj, err := b.getter.Get(ctx, name, &metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	_ = obj // retained for the eventual conversion below
	// FIXME: Return a valid Repository object with the correct underlying storage.
	panic("FIXME")
}

@ -0,0 +1,128 @@
package repository
import (
"context"
"io/fs"
"net/http"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
)
// Repository is a single configured provisioning source, as stored in the
// Kubernetes API server.
type Repository interface {
	// Config returns the saved Kubernetes object.
	Config() *provisioning.Repository
	// Validate ensures the resource _looks_ correct.
	// It should be called before trying to upsert a resource into the Kubernetes API server.
	// This is not an indication that the connection information works, just that they are reasonably configured (see also Test).
	Validate() field.ErrorList
	// Test checks if the connection information actually works.
	Test(ctx context.Context) (*provisioning.TestResults, error)
}
// ErrFileNotFound indicates that a path could not be found in the repository.
var ErrFileNotFound error = fs.ErrNotExist

// FileInfo describes a single file read from a repository, including its
// contents.
type FileInfo struct {
	// Path to the file on disk.
	// No leading or trailing slashes will be contained within.
	// This uses '/' for separation. Use the 'path' package to interact with this.
	Path string
	// The raw bytes
	Data []byte
	// The git branch or reference commit
	Ref string
	// The git hash for a given file
	Hash string
	// When was the file changed (if known)
	Modified *metav1.Time
}

// An entry in the file tree, as returned by 'ReadFileTree'. Like FileInfo, but contains less information.
type FileTreeEntry struct {
	// The path to the file from the base path given (if any).
	// No leading or trailing slashes will be contained within.
	// This uses '/' for separation. Use the 'path' package to interact with this.
	Path string
	// The hash for the file. Lower-case hex.
	// Empty string if Blob is false.
	Hash string
	// The size of the file.
	// 0 if Blob is false.
	Size int64
	// Whether this entry is a blob or a subtree.
	Blob bool
}
// Reader provides read access to files stored in a repository.
type Reader interface {
	// Read a file from the resource
	// This data will be parsed and validated before it is shown to end users
	Read(ctx context.Context, path, ref string) (*FileInfo, error)
	// Read all file names from the tree.
	// This data will be parsed and validated before it is shown.
	//
	// TODO: Make some API contract that lets us ignore files that aren't relevant to us (e.g. CI/CD, CODEOWNERS, other configs or source code).
	// TODO: Test scale: do we want to stream entries instead somehow?
	ReadTree(ctx context.Context, ref string) ([]FileTreeEntry, error)
}

// Writer provides write access to files stored in a repository.
type Writer interface {
	// Create a new file in the repository.
	// The data has already been validated and is ready for save
	Create(ctx context.Context, path, ref string, data []byte, message string) error
	// Update a file in the remote repository
	// The data has already been validated and is ready for save
	Update(ctx context.Context, path, ref string, data []byte, message string) error
	// Write a file to the repository.
	// Functionally the same as Read then Create or Update, but more efficient depending on the backend
	Write(ctx context.Context, path, ref string, data []byte, message string) error
	// Delete a file in the remote repository
	Delete(ctx context.Context, path, ref, message string) error
}
// Hooks called after the repository has been created, updated or deleted
type Hooks interface {
	// For repositories that support webhooks
	Webhook(ctx context.Context, req *http.Request) (*provisioning.WebhookResponse, error)
	OnCreate(ctx context.Context) (*provisioning.WebhookStatus, error)
	OnUpdate(ctx context.Context) (*provisioning.WebhookStatus, error)
	OnDelete(ctx context.Context) error
}

// FileAction describes how a file changed between two versions.
type FileAction string

const (
	FileActionCreated FileAction = "created"
	FileActionUpdated FileAction = "updated"
	FileActionDeleted FileAction = "deleted"
	FileActionIgnored FileAction = "ignored"
	// Renamed actions may be reconstructed as delete then create
	FileActionRenamed FileAction = "renamed"
)

// VersionedFileChange is a single file change between two refs.
type VersionedFileChange struct {
	Action       FileAction
	Path         string
	Ref          string
	PreviousRef  string // rename | update
	PreviousPath string // rename
}
// Versioned is a repository that supports versioning.
// This interface may be extended to make the original Repository interface more agnostic to the underlying storage system.
type Versioned interface {
	// History of changes for a path
	History(ctx context.Context, path, ref string) ([]provisioning.HistoryItem, error)
	// LatestRef returns the most recent ref of the repository.
	LatestRef(ctx context.Context) (string, error)
	// CompareFiles lists the file changes between two refs.
	CompareFiles(ctx context.Context, base, ref string) ([]VersionedFileChange, error)
}

@ -0,0 +1,59 @@
package safepath
import (
"os"
"path"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// ErrUnsafePathTraversal indicates that an input path had a path traversal which led to escaping the required prefix.
// E.g. Join("/test", "..") would return this, because it doesn't stay within the '/test' directory.
var ErrUnsafePathTraversal = apierrors.NewBadRequest("the input path had an unacceptable path traversal")
// Join joins any number of elements in a path under a common prefix path.
// If the elems do path traversal, they are permitted to do so under their own directories.
// The output result will _always_ have a prefix of the given prefix, and no path traversals in the output string.
// The output result will not end with a trailing slash.
// The output result will have a leading slash if one is given as a prefix.
// If the prefix would ultimately be escaped, an error is returned.
//
// This function is safe for <https://securego.io/docs/rules/g304.html>.
func Join(prefix string, elem ...string) (string, error) {
	// We clean early to make the boundary check be sensible after path.Join does a Clean for us.
	prefix = replaceOSSeparators(path.Clean(prefix))
	if len(elem) == 0 {
		return prefix, nil
	}
	for i, e := range elem {
		// We don't use Clean here because the output of path.Join will clean for us.
		elem[i] = replaceOSSeparators(e)
	}
	subPath := path.Join(elem...) // performs a Clean after joining
	completePath := path.Join(prefix, subPath)
	// A plain HasPrefix(completePath, prefix) check is insufficient:
	// Join("/test", "../testing") yields "/testing", which shares the string
	// prefix "/test" but escapes the directory. Require the result to be the
	// prefix itself or to continue with a path separator right after it.
	boundary := prefix
	if prefix != "/" {
		boundary += "/"
	}
	if completePath != prefix && !strings.HasPrefix(completePath, boundary) {
		return "", ErrUnsafePathTraversal
	}
	return completePath, nil
}
// Clean normalizes the OS separators of the path to forward slashes and then
// performs a [path.Clean] on it.
// Note that this does no effort to ensure the paths are safe to use. It only cleans them.
func Clean(p string) string {
	normalized := replaceOSSeparators(p)
	return path.Clean(normalized)
}
// osSeparator is declared as a var here only to ensure we can change it in tests.
var osSeparator = os.PathSeparator
// replaceOSSeparators rewrites the OS-specific path separator to a slash.
// All OSes we target (Linux, macOS, and Windows) support forward-slashes in path traversals, as such it's simpler to use the same character everywhere.
// BSDs do as well (even though they're not a target as of writing).
func replaceOSSeparators(p string) string {
	sep := osSeparator
	if sep == '/' {
		// perf: slash-native platform, nothing to replace
		return p
	}
	return strings.ReplaceAll(p, string(sep), "/")
}

@ -0,0 +1,74 @@
package safepath
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestPathJoin exercises Join's traversal protection and separator
// normalization while pretending the OS separator is a backslash (Windows).
func TestPathJoin(t *testing.T) {
	orig := osSeparator
	osSeparator = '\\' // pretend we're on Windows
	defer func() { osSeparator = orig }()
	testCases := []struct {
		Comment string
		In      []string // In[0] is the prefix; the remainder are joined elements
		Out     any      // string (expected path) or error (expected failure)
	}{
		{"Empty elements should not change input", []string{"/test/"}, "/test"},
		{"Empty elements without leading slash should not change input", []string{"test/"}, "test"},
		{"Single element should be added to path", []string{"/test/", "abc"}, "/test/abc"},
		{"Single element should be added to path with current dir prefix", []string{"./test/", "abc"}, "test/abc"},
		{"Single element with leading slash should be added to path", []string{"/test/", "/abc"}, "/test/abc"},
		{"Many elements are all appended to path", []string{"/test/", "a", "b", "c"}, "/test/a/b/c"},
		{"Path traversal within same directory should be expanded", []string{"/test/", "a", "..", "b", ".", "..", "c"}, "/test/c"},
		{"Path traversal escaping root dir prefix should return err", []string{"/test/", ".."}, ErrUnsafePathTraversal},
		{"Path traversal escaping no dir prefix should return err", []string{"test/", ".."}, ErrUnsafePathTraversal},
		{"Path traversal escaping current dir prefix should return err", []string{"./test/", ".."}, ErrUnsafePathTraversal},
		{"Complex path traversal escaping prefix should return err", []string{"/test/", "a/..///c/", "../../test/d/../a/../.."}, ErrUnsafePathTraversal},
		{"Complex path traversal remaining in prefix should be expanded", []string{"/test/", "a/..///c/", "../../test/d/"}, "/test/d"},
		{"Problematic code example from the g304 website", []string{"/safe/path", "../../private/path"}, ErrUnsafePathTraversal},
		{"Traversing beyond root should be expanded", []string{"/test/", "/../a"}, "/test/a"},
		{"OS separator should be replaced with a slash", []string{"/test\\test", "abc\\test"}, "/test/test/abc/test"},
	}
	for _, tc := range testCases {
		tc := tc // capture the range variable for the subtest closure
		t.Run(tc.Comment, func(t *testing.T) {
			path, err := Join(tc.In[0], tc.In[1:]...)
			if ee, ok := tc.Out.(error); ok {
				assert.ErrorIs(t, err, ee, "expected unsuccessful outcome")
				assert.Empty(t, path, "expected empty string when unsuccessful")
			} else if str, ok := tc.Out.(string); ok {
				assert.NoError(t, err, "expected successful outcome")
				assert.Equal(t, str, path)
			} else {
				panic("expected out was neither string nor error")
			}
		})
	}
}
// TestPathClean verifies cleaning and OS-separator normalization while
// pretending the OS separator is a backslash (Windows).
func TestPathClean(t *testing.T) {
	orig := osSeparator
	osSeparator = '\\' // pretend we're on Windows
	defer func() { osSeparator = orig }()
	cases := []struct {
		Comment string
		In      string
		Out     string
	}{
		{"Simple path", "/test/", "/test"},
		{"Simple path with OS separators", "\\test\\here", "/test/here"},
		{"Simple path with mixed separators", "\\test/here", "/test/here"},
	}
	for _, c := range cases {
		c := c
		t.Run(c.Comment, func(t *testing.T) {
			assert.Equal(t, c.Out, Clean(c.In))
		})
	}
}

@ -0,0 +1,31 @@
package safepath
import (
"context"
"path"
"strings"
)
type WalkFunc = func(ctx context.Context, path string) error
// Walk walks the given folder path and calls the given function for each folder.
func Walk(ctx context.Context, p string, fn WalkFunc) error {
if p == "." || p == "/" {
return nil
}
var currentPath string
for _, folder := range strings.Split(p, "/") {
if folder == "" {
// Trailing / leading slash?
continue
}
currentPath = path.Join(currentPath, folder)
if err := fn(ctx, currentPath); err != nil {
return err
}
}
return nil
}

@ -0,0 +1,35 @@
package secrets
import (
"context"
"github.com/grafana/grafana/pkg/services/secrets"
)
// A secrets encryption service. It only operates on values, no names or similar.
// It is likely we will need to change this when the multi-tenant service comes around.
//
// FIXME: this is a temporary service/package until we can make use of
// the new secrets service in app platform.
type Service interface {
	// Encrypt encrypts the given plaintext and returns the ciphertext.
	Encrypt(ctx context.Context, data []byte) ([]byte, error)
	// Decrypt decrypts ciphertext previously produced by Encrypt.
	Decrypt(ctx context.Context, data []byte) ([]byte, error)
}

// Compile-time check that singleTenant implements Service.
var _ Service = (*singleTenant)(nil)

// singleTenant adapts Grafana's core single-tenant secrets service to the
// provisioning Service interface.
type singleTenant struct {
	inner secrets.Service
}

// NewSingleTenant wraps the given core secrets service.
func NewSingleTenant(svc secrets.Service) *singleTenant {
	return &singleTenant{svc}
}

// Encrypt delegates to the inner service, explicitly without a secret scope.
func (s *singleTenant) Encrypt(ctx context.Context, data []byte) ([]byte, error) {
	return s.inner.Encrypt(ctx, data, secrets.WithoutScope())
}

// Decrypt delegates to the inner service.
func (s *singleTenant) Decrypt(ctx context.Context, data []byte) ([]byte, error) {
	return s.inner.Decrypt(ctx, data)
}

@ -36,6 +36,380 @@
}
}
},
"/apis/provisioning.grafana.app/v0alpha1/jobs": {
"get": {
"tags": [
"Job"
],
"description": "list or watch objects of kind Job",
"operationId": "listJobForAllNamespaces",
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/json;stream=watch": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/vnd.kubernetes.protobuf": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/vnd.kubernetes.protobuf;stream=watch": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/yaml": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
}
}
}
},
"x-kubernetes-action": "list",
"x-kubernetes-group-version-kind": {
"group": "provisioning.grafana.app",
"version": "v0alpha1",
"kind": "Job"
}
},
"parameters": [
{
"name": "allowWatchBookmarks",
"in": "query",
"description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
"schema": {
"type": "boolean",
"uniqueItems": true
}
},
{
"name": "continue",
"in": "query",
"description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "fieldSelector",
"in": "query",
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "labelSelector",
"in": "query",
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "limit",
"in": "query",
"description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
"schema": {
"type": "integer",
"uniqueItems": true
}
},
{
"name": "pretty",
"in": "query",
"description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "resourceVersion",
"in": "query",
"description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "resourceVersionMatch",
"in": "query",
"description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "sendInitialEvents",
"in": "query",
"description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.",
"schema": {
"type": "boolean",
"uniqueItems": true
}
},
{
"name": "timeoutSeconds",
"in": "query",
"description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
"schema": {
"type": "integer",
"uniqueItems": true
}
},
{
"name": "watch",
"in": "query",
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
"schema": {
"type": "boolean",
"uniqueItems": true
}
}
]
},
"/apis/provisioning.grafana.app/v0alpha1/namespaces/{namespace}/jobs": {
"get": {
"tags": [
"Job"
],
"description": "list or watch objects of kind Job",
"operationId": "listJob",
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/json;stream=watch": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/vnd.kubernetes.protobuf": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/vnd.kubernetes.protobuf;stream=watch": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
},
"application/yaml": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList"
}
}
}
}
},
"x-kubernetes-action": "list",
"x-kubernetes-group-version-kind": {
"group": "provisioning.grafana.app",
"version": "v0alpha1",
"kind": "Job"
}
},
"parameters": [
{
"name": "allowWatchBookmarks",
"in": "query",
"description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
"schema": {
"type": "boolean",
"uniqueItems": true
}
},
{
"name": "continue",
"in": "query",
"description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "fieldSelector",
"in": "query",
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "labelSelector",
"in": "query",
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "limit",
"in": "query",
"description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
"schema": {
"type": "integer",
"uniqueItems": true
}
},
{
"name": "namespace",
"in": "path",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "pretty",
"in": "query",
"description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "resourceVersion",
"in": "query",
"description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "resourceVersionMatch",
"in": "query",
"description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "sendInitialEvents",
"in": "query",
"description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.",
"schema": {
"type": "boolean",
"uniqueItems": true
}
},
{
"name": "timeoutSeconds",
"in": "query",
"description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
"schema": {
"type": "integer",
"uniqueItems": true
}
},
{
"name": "watch",
"in": "query",
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
"schema": {
"type": "boolean",
"uniqueItems": true
}
}
]
},
"/apis/provisioning.grafana.app/v0alpha1/namespaces/{namespace}/jobs/{name}": {
"get": {
"tags": [
"Job"
],
"description": "read the specified Job",
"operationId": "getJob",
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.Job"
}
},
"application/vnd.kubernetes.protobuf": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.Job"
}
},
"application/yaml": {
"schema": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.Job"
}
}
}
}
},
"x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": {
"group": "provisioning.grafana.app",
"version": "v0alpha1",
"kind": "Job"
}
},
"parameters": [
{
"name": "name",
"in": "path",
"description": "name of the Job",
"required": true,
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "namespace",
"in": "path",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"schema": {
"type": "string",
"uniqueItems": true
}
},
{
"name": "pretty",
"in": "query",
"description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).",
"schema": {
"type": "string",
"uniqueItems": true
}
}
]
},
"/apis/provisioning.grafana.app/v0alpha1/namespaces/{namespace}/repositories": {
"get": {
"tags": [
@ -1290,21 +1664,50 @@
"uniqueItems": true
}
}
]
}
},
"components": {
"schemas": {
]
}
},
"components": {
"schemas": {
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.ExportJobOptions": {
"type": "object",
"required": [
"identifier"
],
"properties": {
"branch": {
"description": "Target branch for export (only git)",
"type": "string"
},
"folder": {
"description": "The source folder (or empty) to export",
"type": "string"
},
"history": {
"description": "Preserve history (if possible)",
"type": "boolean"
},
"identifier": {
"description": "Include the identifier in the exported metadata",
"type": "boolean",
"default": false
},
"prefix": {
"description": "Target file prefix",
"type": "string"
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.GitHubRepositoryConfig": {
"type": "object",
"required": [
"branch"
],
"properties": {
"branch": {
"description": "The branch to use in the repository. By default, this is the main branch.",
"type": "string"
},
"branchWorkflow": {
"description": "Whether we should commit to change branches and use a Pull Request flow to achieve this. By default, this is false (i.e. we will commit straight to the main branch).",
"type": "boolean"
"description": "The branch to use in the repository.",
"type": "string",
"default": ""
},
"encryptedToken": {
"description": "Token for accessing the repository, but encrypted. This is not possible to read back to a user decrypted.",
@ -1323,18 +1726,6 @@
"url": {
"description": "The repository URL (e.g. `https://github.com/example/test`).",
"type": "string"
},
"workflows": {
"description": "Workflow allowed for changes to the repository. The order is relevant for defining the precedence of the workflows. Possible values: pull-request, branch, push.",
"type": "array",
"items": {
"type": "string",
"default": "",
"enum": [
"branch",
"push"
]
}
}
}
},
@ -1365,6 +1756,243 @@
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.Job": {
"description": "The repository name and type are stored as labels",
"type": "object",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": "string"
},
"kind": {
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"type": "string"
},
"metadata": {
"default": {},
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
}
]
},
"spec": {
"default": {},
"allOf": [
{
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobSpec"
}
]
},
"status": {
"default": {},
"allOf": [
{
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobStatus"
}
]
}
},
"x-kubernetes-group-version-kind": [
{
"group": "provisioning.grafana.app",
"kind": "Job",
"version": "__internal"
},
{
"group": "provisioning.grafana.app",
"kind": "Job",
"version": "v0alpha1"
}
]
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobList": {
"type": "object",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": "string"
},
"items": {
"type": "array",
"items": {
"default": {},
"allOf": [
{
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.Job"
}
]
}
},
"kind": {
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"type": "string"
},
"metadata": {
"default": {},
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
}
]
}
},
"x-kubernetes-group-version-kind": [
{
"group": "provisioning.grafana.app",
"kind": "JobList",
"version": "__internal"
},
{
"group": "provisioning.grafana.app",
"kind": "JobList",
"version": "v0alpha1"
}
]
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobResourceSummary": {
"type": "object",
"properties": {
"create": {
"type": "integer",
"format": "int64"
},
"delete": {
"type": "integer",
"format": "int64"
},
"error": {
"description": "Create or update (export)",
"type": "integer",
"format": "int64"
},
"errors": {
"description": "Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info",
"type": "array",
"items": {
"type": "string",
"default": ""
}
},
"group": {
"type": "string"
},
"noop": {
"description": "No action required (useful for sync)",
"type": "integer",
"format": "int64"
},
"resource": {
"type": "string"
},
"total": {
"type": "integer",
"format": "int64"
},
"update": {
"type": "integer",
"format": "int64"
},
"write": {
"type": "integer",
"format": "int64"
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobSpec": {
"type": "object",
"required": [
"action",
"repository"
],
"properties": {
"action": {
"description": "Possible enum values:\n - `\"export\"` Export from grafana into the remote repository\n - `\"pr\"` Update a pull request -- send preview images, links etc\n - `\"sync\"` Sync the remote branch with the grafana instance",
"type": "string",
"default": "",
"enum": [
"export",
"pr",
"sync"
]
},
"export": {
"description": "Required when the action is `export`",
"allOf": [
{
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.ExportJobOptions"
}
]
},
"pr": {
"description": "Pull request options",
"allOf": [
{
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.PullRequestJobOptions"
}
]
},
"repository": {
"description": "The the repository reference (for now also in labels)",
"type": "string",
"default": ""
},
"sync": {
"description": "Required when the action is `sync`",
"allOf": [
{
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.SyncJobOptions"
}
]
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobStatus": {
"description": "The job status",
"type": "object",
"properties": {
"errors": {
"type": "array",
"items": {
"type": "string",
"default": ""
}
},
"finished": {
"type": "integer",
"format": "int64"
},
"message": {
"type": "string"
},
"progress": {
"description": "Optional value 0-100 that can be set while running",
"type": "number",
"format": "double"
},
"started": {
"type": "integer",
"format": "int64"
},
"state": {
"description": "Possible enum values:\n - `\"error\"` Finished with errors\n - `\"pending\"` Job has been submitted, but not processed yet\n - `\"success\"` Finished with success\n - `\"working\"` The job is running",
"type": "string",
"enum": [
"error",
"pending",
"success",
"working"
]
},
"summary": {
"description": "Summary of processed actions",
"type": "array",
"items": {
"$ref": "#/components/schemas/com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.JobResourceSummary"
}
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.LocalRepositoryConfig": {
"type": "object",
"properties": {
@ -1373,6 +2001,27 @@
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.PullRequestJobOptions": {
"type": "object",
"properties": {
"hash": {
"type": "string"
},
"pr": {
"description": "Pull request number (when appropriate)",
"type": "integer",
"format": "int32"
},
"ref": {
"description": "The branch of commit hash",
"type": "string"
},
"url": {
"description": "URL to the originator (eg, PR URL)",
"type": "string"
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.Repository": {
"description": "When this code is changed, make sure to update the code generation. As of writing, this can be done via the hack dir in the root of the repo: ./hack/update-codegen.sh provisioning If you've opened the generated files in this dir at some point in VSCode, you may also have to re-open them to clear errors.",
"type": "object",
@ -1472,7 +2121,7 @@
"type": "object",
"required": [
"title",
"readOnly",
"workflows",
"sync",
"type"
],
@ -1497,11 +2146,6 @@
}
]
},
"readOnly": {
"description": "ReadOnly repository does not allow any write commands",
"type": "boolean",
"default": false
},
"sync": {
"description": "Sync settings -- how values are pulled from the repository into grafana",
"default": {},
@ -1524,6 +2168,18 @@
"github",
"local"
]
},
"workflows": {
"description": "UI driven Workflow that allow changes to the contends of the repository. The order is relevant for defining the precedence of the workflows. When empty, the repository does not support any edits (eg, readonly)",
"type": "array",
"items": {
"type": "string",
"default": "",
"enum": [
"branch",
"write"
]
}
}
}
},
@ -1610,6 +2266,19 @@
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.SyncJobOptions": {
"type": "object",
"required": [
"incremental"
],
"properties": {
"incremental": {
"description": "Incremental synchronization for versioned repositories",
"type": "boolean",
"default": false
}
}
},
"com.github.grafana.grafana.pkg.apis.provisioning.v0alpha1.SyncOptions": {
"type": "object",
"required": [
@ -1650,10 +2319,6 @@
"type": "integer",
"format": "int64"
},
"hash": {
"description": "The repository hash when the last sync ran",
"type": "string"
},
"incremental": {
"description": "Incremental synchronization for versioned repositories",
"type": "boolean"
@ -1662,6 +2327,10 @@
"description": "The ID for the job that ran this sync",
"type": "string"
},
"lastRef": {
"description": "The repository ref when the last successful sync ran",
"type": "string"
},
"message": {
"description": "Summary messages (will be shown to users)",
"type": "array",

@ -11,7 +11,6 @@
"github": {
"url": "https://github.com/grafana/git-ui-sync-demo",
"branch": "dummy-branch",
"branchWorkflow": true,
"generateDashboardPreviews": true,
"token": "github_pat_dummy"
},
@ -20,6 +19,6 @@
"target": "",
"intervalSeconds": 60
},
"readOnly": false
"workflows": ["push"]
}
}

@ -7,7 +7,7 @@
"spec": {
"title": "Load devenv dashboards",
"description": "Load /devenv/dev-dashboards (from root of repository)",
"readOnly": false,
"workflows": ["write"],
"sync": {
"enabled": true,
"target": "mirror",

Loading…
Cancel
Save