UnifiedSearch: Use ResourceIndex from dashboards apiserver (v0alpha1 only) (#96939)

pull/97078/head
Ryan McKinley 6 months ago committed by GitHub
parent 104f795156
commit f6ccf976e5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 1
      .github/CODEOWNERS
  2. 4
      pkg/api/api.go
  3. 8
      pkg/api/http_server.go
  4. 6
      pkg/apis/dashboard/v0alpha1/register.go
  5. 96
      pkg/apis/dashboard/v0alpha1/search.go
  6. 172
      pkg/apis/dashboard/v0alpha1/zz_generated.deepcopy.go
  7. 352
      pkg/apis/dashboard/v0alpha1/zz_generated.openapi.go
  8. 6
      pkg/apis/dashboard/v0alpha1/zz_generated.openapi_violation_exceptions.list
  9. 2
      pkg/plugins/backendplugin/pluginextensionv2/rendererv2.pb.go
  10. 2
      pkg/plugins/backendplugin/pluginextensionv2/sanitizer.pb.go
  11. 2
      pkg/plugins/backendplugin/secretsmanagerplugin/secretsmanager.pb.go
  12. 2
      pkg/registry/apis/apis.go
  13. 2
      pkg/registry/apis/dashboard/legacy/storage.go
  14. 1
      pkg/registry/apis/dashboard/legacy_storage.go
  15. 368
      pkg/registry/apis/dashboard/search.go
  16. 14
      pkg/registry/apis/dashboard/v0alpha1/register.go
  17. 150
      pkg/registry/apis/search/register.go
  18. 3
      pkg/registry/apis/wireset.go
  19. 3
      pkg/server/wire.go
  20. 6
      pkg/services/apiserver/builder/helper.go
  21. 2
      pkg/services/authz/proto/v1/extention.pb.go
  22. 7
      pkg/services/pluginsintegration/plugins_integration_test.go
  23. 73
      pkg/services/unifiedSearch/http.go
  24. 328
      pkg/services/unifiedSearch/service.go
  25. 49
      pkg/services/unifiedSearch/types.go
  26. 148
      pkg/storage/unified/resource/go.mod
  27. 578
      pkg/storage/unified/resource/index.go
  28. 253
      pkg/storage/unified/resource/index_mapping.go
  29. 126
      pkg/storage/unified/resource/index_metrics.go
  30. 254
      pkg/storage/unified/resource/index_server.go
  31. 281
      pkg/storage/unified/resource/index_test.go
  32. 17
      pkg/storage/unified/resource/noop.go
  33. 1250
      pkg/storage/unified/resource/resource.pb.go
  34. 42
      pkg/storage/unified/resource/resource.proto
  35. 14
      pkg/storage/unified/resource/resource_grpc.pb.go
  36. 117
      pkg/storage/unified/resource/search.go
  37. 43
      pkg/storage/unified/resource/server.go
  38. 12
      pkg/storage/unified/search/bleve.go
  39. 2
      pkg/storage/unified/search/bleve_test.go
  40. 33
      pkg/storage/unified/search/document.go
  41. 42
      pkg/storage/unified/sql/server.go
  42. 4
      pkg/storage/unified/sql/service.go
  43. 126
      pkg/storage/unified/sql/test/indexer_integration_test.go
  44. 1
      pkg/storage/unified/sql/test/integration_test.go
  45. 38
      pkg/tsdb/grafanads/grafana.go
  46. 2
      public/app/features/search/service/searcher.ts

@ -297,7 +297,6 @@
/pkg/modules/ @grafana/grafana-app-platform-squad
/pkg/services/grpcserver/ @grafana/grafana-search-and-storage
/pkg/generated @grafana/grafana-app-platform-squad
/pkg/services/unifiedSearch/ @grafana/grafana-search-and-storage
# Alerting
/pkg/services/ngalert/ @grafana/alerting-backend

@ -316,10 +316,6 @@ func (hs *HTTPServer) registerRoutes() {
apiRoute.Group("/search-v2", hs.SearchV2HTTPService.RegisterHTTPRoutes)
}
if hs.Features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorageSearch) {
apiRoute.Group("/unified-search", hs.UnifiedSearchHTTPService.RegisterHTTPRoutes)
}
// current org
apiRoute.Group("/org", func(orgRoute routing.RouteRegister) {
userIDScope := ac.Scope("users", "id", ac.Parameter(":userId"))

@ -25,6 +25,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/youmark/pkcs8"
"github.com/grafana/grafana/pkg/api/avatar"
"github.com/grafana/grafana/pkg/api/routing"
httpstatic "github.com/grafana/grafana/pkg/api/static"
@ -104,14 +106,12 @@ import (
"github.com/grafana/grafana/pkg/services/tag"
"github.com/grafana/grafana/pkg/services/team"
tempUser "github.com/grafana/grafana/pkg/services/temp_user"
"github.com/grafana/grafana/pkg/services/unifiedSearch"
"github.com/grafana/grafana/pkg/services/updatechecker"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/services/validations"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
"github.com/grafana/grafana/pkg/web"
"github.com/youmark/pkcs8"
)
type HTTPServer struct {
@ -157,7 +157,6 @@ type HTTPServer struct {
LivePushGateway *pushhttp.Gateway
StorageService store.StorageService
SearchV2HTTPService searchV2.SearchHTTPService
UnifiedSearchHTTPService unifiedSearch.SearchHTTPService
ContextHandler *contexthandler.ContextHandler
LoggerMiddleware loggermw.Logger
SQLStore db.DB
@ -268,7 +267,7 @@ func ProvideHTTPServer(opts ServerOptions, cfg *setting.Cfg, routeRegister routi
publicDashboardsApi *publicdashboardsApi.Api, userService user.Service, tempUserService tempUser.Service,
loginAttemptService loginAttempt.Service, orgService org.Service, teamService team.Service,
accesscontrolService accesscontrol.Service, navTreeService navtree.Service,
annotationRepo annotations.Repository, tagService tag.Service, searchv2HTTPService searchV2.SearchHTTPService, unifiedSearchHTTPService unifiedSearch.SearchHTTPService, oauthTokenService oauthtoken.OAuthTokenService,
annotationRepo annotations.Repository, tagService tag.Service, searchv2HTTPService searchV2.SearchHTTPService, oauthTokenService oauthtoken.OAuthTokenService,
statsService stats.Service, authnService authn.Service, pluginsCDNService *pluginscdn.Service, promGatherer prometheus.Gatherer,
starApi *starApi.API, promRegister prometheus.Registerer, clientConfigProvider grafanaapiserver.DirectRestConfigProvider, anonService anonymous.Service,
userVerifier user.Verifier,
@ -310,7 +309,6 @@ func ProvideHTTPServer(opts ServerOptions, cfg *setting.Cfg, routeRegister routi
AccessControl: accessControl,
DataProxy: dataSourceProxy,
SearchV2HTTPService: searchv2HTTPService,
UnifiedSearchHTTPService: unifiedSearchHTTPService,
SearchService: searchService,
Live: live,
LivePushGateway: livePushGateway,

@ -4,10 +4,11 @@ import (
"fmt"
"time"
"github.com/grafana/grafana/pkg/apimachinery/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
const (
@ -93,6 +94,9 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&LibraryPanelList{},
&metav1.PartialObjectMetadata{},
&metav1.PartialObjectMetadataList{},
&metav1.Table{},
&SearchResults{},
&SortableFields{},
)
metav1.AddToGroupVersion(scheme, schemeGroupVersion)
return nil

@ -0,0 +1,96 @@
// Package v0alpha1 search types: the response shapes returned by the
// dashboards apiserver search endpoints.
package v0alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
)

// SearchResults is the payload returned from a dashboard search query.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type SearchResults struct {
	metav1.TypeMeta `json:",inline"`

	// Where the query started from
	Offset int64 `json:"offset,omitempty"`

	// The number of matching results
	TotalHits int64 `json:"totalHits"`

	// The hits (dashboards and folders) matching the query
	Hits []DashboardHit `json:"hits"`

	// Cost of running the query
	QueryCost float64 `json:"queryCost,omitempty"`

	// Max score
	MaxScore float64 `json:"maxScore,omitempty"`

	// How are the results sorted
	SortBy *SortBy `json:"sortBy,omitempty"`

	// Facet results
	Facets map[string]FacetResult `json:"facets,omitempty"`
}

// SortBy describes the field the results are ordered by.
type SortBy struct {
	Field string `json:"field"`

	// NOTE(review): JSON name "desc" intentionally differs from the Go field
	// name; tracked as a names_match entry in the openapi violation
	// exceptions list.
	Descending bool `json:"desc,omitempty"`
}

// SortableFields lists the fields a search can be sorted by.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type SortableFields struct {
	metav1.TypeMeta `json:",inline"`

	// Sortable fields (depends on backend support)
	Fields []SortableField `json:"fields"`
}

// SortableField describes one field that may be used for sorting.
type SortableField struct {
	// NOTE(review): the json tag "string" looks like a typo for "field"
	// (it is recorded as a names_match violation exception). Confirm before
	// changing — fixing the tag changes the wire format and requires
	// regenerating the openapi/deepcopy code.
	Field   string `json:"string,omitempty"`
	Display string `json:"display,omitempty"`
	Type    string `json:"type,omitempty"` // string or number
}

// HitKind identifies what kind of object a search hit refers to.
// Dashboard or folder hit
// +enum
type HitKind string

// HitKind values
const (
	HitTypeDash   HitKind = "Dashboard"
	HitTypeFolder HitKind = "Folder"
)

// DashboardHit is a single row in the search results.
type DashboardHit struct {
	// Dashboard or folder
	Kind HitKind `json:"kind"`
	// The k8s "name" (eg, grafana UID)
	Name string `json:"name"`
	// The display name
	Title string `json:"title"`
	// Filter tags
	Tags []string `json:"tags,omitempty"`
	// The UID/name for the folder
	Folder string `json:"folder,omitempty"`
	// Stick untyped extra fields in this object (including the sort value)
	Field *common.Unstructured `json:"field,omitempty"`
	// Explain the score (if possible)
	Explain *common.Unstructured `json:"explain,omitempty"`
	// When using "real" search, this is the score
	Score float64 `json:"score,omitempty"`
}

// FacetResult summarizes the distinct values of one field across the results.
type FacetResult struct {
	Field string `json:"field,omitempty"`
	// The distinct terms
	Total int64 `json:"total,omitempty"`
	// The number of documents that do *not* have this field
	Missing int64 `json:"missing,omitempty"`
	// Term facets
	Terms []TermFacet `json:"terms,omitempty"`
}

// TermFacet is the document count for one distinct term within a facet.
type TermFacet struct {
	Term  string `json:"term,omitempty"`
	Count int64  `json:"count,omitempty"`
}

@ -94,6 +94,35 @@ func (in *DashboardAccess) DeepCopy() *DashboardAccess {
return out
}
// NOTE(review): zz_generated.deepcopy.go — regenerate with deepcopy-gen
// rather than hand-editing.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardHit) DeepCopyInto(out *DashboardHit) {
	*out = *in
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Field != nil {
		in, out := &in.Field, &out.Field
		*out = (*in).DeepCopy()
	}
	if in.Explain != nil {
		in, out := &in.Explain, &out.Explain
		*out = (*in).DeepCopy()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardHit.
func (in *DashboardHit) DeepCopy() *DashboardHit {
	if in == nil {
		return nil
	}
	out := new(DashboardHit)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardList) DeepCopyInto(out *DashboardList) {
*out = *in
@ -200,6 +229,27 @@ func (in *DashboardWithAccessInfo) DeepCopyObject() runtime.Object {
return nil
}
// NOTE(review): zz_generated.deepcopy.go — regenerate with deepcopy-gen
// rather than hand-editing.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FacetResult) DeepCopyInto(out *FacetResult) {
	*out = *in
	if in.Terms != nil {
		in, out := &in.Terms, &out.Terms
		*out = make([]TermFacet, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FacetResult.
func (in *FacetResult) DeepCopy() *FacetResult {
	if in == nil {
		return nil
	}
	out := new(FacetResult)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LibraryPanel) DeepCopyInto(out *LibraryPanel) {
*out = *in
@ -317,6 +367,128 @@ func (in *LibraryPanelStatus) DeepCopy() *LibraryPanelStatus {
return out
}
// NOTE(review): zz_generated.deepcopy.go — regenerate with deepcopy-gen
// rather than hand-editing.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SearchResults) DeepCopyInto(out *SearchResults) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Hits != nil {
		in, out := &in.Hits, &out.Hits
		*out = make([]DashboardHit, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SortBy != nil {
		in, out := &in.SortBy, &out.SortBy
		*out = new(SortBy)
		**out = **in
	}
	if in.Facets != nil {
		in, out := &in.Facets, &out.Facets
		*out = make(map[string]FacetResult, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchResults.
func (in *SearchResults) DeepCopy() *SearchResults {
	if in == nil {
		return nil
	}
	out := new(SearchResults)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SearchResults) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SortBy) DeepCopyInto(out *SortBy) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortBy.
func (in *SortBy) DeepCopy() *SortBy {
	if in == nil {
		return nil
	}
	out := new(SortBy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SortableField) DeepCopyInto(out *SortableField) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortableField.
func (in *SortableField) DeepCopy() *SortableField {
	if in == nil {
		return nil
	}
	out := new(SortableField)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SortableFields) DeepCopyInto(out *SortableFields) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Fields != nil {
		in, out := &in.Fields, &out.Fields
		*out = make([]SortableField, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortableFields.
func (in *SortableFields) DeepCopy() *SortableFields {
	if in == nil {
		return nil
	}
	out := new(SortableFields)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SortableFields) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TermFacet) DeepCopyInto(out *TermFacet) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TermFacet.
func (in *TermFacet) DeepCopy() *TermFacet {
	if in == nil {
		return nil
	}
	out := new(TermFacet)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VersionsQueryOptions) DeepCopyInto(out *VersionsQueryOptions) {
*out = *in

@ -18,14 +18,21 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.AnnotationPermission": schema_pkg_apis_dashboard_v0alpha1_AnnotationPermission(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.Dashboard": schema_pkg_apis_dashboard_v0alpha1_Dashboard(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardAccess": schema_pkg_apis_dashboard_v0alpha1_DashboardAccess(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardHit": schema_pkg_apis_dashboard_v0alpha1_DashboardHit(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardList": schema_pkg_apis_dashboard_v0alpha1_DashboardList(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardVersionInfo": schema_pkg_apis_dashboard_v0alpha1_DashboardVersionInfo(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardVersionList": schema_pkg_apis_dashboard_v0alpha1_DashboardVersionList(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardWithAccessInfo": schema_pkg_apis_dashboard_v0alpha1_DashboardWithAccessInfo(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.FacetResult": schema_pkg_apis_dashboard_v0alpha1_FacetResult(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.LibraryPanel": schema_pkg_apis_dashboard_v0alpha1_LibraryPanel(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.LibraryPanelList": schema_pkg_apis_dashboard_v0alpha1_LibraryPanelList(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.LibraryPanelSpec": schema_pkg_apis_dashboard_v0alpha1_LibraryPanelSpec(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.LibraryPanelStatus": schema_pkg_apis_dashboard_v0alpha1_LibraryPanelStatus(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SearchResults": schema_pkg_apis_dashboard_v0alpha1_SearchResults(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortBy": schema_pkg_apis_dashboard_v0alpha1_SortBy(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortableField": schema_pkg_apis_dashboard_v0alpha1_SortableField(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortableFields": schema_pkg_apis_dashboard_v0alpha1_SortableFields(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.TermFacet": schema_pkg_apis_dashboard_v0alpha1_TermFacet(ref),
"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.VersionsQueryOptions": schema_pkg_apis_dashboard_v0alpha1_VersionsQueryOptions(ref),
}
}
@ -203,6 +210,87 @@ func schema_pkg_apis_dashboard_v0alpha1_DashboardAccess(ref common.ReferenceCall
}
}
// NOTE(review): zz_generated.openapi.go — the typo "The display nam" below is
// copied from the source comment in search.go; fix it there and regenerate
// instead of hand-editing this description.
func schema_pkg_apis_dashboard_v0alpha1_DashboardHit(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Dashboard or folder\n\nPossible enum values:\n - `\"Dashboard\"`\n - `\"Folder\"`",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
							Enum:        []interface{}{"Dashboard", "Folder"},
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "The k8s \"name\" (eg, grafana UID)",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"title": {
						SchemaProps: spec.SchemaProps{
							Description: "The display nam",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"tags": {
						SchemaProps: spec.SchemaProps{
							Description: "Filter tags",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"folder": {
						SchemaProps: spec.SchemaProps{
							Description: "The UID/name for the folder",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"field": {
						SchemaProps: spec.SchemaProps{
							Description: "Stick untyped extra fields in this object (including the sort value)",
							Ref:         ref("github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1.Unstructured"),
						},
					},
					"explain": {
						SchemaProps: spec.SchemaProps{
							Description: "Explain the score (if possible)",
							Ref:         ref("github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1.Unstructured"),
						},
					},
					"score": {
						SchemaProps: spec.SchemaProps{
							Description: "When using \"real\" search, this is the score",
							Type:        []string{"number"},
							Format:      "double",
						},
					},
				},
				Required: []string{"kind", "name", "title"},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1.Unstructured"},
	}
}
func schema_pkg_apis_dashboard_v0alpha1_DashboardList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@ -396,6 +484,54 @@ func schema_pkg_apis_dashboard_v0alpha1_DashboardWithAccessInfo(ref common.Refer
}
}
// NOTE(review): zz_generated.openapi.go — regenerate rather than hand-edit.
func schema_pkg_apis_dashboard_v0alpha1_FacetResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"field": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"total": {
						SchemaProps: spec.SchemaProps{
							Description: "The distinct terms",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"missing": {
						SchemaProps: spec.SchemaProps{
							Description: "The number of documents that do *not* have this field",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"terms": {
						SchemaProps: spec.SchemaProps{
							Description: "Term facets",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.TermFacet"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.TermFacet"},
	}
}
func schema_pkg_apis_dashboard_v0alpha1_LibraryPanel(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@ -607,6 +743,222 @@ func schema_pkg_apis_dashboard_v0alpha1_LibraryPanelStatus(ref common.ReferenceC
}
}
// NOTE(review): zz_generated.openapi.go — regenerate rather than hand-edit.
// The "string" property in SortableField below mirrors the json tag typo in
// search.go; fix at the source and regenerate.
func schema_pkg_apis_dashboard_v0alpha1_SearchResults(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"offset": {
						SchemaProps: spec.SchemaProps{
							Description: "Where the query started from",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"totalHits": {
						SchemaProps: spec.SchemaProps{
							Description: "The number of matching results",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"hits": {
						SchemaProps: spec.SchemaProps{
							Description: "The dashboard body (unstructured for now)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardHit"),
									},
								},
							},
						},
					},
					"queryCost": {
						SchemaProps: spec.SchemaProps{
							Description: "Cost of running the query",
							Type:        []string{"number"},
							Format:      "double",
						},
					},
					"maxScore": {
						SchemaProps: spec.SchemaProps{
							Description: "Max score",
							Type:        []string{"number"},
							Format:      "double",
						},
					},
					"sortBy": {
						SchemaProps: spec.SchemaProps{
							Description: "How are the results sorted",
							Ref:         ref("github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortBy"),
						},
					},
					"facets": {
						SchemaProps: spec.SchemaProps{
							Description: "Facet results",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.FacetResult"),
									},
								},
							},
						},
					},
				},
				Required: []string{"totalHits", "hits"},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.DashboardHit", "github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.FacetResult", "github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortBy"},
	}
}

func schema_pkg_apis_dashboard_v0alpha1_SortBy(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"field": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"desc": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"boolean"},
							Format: "",
						},
					},
				},
				Required: []string{"field"},
			},
		},
	}
}

func schema_pkg_apis_dashboard_v0alpha1_SortableField(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"string": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"display": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"type": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
				},
			},
		},
	}
}

func schema_pkg_apis_dashboard_v0alpha1_SortableFields(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"fields": {
						SchemaProps: spec.SchemaProps{
							Description: "Sortable fields (depends on backend support)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortableField"),
									},
								},
							},
						},
					},
				},
				Required: []string{"fields"},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortableField"},
	}
}

func schema_pkg_apis_dashboard_v0alpha1_TermFacet(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"term": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"count": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"integer"},
							Format: "int64",
						},
					},
				},
			},
		},
	}
}
func schema_pkg_apis_dashboard_v0alpha1_VersionsQueryOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{

@ -1 +1,7 @@
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1,DashboardHit,Tags
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1,FacetResult,Terms
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1,LibraryPanelStatus,Warnings
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1,SearchResults,Hits
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1,SortableFields,Fields
API rule violation: names_match,github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1,SortBy,Descending
API rule violation: names_match,github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1,SortableField,Field

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.1
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: rendererv2.proto

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.1
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: sanitizer.proto

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.1
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: secretsmanager.proto

@ -14,7 +14,6 @@ import (
"github.com/grafana/grafana/pkg/registry/apis/peakq"
"github.com/grafana/grafana/pkg/registry/apis/query"
"github.com/grafana/grafana/pkg/registry/apis/scope"
"github.com/grafana/grafana/pkg/registry/apis/search"
"github.com/grafana/grafana/pkg/registry/apis/userstorage"
)
@ -36,7 +35,6 @@ func ProvideRegistryServiceSink(
_ *scope.ScopeAPIBuilder,
_ *query.QueryAPIBuilder,
_ *notifications.NotificationsAPIBuilder,
_ *search.SearchAPIBuilder,
_ *userstorage.UserStorageAPIBuilder,
) *Service {
return &Service{}

@ -246,7 +246,7 @@ func (a *dashboardSqlAccess) Read(ctx context.Context, req *resource.ReadRequest
return a.ReadResource(ctx, req), nil
}
func (a *dashboardSqlAccess) Search(ctx context.Context, req *resource.SearchRequest) (*resource.SearchResponse, error) {
func (a *dashboardSqlAccess) Search(ctx context.Context, req *resource.ResourceSearchRequest) (*resource.ResourceSearchResponse, error) {
return nil, fmt.Errorf("not yet (filter)")
}

@ -27,7 +27,6 @@ type DashboardStorage struct {
func (s *DashboardStorage) NewStore(scheme *runtime.Scheme, defaultOptsGetter generic.RESTOptionsGetter, reg prometheus.Registerer) (grafanarest.LegacyStorage, error) {
server, err := resource.NewResourceServer(resource.ResourceServerOptions{
Backend: s.Access,
Index: s.Access,
Reg: reg,
// WriteAccess: resource.WriteAccessHooks{
// Folder: func(ctx context.Context, user identity.Requester, uid string) bool {

@ -1,122 +1,332 @@
package dashboard
import (
"context"
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/rest"
"go.opentelemetry.io/otel/trace"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/validation/spec"
"github.com/grafana/grafana/pkg/apimachinery/identity"
dashboardv0alpha1 "github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/apiserver/builder"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/util/errhttp"
)
// The DTO returns everything the UI needs in a single request
type SearchConnector struct {
newFunc func() runtime.Object
client resource.ResourceIndexClient
log log.Logger
type SearchHandler struct {
log log.Logger
client resource.ResourceIndexClient
tracer trace.Tracer
}
func NewSearchConnector(
client resource.ResourceIndexClient,
newFunc func() runtime.Object,
) (rest.Storage, error) {
v := &SearchConnector{
client: client,
newFunc: newFunc,
log: log.New("grafana-apiserver.dashboards.search"),
func NewSearchHandler(client resource.ResourceIndexClient, tracer trace.Tracer) *SearchHandler {
return &SearchHandler{
client: client,
log: log.New("grafana-apiserver.dashboards.search"),
tracer: tracer,
}
return v, nil
}
var (
_ rest.Connecter = (*SearchConnector)(nil)
_ rest.StorageMetadata = (*SearchConnector)(nil)
_ rest.Scoper = (*SearchConnector)(nil)
_ rest.SingularNameProvider = (*SearchConnector)(nil)
)
func (s *SearchHandler) GetAPIRoutes(defs map[string]common.OpenAPIDefinition) *builder.APIRoutes {
searchResults := defs["github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SearchResults"].Schema
sortableFields := defs["github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1.SortableFields"].Schema
func (s *SearchConnector) New() runtime.Object {
return s.newFunc()
return &builder.APIRoutes{
Namespace: []builder.APIRouteHandler{
{
Path: "search",
Spec: &spec3.PathProps{
Get: &spec3.Operation{
OperationProps: spec3.OperationProps{
Tags: []string{"Search"},
Description: "Dashboard search",
Parameters: []*spec3.Parameter{
{
ParameterProps: spec3.ParameterProps{
Name: "namespace",
In: "path",
Required: true,
Example: "default",
Description: "workspace",
Schema: spec.StringProperty(),
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "query",
In: "query",
Description: "user query string",
Required: false,
Schema: spec.StringProperty(),
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "folder",
In: "query",
Description: "search/list within a folder (not recursive)",
Required: false,
Schema: spec.StringProperty(),
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "sort",
In: "query",
Description: "sortable field",
Example: "", // not sorted
Examples: map[string]*spec3.Example{
"": {
ExampleProps: spec3.ExampleProps{
Summary: "default sorting",
Value: "",
},
},
"title": {
ExampleProps: spec3.ExampleProps{
Summary: "title ascending",
Value: "title",
},
},
"-title": {
ExampleProps: spec3.ExampleProps{
Summary: "title descending",
Value: "-title",
},
},
},
Required: false,
Schema: spec.StringProperty(),
},
},
},
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
StatusCodeResponses: map[int]*spec3.Response{
200: {
ResponseProps: spec3.ResponseProps{
Content: map[string]*spec3.MediaType{
"application/json": {
MediaTypeProps: spec3.MediaTypeProps{
Schema: &searchResults,
},
},
},
},
},
},
},
},
},
},
},
Handler: s.DoSearch,
},
{
Path: "search/sortable",
Spec: &spec3.PathProps{
Get: &spec3.Operation{
OperationProps: spec3.OperationProps{
Tags: []string{"Search"},
Description: "Get sortable fields",
Parameters: []*spec3.Parameter{
{
ParameterProps: spec3.ParameterProps{
Name: "namespace",
In: "path",
Required: true,
Example: "default",
Description: "workspace",
Schema: spec.StringProperty(),
},
},
},
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
StatusCodeResponses: map[int]*spec3.Response{
200: {
ResponseProps: spec3.ResponseProps{
Content: map[string]*spec3.MediaType{
"application/json": {
MediaTypeProps: spec3.MediaTypeProps{
Schema: &sortableFields,
},
},
},
},
},
},
},
},
},
},
},
Handler: s.DoSortable,
},
},
}
}
func (s *SearchConnector) Destroy() {
// DoSortable returns the set of fields a dashboard search can be sorted by,
// in both ascending and descending form.
func (s *SearchHandler) DoSortable(w http.ResponseWriter, r *http.Request) {
	fields := []dashboardv0alpha1.SortableField{
		{Field: "title", Display: "Title (A-Z)", Type: "string"},
		{Field: "-title", Display: "Title (Z-A)", Type: "string"},
	}
	s.write(w, &dashboardv0alpha1.SortableFields{
		TypeMeta: v1.TypeMeta{
			APIVersion: dashboardv0alpha1.APIVERSION,
			Kind:       "SortableFields",
		},
		Fields: fields,
	})
}
func (s *SearchConnector) NamespaceScoped() bool {
return true // namespace == org
}
func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
ctx, span := s.tracer.Start(r.Context(), "dashboard.search")
defer span.End()
func (s *SearchConnector) GetSingularName() string {
return "Search"
}
user, err := identity.GetRequester(ctx)
if err != nil {
errhttp.Write(ctx, err, w)
return
}
func (s *SearchConnector) ConnectMethods() []string {
return []string{"GET"}
}
queryParams, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
errhttp.Write(ctx, err, w)
return
}
func (s *SearchConnector) NewConnectOptions() (runtime.Object, bool, string) {
return nil, false, ""
}
// get limit and offset from query params
limit := 50
offset := 0
if queryParams.Has("limit") {
limit, _ = strconv.Atoi(queryParams.Get("limit"))
}
if queryParams.Has("offset") {
offset, _ = strconv.Atoi(queryParams.Get("offset"))
}
func (s *SearchConnector) ProducesMIMETypes(verb string) []string {
return nil
}
searchRequest := &resource.ResourceSearchRequest{
Options: &resource.ListOptions{
Key: &resource.ResourceKey{
Namespace: user.GetNamespace(),
Group: dashboardv0alpha1.GROUP,
Resource: "dashboards",
},
},
Query: queryParams.Get("query"),
Limit: int64(limit),
Offset: int64(offset),
Fields: []string{
"title",
"folder",
"tags",
},
}
func (s *SearchConnector) ProducesObject(verb string) interface{} {
return s.newFunc()
}
// Add the folder constraint. Note this does not do recursive search
folder := queryParams.Get("folder")
if folder != "" {
searchRequest.Options.Fields = []*resource.Requirement{{
Key: "folder",
Operator: "=",
Values: []string{folder},
}}
}
func (s *SearchConnector) Connect(ctx context.Context, name string, opts runtime.Object, responder rest.Responder) (http.Handler, error) {
user, err := identity.GetRequester(ctx)
if err != nil {
return nil, err
// Add sorting
if queryParams.Has("sort") {
for _, sort := range queryParams["sort"] {
s := &resource.ResourceSearchRequest_Sort{Field: sort}
if strings.HasPrefix(sort, "-") {
s.Desc = true
s.Field = s.Field[1:]
}
searchRequest.SortBy = append(searchRequest.SortBy, s)
}
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
queryParams, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
responder.Error(err)
return
// Also query folders
if searchRequest.Query != "" {
searchRequest.Federated = []*resource.ResourceKey{{
Namespace: searchRequest.Options.Key.Namespace,
Group: "folder.grafana.app",
Resource: "folders",
}}
}
// The facet term fields
facets, ok := queryParams["facet"]
if ok {
searchRequest.Facet = make(map[string]*resource.ResourceSearchRequest_Facet)
for _, v := range facets {
searchRequest.Facet[v] = &resource.ResourceSearchRequest_Facet{
Field: v,
Limit: 50,
}
}
}
// Run the query
result, err := s.client.Search(ctx, searchRequest)
if err != nil {
errhttp.Write(ctx, err, w)
return
}
// get limit and offset from query params
limit := 0
offset := 0
if queryParams.Has("limit") {
limit, _ = strconv.Atoi(queryParams.Get("limit"))
sr := &dashboardv0alpha1.SearchResults{
Offset: searchRequest.Offset,
TotalHits: result.TotalHits,
QueryCost: result.QueryCost,
MaxScore: result.MaxScore,
Hits: make([]dashboardv0alpha1.DashboardHit, len(result.Results.Rows)),
}
for i, row := range result.Results.Rows {
hit := &dashboardv0alpha1.DashboardHit{
Kind: dashboardv0alpha1.HitTypeDash,
Name: row.Key.Name,
Title: string(row.Cells[0]),
Folder: string(row.Cells[1]),
}
if queryParams.Has("offset") {
offset, _ = strconv.Atoi(queryParams.Get("offset"))
if row.Cells[2] != nil {
_ = json.Unmarshal(row.Cells[2], &hit.Tags)
}
sr.Hits[i] = *hit
}
searchRequest := &resource.SearchRequest{
Tenant: user.GetNamespace(), //<< not necessary it is in the namespace (and user context)
Kind: strings.Split(queryParams.Get("kind"), ","),
QueryType: queryParams.Get("queryType"),
Query: queryParams.Get("query"),
Limit: int64(limit),
Offset: int64(offset),
// Add facet results
if result.Facet != nil {
sr.Facets = make(map[string]dashboardv0alpha1.FacetResult)
for k, v := range result.Facet {
sr.Facets[k] = dashboardv0alpha1.FacetResult{
Field: v.Field,
Total: v.Total,
Missing: v.Missing,
Terms: make([]dashboardv0alpha1.TermFacet, len(v.Terms)),
}
for j, t := range v.Terms {
sr.Facets[k].Terms[j] = dashboardv0alpha1.TermFacet{
Term: t.Term,
Count: t.Count,
}
}
}
}
// TODO... actually query
result, err := s.client.Search(r.Context(), searchRequest)
if err != nil {
responder.Error(err)
return
}
s.write(w, sr)
}
jj, err := json.Marshal(result)
if err != nil {
responder.Error(err)
return
}
_, _ = w.Write(jj)
}), nil
// write serializes obj as JSON onto the response writer.
func (s *SearchHandler) write(w http.ResponseWriter, obj any) {
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	// An encode failure here (e.g. client disconnect) is not actionable.
	_ = enc.Encode(obj)
}

@ -11,6 +11,7 @@ import (
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/validation/spec"
dashboardinternal "github.com/grafana/grafana/pkg/apis/dashboard"
dashboardv0alpha1 "github.com/grafana/grafana/pkg/apis/dashboard/v0alpha1"
@ -44,6 +45,7 @@ type DashboardsAPIBuilder struct {
accessControl accesscontrol.AccessControl
legacy *dashboard.DashboardStorage
search *dashboard.SearchHandler
unified resource.ResourceClient
log log.Logger
@ -74,6 +76,7 @@ func RegisterAPIService(cfg *setting.Cfg, features featuremgmt.FeatureToggles,
dashboardService: dashboardService,
accessControl: accessControl,
unified: unified,
search: dashboard.NewSearchHandler(unified, tracing),
legacy: &dashboard.DashboardStorage{
Resource: dashboardv0alpha1.DashboardResourceInfo,
@ -177,14 +180,6 @@ func (b *DashboardsAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver
return err
}
// Requires hack in to resolve with no name:
// pkg/services/apiserver/builder/helper.go#L58
storage["search"], err = dashboard.NewSearchConnector(b.unified,
func() runtime.Object { return &dashboardv0alpha1.DashboardWithAccessInfo{} }) // TODO... replace with a real model
if err != nil {
return err
}
// Expose read only library panels
storage[dashboardv0alpha1.LibraryPanelResourceInfo.StoragePath()] = &dashboard.LibraryPanelStore{
Access: b.legacy.Access,
@ -224,5 +219,6 @@ func (b *DashboardsAPIBuilder) PostProcessOpenAPI(oas *spec3.OpenAPI) (*spec3.Op
}
func (b *DashboardsAPIBuilder) GetAPIRoutes() *builder.APIRoutes {
return nil // no custom API routes
defs := b.GetOpenAPIDefinitions()(func(path string) spec.Ref { return spec.Ref{} })
return b.search.GetAPIRoutes(defs)
}

@ -1,150 +0,0 @@
package search
import (
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authorization/authorizer"
genericapiserver "k8s.io/apiserver/pkg/server"
common "k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/spec3"
"github.com/grafana/grafana/pkg/api/response"
request2 "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/services/apiserver/builder"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/storage/unified/resource"
)
// Interface guard: SearchAPIBuilder must implement builder.APIGroupBuilder.
var _ builder.APIGroupBuilder = (*SearchAPIBuilder)(nil)

// SearchAPIBuilder registers the experimental search.grafana.app API group,
// backed by the unified-storage resource client.
type SearchAPIBuilder struct {
	// unified is the client used to execute search requests.
	unified resource.ResourceClient
	// namespacer maps an org ID to its tenant namespace.
	namespacer request2.NamespaceMapper
}
// NewSearchAPIBuilder constructs a SearchAPIBuilder whose namespace mapper is
// derived from the Grafana configuration.
func NewSearchAPIBuilder(
	unified resource.ResourceClient,
	cfg *setting.Cfg,
) (*SearchAPIBuilder, error) {
	b := &SearchAPIBuilder{
		unified:    unified,
		namespacer: request2.GetNamespaceMapper(cfg),
	}
	return b, nil
}
// RegisterAPIService wires the search API group into the API registrar when
// the unified-storage-search (or experimental-APIs) feature flag is enabled.
// Returns (nil, nil) when the feature is disabled.
func RegisterAPIService(
	features featuremgmt.FeatureToggles,
	apiregistration builder.APIRegistrar,
	unified resource.ResourceClient,
	cfg *setting.Cfg,
) (*SearchAPIBuilder, error) {
	if !(features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorageSearch) || features.IsEnabledGlobally(featuremgmt.FlagGrafanaAPIServerWithExperimentalAPIs)) {
		return nil, nil
	}
	builder, err := NewSearchAPIBuilder(unified, cfg)
	if err != nil {
		// Bug fix: previously the builder was registered before the error was
		// checked, which could register a nil/partial builder.
		return nil, err
	}
	apiregistration.RegisterAPI(builder)
	return builder, nil
}
// GetGroupVersion returns the group/version this builder serves
// (search.grafana.app/v0alpha1).
func (b *SearchAPIBuilder) GetGroupVersion() schema.GroupVersion {
	return schema.GroupVersion{Group: "search.grafana.app", Version: "v0alpha1"}
}
// InstallSchema is a no-op: this group registers no typed kinds, only raw
// HTTP routes (see GetAPIRoutes).
func (b *SearchAPIBuilder) InstallSchema(scheme *runtime.Scheme) error {
	return nil
}
// GetOpenAPIDefinitions returns an empty definition map: the group has no
// generated OpenAPI models of its own.
func (b *SearchAPIBuilder) GetOpenAPIDefinitions() common.GetOpenAPIDefinitions {
	return func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
		return map[string]common.OpenAPIDefinition{}
	}
}
// GetAPIRoutes exposes a namespace-scoped GET /search endpoint that proxies
// the query string to the unified-storage search client and streams the raw
// JSON items back.
//
// Bug fix: every error branch previously fell through — response.Error(...)
// was called but its result discarded and no `return` followed — so the
// handler kept executing with invalid state (e.g. after a failed Search call).
// Each failure now returns immediately.
func (b *SearchAPIBuilder) GetAPIRoutes() *builder.APIRoutes {
	return &builder.APIRoutes{
		Namespace: []builder.APIRouteHandler{
			{
				Path: "search",
				Spec: &spec3.PathProps{
					Get: &spec3.Operation{
						OperationProps: spec3.OperationProps{
							Tags:        []string{"Search"},
							Summary:     "Search",
							Description: "Search for resources",
						},
					},
				},
				Handler: func(w http.ResponseWriter, r *http.Request) {
					// Resolve the tenant from the request's org.
					orgId, err := request2.OrgIDForList(r.Context())
					if err != nil {
						// NOTE(review): response.Error builds a response object but
						// nothing writes it to w in a raw handler; consider http.Error.
						response.Error(500, "failed to get orgId", err)
						return
					}
					tenant := b.namespacer(orgId)

					queryParams, err := url.ParseQuery(r.URL.RawQuery)
					if err != nil {
						response.Error(500, "failed to parse query params", err)
						return
					}

					// Optional paging parameters; invalid values fall back to 0.
					limit := 0
					offset := 0
					if queryParams.Has("limit") {
						limit, _ = strconv.Atoi(queryParams.Get("limit"))
					}
					if queryParams.Has("offset") {
						offset, _ = strconv.Atoi(queryParams.Get("offset"))
					}

					searchRequest := &resource.SearchRequest{
						Tenant:    tenant,
						Kind:      strings.Split(queryParams.Get("kind"), ","),
						QueryType: queryParams.Get("queryType"),
						Query:     queryParams.Get("query"),
						Limit:     int64(limit),
						Offset:    int64(offset),
					}

					res, err := b.unified.Search(r.Context(), searchRequest)
					if err != nil {
						response.Error(500, "search request failed", err)
						return
					}

					// Each item already holds marshalled JSON, so wrap the raw
					// bytes instead of re-encoding them.
					rawMessages := make([]json.RawMessage, len(res.GetItems()))
					for i, item := range res.GetItems() {
						rawMessages[i] = item.Value
					}
					w.Header().Set("Content-Type", "application/json")
					if err := json.NewEncoder(w).Encode(rawMessages); err != nil {
						response.Error(500, "failed to json encode raw response", err)
						return
					}
				},
			},
		},
	}
}
// GetAuthorizer returns nil, deferring to the default apiserver authorizer.
func (b *SearchAPIBuilder) GetAuthorizer() authorizer.Authorizer {
	return nil
}
// PostProcessOpenAPI returns the spec unchanged; no post-processing needed.
func (b *SearchAPIBuilder) PostProcessOpenAPI(oas *spec3.OpenAPI) (*spec3.OpenAPI, error) {
	return oas, nil
}
// UpdateAPIGroupInfo marks v0alpha1 as the only (and preferred) version of
// this group; no storage is installed since the group is route-only.
func (b *SearchAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, _ builder.APIGroupOptions) error {
	apiGroupInfo.PrioritizedVersions = []schema.GroupVersion{b.GetGroupVersion()}
	return nil
}

@ -16,7 +16,6 @@ import (
"github.com/grafana/grafana/pkg/registry/apis/peakq"
"github.com/grafana/grafana/pkg/registry/apis/query"
"github.com/grafana/grafana/pkg/registry/apis/scope"
"github.com/grafana/grafana/pkg/registry/apis/search"
"github.com/grafana/grafana/pkg/registry/apis/service"
"github.com/grafana/grafana/pkg/registry/apis/userstorage"
"github.com/grafana/grafana/pkg/services/pluginsintegration/plugincontext"
@ -45,7 +44,5 @@ var WireSet = wire.NewSet(
query.RegisterAPIService,
scope.RegisterAPIService,
notifications.RegisterAPIService,
//sso.RegisterAPIService,
search.RegisterAPIService,
userstorage.RegisterAPIService,
)

@ -150,7 +150,6 @@ import (
"github.com/grafana/grafana/pkg/services/team/teamimpl"
tempuser "github.com/grafana/grafana/pkg/services/temp_user"
"github.com/grafana/grafana/pkg/services/temp_user/tempuserimpl"
"github.com/grafana/grafana/pkg/services/unifiedSearch"
"github.com/grafana/grafana/pkg/services/updatechecker"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/services/user/userimpl"
@ -234,8 +233,6 @@ var wireBasicSet = wire.NewSet(
search.ProvideService,
searchV2.ProvideService,
searchV2.ProvideSearchHTTPService,
unifiedSearch.ProvideService,
unifiedSearch.ProvideSearchHTTPService,
store.ProvideService,
store.ProvideSystemUsersService,
live.ProvideService,

@ -54,12 +54,6 @@ var PathRewriters = []filters.PathRewriter{
return matches[1] + "/name" // connector requires a name
},
},
{
Pattern: regexp.MustCompile(`(/apis/dashboard.grafana.app/v0alpha1/namespaces/.*/search$)`),
ReplaceFunc: func(matches []string) string {
return matches[1] + "/name" // connector requires a name
},
},
{
Pattern: regexp.MustCompile(`(/apis/.*/v0alpha1/namespaces/.*/queryconvert$)`),
ReplaceFunc: func(matches []string) string {

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.1
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: extention.proto

@ -7,11 +7,12 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"gopkg.in/ini.v1"
"github.com/grafana/grafana-azure-sdk-go/v2/azsettings"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/httpclient"
"github.com/stretchr/testify/require"
"gopkg.in/ini.v1"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/tracing"
@ -92,7 +93,7 @@ func TestIntegrationPluginManager(t *testing.T) {
ms := mssql.ProvideService(cfg)
db := db.InitTestDB(t, sqlstore.InitTestDBOpt{Cfg: cfg})
sv2 := searchV2.ProvideService(cfg, db, nil, nil, tracer, features, nil, nil, nil)
graf := grafanads.ProvideService(sv2, nil, nil, features)
graf := grafanads.ProvideService(sv2, nil, features)
pyroscope := pyroscope.ProvideService(hcp)
parca := parca.ProvideService(hcp)
zipkin := zipkin.ProvideService(hcp)

@ -1,73 +0,0 @@
package unifiedSearch
import (
"encoding/json"
"errors"
"io"
"net/http"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/api/response"
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/middleware"
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
)
// SearchHTTPService exposes the unified search service over HTTP.
type SearchHTTPService interface {
	// RegisterHTTPRoutes attaches the search endpoints to the given router.
	RegisterHTTPRoutes(storageRoute routing.RouteRegister)
}

// searchHTTPService is the default SearchHTTPService backed by SearchService.
type searchHTTPService struct {
	search SearchService
}
// ProvideSearchHTTPService wires a SearchService into an HTTP facade (used by
// dependency injection).
func ProvideSearchHTTPService(search SearchService) SearchHTTPService {
	return &searchHTTPService{search: search}
}
// RegisterHTTPRoutes registers POST / (signed-in users only) as the query
// endpoint on the provided route group.
func (s *searchHTTPService) RegisterHTTPRoutes(storageRoute routing.RouteRegister) {
	storageRoute.Post("/", middleware.ReqSignedIn, routing.Wrap(s.doQuery))
}
// doQuery handles the search POST: while the index is not yet ready it
// returns an empty "Loading" frame so the UI can show a loading state;
// otherwise it decodes the body into a Query, executes the search, and
// returns the resulting data frames as JSON.
func (s *searchHTTPService) doQuery(c *contextmodel.ReqContext) response.Response {
	searchReadinessCheckResp := s.search.IsReady(c.Req.Context(), c.SignedInUser.GetOrgID())
	if !searchReadinessCheckResp.IsReady {
		// Index still warming up: signal "loading" with a named empty frame.
		return response.JSON(http.StatusOK, &backend.DataResponse{
			Frames: []*data.Frame{{
				Name: "Loading",
			}},
			Error: nil,
		})
	}
	body, err := io.ReadAll(c.Req.Body)
	if err != nil {
		return response.Error(http.StatusInternalServerError, "error reading bytes", err)
	}
	query := &Query{}
	err = json.Unmarshal(body, query)
	if err != nil {
		return response.Error(http.StatusBadRequest, "error parsing body", err)
	}
	// Run the query as the signed-in user against their current org.
	resp := s.search.doQuery(c.Req.Context(), c.SignedInUser, c.SignedInUser.GetOrgID(), *query)
	if resp.Error != nil {
		return response.Error(http.StatusInternalServerError, "error handling search request", resp.Error)
	}
	if len(resp.Frames) == 0 {
		msg := "invalid search response"
		return response.Error(http.StatusInternalServerError, msg, errors.New(msg))
	}
	bytes, err := resp.MarshalJSON()
	if err != nil {
		return response.Error(http.StatusInternalServerError, "error marshalling response", err)
	}
	// NOTE(review): bytes is already-marshalled JSON; verify response.JSON
	// does not re-encode the []byte (encoding/json base64-encodes byte slices).
	return response.JSON(http.StatusOK, bytes)
}

@ -1,328 +0,0 @@
package unifiedSearch
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/store"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
)
// StandardSearchService implements SearchService on top of the unified
// storage ResourceClient, resolving users/permissions via core services.
type StandardSearchService struct {
	registry.BackgroundService

	cfg         *setting.Cfg
	sql         db.DB
	ac          accesscontrol.Service
	orgService  org.Service
	userService user.Service
	logger      log.Logger
	// reIndexCh is a 1-slot signal channel used to request a re-index.
	reIndexCh      chan struct{}
	features       featuremgmt.FeatureToggles
	resourceClient resource.ResourceClient
}
// IsReady reports whether search can serve queries for the org. Currently it
// always reports ready — the unified index has no warm-up signal wired here.
func (s *StandardSearchService) IsReady(ctx context.Context, orgId int64) IsSearchReadyResponse {
	return IsSearchReadyResponse{IsReady: true}
}
// ProvideService constructs the unified SearchService for dependency
// injection. Some parameters (entityEventStore, tracer, folderStore) are
// currently unused but kept so the wire signature stays stable.
func ProvideService(cfg *setting.Cfg, sql db.DB, entityEventStore store.EntityEventsService,
	ac accesscontrol.Service, tracer tracing.Tracer, features featuremgmt.FeatureToggles, orgService org.Service,
	userService user.Service, folderStore folder.Store, resourceClient resource.ResourceClient) SearchService {
	return &StandardSearchService{
		cfg:            cfg,
		sql:            sql,
		ac:             ac,
		logger:         log.New("searchV3"),
		reIndexCh:      make(chan struct{}, 1),
		orgService:     orgService,
		userService:    userService,
		features:       features,
		resourceClient: resourceClient,
	}
}
// IsDisabled gates the background service on the panelTitleSearch flag.
func (s *StandardSearchService) IsDisabled() bool {
	return !s.features.IsEnabledGlobally(featuremgmt.FlagPanelTitleSearch)
}
// Run is the background-service entry point. It is currently a no-op: the
// commented code below shows the searchV2 indexing loop this may adopt.
func (s *StandardSearchService) Run(ctx context.Context) error {
	// TODO: implement this? ( copied from pkg/services/searchV2/service.go )
	// orgQuery := &org.SearchOrgsQuery{}
	// result, err := s.orgService.Search(ctx, orgQuery)
	// if err != nil {
	// 	return fmt.Errorf("can't get org list: %w", err)
	// }
	// orgIDs := make([]int64, 0, len(result))
	// for _, org := range result {
	// 	orgIDs = append(orgIDs, org.ID)
	// }
	// TODO: do we need to initialize the bleve index again ( should be initialized on startup )?
	// return s.dashboardIndex.run(ctx, orgIDs, s.reIndexCh)
	return nil
}
// TriggerReIndex requests a re-index via a non-blocking send on the 1-slot
// signal channel; if a request is already pending this is a no-op.
func (s *StandardSearchService) TriggerReIndex() {
	select {
	case s.reIndexCh <- struct{}{}:
	default:
		// channel is full => re-index will happen soon anyway.
	}
}
// getUser resolves the backend plugin user into a signed-in Grafana user —
// handling the anonymous-access case — and ensures the user's permissions
// for orgId are populated before returning. Auth failures are logged in
// detail but surfaced to callers only as a generic "auth error".
func (s *StandardSearchService) getUser(ctx context.Context, backendUser *backend.User, orgId int64) (*user.SignedInUser, error) {
	// TODO: get user & user's permissions from the request context
	var usr *user.SignedInUser
	if s.cfg.AnonymousEnabled && backendUser.Email == "" && backendUser.Login == "" {
		// Anonymous access: synthesize a user from the configured anonymous org.
		getOrg := org.GetOrgByNameQuery{Name: s.cfg.AnonymousOrgName}
		orga, err := s.orgService.GetByName(ctx, &getOrg)
		if err != nil {
			s.logger.Error("Anonymous access organization error.", "org_name", s.cfg.AnonymousOrgName, "error", err)
			return nil, err
		}
		usr = &user.SignedInUser{
			OrgID:       orga.ID,
			OrgName:     orga.Name,
			OrgRole:     org.RoleType(s.cfg.AnonymousOrgRole),
			IsAnonymous: true,
		}
	} else {
		getSignedInUserQuery := &user.GetSignedInUserQuery{
			Login: backendUser.Login,
			Email: backendUser.Email,
			OrgID: orgId,
		}
		var err error
		usr, err = s.userService.GetSignedInUser(ctx, getSignedInUserQuery)
		if err != nil {
			s.logger.Error("Error while retrieving user", "error", err, "email", backendUser.Email, "login", getSignedInUserQuery.Login)
			return nil, errors.New("auth error")
		}
		if usr == nil {
			s.logger.Error("No user found", "email", backendUser.Email)
			return nil, errors.New("auth error")
		}
	}
	if usr.Permissions == nil {
		usr.Permissions = make(map[int64]map[string][]string)
	}
	if _, ok := usr.Permissions[orgId]; ok {
		// permissions as part of the `s.sql.GetSignedInUser` query - return early
		return usr, nil
	}
	// TODO: ensure this is cached
	permissions, err := s.ac.GetUserPermissions(ctx, usr,
		accesscontrol.Options{ReloadCache: false})
	if err != nil {
		s.logger.Error("Failed to retrieve user permissions", "error", err, "email", backendUser.Email)
		return nil, errors.New("auth error")
	}
	usr.Permissions[orgId] = accesscontrol.GroupScopesByActionContext(ctx, permissions)
	return usr, nil
}
// DoQuery resolves the backend user into a signed-in Grafana user, then runs
// the query on their behalf. Auth failures come back as a DataResponse error.
func (s *StandardSearchService) DoQuery(ctx context.Context, user *backend.User, orgID int64, q Query) *backend.DataResponse {
	signedInUser, err := s.getUser(ctx, user, orgID)
	if err == nil {
		return s.doQuery(ctx, signedInUser, orgID, q)
	}
	return &backend.DataResponse{Error: err}
}
// doQuery executes the search for an already-resolved user. The signed-in
// user is currently unused here; filtering relies on the tenant namespace.
func (s *StandardSearchService) doQuery(ctx context.Context, signedInUser *user.SignedInUser, orgID int64, q Query) *backend.DataResponse {
	return s.doSearchQuery(ctx, q, s.cfg.AppSubURL, orgID)
}
// doSearchQuery runs the query against unified storage and converts the
// result into data frames: one results frame, plus a tags frame when the
// response carries facet groups.
func (s *StandardSearchService) doSearchQuery(ctx context.Context, qry Query, _ string, orgID int64) *backend.DataResponse {
	rsp := &backend.DataResponse{}

	// will use stack id for cloud and org id for on-prem
	tenantID := request.GetNamespaceMapper(s.cfg)(orgID)

	res, err := s.resourceClient.Search(ctx, newSearchRequest(tenantID, qry))
	if err != nil {
		return s.error(err, "Failed to search resources", rsp)
	}

	frame, err := loadSearchResponse(res, s)
	if err != nil {
		return s.error(err, "Failed to load search response", rsp)
	}
	rsp.Frames = append(rsp.Frames, frame)

	if len(res.Groups) > 0 {
		rsp.Frames = append(rsp.Frames, loadTagsResponse(res))
	}
	return rsp
}
// error logs the failure, attaches it to the response, and returns the
// response so callers can `return s.error(...)` in one step.
func (s *StandardSearchService) error(err error, message string, response *backend.DataResponse) *backend.DataResponse {
	response.Error = err
	s.logger.Error(message, "error", err)
	return response
}
// loadSearchResponse converts raw index hits into the search-results frame,
// one row per document.
func loadSearchResponse(res *resource.SearchResponse, s *StandardSearchService) (*data.Frame, error) {
	frame := newSearchFrame(res)
	for _, item := range res.Items {
		doc, err := getDoc(item.Value)
		if err != nil {
			s.logger.Error("Failed to parse doc", "error", err)
			return nil, err
		}
		frame.AppendRow(
			strings.ToLower(doc.Kind),
			doc.UID,
			doc.Spec.Title,
			dashboardPageItemLink(doc, s.cfg.AppSubURL),
			doc.Spec.Tags,
			doc.FolderID,
		)
	}
	return frame, nil
}
// loadTagsResponse builds the tags facet frame: one row per tag group.
func loadTagsResponse(res *resource.SearchResponse) *data.Frame {
	frame := newTagsFrame()
	for _, group := range res.Groups {
		frame.AppendRow(group.Name, group.Count)
	}
	return frame
}
// newSearchFrame creates the empty "Query results" frame with the column
// layout the search UI expects (kind, uid, name, url, tags, location) and the
// total hit count in its metadata.
func newSearchFrame(res *resource.SearchResponse) *data.Frame {
	urlField := newField("url", data.FieldTypeString)
	// The url column renders as a clickable link in the UI.
	urlField.Config = &data.FieldConfig{
		Links: []data.DataLink{
			{Title: "link", URL: "${__value.text}"},
		},
	}

	frame := data.NewFrame("Query results",
		newField("kind", data.FieldTypeString),
		newField("uid", data.FieldTypeString),
		newField("name", data.FieldTypeString),
		urlField,
		newField("tags", data.FieldTypeNullableJSON),
		newField("location", data.FieldTypeString),
	)
	frame.SetMeta(&data.FrameMeta{
		Type:   "search-results",
		Custom: &customMeta{Count: uint64(len(res.Items))},
	})
	return frame
}
// newTagsFrame creates the empty frame used for tag facet counts.
func newTagsFrame() *data.Frame {
	return data.NewFrame("tags",
		newField("tag", data.FieldTypeString),
		newField("count", data.FieldTypeInt64),
	)
}
// dashboardPageItemLink builds the UI link for a search hit: a dashboard
// page when the doc has no parent folder, otherwise a folder page.
// NOTE(review): both links use doc.Namespace as the trailing slug segment —
// confirm this is intended (a slug is usually derived from the title).
func dashboardPageItemLink(doc *DashboardListDoc, subURL string) string {
	if doc.FolderID == "" {
		return fmt.Sprintf("%s/d/%s/%s", subURL, doc.Name, doc.Namespace)
	}
	return fmt.Sprintf("%s/dashboards/f/%s/%s", subURL, doc.Name, doc.Namespace)
}
// customMeta is the frame-level metadata attached to search result frames.
type customMeta struct {
	Count    uint64  `json:"count"`
	MaxScore float64 `json:"max_score,omitempty"`
	SortBy   string  `json:"sortBy,omitempty"`
}
// DashboardListDoc mirrors the JSON shape of a dashboard document stored in
// the unified-storage search index.
type DashboardListDoc struct {
	UID       string    `json:"Uid"`
	Group     string    `json:"Group"`
	Namespace string    `json:"Namespace"`
	Kind      string    `json:"Kind"`
	Name      string    `json:"Name"`
	CreatedAt time.Time `json:"CreatedAt"`
	CreatedBy string    `json:"CreatedBy"`
	UpdatedAt time.Time `json:"UpdatedAt"`
	UpdatedBy string    `json:"UpdatedBy"`
	FolderID  string    `json:"FolderId"`
	Spec      struct {
		Title string           `json:"title"`
		Tags  *json.RawMessage `json:"tags"`
	} `json:"Spec"`
}

// getDoc decodes a raw index payload into a DashboardListDoc.
func getDoc(data []byte) (*DashboardListDoc, error) {
	doc := &DashboardListDoc{}
	if err := json.Unmarshal(data, doc); err != nil {
		return nil, err
	}
	return doc, nil
}
// newSearchRequest translates a UI Query into a unified-storage SearchRequest
// for the given tenant.
func newSearchRequest(tenant string, qry Query) *resource.SearchRequest {
	// Bug fix: the slice was previously created with
	// make([]*resource.GroupBy, len(qry.Facet)) and then appended to, which
	// left len(qry.Facet) leading nil entries in the request.
	groupBy := make([]*resource.GroupBy, 0, len(qry.Facet))
	for _, g := range qry.Facet {
		groupBy = append(groupBy, &resource.GroupBy{Name: g.Field, Limit: int64(g.Limit)})
	}

	return &resource.SearchRequest{
		Tenant:  tenant,
		Query:   qry.Query,
		Limit:   int64(qry.Limit),
		Offset:  int64(qry.From),
		Kind:    qry.Kind,
		SortBy:  []string{sortField(qry.Sort)},
		GroupBy: groupBy,
		Filters: qry.Tags,
	}
}
const (
	sortSuffix = "_sort"
	descending = "-"
)

// sortField translates a dashboard-list sort expression (e.g. "name_sort",
// "-name") into the corresponding search-document field, preserving the
// descending prefix. Fields without an explicit mapping are passed through
// unchanged; previously unmapped fields were silently mapped to "" (or "-"),
// dropping the requested sort.
func sortField(sort string) string {
	desc := strings.HasPrefix(sort, descending)
	sf := strings.TrimPrefix(strings.TrimSuffix(sort, sortSuffix), descending)
	if mapped, ok := dashboardListFieldMapping[sf]; ok {
		sf = mapped
	}
	if desc {
		return descending + sf
	}
	return sf
}

// mapping of dashboard list fields to search doc fields
var dashboardListFieldMapping = map[string]string{
	"name": "title",
}
// newField creates an empty, named data field of the given type.
func newField(name string, typ data.FieldType) *data.Field {
	field := data.NewFieldFromFieldType(typ, 0)
	field.Name = name
	return field
}

@ -1,49 +0,0 @@
package unifiedSearch
import (
"context"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/user"
)
// FacetField requests term-count aggregation on a single document field.
type FacetField struct {
	Field string `json:"field"`
	Limit int    `json:"limit,omitempty"` // explicit page size
}
// Query is the JSON request body for a unified search: free-text query plus
// filtering, faceting, sorting and paging options (mirrors the searchV2 model).
type Query struct {
	Query              string       `json:"query"`
	Location           string       `json:"location,omitempty"` // parent folder ID
	Sort               string       `json:"sort,omitempty"`     // field ASC/DESC
	Datasource         string       `json:"ds_uid,omitempty"`   // "datasource" collides with the JSON value at the same level :()
	DatasourceType     string       `json:"ds_type,omitempty"`
	Tags               []string     `json:"tags,omitempty"`
	Kind               []string     `json:"kind,omitempty"`
	PanelType          string       `json:"panel_type,omitempty"`
	UIDs               []string     `json:"uid,omitempty"`
	Explain            bool         `json:"explain,omitempty"`            // adds details on why document matched
	WithAllowedActions bool         `json:"withAllowedActions,omitempty"` // adds allowed actions per entity
	Facet              []FacetField `json:"facet,omitempty"`
	SkipLocation       bool         `json:"skipLocation,omitempty"`
	HasPreview         string       `json:"hasPreview,omitempty"` // the light|dark theme
	Limit              int          `json:"limit,omitempty"`      // explicit page size
	From               int          `json:"from,omitempty"`       // for paging
}
// IsSearchReadyResponse reports whether the search index can serve queries,
// and if not, why.
type IsSearchReadyResponse struct {
	IsReady bool
	Reason  string // initial-indexing-ongoing, org-indexing-ongoing
}
// SearchService is the background service behind unified dashboard search.
// It can be disabled via feature flags and is queried from HTTP handlers.
type SearchService interface {
	registry.CanBeDisabled
	registry.BackgroundService
	// DoQuery resolves the backend user and executes the search query.
	DoQuery(ctx context.Context, user *backend.User, orgId int64, query Query) *backend.DataResponse
	// doQuery executes the search for an already-resolved signed-in user.
	doQuery(ctx context.Context, user *user.SignedInUser, orgId int64, query Query) *backend.DataResponse
	// IsReady reports whether the index for orgId can serve queries.
	IsReady(ctx context.Context, orgId int64) IsSearchReadyResponse
	// RegisterDashboardIndexExtender(ext DashboardIndexExtender)
	TriggerReIndex()
}

@ -17,151 +17,3 @@ require (
google.golang.org/protobuf v1.34.2
k8s.io/apimachinery v0.31.1
)
require (
	github.com/RoaringBitmap/roaring v1.9.3 // indirect
	github.com/bits-and-blooms/bitset v1.12.0 // indirect
	github.com/blevesearch/bleve_index_api v1.1.10 // indirect
	github.com/blevesearch/geo v0.1.20 // indirect
	github.com/blevesearch/go-faiss v1.0.20 // indirect
	github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
	github.com/blevesearch/gtreap v0.1.1 // indirect
	github.com/blevesearch/mmap-go v1.0.4 // indirect
	github.com/blevesearch/scorch_segment_api/v2 v2.2.15 // indirect
	github.com/blevesearch/segment v0.9.1 // indirect
	github.com/blevesearch/snowballstem v0.9.0 // indirect
	github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
	github.com/blevesearch/vellum v1.0.10 // indirect
	github.com/blevesearch/zapx/v11 v11.3.10 // indirect
	github.com/blevesearch/zapx/v12 v12.3.10 // indirect
	github.com/blevesearch/zapx/v13 v13.3.10 // indirect
	github.com/blevesearch/zapx/v14 v14.3.10 // indirect
	github.com/blevesearch/zapx/v15 v15.3.13 // indirect
	github.com/blevesearch/zapx/v16 v16.1.5 // indirect
	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/mschoch/smat v0.2.0 // indirect
	go.etcd.io/bbolt v1.3.9 // indirect
)
require (
cloud.google.com/go v0.115.0 // indirect
cloud.google.com/go/auth v0.8.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
cloud.google.com/go/compute/metadata v0.5.0 // indirect
cloud.google.com/go/iam v1.1.13 // indirect
cloud.google.com/go/storage v1.43.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.27 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect
github.com/aws/smithy-go v1.20.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blevesearch/bleve/v2 v2.4.2
github.com/bufbuild/protocompile v0.4.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/google/wire v0.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/google/uuid v1.6.0
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/jhump/protoreflect v1.15.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.29.0 // indirect
go.opentelemetry.io/otel/metric v1.29.0 // indirect
go.opentelemetry.io/otel/sdk v1.29.0 // indirect
golang.org/x/crypto v0.27.0 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.25.0 // indirect
golang.org/x/text v0.18.0 // indirect
golang.org/x/time v0.6.0 // indirect
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
google.golang.org/api v0.191.0 // indirect
google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiserver v0.31.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)

@ -1,578 +0,0 @@
package resource
import (
"context"
golog "log"
"path/filepath"
reflect "reflect"
"strings"
"sync"
"time"
"github.com/blevesearch/bleve/v2"
"github.com/blevesearch/bleve/v2/search"
"github.com/google/uuid"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
)
// tracingPrexfixIndex is the span-name prefix for index operations.
// NOTE(review): "Prexfix" looks like a typo for "Prefix"; renaming would touch
// every use in this file, so it is only flagged here.
const tracingPrexfixIndex = "unified_storage.index."

// specFieldPrefix is prepended to spec field names when addressing indexed documents.
const specFieldPrefix = "Spec."

// descendingPrefix marks a sort field as descending.
const descendingPrefix = "-"
// Shard is a per-tenant bleve index plus the pending batch of documents
// waiting to be flushed into it.
type Shard struct {
	index bleve.Index
	path  string // on-disk location; empty for in-memory indexes
	batch *bleve.Batch
}
// Opts configures indexing concurrency, batching, paging and storage location.
type Opts struct {
	Workers   int    // This controls how many goroutines are used to index objects
	BatchSize int    // This is the batch size for how many objects to add to the index at once
	ListLimit int    // This is how big the List page size is. If the response size is too large, the number of items will be limited by the server.
	IndexDir  string // The directory where the indexes for each tenant are stored
}
// Index maintains one search shard per tenant (namespace).
// shardMutex guards the shards map; individual bleve indexes are used for
// the per-tenant documents.
type Index struct {
	shardMutex sync.RWMutex
	shards     map[string]*Shard
	opts       Opts
	s          *server
	log        log.Logger
	tracer     tracing.Tracer
}
// NewIndex creates an Index that shards bleve indexes per tenant.
func NewIndex(s *server, opts Opts, tracer tracing.Tracer) *Index {
	idx := &Index{
		shardMutex: sync.RWMutex{},
		s:          s,
		opts:       opts,
		shards:     map[string]*Shard{},
		log:        log.New("unifiedstorage.search.index"),
		tracer:     tracer,
	}
	return idx
}
// IndexBatches goes through all the shards and indexes their batches if they
// are large enough (>= maxSize pending entries). Flushes run concurrently,
// bounded by Opts.Workers.
func (i *Index) IndexBatches(ctx context.Context, maxSize int, tenants []string) error {
	_, span := i.tracer.Start(ctx, tracingPrexfixIndex+"IndexBatches")
	defer span.End()

	group := errgroup.Group{}
	group.SetLimit(i.opts.Workers)
	totalBatchesIndexed := 0

	for _, tenant := range tenants {
		shard, err := i.getShard(tenant)
		if err != nil {
			return err
		}
		// Index the batch if it is large enough
		if shard.batch.Size() >= maxSize {
			totalBatchesIndexed++
			// shadow the loop variables for the closure (pre-Go 1.22 loop semantics)
			tenant := tenant
			shard := shard
			group.Go(func() error {
				i.log.Debug("indexing batch for shard", "tenant", tenant, "size", shard.batch.Size())
				// use a goroutine-local error: the previous code assigned to the
				// shared outer `err`, a data race when several batches flush at once
				if err := shard.index.Batch(shard.batch); err != nil {
					return err
				}
				shard.batch.Reset()
				return nil
			})
		}
	}

	if err := group.Wait(); err != nil {
		return err
	}

	span.AddEvent("batches indexed", trace.WithAttributes(attribute.Int("batches_indexed", totalBatchesIndexed)))
	return nil
}
// AddToBatches adds resources to their respective shard's batch
// returns a list of tenants that have changes
func (i *Index) AddToBatches(ctx context.Context, list *ListResponse) ([]string, error) {
	_, span := i.tracer.Start(ctx, tracingPrexfixIndex+"AddToBatches")
	defer span.End()

	changed := map[string]bool{}
	for _, item := range list.Items {
		// Transform the raw resource into a more generic indexable resource
		indexed, err := NewIndexedResource(item.Value)
		if err != nil {
			return nil, err
		}

		shard, err := i.getShard(indexed.Namespace)
		if err != nil {
			return nil, err
		}

		i.log.Debug("indexing resource in batch", "batch_count", len(list.Items), "kind", indexed.Kind, "tenant", indexed.Namespace)
		if err := shard.batch.Index(indexed.Uid, indexed); err != nil {
			return nil, err
		}
		changed[indexed.Namespace] = true
	}

	tenants := make([]string, 0, len(changed))
	for tenant := range changed {
		tenants = append(tenants, tenant)
	}
	return tenants, nil
}
// Init builds an index for every tenant currently present in Unified Storage:
// it lists all supported resource types per tenant (concurrently, bounded by
// Opts.Workers), stages them into per-tenant batches, then flushes everything.
func (i *Index) Init(ctx context.Context) error {
	logger := i.log.FromContext(ctx)
	ctx, span := i.tracer.Start(ctx, tracingPrexfixIndex+"Init")
	defer span.End()
	start := time.Now().Unix()

	group := errgroup.Group{}
	group.SetLimit(i.opts.Workers)

	// totalObjects is updated from multiple goroutines, so guard it with a
	// mutex (the previous unsynchronized "+=" was a data race).
	var totalObjectsMu sync.Mutex
	totalObjects := 0

	// Get all tenants currently in Unified Storage
	tenants, err := i.s.backend.Namespaces(ctx)
	if err != nil {
		return err
	}
	for _, tenant := range tenants {
		tenant := tenant // shadow for the closure (pre-Go 1.22 loop semantics)
		group.Go(func() error {
			logger.Info("initializing index for tenant", "tenant", tenant)
			objs, err := i.InitForTenant(ctx, tenant)
			if err != nil {
				return err
			}
			totalObjectsMu.Lock()
			totalObjects += objs
			totalObjectsMu.Unlock()
			return nil
		})
	}

	err = group.Wait()
	if err != nil {
		return err
	}

	//index all remaining batches for all tenants
	logger.Info("indexing remaining batches", "shards", len(i.shards))
	err = i.IndexBatches(ctx, 1, i.allTenants())
	if err != nil {
		return err
	}

	end := time.Now().Unix()
	totalDocCount := getTotalDocCount(i)
	logger.Info("Initial indexing finished", "seconds", float64(end-start), "objs_fetched", totalObjects, "objs_indexed", totalDocCount)
	span.AddEvent(
		"indexing finished",
		trace.WithAttributes(attribute.Int64("objects_indexed", int64(totalDocCount))),
		trace.WithAttributes(attribute.Int64("objects_fetched", int64(totalObjects))),
	)
	if IndexServerMetrics != nil {
		IndexServerMetrics.IndexCreationTime.WithLabelValues().Observe(float64(end - start))
	}

	return nil
}
// InitForTenant lists and indexes all supported resource types for a single
// tenant (namespace), paginating through storage. Returns the number of
// objects fetched.
func (i *Index) InitForTenant(ctx context.Context, namespace string) (int, error) {
	ctx, span := i.tracer.Start(ctx, tracingPrexfixIndex+"InitForTenant")
	defer span.End()
	logger := i.log.FromContext(ctx)

	resourceTypes := fetchResourceTypes()
	totalObjectsFetched := 0
	for _, rt := range resourceTypes {
		logger.Debug("indexing resource", "kind", rt.Kind, "list_limit", i.opts.ListLimit, "batch_size", i.opts.BatchSize, "workers", i.opts.Workers, "namespace", namespace)
		r := &ListRequest{Options: rt.ListOptions, Limit: int64(i.opts.ListLimit)}
		r.Options.Key.Namespace = namespace // scope the list to a tenant or this will take forever when US has 1M+ resources

		// Paginate through the list of resources and index each page
		for {
			logger.Debug("fetching resource list", "kind", rt.Kind, "namespace", namespace)
			list, err := i.s.List(ctx, r)
			if err != nil {
				return totalObjectsFetched, err
			}

			// Record the number of objects indexed for the kind.
			// Guard against the metrics singleton not being initialized yet —
			// the other index paths already nil-check it.
			if IndexServerMetrics != nil {
				IndexServerMetrics.IndexedKinds.WithLabelValues(rt.Kind).Add(float64(len(list.Items)))
			}

			totalObjectsFetched += len(list.Items)

			logger.Debug("indexing batch", "kind", rt.Kind, "count", len(list.Items), "namespace", namespace)
			//add changes to batches for shards with changes in the List
			err = i.writeBatch(ctx, list)
			if err != nil {
				return totalObjectsFetched, err
			}

			if list.NextPageToken == "" {
				break
			}

			r.NextPageToken = list.NextPageToken
		}
	}

	span.AddEvent(
		"indexing finished for tenant",
		trace.WithAttributes(attribute.Int64("objects_indexed", int64(totalObjectsFetched))),
		trace.WithAttributes(attribute.String("tenant", namespace)),
	)

	return totalObjectsFetched, nil
}
// writeBatch stages the list items into per-tenant batches and flushes any
// tenant batch that has reached the configured size.
func (i *Index) writeBatch(ctx context.Context, list *ListResponse) error {
	tenants, err := i.AddToBatches(ctx, list)
	if err != nil {
		return err
	}
	// Index the batches for tenants with changes if the batch is large enough
	return i.IndexBatches(ctx, i.opts.BatchSize, tenants)
}
// Index adds (or updates) a single resource in its tenant's shard, creating
// the tenant's index on demand when it does not exist yet.
func (i *Index) Index(ctx context.Context, data *Data) error {
	ctx, span := i.tracer.Start(ctx, tracingPrexfixIndex+"Index")
	defer span.End()
	logger := i.log.FromContext(ctx)

	// Transform the raw resource into a more generic indexable resource
	res, err := NewIndexedResource(data.Value.Value)
	if err != nil {
		return err
	}
	tenant := res.Namespace
	logger.Debug("indexing resource for tenant", "res", string(data.Value.Value), "tenant", tenant)

	// if tenant doesn't exist, they may have been created during initial indexing.
	// Read the shard map under the read lock — the previous unguarded map read
	// raced with getShard's writes.
	i.shardMutex.RLock()
	_, ok := i.shards[tenant]
	i.shardMutex.RUnlock()
	if !ok {
		i.log.Info("tenant not found, initializing their index", "tenant", tenant)
		_, err = i.InitForTenant(ctx, tenant)
		if err != nil {
			return err
		}
	}

	shard, err := i.getShard(tenant)
	if err != nil {
		return err
	}
	err = shard.index.Index(res.Uid, res)
	if err != nil {
		return err
	}

	//record the kind of resource that was indexed (guard against uninitialized metrics)
	if IndexServerMetrics != nil {
		IndexServerMetrics.IndexedKinds.WithLabelValues(res.Kind).Inc()
	}

	// record latency from when event was created to when it was indexed
	latencySeconds := float64(time.Now().UnixMicro()-data.Value.ResourceVersion) / 1e6
	if latencySeconds > 5 {
		logger.Warn("high index latency", "latency", latencySeconds)
	}
	if IndexServerMetrics != nil {
		IndexServerMetrics.IndexLatency.WithLabelValues(data.Key.Resource).Observe(latencySeconds)
	}

	return nil
}
// Delete removes the document with the given uid from the shard for
// key.Namespace and decrements the per-kind metric.
func (i *Index) Delete(ctx context.Context, uid string, key *ResourceKey) error {
	_, span := i.tracer.Start(ctx, tracingPrexfixIndex+"Delete")
	defer span.End()

	shard, err := i.getShard(key.Namespace)
	if err != nil {
		return err
	}
	err = shard.index.Delete(uid)
	if err != nil {
		return err
	}
	// guard against the metrics singleton not being initialized, as the
	// other index paths already do
	if IndexServerMetrics != nil {
		IndexServerMetrics.IndexedKinds.WithLabelValues(key.Resource).Dec()
	}
	return nil
}
// Search executes a query against the tenant's shard. The free-text query is
// combined (AND) with optional kind and filter disjunctions; sorting, paging
// and facets are applied, and hits are converted back into IndexedResources.
func (i *Index) Search(ctx context.Context, request *SearchRequest) (*IndexResults, error) {
	ctx, span := i.tracer.Start(ctx, tracingPrexfixIndex+"Search")
	defer span.End()
	logger := i.log.FromContext(ctx)

	if request.Tenant == "" {
		request.Tenant = "default"
	}
	shard, err := i.getShard(request.Tenant)
	if err != nil {
		return nil, err
	}

	docCount, err := shard.index.DocCount()
	if err != nil {
		return nil, err
	}
	logger.Info("got index for tenant", "tenant", request.Tenant, "docCount", docCount)

	fields, _ := shard.index.Fields()
	logger.Debug("indexed fields", "fields", fields)

	// use 10 as a default limit for now
	if request.Limit <= 0 {
		request.Limit = 10
	}

	textQuery := bleve.NewQueryStringQuery(request.Query)
	query := bleve.NewConjunctionQuery(textQuery)

	if len(request.Kind) > 0 {
		// apply OR condition filter for each kind ( dashboard, folder, etc )
		orQuery := bleve.NewDisjunctionQuery()
		for _, term := range request.Kind {
			termQuery := bleve.NewTermQuery(term)
			orQuery.AddQuery(termQuery)
		}
		query.AddQuery(orQuery)
	}

	if len(request.Filters) > 0 {
		orQuery := bleve.NewDisjunctionQuery()
		for _, filter := range request.Filters {
			matchQuery := bleve.NewMatchQuery(filter)
			orQuery.AddQuery(matchQuery)
		}
		query.AddQuery(orQuery)
	}

	req := bleve.NewSearchRequest(query)
	if len(request.SortBy) > 0 {
		sorting := getSortFields(request)
		req.SortBy(sorting)
	}

	for _, group := range request.GroupBy {
		facet := bleve.NewFacetRequest(specFieldPrefix+group.Name, int(group.Limit))
		req.AddFacet(group.Name+"_facet", facet)
	}

	req.From = int(request.Offset)
	req.Size = int(request.Limit)

	req.Fields = []string{"*"} // return all indexed fields in search results

	logger.Info("searching index", "query", request.Query, "tenant", request.Tenant)
	res, err := shard.index.Search(req)
	if err != nil {
		return nil, err
	}
	hits := res.Hits

	logger.Info("got search results", "hits", hits)

	results := make([]IndexedResource, len(hits))
	for resKey, hit := range hits {
		ir := IndexedResource{}.FromSearchHit(hit)
		results[resKey] = ir
	}

	groups := []*Group{}
	for _, group := range request.GroupBy {
		// a requested facet may be absent from the response; the previous code
		// dereferenced it unconditionally and could panic on a missing facet
		groupByFacet, ok := res.Facets[group.Name+"_facet"]
		if !ok || groupByFacet == nil {
			continue
		}
		terms := getTermFacets(groupByFacet.Terms)
		for _, term := range terms {
			groups = append(groups, &Group{Name: term.Term, Count: int64(term.Count)})
		}
	}

	return &IndexResults{Values: results, Groups: groups}, nil
}
// Count returns the total doc count across all shards. Per-shard DocCount
// errors are logged and skipped so one broken shard does not hide the rest.
// The shards map is read under the read lock (the previous unguarded iteration
// raced with getShard's writes).
func (i *Index) Count() (int, error) {
	i.shardMutex.RLock()
	defer i.shardMutex.RUnlock()

	total := 0
	for _, shard := range i.shards {
		count, err := shard.index.DocCount()
		if err != nil {
			i.log.Error("failed to get doc count", "error", err)
		}
		total += int(count)
	}
	return total, nil
}
// allTenants returns a list of all tenants in the index. The shards map is
// read under the read lock (the previous unguarded iteration raced with
// getShard's writes).
func (i *Index) allTenants() []string {
	i.shardMutex.RLock()
	defer i.shardMutex.RUnlock()

	tenants := make([]string, 0, len(i.shards))
	for tenant := range i.shards {
		tenants = append(tenants, tenant)
	}
	return tenants
}
// getShard returns the shard for the given tenant, lazily creating the
// tenant's bleve index (and its batch) on first use. Safe for concurrent use.
func (i *Index) getShard(tenant string) (*Shard, error) {
	i.shardMutex.Lock()
	defer i.shardMutex.Unlock()

	if existing, ok := i.shards[tenant]; ok {
		return existing, nil
	}

	index, path, err := i.createIndex()
	if err != nil {
		return &Shard{}, err
	}

	created := &Shard{
		index: index,
		path:  path,
		batch: index.NewBatch(),
	}
	i.shards[tenant] = created
	return created, nil
}
// createIndex picks a file-backed index when IndexDir is configured,
// otherwise an in-memory one.
func (i *Index) createIndex() (bleve.Index, string, error) {
	if i.opts.IndexDir != "" {
		return createFileIndex(i.opts.IndexDir)
	}
	return createInMemoryIndex()
}
// mappings is the shared bleve index mapping, built once and reused by all shards.
var mappings = createIndexMappings()
// less memory intensive alternative for larger indexes with less tenants (on-prem)
// createFileIndex creates a file-backed bleve index in a fresh UUID-named
// subdirectory of path, returning the index and its location.
func createFileIndex(path string) (bleve.Index, string, error) {
	indexPath := filepath.Join(path, uuid.New().String())
	index, err := bleve.New(indexPath, mappings)
	if err != nil {
		// return the error instead of the previous log.Fatalf, which killed
		// the whole process on a single shard-creation failure
		golog.Printf("failed to create index at %s: %v", indexPath, err)
		return nil, "", err
	}
	return index, indexPath, nil
}
// faster indexing when there are many tenants with smaller batches (cloud)
func createInMemoryIndex() (bleve.Index, string, error) {
	idx, err := bleve.NewMemOnly(mappings)
	// in-memory indexes have no on-disk path
	return idx, "", err
}
// IndexerListOptions pairs storage list options with the kind name used for
// index mappings and metrics labels.
type IndexerListOptions struct {
	*ListOptions
	Kind string
}
// TODO - fetch from api
// Folders need to be indexed first as dashboards depend on them to be indexed already.
func fetchResourceTypes() []*IndexerListOptions {
	kinds := []struct {
		group    string
		resource string
		kind     string
	}{
		{"folder.grafana.app", "folders", "Folder"},
		{"playlist.grafana.app", "playlists", "Playlist"},
		{"dashboard.grafana.app", "dashboards", "Dashboard"},
	}

	out := make([]*IndexerListOptions, 0, len(kinds))
	for _, k := range kinds {
		out = append(out, &IndexerListOptions{
			ListOptions: &ListOptions{
				Key: &ResourceKey{Group: k.group, Resource: k.resource},
			},
			Kind: k.kind,
		})
	}
	return out
}
// getSortFields translates the request's SortBy entries into bleve sort
// strings, prefixing known spec fields with "Spec." while preserving any
// leading "-" (descending) marker. Non-spec fields pass through unchanged.
func getSortFields(request *SearchRequest) []string {
	fields := make([]string, 0, len(request.SortBy))
	for _, field := range request.SortBy {
		if !IsSpecField(field) {
			fields = append(fields, field)
			continue
		}
		prefix := ""
		if strings.HasPrefix(field, descendingPrefix) {
			field = strings.TrimPrefix(field, descendingPrefix)
			prefix = descendingPrefix
		}
		fields = append(fields, prefix+specFieldPrefix+field)
	}
	return fields
}
// getTermFacets extracts all term facets from a bleve TermFacets value.
// bleve stores the term lookup map in a private field, so reflection is used
// to read it; any shape other than the expected struct+map yields an empty
// slice rather than an error.
func getTermFacets(f *search.TermFacets) []*search.TermFacet {
	e := reflect.ValueOf(f).Elem()
	if e.Kind() != reflect.Struct {
		return []*search.TermFacet{}
	}
	// workaround - this field is private, so we need to use reflection to access it
	// TODO - fork bleve and create a pr to make this field accessible
	v := e.FieldByName("termLookup")
	if v.Kind() != reflect.Map {
		return []*search.TermFacet{}
	}
	terms := []*search.TermFacet{}
	termsRange := v.MapRange()
	for termsRange.Next() {
		value := termsRange.Value()
		// facet value is *search.TermFacet
		if value.Kind() == reflect.Pointer {
			val := value.Elem()
			if val.Kind() == reflect.Struct {
				group := newTerm(val)
				terms = append(terms, group)
			}
		}
	}
	return terms
}
// newTerm copies the string (term) and int (count) fields of a reflected
// TermFacet-shaped struct into a fresh search.TermFacet.
func newTerm(val reflect.Value) *search.TermFacet {
	out := &search.TermFacet{}
	for idx := 0; idx < val.NumField(); idx++ {
		switch f := val.Field(idx); f.Kind() {
		case reflect.String:
			out.Term = f.String()
		case reflect.Int:
			out.Count = int(f.Int())
		}
	}
	return out
}

@ -1,253 +0,0 @@
package resource
import (
"strings"
"github.com/blevesearch/bleve/v2"
"github.com/blevesearch/bleve/v2/mapping"
"github.com/blevesearch/bleve/v2/search"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// IndexedResource is the generic, flattened document shape stored in the
// search index: common k8s metadata fields plus the resource-specific Spec.
type IndexedResource struct {
	Uid       string
	Group     string
	Namespace string
	Kind      string
	Name      string
	Title     string
	CreatedAt string
	CreatedBy string
	UpdatedAt string
	UpdatedBy string
	FolderId  string
	Spec      map[string]any
}
// IndexResults is the outcome of a search: the matched resources plus any
// requested facet groups.
type IndexResults struct {
	Values []IndexedResource
	Groups []*Group
}
// FromSearchHit builds an IndexedResource from a bleve search hit, copying the
// well-known top-level fields and collecting all "Spec."-prefixed stored
// fields into Spec.
func (ir IndexedResource) FromSearchHit(hit *search.DocumentMatch) IndexedResource {
	ir.Uid = fieldValue("Uid", hit)
	ir.Kind = fieldValue("Kind", hit)
	ir.Name = fieldValue("Name", hit)
	ir.Namespace = fieldValue("Namespace", hit)
	ir.Group = fieldValue("Group", hit)
	ir.CreatedAt = fieldValue("CreatedAt", hit)
	ir.CreatedBy = fieldValue("CreatedBy", hit)
	ir.UpdatedAt = fieldValue("UpdatedAt", hit)
	ir.UpdatedBy = fieldValue("UpdatedBy", hit)
	ir.Title = fieldValue("Title", hit)

	// add indexed spec fields to search results
	specResult := map[string]any{}
	for k, v := range hit.Fields {
		if strings.HasPrefix(k, "Spec.") {
			specKey := strings.TrimPrefix(k, "Spec.")
			specResult[specKey] = v
		}
	}
	// assign once, after the loop — the previous code reassigned ir.Spec on
	// every field iteration
	ir.Spec = specResult

	return ir
}
// fieldValue returns the string value of a stored field on the hit, or ""
// when the field is missing or not a string. The previous unchecked type
// assertion panicked on non-string values (e.g. numeric spec fields).
func fieldValue(field string, hit *search.DocumentMatch) string {
	if val, ok := hit.Fields[field]; ok {
		if s, ok := val.(string); ok {
			return s
		}
	}
	return ""
}
// NewIndexedResource creates a new IndexedResource from a raw resource.
// rawResource is the raw json for the resource from unified storage.
// Returns an error when the JSON cannot be parsed, the meta accessor cannot
// be built, or the update timestamp cannot be read.
func NewIndexedResource(rawResource []byte) (*IndexedResource, error) {
	k8sObj := unstructured.Unstructured{}
	err := k8sObj.UnmarshalJSON(rawResource)
	if err != nil {
		return nil, err
	}
	meta, err := utils.MetaAccessor(&k8sObj)
	if err != nil {
		return nil, err
	}
	ir := &IndexedResource{}
	ir.Uid = string(meta.GetUID())
	ir.Name = meta.GetName()
	ir.Title = meta.FindTitle("")
	ir.Namespace = meta.GetNamespace()
	ir.Group = meta.GetGroupVersionKind().Group
	ir.Kind = meta.GetGroupVersionKind().Kind
	// NOTE(review): the literal "Z" suffix labels the timestamp as UTC without
	// converting it — confirm timestamps from storage are already UTC.
	ir.CreatedAt = meta.GetCreationTimestamp().Time.Format("2006-01-02T15:04:05Z")
	ir.CreatedBy = meta.GetCreatedBy()
	updatedAt, err := meta.GetUpdatedTimestamp()
	if err != nil {
		return nil, err
	}
	// fall back to the creation time when no update timestamp exists
	if updatedAt != nil {
		ir.UpdatedAt = updatedAt.Format("2006-01-02T15:04:05Z")
	} else {
		ir.UpdatedAt = ir.CreatedAt
	}
	ir.UpdatedBy = meta.GetUpdatedBy()
	spec, err := meta.GetSpec()
	if err != nil {
		return nil, err
	}
	// spec is only indexed when it is a plain object; other shapes are skipped
	specValues, ok := spec.(map[string]any)
	if ok {
		ir.Spec = specValues
	}
	return ir, nil
}
// createIndexMappings builds the bleve index mapping with one document
// mapping per known kind, discriminated by the "Kind" field.
func createIndexMappings() *mapping.IndexMappingImpl {
	indexMapping := bleve.NewIndexMapping()
	indexMapping.TypeField = "Kind"
	for kind := range getSpecObjectMappings() {
		indexMapping.AddDocumentMapping(kind, createIndexMappingForKind(kind))
	}
	return indexMapping
}
// createIndexMappingForKind builds the document mapping for one kind: the
// shared top-level metadata fields plus a kind-specific "Spec" sub-mapping.
// Dynamic mapping is disabled so only explicitly listed fields are indexed.
func createIndexMappingForKind(resourceKind string) *mapping.DocumentMapping {
	docMapping := bleve.NewDocumentMapping()
	docMapping.Dynamic = false // only map fields that we have explicitly defined

	// Spec is different for all resources, so generate it per kind
	docMapping.AddSubDocumentMapping("Spec", createSpecObjectMapping(resourceKind))

	// shared top-level metadata fields
	for name, fm := range map[string]*mapping.FieldMapping{
		"Uid":       bleve.NewTextFieldMapping(),
		"Group":     bleve.NewTextFieldMapping(),
		"Namespace": bleve.NewTextFieldMapping(),
		"Kind":      bleve.NewTextFieldMapping(),
		"Name":      bleve.NewTextFieldMapping(),
		"Title":     bleve.NewTextFieldMapping(),
		"CreatedAt": bleve.NewDateTimeFieldMapping(),
		"CreatedBy": bleve.NewTextFieldMapping(),
		"UpdatedAt": bleve.NewDateTimeFieldMapping(),
		"UpdatedBy": bleve.NewTextFieldMapping(),
		"FolderId":  bleve.NewTextFieldMapping(),
	} {
		docMapping.AddFieldMappingsAt(name, fm)
	}
	return docMapping
}
// SpecFieldMapping names a spec field to index and its declared type
// ("string", "int64", "bool", "time", ...).
type SpecFieldMapping struct {
	Field string
	Type  string
}
// Right now we are hardcoding which spec fields to index for each kind
// In the future, which fields to index will be defined on the resources themselves by their owners.
// getSpecObjectMappings returns the per-kind list of indexed spec fields.
func getSpecObjectMappings() map[string][]SpecFieldMapping {
	return specMappings
}
// Generate the spec field mapping for a given kind. Only flat string, number,
// bool and time fields are supported; anything else is skipped for now.
func createSpecObjectMapping(kind string) *mapping.DocumentMapping {
	specMapping := bleve.NewDocumentMapping()
	specMapping.Dynamic = false

	// get the fields to index for the kind
	for _, fieldDef := range getSpecObjectMappings()[kind] {
		var fm *mapping.FieldMapping
		switch fieldDef.Type {
		case "string", "string[]":
			fm = bleve.NewTextFieldMapping()
		case "int", "int64", "float64":
			fm = bleve.NewNumericFieldMapping()
		case "bool":
			fm = bleve.NewBooleanFieldMapping()
		case "time":
			fm = bleve.NewDateTimeFieldMapping()
		default:
			// TODO support indexing arrays and nested fields
			// We are only indexing top level string,int, and bool fields within spec for now. Arrays or nested fields are not yet supported.
			continue
		}
		specMapping.AddFieldMappingsAt(fieldDef.Field, fm)
	}
	return specMapping
}
// IsSpecField reports whether field (with any leading "-" sort marker
// stripped) is one of the indexed spec fields.
func IsSpecField(field string) bool {
	_, ok := specFields[strings.TrimPrefix(field, "-")]
	return ok
}
// specFields is the set of every spec field name indexed for any kind.
var specFields = mapSpecFields()

// mapSpecFields flattens specMappings into a membership set of field names.
func mapSpecFields() map[string]bool {
	set := map[string]bool{}
	for _, kindMappings := range specMappings {
		for _, fm := range kindMappings {
			set[fm.Field] = true
		}
	}
	return set
}
// specMappings hardcodes which spec fields are indexed per kind and their
// declared types; consumed by createSpecObjectMapping and mapSpecFields.
var specMappings = map[string][]SpecFieldMapping{
	"Playlist": {
		{
			Field: "interval",
			Type:  "string",
		},
		{
			Field: "title",
			Type:  "string",
		},
	},
	"Folder": {
		{
			Field: "title",
			Type:  "string",
		},
		{
			Field: "description",
			Type:  "string",
		},
	},
	"Dashboard": {
		{
			Field: "title",
			Type:  "string",
		},
		{
			Field: "description",
			Type:  "string",
		},
		{
			Field: "tags",
			Type:  "string[]",
		},
	},
}

@ -1,126 +0,0 @@
package resource
import (
"os"
"path/filepath"
"sync"
"time"
"github.com/grafana/dskit/instrument"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// onceIndex guards the one-time construction of IndexServerMetrics.
	onceIndex sync.Once
	// IndexServerMetrics is the process-wide metrics singleton; nil until
	// NewIndexMetrics has been called.
	IndexServerMetrics *IndexMetrics
)
// IndexMetrics implements prometheus.Collector for the search index: latency,
// on-disk size, document counts per kind, and index creation time.
type IndexMetrics struct {
	IndexDir    string       // used to compute on-disk index size at collect time
	IndexServer *IndexServer // used to read the live doc count at collect time

	// metrics
	IndexLatency      *prometheus.HistogramVec
	IndexSize         prometheus.Gauge
	IndexedDocs       prometheus.Gauge
	IndexedKinds      *prometheus.GaugeVec
	IndexCreationTime *prometheus.HistogramVec
}
// IndexCreationBuckets are the histogram buckets (seconds) for index creation time.
var IndexCreationBuckets = []float64{1, 5, 10, 25, 50, 75, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000}
// NewIndexMetrics lazily constructs the IndexServerMetrics singleton on first
// call and returns it; later calls return the same instance and ignore their
// arguments (sync.Once semantics).
func NewIndexMetrics(indexDir string, indexServer *IndexServer) *IndexMetrics {
	onceIndex.Do(func() {
		IndexServerMetrics = &IndexMetrics{
			IndexDir:    indexDir,
			IndexServer: indexServer,
			IndexLatency: prometheus.NewHistogramVec(prometheus.HistogramOpts{
				Namespace:                       "index_server",
				Name:                            "index_latency_seconds",
				Help:                            "Time (in seconds) until index is updated with new event",
				Buckets:                         instrument.DefBuckets,
				NativeHistogramBucketFactor:     1.1, // enable native histograms
				NativeHistogramMaxBucketNumber:  160,
				NativeHistogramMinResetDuration: time.Hour,
			}, []string{"resource"}),
			IndexSize: prometheus.NewGauge(prometheus.GaugeOpts{
				Namespace: "index_server",
				Name:      "index_size",
				Help:      "Size of the index in bytes",
			}),
			IndexedDocs: prometheus.NewGauge(prometheus.GaugeOpts{
				Namespace: "index_server",
				Name:      "indexed_docs",
				Help:      "Number of indexed documents by resource",
			}),
			IndexedKinds: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: "index_server",
				Name:      "indexed_kinds",
				Help:      "Number of indexed documents by kind",
			}, []string{"kind"}),
			IndexCreationTime: prometheus.NewHistogramVec(prometheus.HistogramOpts{
				Namespace:                       "index_server",
				Name:                            "index_creation_time_seconds",
				Help:                            "Time (in seconds) it takes until index is created",
				Buckets:                         IndexCreationBuckets,
				NativeHistogramBucketFactor:     1.1, // enable native histograms
				NativeHistogramMaxBucketNumber:  160,
				NativeHistogramMinResetDuration: time.Hour,
			}, []string{}),
		}
	})
	return IndexServerMetrics
}
// Collect implements prometheus.Collector. Besides forwarding the recorded
// metrics, it computes the current on-disk index size and live doc count at
// scrape time; a size error is deliberately ignored (best effort) and the
// size gauge is simply not emitted in that case.
func (s *IndexMetrics) Collect(ch chan<- prometheus.Metric) {
	s.IndexLatency.Collect(ch)
	s.IndexCreationTime.Collect(ch)
	s.IndexedKinds.Collect(ch)

	// collect index size
	totalSize, err := getTotalIndexSize(s.IndexDir)
	if err == nil {
		s.IndexSize.Set(float64(totalSize))
		s.IndexSize.Collect(ch)
	}

	// collect index docs
	s.IndexedDocs.Set(getTotalDocCount(s.IndexServer.index))
	s.IndexedDocs.Collect(ch)
}
// Describe implements prometheus.Collector by forwarding to every metric.
func (s *IndexMetrics) Describe(ch chan<- *prometheus.Desc) {
	for _, collector := range []prometheus.Collector{
		s.IndexLatency,
		s.IndexSize,
		s.IndexedDocs,
		s.IndexedKinds,
		s.IndexCreationTime,
	} {
		collector.Describe(ch)
	}
}
// getTotalDocCount returns the total number of documents in the index.
// The error from Count is deliberately ignored; per-shard failures are
// already logged inside Index.Count.
func getTotalDocCount(index *Index) float64 {
	total, _ := index.Count()
	return float64(total)
}
// getTotalIndexSize returns the total size of the index directory when using a file-based index
func getTotalIndexSize(dir string) (int64, error) {
var totalSize int64
err := filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
fileInfo, err := info.Info()
if err != nil {
return err
}
totalSize += fileInfo.Size()
}
return nil
})
return totalSize, err
}

@ -1,254 +0,0 @@
package resource
import (
"context"
"encoding/json"
"errors"
"log/slog"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/setting"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
)
// IndexServer couples a ResourceServer with a search Index: it serves Search
// requests and keeps the index current by watching resource changes.
type IndexServer struct {
	ResourceServer
	s      *server
	index  *Index
	ws     *indexWatchServer
	log    *slog.Logger
	cfg    *setting.Cfg
	tracer tracing.Tracer
}

// tracingPrefixIndexServer is the span-name prefix for index-server operations.
const tracingPrefixIndexServer = "unified_storage.index_server."
// Search runs the request against the index and converts each matching
// document into a ResourceWrapper for the gRPC response. Group results
// (from group-by queries) are passed through unchanged.
func (is *IndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) {
	ctx, span := is.tracer.Start(ctx, tracingPrefixIndexServer+"Search")
	defer span.End()

	results, err := is.index.Search(ctx, req)
	if err != nil {
		return nil, err
	}

	// BUG FIX: Groups was previously (re)assigned on every loop iteration,
	// which meant it was silently dropped whenever the search matched zero
	// values (e.g. a group-by query with a small limit). Assign it once,
	// unconditionally.
	res := &SearchResponse{Groups: results.Groups}
	for _, r := range results.Values {
		resJsonBytes, err := json.Marshal(r)
		if err != nil {
			return nil, err
		}
		res.Items = append(res.Items, &ResourceWrapper{Value: resJsonBytes})
	}
	return res, nil
}
// History implements ResourceIndexServer. Not implemented yet; always
// returns (nil, nil).
func (is *IndexServer) History(ctx context.Context, req *HistoryRequest) (*HistoryResponse, error) {
	return nil, nil
}

// Origin implements ResourceIndexServer. Not implemented yet; always
// returns (nil, nil).
func (is *IndexServer) Origin(ctx context.Context, req *OriginRequest) (*OriginResponse, error) {
	return nil, nil
}
// Load the index
func (is *IndexServer) Load(ctx context.Context) error {
ctx, span := is.tracer.Start(ctx, tracingPrefixIndexServer+"Load")
defer span.End()
opts := Opts{
Workers: is.cfg.IndexWorkers,
BatchSize: is.cfg.IndexMaxBatchSize,
ListLimit: is.cfg.IndexListLimit,
IndexDir: is.cfg.IndexPath,
}
is.index = NewIndex(is.s, opts, is.tracer)
err := is.index.Init(ctx)
if err != nil {
return err
}
return nil
}
// Watch resources for changes and update the index
func (is *IndexServer) Watch(ctx context.Context) error {
rtList := fetchResourceTypes()
for _, rt := range rtList {
wr := &WatchRequest{
Options: rt.ListOptions,
}
go func() {
for {
// blocking call
err := is.s.Watch(wr, is.ws)
if err != nil {
is.log.Error("Error watching resource", "error", err)
}
is.log.Debug("Resource watch ended. Restarting watch")
}
}()
}
return nil
}
// Init sets the resource server on the index server
// so we can call the resource server from the index server
// TODO: a chicken and egg problem - index server needs the resource server but the resource server is created with the index server
// Init sets the resource server on the index server so we can call the
// resource server from the index server.
// TODO: a chicken and egg problem - index server needs the resource server
// but the resource server is created with the index server.
func (is *IndexServer) Init(ctx context.Context, rs *server) error {
	is.s = rs
	is.ws = &indexWatchServer{is: is, context: ctx}
	return nil
}
// NewResourceIndexServer creates the index server and registers its metrics
// collector. A failed metrics registration (e.g. duplicate) is logged but
// does not prevent the server from being returned.
func NewResourceIndexServer(cfg *setting.Cfg, tracer tracing.Tracer) ResourceIndexServer {
	logger := slog.Default().With("logger", "index-server")
	srv := &IndexServer{
		log:    logger,
		cfg:    cfg,
		tracer: tracer,
	}
	if err := prometheus.Register(NewIndexMetrics(cfg.IndexPath, srv)); err != nil {
		logger.Warn("Failed to register index metrics", "error", err)
	}
	return srv
}
// ResourceIndexer is implemented by servers that can build (or return an
// already-built) search index on demand.
type ResourceIndexer interface {
	Index(ctx context.Context) (*Index, error)
}

// indexWatchServer adapts the gRPC watch stream interface so watch events
// can be applied directly to the index instead of being sent to a client.
type indexWatchServer struct {
	grpc.ServerStream
	context context.Context
	is      *IndexServer
}
// Send dispatches a watch event to the matching index operation. Events of
// any other type are ignored.
func (f *indexWatchServer) Send(we *WatchEvent) error {
	switch we.Type {
	case WatchEvent_ADDED:
		return f.Add(we)
	case WatchEvent_DELETED:
		return f.Delete(we)
	case WatchEvent_MODIFIED:
		return f.Update(we)
	default:
		return nil
	}
}
// RecvMsg is a no-op: this stream only consumes watch events via Send.
func (f *indexWatchServer) RecvMsg(m interface{}) error {
	return nil
}

// SendMsg is not supported on this adapter; use Send with a *WatchEvent.
func (f *indexWatchServer) SendMsg(m interface{}) error {
	return errors.New("not implemented")
}
// Context returns the stream's context, lazily falling back to a background
// context when none was set.
func (f *indexWatchServer) Context() context.Context {
	if f.context != nil {
		return f.context
	}
	f.context = context.Background()
	return f.context
}
// Index returns the search index owned by the parent IndexServer.
func (f *indexWatchServer) Index() *Index {
	return f.is.index
}
// Add indexes the resource carried by an ADDED watch event.
func (f *indexWatchServer) Add(we *WatchEvent) error {
	data, err := getData(we.Resource)
	if err != nil {
		return err
	}
	return f.Index().Index(f.context, data)
}
// Delete removes the resource carried by a DELETED watch event from the
// index (using the previous value when the event has no current one).
func (f *indexWatchServer) Delete(we *WatchEvent) error {
	rs, err := resource(we)
	if err != nil {
		return err
	}
	data, err := getData(rs)
	if err != nil {
		return err
	}
	return f.Index().Delete(f.context, data.Uid, data.Key)
}
// Update re-indexes a modified resource: the old document is deleted first,
// then the new version is indexed.
func (f *indexWatchServer) Update(we *WatchEvent) error {
	rs, err := resource(we)
	if err != nil {
		return err
	}
	data, err := getData(rs)
	if err != nil {
		return err
	}
	if err := f.Index().Delete(f.context, data.Uid, data.Key); err != nil {
		return err
	}
	return f.Index().Index(f.context, data)
}
// Data is the unit handed to the index: the resource key, the wrapped raw
// value, and the resource UID used as the document identifier.
type Data struct {
	Key   *ResourceKey
	Value *ResourceWrapper
	Uid   string
}
// getData parses a watch-event resource payload into the key/value/uid
// triple the index operates on.
func getData(wr *WatchEvent_Resource) (*Data, error) {
	ir, err := NewIndexedResource(wr.Value)
	if err != nil {
		return nil, err
	}
	return &Data{
		Key: &ResourceKey{
			Group:     ir.Group,
			Resource:  ir.Kind, // We use Kind as resource key since watch events don't have a resource name on them
			Namespace: ir.Namespace,
			Name:      ir.Name,
		},
		Value: &ResourceWrapper{
			ResourceVersion: wr.Version,
			Value:           wr.Value,
		},
		Uid: ir.Uid,
	}, nil
}
// resource extracts the payload from a watch event, preferring the current
// value and falling back to the previous one (updates/deletes may only carry
// the previous state).
func resource(we *WatchEvent) (*WatchEvent_Resource, error) {
	if rs := we.Resource; rs != nil && len(rs.Value) > 0 {
		return rs, nil
	}
	// for updates/deletes
	if rs := we.Previous; rs != nil && len(rs.Value) > 0 {
		return rs, nil
	}
	return nil, errors.New("resource not found")
}

@ -1,281 +0,0 @@
package resource
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/rand"
)
// testTenant is the namespace used by the single-tenant test fixtures.
const testTenant = "default"

// testContext is shared by all tests in this file; none of them need
// cancellation or deadlines.
var testContext = context.Background()
// TestIndexDashboard verifies a single dashboard resource can be indexed and
// found with a wildcard search.
func TestIndexDashboard(t *testing.T) {
	data := readTestData(t, "dashboard-resource.json")
	list := &ListResponse{Items: []*ResourceWrapper{{Value: data}}}
	index := newTestIndex(t, 1)

	err := index.writeBatch(testContext, list)
	require.NoError(t, err)

	assertCountEquals(t, index, 1)
	require.Equal(t, 1, len(index.allTenants()))
	assertSearchCountEquals(t, index, "*", nil, nil, 1)
}

// TestIndexFolder verifies a single folder resource can be indexed and found.
func TestIndexFolder(t *testing.T) {
	data := readTestData(t, "folder-resource.json")
	list := &ListResponse{Items: []*ResourceWrapper{{Value: data}}}
	index := newTestIndex(t, 1)

	err := index.writeBatch(testContext, list)
	require.NoError(t, err)

	assertCountEquals(t, index, 1)
	assertSearchCountEquals(t, index, "*", nil, nil, 1)
}

// TestSearchFolder verifies a kind filter of "folder" returns only the folder
// out of a mixed dashboard/folder index.
func TestSearchFolder(t *testing.T) {
	dashboard := readTestData(t, "dashboard-resource.json")
	folder := readTestData(t, "folder-resource.json")
	list := &ListResponse{Items: []*ResourceWrapper{{Value: dashboard}, {Value: folder}}}
	index := newTestIndex(t, 1)

	err := index.writeBatch(testContext, list)
	require.NoError(t, err)

	assertCountEquals(t, index, 2)
	assertSearchCountEquals(t, index, "*", []string{"folder"}, nil, 1)
}

// TestSearchDashboardsAndFoldersOnly verifies a multi-kind filter excludes
// other kinds (the playlist) from the results.
func TestSearchDashboardsAndFoldersOnly(t *testing.T) {
	dashboard := readTestData(t, "dashboard-resource.json")
	folder := readTestData(t, "folder-resource.json")
	playlist := readTestData(t, "playlist-resource.json")
	list := &ListResponse{Items: []*ResourceWrapper{{Value: dashboard}, {Value: folder}, {Value: playlist}}}
	index := newTestIndex(t, 1)

	err := index.writeBatch(testContext, list)
	require.NoError(t, err)

	assertCountEquals(t, index, 3)
	assertSearchCountEquals(t, index, "*", []string{"dashboard", "folder"}, nil, 2)
}
// TestLookupNames indexes 1000 simulated folders and verifies that a query
// containing 100 quoted uids matches exactly those 100 documents.
func TestLookupNames(t *testing.T) {
	records := 1000
	folders, ids := simulateFolders(records)
	list := &ListResponse{Items: []*ResourceWrapper{}}
	for _, f := range folders {
		list.Items = append(list.Items, &ResourceWrapper{Value: []byte(f)})
	}
	index := newTestIndex(t, 1)

	err := index.writeBatch(testContext, list)
	require.NoError(t, err)

	assertCountEquals(t, index, records)
	query := ""
	chunk := ids[:100] // query for n folders by id
	for _, id := range chunk {
		query += `"` + id + `" `
	}
	assertSearchCountEquals(t, index, query, nil, nil, int64(len(chunk)))
}

// TestIndexDashboardWithTags verifies tag filtering and tag group-by counts
// across tagged and untagged dashboards.
func TestIndexDashboardWithTags(t *testing.T) {
	dashboard := readTestData(t, "dashboard-resource.json")
	data := readTestData(t, "dashboard-tagged-resource.json")
	data2 := readTestData(t, "dashboard-tagged-resource2.json")
	list := &ListResponse{Items: []*ResourceWrapper{{Value: dashboard}, {Value: data}, {Value: data2}}}
	index := newTestIndex(t, 2)

	err := index.writeBatch(testContext, list)
	require.NoError(t, err)

	assertCountEquals(t, index, 3)
	assertSearchCountEquals(t, index, "*", nil, []string{"tag1"}, 2)
	assertSearchCountEquals(t, index, "*", nil, []string{"tag4"}, 1)
	assertSearchGroupCountEquals(t, index, "*", "tags", nil, 4)
	assertSearchGroupCountEquals(t, index, "*", "tags", []string{"tag4"}, 3)
}

// TestSort verifies ascending ("title") and descending ("-title") sort orders
// change which document comes back first.
func TestSort(t *testing.T) {
	dashboard := readTestData(t, "dashboard-resource.json")
	folder := readTestData(t, "folder-resource.json")
	playlist := readTestData(t, "playlist-resource.json")
	list := &ListResponse{Items: []*ResourceWrapper{{Value: dashboard}, {Value: folder}, {Value: playlist}}}
	index := newTestIndex(t, 1)

	err := index.writeBatch(testContext, list)
	require.NoError(t, err)

	assertCountEquals(t, index, 3)

	req := &SearchRequest{Query: "*", Tenant: testTenant, Limit: 4, Offset: 0, Kind: []string{"dashboard", "folder"}, SortBy: []string{"title"}}
	results, err := index.Search(testContext, req)
	require.NoError(t, err)

	val := results.Values[0]
	assert.Equal(t, "dashboard-a", val.Spec["title"])

	req = &SearchRequest{Query: "*", Tenant: testTenant, Limit: 4, Offset: 0, Kind: []string{"dashboard", "folder"}, SortBy: []string{"-title"}}
	results, err = index.Search(testContext, req)
	require.NoError(t, err)

	val = results.Values[0]
	assert.NotEqual(t, "dashboard-a", val.Spec["title"])
}

// TestIndexBatch stress-tests batched indexing: 10 simulated List pages of
// 10k items each across 1000 tenants, then verifies shard and doc counts.
func TestIndexBatch(t *testing.T) {
	index := newTestIndex(t, 1000)

	startAll := time.Now()
	ns := namespaces()
	// simulate 10 List calls
	for i := 0; i < 10; i++ {
		list := &ListResponse{Items: loadTestItems(strconv.Itoa(i), ns)}
		start := time.Now()
		_, err := index.AddToBatches(testContext, list)
		require.NoError(t, err)
		elapsed := time.Since(start)
		fmt.Println("Time elapsed:", elapsed)
	}

	// index all batches for each shard/tenant
	err := index.IndexBatches(testContext, 1, ns)
	require.NoError(t, err)

	elapsed := time.Since(startAll)
	fmt.Println("Total Time elapsed:", elapsed)

	assert.Equal(t, len(ns), len(index.shards))
	assertCountEquals(t, index, 100000)
}
// loadTestItems generates 10k resource payloads from a JSON template, with a
// randomly chosen kind and tenant namespace per item. uid is appended to the
// numeric index to keep uids unique across multiple calls.
func loadTestItems(uid string, tenants []string) []*ResourceWrapper {
	resource := `{
	"kind": "<kind>",
	"title": "test",
	"metadata": {
		"uid": "<uid>",
		"name": "test",
		"namespace": "<ns>"
	},
	"spec": {
		"title": "test",
		"description": "test",
		"interval": "5m"
	}
}`

	items := []*ResourceWrapper{}
	for i := 0; i < 10000; i++ {
		res := strings.Replace(resource, "<uid>", strconv.Itoa(i)+uid, 1)
		// shuffle kinds
		kind := kinds[rand.Intn(len(kinds))]
		res = strings.Replace(res, "<kind>", kind, 1)
		// shuffle namespaces
		ns := tenants[rand.Intn(len(tenants))]
		res = strings.Replace(res, "<ns>", ns, 1)
		items = append(items, &ResourceWrapper{Value: []byte(res)})
	}
	return items
}
// kinds are the resource kinds randomly assigned by loadTestItems.
var kinds = []string{
	"playlist",
	"folder",
}
// namespaces returns 1000 synthetic tenant namespaces, simulating a
// multi-tenant (cloud) deployment.
func namespaces() []string {
	out := make([]string, 1000)
	for i := range out {
		out[i] = "tenant" + strconv.Itoa(i)
	}
	return out
}
// newTestIndex builds an in-memory Index with a real tracer and the given
// batch size; other options match typical test-scale defaults.
func newTestIndex(t *testing.T, batchSize int) *Index {
	tracingCfg := tracing.NewEmptyTracingConfig()
	trace, err := tracing.ProvideService(tracingCfg)
	require.NoError(t, err)

	return &Index{
		tracer: trace,
		shards: make(map[string]*Shard),
		log:    log.New("unifiedstorage.search.index"),
		opts: Opts{
			ListLimit: 5000,
			Workers:   10,
			BatchSize: batchSize,
		},
	}
}
// assertCountEquals checks the total number of documents in the index.
func assertCountEquals(t *testing.T, index *Index, expected int) {
	total, err := index.Count()
	require.NoError(t, err)
	assert.Equal(t, expected, total)
}

// assertSearchCountEquals runs a search (limit expected+1, so over-matching
// is detected) and checks the number of returned values.
func assertSearchCountEquals(t *testing.T, index *Index, search string, kind []string, filters []string, expected int64) {
	req := &SearchRequest{Query: search, Tenant: testTenant, Limit: expected + 1, Offset: 0, Kind: kind, Filters: filters}
	start := time.Now()
	results, err := index.Search(testContext, req)
	require.NoError(t, err)
	elapsed := time.Since(start)
	fmt.Println("Search time:", elapsed)
	assert.Equal(t, expected, int64(len(results.Values)))
}

// assertSearchGroupCountEquals runs a group-by search and checks how many
// distinct groups come back.
func assertSearchGroupCountEquals(t *testing.T, index *Index, search string, group string, filters []string, expected int64) {
	groupBy := []*GroupBy{{Name: group, Limit: 100}}
	req := &SearchRequest{Query: search, Tenant: testTenant, Limit: 1, Offset: 0, GroupBy: groupBy, Filters: filters}
	results, err := index.Search(testContext, req)
	require.NoError(t, err)
	assert.Equal(t, expected, int64(len(results.Groups)))
}
// readTestData loads a fixture file from ./testdata, failing the test on
// any read error.
func readTestData(t *testing.T, name string) []byte {
	// We can ignore the gosec G304 because this is only for tests
	// nolint:gosec
	data, err := os.ReadFile("./testdata/" + name)
	require.NoError(t, err)
	return data
}
func simulateFolders(size int) ([]string, []string) {
folders := []string{}
ids := []string{}
for i := 0; i < size; i++ {
id := "folder-" + strconv.Itoa(i)
folder := `{
"kind": "Folder",
"title": "test",
"metadata": {
"uid": "` + id + `",
"name": "folder-` + strconv.Itoa(i) + `",
"namespace": "default"
},
"spec": {
"title": "test",
"description": "test"
}
}`
folders = append(folders, folder)
ids = append(ids, id)
}
return folders, ids
}

@ -5,9 +5,8 @@ import (
)
var (
_ DiagnosticsServer = (*noopService)(nil)
_ ResourceIndexServer = (*noopService)(nil)
_ LifecycleHooks = (*noopService)(nil)
_ DiagnosticsServer = (*noopService)(nil)
_ LifecycleHooks = (*noopService)(nil)
)
// noopService is a helper implementation to simplify tests
@ -34,15 +33,3 @@ func (n *noopService) IsHealthy(context.Context, *HealthCheckRequest) (*HealthCh
func (n *noopService) Read(context.Context, *ReadRequest) (*ReadResponse, error) {
return nil, ErrNotImplementedYet
}
func (n *noopService) Search(context.Context, *SearchRequest) (*SearchResponse, error) {
return nil, ErrNotImplementedYet
}
func (n *noopService) History(context.Context, *HistoryRequest) (*HistoryResponse, error) {
return nil, ErrNotImplementedYet
}
func (n *noopService) Origin(context.Context, *OriginRequest) (*OriginResponse, error) {
return nil, ErrNotImplementedYet
}

File diff suppressed because it is too large Load Diff

@ -323,26 +323,6 @@ message WatchEvent {
Resource previous = 4;
}
// This will soon be deprecated/replaced with ResourceSearchRequest
message SearchRequest {
// query string for chosen implementation (currently just bleve)
string query = 1;
// default to bleve
string queryType = 2;
string tenant = 3;
// resource kind (playlists, dashboards, etc)
repeated string kind = 4;
// pagination support
int64 limit = 5;
int64 offset = 6;
// grouping (optional)
repeated GroupBy groupBy = 8;
// sorting
repeated string sortBy = 9;
// filters
repeated string filters = 10;
}
// Search within a single resource
message ResourceSearchRequest {
message Sort {
@ -417,10 +397,10 @@ message ResourceSearchResponse {
ResourceTable results = 3;
// The total hit count
uint64 total_hits = 4;
int64 total_hits = 4;
// indicates how expensive was the query with respect to bytes read
uint64 query_cost = 5;
double query_cost = 5;
// maximum score across all fields
double max_score = 6;
@ -429,22 +409,6 @@ message ResourceSearchResponse {
map<string,Facet> facet = 7;
}
message GroupBy {
string name = 1;
int64 limit = 2;
}
message Group {
string name = 1;
int64 count = 2;
}
// This will soon be deprecated/replaced with ResourceSearchResponse
message SearchResponse {
repeated ResourceWrapper items = 1;
repeated Group groups = 2;
}
message HistoryRequest {
// Starting from the requested page (other query parameters must match!)
string next_page_token = 1;
@ -749,7 +713,7 @@ service ResourceStore {
// Unlike the ResourceStore, this service can be exposed to clients directly
// It should be implemented with efficient indexes and does not need read-after-write semantics
service ResourceIndex {
rpc Search(SearchRequest) returns (SearchResponse);
rpc Search(ResourceSearchRequest) returns (ResourceSearchResponse);
// Show resource history (and trash)
rpc History(HistoryRequest) returns (HistoryResponse);

@ -360,7 +360,7 @@ const (
// Unlike the ResourceStore, this service can be exposed to clients directly
// It should be implemented with efficient indexes and does not need read-after-write semantics
type ResourceIndexClient interface {
Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error)
Search(ctx context.Context, in *ResourceSearchRequest, opts ...grpc.CallOption) (*ResourceSearchResponse, error)
// Show resource history (and trash)
History(ctx context.Context, in *HistoryRequest, opts ...grpc.CallOption) (*HistoryResponse, error)
// Used for efficient provisioning
@ -375,9 +375,9 @@ func NewResourceIndexClient(cc grpc.ClientConnInterface) ResourceIndexClient {
return &resourceIndexClient{cc}
}
func (c *resourceIndexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) {
func (c *resourceIndexClient) Search(ctx context.Context, in *ResourceSearchRequest, opts ...grpc.CallOption) (*ResourceSearchResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SearchResponse)
out := new(ResourceSearchResponse)
err := c.cc.Invoke(ctx, ResourceIndex_Search_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
@ -412,7 +412,7 @@ func (c *resourceIndexClient) Origin(ctx context.Context, in *OriginRequest, opt
// Unlike the ResourceStore, this service can be exposed to clients directly
// It should be implemented with efficient indexes and does not need read-after-write semantics
type ResourceIndexServer interface {
Search(context.Context, *SearchRequest) (*SearchResponse, error)
Search(context.Context, *ResourceSearchRequest) (*ResourceSearchResponse, error)
// Show resource history (and trash)
History(context.Context, *HistoryRequest) (*HistoryResponse, error)
// Used for efficient provisioning
@ -423,7 +423,7 @@ type ResourceIndexServer interface {
type UnimplementedResourceIndexServer struct {
}
func (UnimplementedResourceIndexServer) Search(context.Context, *SearchRequest) (*SearchResponse, error) {
func (UnimplementedResourceIndexServer) Search(context.Context, *ResourceSearchRequest) (*ResourceSearchResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Search not implemented")
}
func (UnimplementedResourceIndexServer) History(context.Context, *HistoryRequest) (*HistoryResponse, error) {
@ -445,7 +445,7 @@ func RegisterResourceIndexServer(s grpc.ServiceRegistrar, srv ResourceIndexServe
}
func _ResourceIndex_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SearchRequest)
in := new(ResourceSearchRequest)
if err := dec(in); err != nil {
return nil, err
}
@ -457,7 +457,7 @@ func _ResourceIndex_Search_Handler(srv interface{}, ctx context.Context, dec fun
FullMethod: ResourceIndex_Search_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceIndexServer).Search(ctx, req.(*SearchRequest))
return srv.(ResourceIndexServer).Search(ctx, req.(*ResourceSearchRequest))
}
return interceptor(ctx, in, info, handler)
}

@ -78,11 +78,16 @@ type searchSupport struct {
log *slog.Logger
storage StorageBackend
search SearchBackend
access authz.AccessClient
builders *builderCache
initWorkers int
}
func newSearchSupport(opts SearchOptions, storage StorageBackend, blob BlobSupport, tracer trace.Tracer) (support *searchSupport, err error) {
var (
_ ResourceIndexServer = (*searchSupport)(nil)
)
func newSearchSupport(opts SearchOptions, storage StorageBackend, access authz.AccessClient, blob BlobSupport, tracer trace.Tracer) (support *searchSupport, err error) {
// No backend search support
if opts.Backend == nil {
return nil, nil
@ -93,6 +98,7 @@ func newSearchSupport(opts SearchOptions, storage StorageBackend, blob BlobSuppo
}
support = &searchSupport{
access: access,
tracer: tracer,
storage: storage,
search: opts.Backend,
@ -113,6 +119,46 @@ func newSearchSupport(opts SearchOptions, storage StorageBackend, blob BlobSuppo
return support, err
}
// History implements ResourceIndexServer. Not implemented yet — and likely
// belongs on a different server than search.
func (s *searchSupport) History(context.Context, *HistoryRequest) (*HistoryResponse, error) {
	// BUG FIX: error message had a typo ("serarch").
	return nil, fmt.Errorf("not implemented yet... likely should not be the search server")
}
// Origin implements ResourceIndexServer. Not implemented; the concept is
// expected to be renamed to "repository".
func (s *searchSupport) Origin(context.Context, *OriginRequest) (*OriginResponse, error) {
	return nil, fmt.Errorf("TBD.. rename to repository")
}
// Search implements ResourceIndexServer. It resolves (building on demand)
// the index for the requested resource plus any federated group/resource
// pairs in the same namespace, then runs the query across all of them.
// Index-resolution failures are reported in the response Error field, not
// as a transport error.
func (s *searchSupport) Search(ctx context.Context, req *ResourceSearchRequest) (*ResourceSearchResponse, error) {
	nsr := NamespacedResource{
		Group:     req.Options.Key.Group,
		Namespace: req.Options.Key.Namespace,
		Resource:  req.Options.Key.Resource,
	}
	idx, err := s.getOrCreateIndex(ctx, nsr)
	if err != nil {
		return &ResourceSearchResponse{
			Error: AsErrorResult(err),
		}, nil
	}

	// Get the federated indexes (same namespace, different group/resource)
	federate := make([]ResourceIndex, len(req.Federated))
	for i, f := range req.Federated {
		nsr.Group = f.Group
		nsr.Resource = f.Resource
		federate[i], err = s.getOrCreateIndex(ctx, nsr)
		if err != nil {
			return &ResourceSearchResponse{
				Error: AsErrorResult(err),
			}, nil
		}
	}

	return idx.Search(ctx, s.access, req, federate)
}
// init is called during startup. any failure will block startup and continued execution
func (s *searchSupport) init(ctx context.Context) error {
_, span := s.tracer.Start(ctx, tracingPrexfixSearch+"Init")
@ -160,11 +206,78 @@ func (s *searchSupport) init(ctx context.Context) error {
}
span.AddEvent("namespaces indexed", trace.WithAttributes(attribute.Int("namespaced_indexed", totalBatchesIndexed)))
s.log.Debug("TODO, listen to all events")
// Now start listening for new events
watchctx := context.Background() // new context?
events, err := s.storage.WatchWriteEvents(watchctx)
if err != nil {
return err
}
go func() {
for {
v := <-events
s.handleEvent(watchctx, v)
}
}()
return nil
}
// handleEvent applies one written event to the search index asynchronously:
// it resolves the index and document builder for the event's resource,
// builds a search document from the stored value, and writes it. Failures
// are logged and dropped — indexing here is best effort.
func (s *searchSupport) handleEvent(ctx context.Context, evt *WrittenEvent) {
	nsr := NamespacedResource{
		Namespace: evt.Key.Namespace,
		Group:     evt.Key.Group,
		Resource:  evt.Key.Resource,
	}
	index, err := s.getOrCreateIndex(ctx, nsr)
	if err != nil {
		s.log.Warn("error getting index for watch event", "error", err)
		return
	}

	builder, err := s.builders.get(ctx, nsr)
	if err != nil {
		s.log.Warn("error getting builder for watch event", "error", err)
		return
	}

	doc, err := builder.BuildDocument(ctx, evt.Key, evt.ResourceVersion, evt.Value)
	if err != nil {
		s.log.Warn("error building document watch event", "error", err)
		return
	}

	err = index.Write(doc)
	if err != nil {
		s.log.Warn("error writing document watch event", "error", err)
		return
	}
}
// getOrCreateIndex returns the backend index for the given key, building a
// new one on demand when the backend has none yet. NOTE(review): concurrent
// callers for the same key may currently build the index more than once —
// acknowledged by the TODO below.
func (s *searchSupport) getOrCreateIndex(ctx context.Context, key NamespacedResource) (ResourceIndex, error) {
	// TODO???
	// We want to block while building the index and return the same index for the key
	// simple mutex not great... we don't want to block while anything in building, just the same key

	idx, err := s.search.GetIndex(ctx, key)
	if err != nil {
		return nil, err
	}

	if idx == nil {
		idx, _, err = s.build(ctx, key, 10, 0) // unknown size and RV
		if err != nil {
			return nil, err
		}
		if idx == nil {
			return nil, fmt.Errorf("nil index after build")
		}
	}
	return idx, nil
}
func (s *searchSupport) build(ctx context.Context, nsr NamespacedResource, size int64, rv int64) (ResourceIndex, int64, error) {
_, span := s.tracer.Start(ctx, tracingPrexfixSearch+"Build")
defer span.End()

@ -145,9 +145,6 @@ type ResourceServerOptions struct {
// The blob configuration
Blob BlobConfig
// Requests based on a search index
Index ResourceIndexServer
// Search options
Search SearchOptions
@ -229,7 +226,6 @@ func NewResourceServer(opts ResourceServerOptions) (ResourceServer, error) {
tracer: opts.Tracer,
log: logger,
backend: opts.Backend,
index: opts.Index,
blob: blobstore,
diagnostics: opts.Diagnostics,
access: opts.AccessClient,
@ -242,7 +238,7 @@ func NewResourceServer(opts ResourceServerOptions) (ResourceServer, error) {
if opts.Search.Resources != nil {
var err error
s.search, err = newSearchSupport(opts.Search, s.backend, s.blob, opts.Tracer)
s.search, err = newSearchSupport(opts.Search, s.backend, s.access, s.blob, opts.Tracer)
if err != nil {
return nil, err
}
@ -259,7 +255,6 @@ type server struct {
backend StorageBackend
blob BlobSupport
search *searchSupport
index ResourceIndexServer
diagnostics DiagnosticsServer
access authz.AccessClient
writeHooks WriteAccessHooks
@ -923,14 +918,14 @@ func (s *server) Watch(req *WatchRequest, srv ResourceStore_WatchServer) error {
}
}
func (s *server) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) {
func (s *server) Search(ctx context.Context, req *ResourceSearchRequest) (*ResourceSearchResponse, error) {
if err := s.Init(ctx); err != nil {
return nil, err
}
if s.index == nil {
if s.search == nil {
return nil, fmt.Errorf("search index not configured")
}
return s.index.Search(ctx, req)
return s.search.Search(ctx, req)
}
// History implements ResourceServer.
@ -938,7 +933,7 @@ func (s *server) History(ctx context.Context, req *HistoryRequest) (*HistoryResp
if err := s.Init(ctx); err != nil {
return nil, err
}
return s.index.History(ctx, req)
return s.search.History(ctx, req)
}
// Origin implements ResourceServer.
@ -946,33 +941,7 @@ func (s *server) Origin(ctx context.Context, req *OriginRequest) (*OriginRespons
if err := s.Init(ctx); err != nil {
return nil, err
}
return s.index.Origin(ctx, req)
}
// Index returns the search index. If the index is not initialized, it will be initialized.
func (s *server) Index(ctx context.Context) (*Index, error) {
if err := s.Init(ctx); err != nil {
return nil, err
}
index := s.index.(*IndexServer)
if index.index == nil {
err := index.Init(ctx, s)
if err != nil {
return nil, err
}
err = index.Load(ctx)
if err != nil {
return nil, err
}
err = index.Watch(ctx)
if err != nil {
return nil, err
}
}
return index.index, nil
return s.search.Origin(ctx, req)
}
// IsHealthy implements ResourceServer.

@ -24,7 +24,7 @@ const tracingPrexfixBleve = "unified_search.bleve."
var _ resource.SearchBackend = &bleveBackend{}
var _ resource.ResourceIndex = &bleveIndex{}
type bleveOptions struct {
type BleveOptions struct {
// The root folder where file objects are saved
Root string
@ -39,14 +39,14 @@ type bleveOptions struct {
type bleveBackend struct {
tracer trace.Tracer
log *slog.Logger
opts bleveOptions
opts BleveOptions
// cache info
cache map[resource.NamespacedResource]*bleveIndex
cacheMu sync.RWMutex
}
func NewBleveBackend(opts bleveOptions, tracer trace.Tracer, reg prometheus.Registerer) *bleveBackend {
func NewBleveBackend(opts BleveOptions, tracer trace.Tracer, reg prometheus.Registerer) *bleveBackend {
b := &bleveBackend{
log: slog.Default().With("logger", "bleve-backend"),
tracer: tracer,
@ -247,8 +247,8 @@ func (b *bleveIndex) Search(
return nil, err
}
response.TotalHits = res.Total
response.QueryCost = res.Cost
response.TotalHits = int64(res.Total)
response.QueryCost = float64(res.Cost)
response.MaxScore = res.MaxScore
response.Results, err = b.hitsToTable(searchrequest.Fields, res.Hits, req.Explain)
@ -368,7 +368,7 @@ func toBleveSearchRequest(req *resource.ResourceSearchRequest, access authz.Acce
// See: https://github.com/grafana/grafana/blob/v11.3.0/pkg/services/searchV2/bluge.go
// NOTE, we likely want to pass in the already called checker because the resource server
// will first need to check if we can see anything (or everything!) for this resource
fmt.Printf("TODO... check authorization")
fmt.Printf("TODO... check authorization\n")
}
switch len(queries) {

@ -31,7 +31,7 @@ func TestBleveBackend(t *testing.T) {
require.NoError(t, err)
backend := NewBleveBackend(
bleveOptions{
BleveOptions{
Root: tmpdir.Name(),
FileThreshold: 5, // with more than 5 items we create a file on disk
},

@ -3,31 +3,52 @@ package search
import (
"context"
"github.com/grafana/authlib/claims"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/store/kind/dashboard"
"github.com/grafana/grafana/pkg/storage/unified/resource"
)
// The default list of open source document builders
type StandardDocumentBuilders struct{}
type StandardDocumentBuilders struct {
sql db.DB
}
// Hooked up so wire can fill in different sprinkles
func ProvideDocumentBuilders() resource.DocumentBuilderSupplier {
return &StandardDocumentBuilders{}
func ProvideDocumentBuilders(sql db.DB) resource.DocumentBuilderSupplier {
return &StandardDocumentBuilders{sql}
}
func (s *StandardDocumentBuilders) GetDocumentBuilders() ([]resource.DocumentBuilderInfo, error) {
dashboards, err := DashboardBuilder(func(ctx context.Context, namespace string, blob resource.BlobSupport) (resource.DocumentBuilder, error) {
stats := NewDashboardStatsLookup(nil) // empty stats
dsinfo := []*dashboard.DatasourceQueryResult{{}}
ns, err := claims.ParseNamespace(namespace)
if err != nil && s.sql != nil {
rows, err := s.sql.GetSqlxSession().Query(ctx, "SELECT uid,type,name,is_default FROM data_source WHERE org_id=?", ns.OrgID)
if err != nil {
return nil, err
}
for rows.Next() {
info := &dashboard.DatasourceQueryResult{}
err = rows.Scan(&info.UID, &info.Type, &info.Name, &info.IsDefault)
if err != nil {
return nil, err
}
dsinfo = append(dsinfo, info)
}
}
return &DashboardDocumentBuilder{
Namespace: namespace,
Blob: blob,
Stats: NewDashboardStatsLookup(nil), // empty stats
DatasourceLookup: dashboard.CreateDatasourceLookup([]*dashboard.DatasourceQueryResult{{}}),
Stats: stats,
DatasourceLookup: dashboard.CreateDatasourceLookup(dsinfo),
}, nil
})
return []resource.DocumentBuilderInfo{
// The default builder
resource.DocumentBuilderInfo{
{
Builder: resource.StandardDocumentBuilder(),
},
// Dashboard builder

@ -3,18 +3,18 @@ package sql
import (
"context"
"os"
"path/filepath"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/grafana/authlib/claims"
"github.com/grafana/grafana/pkg/apimachinery/identity"
infraDB "github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/authz"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/search"
"github.com/grafana/grafana/pkg/storage/unified/sql/db/dbimpl"
)
@ -54,12 +54,18 @@ func NewResourceServer(ctx context.Context, db infraDB.DB, cfg *setting.Cfg,
opts.Backend = store
opts.Diagnostics = store
opts.Lifecycle = store
opts.Search = resource.SearchOptions{
Resources: docs,
}
// Setup the search server
if features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorageSearch) {
opts.Index = resource.NewResourceIndexServer(cfg, tracer)
opts.Search = resource.SearchOptions{
Backend: search.NewBleveBackend(search.BleveOptions{
Root: filepath.Join(cfg.DataPath, "unified-search", "bleve"),
FileThreshold: 10, // fewer than X items will use a memory index
BatchSize: 500, // This is the batch size for how many objects to add to the index at once
}, tracer, reg),
Resources: docs,
WorkerThreads: 5, // from cfg?
}
}
rs, err := resource.NewResourceServer(opts)
@ -67,29 +73,5 @@ func NewResourceServer(ctx context.Context, db infraDB.DB, cfg *setting.Cfg,
return nil, err
}
// Initialize the indexer if one is configured
if opts.Index != nil {
// TODO: Create a proper identity for the indexer
orgId := int64(1)
ctx = identity.WithRequester(ctx, &identity.StaticRequester{
Type: claims.TypeServiceAccount, // system:apiserver
UserID: 1,
OrgID: int64(1),
Name: "admin",
Login: "admin",
OrgRole: identity.RoleAdmin,
IsGrafanaAdmin: true,
Permissions: map[int64]map[string][]string{
orgId: {
"*": {"*"}, // all resources, all scopes
},
},
})
_, err = rs.(resource.ResourceIndexer).Index(ctx)
if err != nil {
return nil, err
}
}
return rs, nil
}

@ -102,8 +102,8 @@ func (s *service) start(ctx context.Context) error {
}
// TODO, for standalone this will need to be started from enterprise
// Connecting to the correct remote services
docs := search.ProvideDocumentBuilders()
// Connecting to the correct remote services (cloudconfig for DS info and usage stats)
docs := search.ProvideDocumentBuilders(nil)
server, err := NewResourceServer(ctx, s.db, s.cfg, s.features, docs, s.tracing, s.reg, authzClient)
if err != nil {

@ -1,126 +0,0 @@
package test
import (
"encoding/json"
"testing"
"time"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/sql"
"github.com/grafana/grafana/pkg/util/testutil"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
// addResource is a helper to create a resource in unified storage
// addResource is a helper to create a resource in unified storage.
// It parses the raw JSON object to recover its identity (namespace,
// group, name) and writes an ADDED event for it through the backend.
func addResource(t *testing.T, ctx context.Context, backend sql.Backend, resourceName string, data string) {
	// Mark as a helper so require failures are attributed to the caller,
	// not to this function.
	t.Helper()

	// NewIndexedResource extracts the object metadata we need for the key.
	ir, err := resource.NewIndexedResource([]byte(data))
	require.NoError(t, err)

	_, err = backend.WriteEvent(ctx, resource.WriteEvent{
		Type:  resource.WatchEvent_ADDED,
		Value: []byte(data),
		Key: &resource.ResourceKey{
			Namespace: ir.Namespace,
			Group:     ir.Group,
			Resource:  resourceName,
			Name:      ir.Name,
		},
	})
	require.NoError(t, err)
}
// TestIntegrationIndexerSearch verifies end-to-end indexing and search:
// it writes two playlist resources to unified storage, builds the search
// index, and then runs wildcard, field, and time-range queries against it.
func TestIntegrationIndexerSearch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	// Bound the whole test (writes + indexing + searches) to 5 seconds.
	ctx := testutil.NewTestContext(t, time.Now().Add(5*time.Second))
	// Small worker/batch/list limits keep the indexer deterministic and fast
	// for a two-document corpus.
	cfg := setting.NewCfg()
	cfg.IndexWorkers = 1
	cfg.IndexMaxBatchSize = 100
	cfg.IndexListLimit = 100
	backend, server := newServer(t, cfg)
	// Two fixtures in the same tenant with distinct titles and creation
	// timestamps, so the title and CreatedAt queries below each match
	// exactly one of them.
	playlist1 := `{
		"kind": "Playlist",
		"apiVersion": "playlist.grafana.app/v0alpha1",
		"metadata": {
			"name": "playlist dogs",
			"namespace": "tenant1",
			"uid": "1fe028dc-81bb-4268-a3ff-20899ff0a16f",
			"resourceVersion": "1",
			"creationTimestamp": "2024-01-01T12:00:00Z"
		},
		"spec": {
			"interval": "5m",
			"title": "dogs"
		}
	}`
	playlist2 := `{
		"kind": "Playlist",
		"apiVersion": "playlist.grafana.app/v0alpha1",
		"metadata": {
			"name": "playlist cats",
			"namespace": "tenant1",
			"uid": "1fe028dc-81bb-4268-a3ff-20899ff0a16f123",
			"resourceVersion": "2",
			"creationTimestamp": "2024-01-02T12:00:00Z"
		},
		"spec": {
			"interval": "5m",
			"title": "cats"
		}
	}`
	// add playlist1 and playlist2 to unified storage
	addResource(t, ctx, backend, "playlists", playlist1)
	addResource(t, ctx, backend, "playlists", playlist2)
	// initialize and build the search index
	// The server must implement ResourceIndexer for Index() to be callable;
	// Index must run after the writes above so both documents are picked up.
	indexer, ok := server.(resource.ResourceIndexer)
	if !ok {
		t.Fatal("server does not implement ResourceIndexer")
	}
	_, err := indexer.Index(ctx)
	require.NoError(t, err)
	// run search tests against the index
	t.Run("can search for all resources", func(t *testing.T) {
		// Wildcard query scoped to tenant1 should return both playlists.
		res, err := server.Search(ctx, &resource.SearchRequest{
			Tenant: "tenant1",
			Query:  "*",
			Limit:  10,
			Offset: 0,
		})
		require.NoError(t, err)
		require.Len(t, res.Items, 2)
	})
	t.Run("can search for resources by title", func(t *testing.T) {
		// Field query (query-string syntax); only the "dogs" playlist matches.
		res, err := server.Search(ctx, &resource.SearchRequest{
			Tenant: "tenant1",
			Query:  "Spec.title:dogs",
			Limit:  10,
			Offset: 0,
		})
		require.NoError(t, err)
		require.Len(t, res.Items, 1)
	})
	t.Run("can filter resources by created time", func(t *testing.T) {
		// Range query on CreatedAt: only playlist2 (2024-01-02) is >= the bound.
		res, err := server.Search(ctx, &resource.SearchRequest{
			Tenant: "tenant1",
			Query:  "CreatedAt:>=\"2024-01-02\"",
			Limit:  10,
			Offset: 0,
		})
		require.NoError(t, err)
		require.Len(t, res.Items, 1)
		// Decode the stored value back into an IndexedResource to confirm
		// the correct document was returned, not just the correct count.
		ir := resource.IndexedResource{}
		err = json.Unmarshal(res.Items[0].Value, &ir)
		require.NoError(t, err)
		require.Equal(t, "playlist cats", ir.Name)
	})
}

@ -56,7 +56,6 @@ func newServer(t *testing.T, cfg *setting.Cfg) (sql.Backend, resource.ResourceSe
Backend: ret,
Diagnostics: ret,
Lifecycle: ret,
Index: resource.NewResourceIndexServer(cfg, tracing.NewNoopTracerService()),
})
require.NoError(t, err)
require.NotNil(t, server)

@ -7,18 +7,17 @@ import (
"fmt"
"path/filepath"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/datasources"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/searchV2"
"github.com/grafana/grafana/pkg/services/store"
"github.com/grafana/grafana/pkg/services/unifiedSearch"
testdatasource "github.com/grafana/grafana/pkg/tsdb/grafana-testdata-datasource"
)
@ -53,17 +52,16 @@ var (
)
)
func ProvideService(search searchV2.SearchService, searchNext unifiedSearch.SearchService, store store.StorageService, features featuremgmt.FeatureToggles) *Service {
return newService(search, searchNext, store, features)
func ProvideService(search searchV2.SearchService, store store.StorageService, features featuremgmt.FeatureToggles) *Service {
return newService(search, store, features)
}
func newService(search searchV2.SearchService, searchNext unifiedSearch.SearchService, store store.StorageService, features featuremgmt.FeatureToggles) *Service {
func newService(search searchV2.SearchService, store store.StorageService, features featuremgmt.FeatureToggles) *Service {
s := &Service{
search: search,
searchNext: searchNext,
store: store,
log: log.New("grafanads"),
features: features,
search: search,
store: store,
log: log.New("grafanads"),
features: features,
}
return s
@ -71,11 +69,10 @@ func newService(search searchV2.SearchService, searchNext unifiedSearch.SearchSe
// Service exists regardless of user settings
type Service struct {
search searchV2.SearchService
searchNext unifiedSearch.SearchService
store store.StorageService
log log.Logger
features featuremgmt.FeatureToggles
search searchV2.SearchService
store store.StorageService
log log.Logger
features featuremgmt.FeatureToggles
}
func DataSourceModel(orgId int64) *datasources.DataSource {
@ -191,10 +188,6 @@ func (s *Service) doSearchQuery(ctx context.Context, req *backend.QueryDataReque
}
}
if s.features.IsEnabled(ctx, featuremgmt.FlagUnifiedStorageSearch) {
return *s.searchNext.DoQuery(ctx, req.PluginContext.User, req.PluginContext.OrgID, m.SearchNext)
}
searchReadinessCheckResp := s.search.IsReady(ctx, req.PluginContext.OrgID)
if !searchReadinessCheckResp.IsReady {
dashboardSearchNotServedRequestsCounter.With(prometheus.Labels{
@ -214,7 +207,6 @@ func (s *Service) doSearchQuery(ctx context.Context, req *backend.QueryDataReque
}
type requestModel struct {
QueryType string `json:"queryType"`
Search searchV2.DashboardQuery `json:"search,omitempty"`
SearchNext unifiedSearch.Query `json:"searchNext,omitempty"`
QueryType string `json:"queryType"`
Search searchV2.DashboardQuery `json:"search,omitempty"`
}

@ -18,7 +18,7 @@ export function getGrafanaSearcher(): GrafanaSearcher {
return new FrontendSearcher(searcher);
}
const useUnifiedStorageSearch = config.featureToggles.unifiedStorageSearch;
const useUnifiedStorageSearch = false; // TODO, frontend FF config.featureToggles.unifiedStorageSearch;
searcher = useUnifiedStorageSearch ? new UnifiedSearcher(sqlSearcher) : sqlSearcher;
}
return searcher!;

Loading…
Cancel
Save