The open and composable observability and data visualization platform. Visualize metrics, logs, and traces from multiple sources like Prometheus, Loki, Elasticsearch, InfluxDB, Postgres and many more.
grafana/pkg/expr/transform.go

package expr

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/grafana/grafana/pkg/services/datasources"
)

var (
	expressionsQuerySummary *prometheus.SummaryVec
)

func init() {
	expressionsQuerySummary = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "expressions_queries_duration_milliseconds",
			Help:       "Expressions query summary",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"status"},
	)

	prometheus.MustRegister(expressionsQuerySummary)
}
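
// Illustrative note (not part of the original file): because the summary
// above is registered with explicit objectives, the exporter publishes
// precomputed quantile series per "status" label. A PromQL selector such as
//
//	expressions_queries_duration_milliseconds{status="success", quantile="0.9"}
//
// would read the approximate p90 latency, in milliseconds, of successful
// expression queries.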

// Request is similar to plugins.DataQuery, but the time range is per Query.
type Request struct {
	Headers map[string]string
	Debug   bool
	OrgId   int64
	Queries []Query
}

// Query is like plugins.DataSubQuery, but with a time range, and only the UID
// for the data source. Also, Interval is a time.Duration.
type Query struct {
	RefID         string
	TimeRange     TimeRange
	DataSource    *datasources.DataSource `json:"datasource"`
	JSON          json.RawMessage
	Interval      time.Duration
	QueryType     string
	MaxDataPoints int64
}

// TimeRange is a time.Time based TimeRange.
type TimeRange struct {
	From time.Time
	To   time.Time
}
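
// Illustrative sketch (values are assumptions, not taken from real callers):
// a Request with a single query whose per-query options travel in the raw
// JSON payload, covering the last hour at a one-minute interval.
//
//	req := &Request{
//		OrgId: 1,
//		Queries: []Query{{
//			RefID:         "A",
//			TimeRange:     TimeRange{From: time.Now().Add(-time.Hour), To: time.Now()},
//			JSON:          json.RawMessage(`{"hide": false}`),
//			Interval:      time.Minute,
//			MaxDataPoints: 1000,
//		}},
//	}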

// TransformData takes Queries which are either expression nodes
// or datasource requests.
func (s *Service) TransformData(ctx context.Context, req *Request) (r *backend.QueryDataResponse, err error) {
	if s.isDisabled() {
		return nil, fmt.Errorf("server side expressions are disabled")
	}

	start := time.Now()
	defer func() {
		var respStatus string
		switch {
		case err == nil:
			respStatus = "success"
		default:
			respStatus = "failure"
		}
		duration := float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond)
		expressionsQuerySummary.WithLabelValues(respStatus).Observe(duration)
	}()

	// Build the pipeline from the request, checking for ordering issues (e.g. loops)
	// and parsing graph nodes from the queries.
	pipeline, err := s.BuildPipeline(req)
	if err != nil {
		return nil, err
	}

	// Execute the pipeline.
	responses, err := s.ExecutePipeline(ctx, pipeline)
	if err != nil {
		return nil, err
	}

	// Get which queries have the Hide property so those queries' results
	// can be excluded from the response.
	hidden, err := hiddenRefIDs(req.Queries)
	if err != nil {
		return nil, err
	}

	if len(hidden) != 0 {
		filteredRes := backend.NewQueryDataResponse()
		for refID, res := range responses.Responses {
			if _, ok := hidden[refID]; !ok {
				filteredRes.Responses[refID] = res
			}
		}
		responses = filteredRes
	}

	return responses, nil
}

func hiddenRefIDs(queries []Query) (map[string]struct{}, error) {
	hidden := make(map[string]struct{})

	for _, query := range queries {
		hide := struct {
			Hide bool `json:"hide"`
		}{}

		if err := json.Unmarshal(query.JSON, &hide); err != nil {
			return nil, err
		}

		if hide.Hide {
			hidden[query.RefID] = struct{}{}
		}
	}
	return hidden, nil
}
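
// Illustrative note (not part of the original file): hiddenRefIDs unmarshals
// each query's raw JSON looking for a boolean "hide" field, so an (assumed)
// payload such as {"hide": true} in Query.JSON puts that query's RefID into
// the returned set, and TransformData then drops its results from the
// response.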

// decryptSecureJsonDataFn returns a function that decrypts a data source's
// secure JSON data, logging any decryption error instead of returning it.
func (s *Service) decryptSecureJsonDataFn(ctx context.Context) func(ds *datasources.DataSource) map[string]string {
	return func(ds *datasources.DataSource) map[string]string {
		decryptedJsonData, err := s.dataSourceService.DecryptedValues(ctx, ds)
		if err != nil {
			logger.Error("Failed to decrypt secure json data", "error", err)
		}
		return decryptedJsonData
	}
}
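
A minimal usage sketch, assuming an already wired-up *expr.Service (how the Service and its data source service are constructed is outside this file). The package name, the runExpressions helper, and the query values below are placeholders for illustration, not part of Grafana's API.

package example

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/grafana/grafana/pkg/expr"
)

// runExpressions sends one hypothetical query through the expression
// pipeline and prints the per-refID results.
func runExpressions(ctx context.Context, svc *expr.Service) error {
	req := &expr.Request{
		OrgId: 1,
		Queries: []expr.Query{{
			RefID:         "A",
			TimeRange:     expr.TimeRange{From: time.Now().Add(-time.Hour), To: time.Now()},
			JSON:          json.RawMessage(`{"hide": false}`),
			Interval:      time.Minute,
			MaxDataPoints: 1000,
		}},
	}

	resp, err := svc.TransformData(ctx, req)
	if err != nil {
		return err
	}
	for refID, res := range resp.Responses {
		fmt.Println(refID, len(res.Frames), res.Error)
	}
	return nil
}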