diff --git a/pkg/tsdb/prometheus/prometheus.go b/pkg/tsdb/prometheus/prometheus.go
index 65737de5170..ff9246fc7f7 100644
--- a/pkg/tsdb/prometheus/prometheus.go
+++ b/pkg/tsdb/prometheus/prometheus.go
@@ -51,7 +51,7 @@ type QueryModel struct {
 	RangeQuery     bool  `json:"range"`
 	InstantQuery   bool  `json:"instant"`
 	IntervalFactor int64 `json:"intervalFactor"`
-	OffsetSec      int64 `json:"offsetSec"`
+	UtcOffsetSec   int64 `json:"utcOffsetSec"`
 }
 
 type Service struct {
@@ -163,23 +163,28 @@ func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest)
 		span.SetTag("stop_unixnano", query.End.UnixNano())
 		defer span.Finish()
 
-		var results model.Value
+		var response model.Value
 
 		switch query.QueryType {
 		case Range:
-			results, _, err = client.QueryRange(ctx, query.Expr, timeRange)
+			response, _, err = client.QueryRange(ctx, query.Expr, timeRange)
+			if err != nil {
+				return &result, fmt.Errorf("query: %s failed with: %v", query.Expr, err)
+			}
+		case Instant:
+			response, _, err = client.Query(ctx, query.Expr, query.End)
 			if err != nil {
 				return &result, fmt.Errorf("query: %s failed with: %v", query.Expr, err)
 			}
-
 		default:
 			return &result, fmt.Errorf("unknown Query type detected %#v", query.QueryType)
 		}
 
-		frame, err := parseResponse(results, query)
+		frame, err := parseResponse(response, query)
 		if err != nil {
 			return &result, err
 		}
+
 		result.Responses[query.RefId] = backend.DataResponse{
 			Frames: frame,
 		}
@@ -228,21 +233,29 @@ func (s *Service) getDSInfo(pluginCtx backend.PluginContext) (*DatasourceInfo, e
 }
 
 func formatLegend(metric model.Metric, query *PrometheusQuery) string {
+	var legend string
+
 	if query.LegendFormat == "" {
-		return metric.String()
+		legend = metric.String()
+	} else {
+		result := legendFormat.ReplaceAllFunc([]byte(query.LegendFormat), func(in []byte) []byte {
+			labelName := strings.Replace(string(in), "{{", "", 1)
+			labelName = strings.Replace(labelName, "}}", "", 1)
+			labelName = strings.TrimSpace(labelName)
+			if val, exists := metric[model.LabelName(labelName)]; exists {
+				return []byte(val)
+			}
+			return []byte{}
+		})
+		legend = string(result)
 	}
 
-	result := legendFormat.ReplaceAllFunc([]byte(query.LegendFormat), func(in []byte) []byte {
-		labelName := strings.Replace(string(in), "{{", "", 1)
-		labelName = strings.Replace(labelName, "}}", "", 1)
-		labelName = strings.TrimSpace(labelName)
-		if val, exists := metric[model.LabelName(labelName)]; exists {
-			return []byte(val)
-		}
-		return []byte{}
-	})
+	// If legend is empty brackets, use query expression
+	if legend == "{}" {
+		legend = query.Expr
+	}
 
-	return string(result)
+	return legend
 }
 
 func (s *Service) parseQuery(queryContext *backend.QueryDataRequest, dsInfo *DatasourceInfo) ([]*PrometheusQuery, error) {
@@ -291,18 +304,31 @@ func (s *Service) parseQuery(queryContext *backend.QueryDataRequest, dsInfo *Dat
 		expr = strings.ReplaceAll(expr, "$__range", strconv.FormatInt(rangeS, 10)+"s")
 		expr = strings.ReplaceAll(expr, "$__rate_interval", intervalv2.FormatDuration(calculateRateInterval(interval, dsInfo.TimeInterval, s.intervalCalculator)))
 
-		queryType := Range
+		if model.RangeQuery && model.InstantQuery {
+			return nil, fmt.Errorf("the provided query is not valid, expected only one of `range` and `instant` to be true")
+		}
 
-		// Align query range to step. It rounds start and end down to a multiple of step.
-		start := int64(math.Floor((float64(query.TimeRange.From.Unix()+model.OffsetSec)/interval.Seconds()))*interval.Seconds() - float64(model.OffsetSec))
-		end := int64(math.Floor((float64(query.TimeRange.To.Unix()+model.OffsetSec)/interval.Seconds()))*interval.Seconds() - float64(model.OffsetSec))
+		var queryType PrometheusQueryType
+		var start time.Time
+		var end time.Time
+
+		if model.InstantQuery {
+			queryType = Instant
+			start = query.TimeRange.From
+			end = query.TimeRange.To
+		} else {
+			queryType = Range
+			// Align query range to step. It rounds start and end down to a multiple of step.
+			start = time.Unix(int64(math.Floor((float64(query.TimeRange.From.Unix()+model.UtcOffsetSec)/interval.Seconds()))*interval.Seconds()-float64(model.UtcOffsetSec)), 0)
+			end = time.Unix(int64(math.Floor((float64(query.TimeRange.To.Unix()+model.UtcOffsetSec)/interval.Seconds()))*interval.Seconds()-float64(model.UtcOffsetSec)), 0)
+		}
 
 		qs = append(qs, &PrometheusQuery{
 			Expr:         expr,
 			Step:         interval,
 			LegendFormat: model.LegendFormat,
-			Start:        time.Unix(start, 0),
-			End:          time.Unix(end, 0),
+			Start:        start,
+			End:          end,
 			RefId:        query.RefID,
 			QueryType:    queryType,
 		})
@@ -315,27 +341,21 @@ func parseResponse(value model.Value, query *PrometheusQuery) (data.Frames, erro
 	frames := data.Frames{}
 
 	matrix, ok := value.(model.Matrix)
-	if !ok {
-		return frames, fmt.Errorf("unsupported result format: %q", value.Type().String())
+	if ok {
+		matrixFrames := matrixToDataFrames(matrix, query)
+		frames = append(frames, matrixFrames...)
 	}
 
-	for _, v := range matrix {
-		name := formatLegend(v.Metric, query)
-		tags := make(map[string]string, len(v.Metric))
-		timeVector := make([]time.Time, 0, len(v.Values))
-		values := make([]float64, 0, len(v.Values))
-
-		for k, v := range v.Metric {
-			tags[string(k)] = string(v)
-		}
+	vector, ok := value.(model.Vector)
+	if ok {
+		vectorFrames := vectorToDataFrames(vector, query)
+		frames = append(frames, vectorFrames...)
+	}
 
-		for _, k := range v.Values {
-			timeVector = append(timeVector, time.Unix(k.Timestamp.Unix(), 0).UTC())
-			values = append(values, float64(k.Value))
-		}
-		frames = append(frames, data.NewFrame(name,
-			data.NewField("time", nil, timeVector),
-			data.NewField("value", tags, values).SetConfig(&data.FieldConfig{DisplayNameFromDS: name})))
+	scalar, ok := value.(*model.Scalar)
+	if ok {
+		scalarFrames := scalarToDataFrames(scalar)
+		frames = append(frames, scalarFrames...)
 	}
 
 	return frames, nil
@@ -370,3 +390,55 @@ func calculateRateInterval(interval time.Duration, scrapeInterval string, interv
 	rateInterval := time.Duration(int(math.Max(float64(interval+scrapeIntervalDuration), float64(4)*float64(scrapeIntervalDuration))))
 	return rateInterval
 }
+
+func matrixToDataFrames(matrix model.Matrix, query *PrometheusQuery) data.Frames {
+	frames := data.Frames{}
+
+	for _, v := range matrix {
+		tags := make(map[string]string, len(v.Metric))
+		timeVector := make([]time.Time, 0, len(v.Values))
+		values := make([]float64, 0, len(v.Values))
+		for k, v := range v.Metric {
+			tags[string(k)] = string(v)
+		}
+		for _, k := range v.Values {
+			timeVector = append(timeVector, time.Unix(k.Timestamp.Unix(), 0).UTC())
+			values = append(values, float64(k.Value))
+		}
+		name := formatLegend(v.Metric, query)
+		frames = append(frames, data.NewFrame(name,
+			data.NewField("Time", nil, timeVector),
+			data.NewField("Value", tags, values).SetConfig(&data.FieldConfig{DisplayNameFromDS: name})))
+	}
+
+	return frames
+}
+
+func scalarToDataFrames(scalar *model.Scalar) data.Frames {
+	timeVector := []time.Time{time.Unix(scalar.Timestamp.Unix(), 0).UTC()}
+	values := []float64{float64(scalar.Value)}
+	name := fmt.Sprintf("%g", values[0])
+	frames := data.Frames{data.NewFrame(name,
+		data.NewField("Time", nil, timeVector),
+		data.NewField("Value", nil, values).SetConfig(&data.FieldConfig{DisplayNameFromDS: name}))}
+
+	return frames
+}
+
+func vectorToDataFrames(vector model.Vector, query *PrometheusQuery) data.Frames {
+	frames := data.Frames{}
+	for _, v := range vector {
+		name := formatLegend(v.Metric, query)
+		tags := make(map[string]string, len(v.Metric))
+		timeVector := []time.Time{time.Unix(v.Timestamp.Unix(), 0).UTC()}
+		values := []float64{float64(v.Value)}
+		for k, v := range v.Metric {
+			tags[string(k)] = string(v)
+		}
+		frames = append(frames, data.NewFrame(name,
+			data.NewField("Time", nil, timeVector),
+			data.NewField("Value", tags, values).SetConfig(&data.FieldConfig{DisplayNameFromDS: name})))
+	}
+
+	return frames
+}
diff --git a/pkg/tsdb/prometheus/prometheus_test.go b/pkg/tsdb/prometheus/prometheus_test.go
index d29a53bf63d..ea5167b7d5c 100644
--- a/pkg/tsdb/prometheus/prometheus_test.go
+++ b/pkg/tsdb/prometheus/prometheus_test.go
@@ -5,7 +5,6 @@ import (
 	"time"
 
 	"github.com/grafana/grafana-plugin-sdk-go/backend"
-	"github.com/grafana/grafana-plugin-sdk-go/data"
 	"github.com/grafana/grafana/pkg/tsdb/intervalv2"
 	p "github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -40,6 +39,17 @@ func TestPrometheus_formatLeged(t *testing.T) {
 
 		require.Equal(t, `http_request_total{app="backend", device="mobile"}`, formatLegend(metric, query))
 	})
+
+	t.Run("use query expr when no labels", func(t *testing.T) {
+		metric := map[p.LabelName]p.LabelValue{}
+
+		query := &PrometheusQuery{
+			LegendFormat: "",
+			Expr:         `{job="grafana"}`,
+		}
+
+		require.Equal(t, `{job="grafana"}`, formatLegend(metric, query))
+	})
 }
 
 func TestPrometheus_parseQuery(t *testing.T) {
@@ -316,30 +326,8 @@
 	})
 }
 
-func queryContext(json string, timeRange backend.TimeRange) *backend.QueryDataRequest {
-	return &backend.QueryDataRequest{
-		Queries: []backend.DataQuery{
-			{
-				JSON:      []byte(json),
-				TimeRange: timeRange,
-				RefID:     "A",
-			},
-		},
-	}
-}
-
-func TestParseResponse(t *testing.T) {
-	t.Run("value is not of type matrix", func(t *testing.T) {
-		//nolint: staticcheck // plugins.DataQueryResult deprecated
-		queryRes := data.Frames{}
-		value := p.Vector{}
-
-		res, err := parseResponse(value, nil)
-
-		require.Equal(t, queryRes, res)
-		require.Error(t, err)
-	})
-
-	t.Run("response should be parsed normally", func(t *testing.T) {
+func TestPrometheus_parseResponse(t *testing.T) {
+	t.Run("matrix response should be parsed normally", func(t *testing.T) {
 		values := []p.SamplePair{
 			{Value: 1, Timestamp: 1000},
 			{Value: 2, Timestamp: 2000},
@@ -363,14 +351,77 @@ func TestParseResponse(t *testing.T) {
 		require.Equal(t, res[0].Name, "legend Application")
 		require.Len(t, res[0].Fields, 2)
 		require.Len(t, res[0].Fields[0].Labels, 0)
-		require.Equal(t, res[0].Fields[0].Name, "time")
+		require.Equal(t, res[0].Fields[0].Name, "Time")
+		require.Len(t, res[0].Fields[1].Labels, 2)
+		require.Equal(t, res[0].Fields[1].Labels.String(), "app=Application, tag2=tag2")
+		require.Equal(t, res[0].Fields[1].Name, "Value")
+		require.Equal(t, res[0].Fields[1].Config.DisplayNameFromDS, "legend Application")
+
+		// Ensure the timestamps are UTC zoned
+		testValue := res[0].Fields[0].At(0)
+		require.Equal(t, "UTC", testValue.(time.Time).Location().String())
+	})
+
+	t.Run("vector response should be parsed normally", func(t *testing.T) {
+		value := p.Vector{
+			&p.Sample{
+				Metric:    p.Metric{"app": "Application", "tag2": "tag2"},
+				Value:     1,
+				Timestamp: 1000,
+			},
+		}
+		query := &PrometheusQuery{
+			LegendFormat: "legend {{app}}",
+		}
+		res, err := parseResponse(value, query)
+		require.NoError(t, err)
+
+		require.Len(t, res, 1)
+		require.Equal(t, res[0].Name, "legend Application")
+		require.Len(t, res[0].Fields, 2)
+		require.Len(t, res[0].Fields[0].Labels, 0)
+		require.Equal(t, res[0].Fields[0].Name, "Time")
 		require.Len(t, res[0].Fields[1].Labels, 2)
 		require.Equal(t, res[0].Fields[1].Labels.String(), "app=Application, tag2=tag2")
-		require.Equal(t, res[0].Fields[1].Name, "value")
+		require.Equal(t, res[0].Fields[1].Name, "Value")
 		require.Equal(t, res[0].Fields[1].Config.DisplayNameFromDS, "legend Application")
 
 		// Ensure the timestamps are UTC zoned
 		testValue := res[0].Fields[0].At(0)
 		require.Equal(t, "UTC", testValue.(time.Time).Location().String())
 	})
+
+	t.Run("scalar response should be parsed normally", func(t *testing.T) {
+		value := &p.Scalar{
+			Value:     1,
+			Timestamp: 1000,
+		}
+		query := &PrometheusQuery{}
+		res, err := parseResponse(value, query)
+		require.NoError(t, err)
+
+		require.Len(t, res, 1)
+		require.Equal(t, res[0].Name, "1")
+		require.Len(t, res[0].Fields, 2)
+		require.Len(t, res[0].Fields[0].Labels, 0)
+		require.Equal(t, res[0].Fields[0].Name, "Time")
+		require.Equal(t, res[0].Fields[1].Name, "Value")
+		require.Equal(t, res[0].Fields[1].Config.DisplayNameFromDS, "1")
+
+		// Ensure the timestamps are UTC zoned
+		testValue := res[0].Fields[0].At(0)
+		require.Equal(t, "UTC", testValue.(time.Time).Location().String())
+	})
+}
+
+func queryContext(json string, timeRange backend.TimeRange) *backend.QueryDataRequest {
+	return &backend.QueryDataRequest{
+		Queries: []backend.DataQuery{
+			{
+				JSON:      []byte(json),
+				TimeRange: timeRange,
+				RefID:     "A",
+			},
+		},
+	}
 }
diff --git a/public/app/plugins/datasource/prometheus/datasource.ts b/public/app/plugins/datasource/prometheus/datasource.ts
index 7af13716379..571d146aadb 100644
--- a/public/app/plugins/datasource/prometheus/datasource.ts
+++ b/public/app/plugins/datasource/prometheus/datasource.ts
@@ -291,13 +291,16 @@ export class PrometheusDatasource extends DataSourceWithBackend<PromQuery, PromOptions> {
   prepareOptionsV2 = (options: DataQueryRequest<PromQuery>) => {
     const targets = options.targets.map((target) => {
-      // We want to format Explore + range queries as time_series
+      // This is currently only preparing options for Explore queries, where we know the format of the data we want to receive
+      if (target.instant) {
+        return { ...target, instant: true, range: false, format: 'table' };
+      }
       return {
         ...target,
         instant: false,
         range: true,
         format: 'time_series',
-        offsetSec: this.timeSrv.timeRange().to.utcOffset() * 60,
+        utcOffsetSec: this.timeSrv.timeRange().to.utcOffset() * 60,
       };
     });
 
@@ -305,12 +308,13 @@ export class PrometheusDatasource extends DataSourceWithBackend<PromQuery, PromOptions> {
   query(options: DataQueryRequest<PromQuery>): Observable<DataQueryResponse> {
-    // WIP - currently we want to run trough backend only if all queries are explore + range queries
+    // WIP - currently we want to run through the backend only if all queries are Explore range/instant queries
     const shouldRunBackendQuery =
       this.access === 'proxy' &&
      options.app === CoreApp.Explore &&
       !options.targets.some((query) => query.exemplar) &&
-      !options.targets.some((query) => query.instant);
+      // When running both queries, run through proxy
+      !options.targets.some((query) => query.instant && query.range);
 
     if (shouldRunBackendQuery) {
       const newOptions = this.prepareOptionsV2(options);
diff --git a/public/app/plugins/datasource/prometheus/result_transformer.ts b/public/app/plugins/datasource/prometheus/result_transformer.ts
index 931c80dc782..b609d1dd147 100644
--- a/public/app/plugins/datasource/prometheus/result_transformer.ts
+++ b/public/app/plugins/datasource/prometheus/result_transformer.ts
@@ -50,8 +50,9 @@ export function transformV2(response: DataQueryResponse, options: DataQueryReque
   );
 
   // For table results, we need to transform data frames to table data frames
+  const responseLength = options.targets.filter((target) => !target.hide).length;
   const tableFrames = tableResults.map((dataFrame) => {
-    const df = transformDFoTable(dataFrame, options.targets.length);
+    const df = transformDFoTable(dataFrame, responseLength);
     return df;
   });
diff --git a/public/app/plugins/datasource/prometheus/types.ts b/public/app/plugins/datasource/prometheus/types.ts
index 9c391287d8d..4126d1e4980 100644
--- a/public/app/plugins/datasource/prometheus/types.ts
+++ b/public/app/plugins/datasource/prometheus/types.ts
@@ -11,7 +11,7 @@ export interface PromQuery extends DataQuery {
   interval?: string;
   intervalFactor?: number;
   // Timezone offset to align start & end time on backend
-  offsetSec?: number;
+  utcOffsetSec?: number;
   legendFormat?: string;
   valueWithRefId?: boolean;
   requestId?: string;
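
A minimal, standalone sketch of the step alignment used in parseQuery above, with a hypothetical helper name and example values (illustration only, not part of the patch): timestamps are shifted by the query's UTC offset, floored down to a multiple of the step, then shifted back, so ranges align to local wall-clock boundaries.

package main

import (
	"fmt"
	"math"
	"time"
)

// alignToStep mirrors the rounding in the Range branch of parseQuery:
// shift by the UTC offset, floor to a multiple of the step, shift back.
// The helper name and the inputs below are illustrative only.
func alignToStep(t time.Time, step time.Duration, utcOffsetSec int64) time.Time {
	aligned := int64(math.Floor(float64(t.Unix()+utcOffsetSec)/step.Seconds())*step.Seconds()) - utcOffsetSec
	return time.Unix(aligned, 0)
}

func main() {
	// With a 1h step and a UTC+5:30 offset (19800s), 10:37 UTC aligns to
	// 10:30 UTC, i.e. the start of the local wall-clock hour.
	from := time.Date(2021, 8, 1, 10, 37, 0, 0, time.UTC)
	fmt.Println(alignToStep(from, time.Hour, 19800).UTC()) // 2021-08-01 10:30:00 +0000 UTC
}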