SSE/Alerting: First pass at query/condition translation (#31693)

- Takes the conditions property from the settings column of an alert in the alerts table and turns it into an ngalert condition made up of the queries plus a classic condition expression.
- Adds a temporary REST API endpoint that takes the dashboard conditions JSON, translates it to SSE queries + classic condition, and executes it (only enabled in dev mode); see the usage sketch after this list.
- Changes expressions to catch query responses with a non-nil error property.
- Adds two new states for an NG instance result (NoData, Error) and updates evaluation to match those states.
- Changes AsDataFrame (for the frontend) from bool to string to represent the additional states.
- Fixes a bug in the condition model so that the first condition's Operator may be an empty string.
- In ngalert, adds GetQueryDataRequest, which was part of execute and is still called from there; this allows the expression request to be derived from a condition so that the pipeline can be built.
- Updates AsDataFrame for eval results to be row based so it displays a little better for now.
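
For orientation, here is a minimal sketch of driving the new translation entry point directly (mirroring what the temporary endpoint and translate_test.go do). The DashboardAlertConditions signature and Condition fields come from the diff below; the input file name and org ID are placeholders, and a bus handler for the datasource lookup is assumed:

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/grafana/grafana/pkg/expr/translate"
    )

    func main() {
        // rawJSON is the "conditions" array as stored in the alert's settings column,
        // i.e. what the dashboard alerting extractor produced (see the testdata files
        // in this change). The file name is a placeholder.
        rawJSON, err := ioutil.ReadFile("conditions.json")
        if err != nil {
            panic(err)
        }

        // Translate to an ngalert Condition: one datasource query per unique
        // (RefID, time range) pair plus a classic_conditions expression that
        // references the new RefIDs. DashboardAlertConditions dispatches a
        // GetDataSourceQuery on the bus, so a running Grafana (or a registered
        // test handler, as in translate_test.go) is assumed.
        cond, err := translate.DashboardAlertConditions(rawJSON, 1 /* orgID */)
        if err != nil {
            panic(err)
        }

        fmt.Println("condition refId:", cond.RefID, "queries+expressions:", len(cond.QueriesAndExpressions))
    }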
Kyle Brandt (committed by GitHub)
parent 24cb059a6b
commit 7bb79158ed
Files changed:
  1. pkg/expr/classic/classic.go (13 lines changed)
  2. pkg/expr/classic/evaluator.go (6 lines changed)
  3. pkg/expr/nodes.go (4 lines changed)
  4. pkg/expr/translate/testdata/mixedSharedUnsharedTimeRange.json (354 lines changed)
  5. pkg/expr/translate/testdata/sameQueryDifferentTimeRange.json (63 lines changed)
  6. pkg/expr/translate/translate.go (345 lines changed)
  7. pkg/expr/translate/translate_test.go (146 lines changed)
  8. pkg/services/ngalert/api/api.go (107 lines changed)
  9. pkg/services/ngalert/eval/eval.go (81 lines changed)

@@ -15,10 +15,10 @@ type ConditionsCmd struct {
refID string
}
// classicConditionJSON is the JSON model for a single condition.
// ClassicConditionJSON is the JSON model for a single condition.
// It is based on services/alerting/conditions/query.go's newQueryCondition().
type classicConditionJSON struct {
Evaluator conditionEvalJSON `json:"evaluator"`
type ClassicConditionJSON struct {
Evaluator ConditionEvalJSON `json:"evaluator"`
Operator struct {
Type string `json:"type"`
@@ -34,10 +34,9 @@ type classicConditionJSON struct {
} `json:"reducer"`
}
type conditionEvalJSON struct {
type ConditionEvalJSON struct {
Params []float64 `json:"params"`
Type string `json:"type"` // e.g. "gt"
}
// condition is a single condition within the ConditionsCmd.
@@ -120,7 +119,7 @@ func UnmarshalConditionsCmd(rawQuery map[string]interface{}, refID string) (*Con
if err != nil {
return nil, fmt.Errorf("failed to remarshal classic condition body: %w", err)
}
var ccj []classicConditionJSON
var ccj []ClassicConditionJSON
if err = json.Unmarshal(jsonFromM, &ccj); err != nil {
return nil, fmt.Errorf("failed to unmarshal remarshaled classic condition body: %w", err)
}
@@ -132,7 +131,7 @@ func UnmarshalConditionsCmd(rawQuery map[string]interface{}, refID string) (*Con
for i, cj := range ccj {
cond := condition{}
if cj.Operator.Type != "and" && cj.Operator.Type != "or" {
if i > 0 && cj.Operator.Type != "and" && cj.Operator.Type != "or" {
return nil, fmt.Errorf("classic condition %v operator must be `and` or `or`", i+1)
}
cond.Operator = cj.Operator.Type

@@ -25,7 +25,7 @@ type rangedEvaluator struct {
// newAlertEvaluator is a factory function for returning
// an AlertEvaluator depending on evaluation operator.
func newAlertEvaluator(model conditionEvalJSON) (evaluator, error) {
func newAlertEvaluator(model ConditionEvalJSON) (evaluator, error) {
switch model.Type {
case "gt", "lt":
return newThresholdEvaluator(model)
@@ -54,7 +54,7 @@ func (e *thresholdEvaluator) Eval(reducedValue mathexp.Number) bool {
return false
}
func newThresholdEvaluator(model conditionEvalJSON) (*thresholdEvaluator, error) {
func newThresholdEvaluator(model ConditionEvalJSON) (*thresholdEvaluator, error) {
if len(model.Params) == 0 {
return nil, fmt.Errorf("evaluator '%v' is missing the threshold parameter", model.Type)
}
@@ -69,7 +69,7 @@ func (e *noValueEvaluator) Eval(reducedValue mathexp.Number) bool {
return reducedValue.GetFloat64Value() == nil
}
func newRangedEvaluator(model conditionEvalJSON) (*rangedEvaluator, error) {
func newRangedEvaluator(model ConditionEvalJSON) (*rangedEvaluator, error) {
if len(model.Params) != 2 {
return nil, fmt.Errorf("ranged evaluator requires 2 parameters")
}

@@ -234,6 +234,10 @@ func (dn *DSNode) Execute(ctx context.Context, vars mathexp.Vars, s *Service) (m
vals := make([]mathexp.Value, 0)
for refID, qr := range resp.Responses {
if qr.Error != nil {
return mathexp.Results{}, fmt.Errorf("failed to execute query %v: %w", refID, qr.Error)
}
if len(qr.Frames) == 1 {
frame := qr.Frames[0]
if frame.TimeSeriesSchema().Type == data.TimeSeriesTypeNot && isNumberTable(frame) {

@@ -0,0 +1,354 @@
{
"conditions": [
{
"evaluator": {
"params": [
3
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"hide": false,
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "B",
"scenarioId": "predictable_pulse",
"stringInput": ""
},
"params": [
"B",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "avg"
},
"type": "query"
},
{
"evaluator": {
"params": [
2,
5
],
"type": "within_range"
},
"operator": {
"type": "and"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"hide": false,
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "B",
"scenarioId": "predictable_pulse",
"stringInput": ""
},
"params": [
"B",
"10m",
"now-5m"
]
},
"reducer": {
"params": [],
"type": "max"
},
"type": "query"
},
{
"evaluator": {
"params": [
6
],
"type": "gt"
},
"operator": {
"type": "or"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "A",
"scenarioId": "predictable_csv_wave",
"stringInput": ""
},
"params": [
"A",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "sum"
},
"type": "query"
},
{
"evaluator": {
"params": [
7
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "A",
"scenarioId": "predictable_csv_wave",
"stringInput": ""
},
"params": [
"A",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
},
{
"evaluator": {
"params": [],
"type": "no_value"
},
"operator": {
"type": "and"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"hide": false,
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "C",
"scenarioId": "no_data_points",
"stringInput": ""
},
"params": [
"C",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "diff"
},
"type": "query"
},
{
"evaluator": {
"params": [
9
],
"type": "gt"
},
"operator": {
"type": "or"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 30,
"valuesCSV": "1,1,6,6,3,3"
},
"hide": false,
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "D",
"scenarioId": "predictable_csv_wave",
"stringInput": ""
},
"params": [
"D",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "diff_abs"
},
"type": "query"
},
{
"evaluator": {
"params": [
10
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 30,
"valuesCSV": "1,1,6,6,3,3"
},
"hide": false,
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "D",
"scenarioId": "predictable_csv_wave",
"stringInput": ""
},
"params": [
"D",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "percent_diff"
},
"type": "query"
},
{
"evaluator": {
"params": [
11
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"datasourceId": 4,
"model": {
"alias": "",
"csvWave": {
"timeStep": 30,
"valuesCSV": "1,1,6,6,3,3"
},
"hide": false,
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"refId": "D",
"scenarioId": "predictable_csv_wave",
"stringInput": ""
},
"params": [
"D",
"10m",
"now"
]
},
"reducer": {
"params": [],
"type": "percent_diff_abs"
},
"type": "query"
}
]
}

@@ -0,0 +1,63 @@
{
"conditions": [
{
"evaluator": {
"params": [
0
],
"type": "lt"
},
"operator": {
"type": ""
},
"query": {
"datasourceId": 2,
"model": {
"expr": "avg_over_time(sum by (instance) (up)[1h:5m])",
"interval": "",
"legendFormat": "",
"refId": "A"
},
"params": [
"A",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "avg"
},
"type": "query"
},
{
"evaluator": {
"params": [
0
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"datasourceId": 2,
"model": {
"expr": "avg_over_time(sum by (instance) (up)[1h:5m])",
"interval": "",
"legendFormat": "",
"refId": "A"
},
"params": [
"A",
"10m",
"now-5m"
]
},
"reducer": {
"params": [],
"type": "avg"
},
"type": "query"
}
]}

@@ -0,0 +1,345 @@
package translate
import (
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/expr"
"github.com/grafana/grafana/pkg/expr/classic"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/ngalert/eval"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
)
// DashboardAlertConditions turns dashboard alerting conditions into server side expression queries and a
// classic conditions operation. A Condition from the ngalert models package is returned if the
// translation succeeds in creating an expression that can be parsed.
// A query is created for each unique referenced query in the dashboard. A query is considered unique
// based on its RefID and time range. Therefore, if the same RefID has multiple time ranges in the dashboard
// condition, new RefIDs are created.
func DashboardAlertConditions(rawDCondJSON []byte, orgID int64) (*ngmodels.Condition, error) {
oldCond := dashConditionsJSON{}
err := json.Unmarshal(rawDCondJSON, &oldCond)
if err != nil {
return nil, err
}
ngCond, err := oldCond.GetNew(orgID)
if err != nil {
return nil, err
}
backendReq, err := eval.GetQueryDataRequest(eval.AlertExecCtx{ExpressionsEnabled: true}, ngCond, time.Unix(500, 0))
if err != nil {
return nil, err
}
svc := &expr.Service{}
_, err = svc.BuildPipeline(backendReq)
if err != nil {
return nil, err
}
return ngCond, nil
}
type dashConditionsJSON struct {
Conditions []dashAlertingConditionJSON `json:"conditions"`
}
// dashAlertingConditionJSON is like classic.ClassicConditionJSON except that it
// includes the model property with the query.
type dashAlertingConditionJSON struct {
Evaluator conditionEvalJSON `json:"evaluator"`
Operator struct {
Type string `json:"type"`
} `json:"operator"`
Query struct {
Params []string
DatasourceID int64 `json:""`
Model json.RawMessage
} `json:"query"`
Reducer struct {
// Params []interface{} `json:"params"` (Unused)
Type string `json:"type"`
}
}
type conditionEvalJSON struct {
Params []float64 `json:"params"`
Type string `json:"type"` // e.g. "gt"
}
func (dc *dashConditionsJSON) GetNew(orgID int64) (*ngmodels.Condition, error) {
refIDtoCondIdx := make(map[string][]int) // a map of original refIds to their corresponding condition index
for i, cond := range dc.Conditions {
if len(cond.Query.Params) != 3 {
return nil, fmt.Errorf("unexpected number of query parameters in cond %v, want 3 got %v", i+1, len(cond.Query.Params))
}
refID := cond.Query.Params[0]
refIDtoCondIdx[refID] = append(refIDtoCondIdx[refID], i)
}
newRefIDstoCondIdx := make(map[string][]int) // a map of the new refIds to their corresponding condition index
refIDs := make([]string, 0, len(refIDtoCondIdx)) // a unique sorted list of the original refIDs
for refID := range refIDtoCondIdx {
refIDs = append(refIDs, refID)
}
sort.Strings(refIDs)
newRefIDsToTimeRanges := make(map[string][2]string) // a map of new RefIDs to their time range string tuple representation
for _, refID := range refIDs {
condIdxes := refIDtoCondIdx[refID]
if len(condIdxes) == 1 {
// If the refID is used in only one condition, keep the letter as the new refID
newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[0])
newRefIDsToTimeRanges[refID] = [2]string{dc.Conditions[condIdxes[0]].Query.Params[1], dc.Conditions[condIdxes[0]].Query.Params[2]}
continue
}
// track unique time ranges within the same refID
timeRangesToCondIdx := make(map[[2]string][]int) // a map of the time range tuple to the condition index
for _, idx := range condIdxes {
timeParamFrom := dc.Conditions[idx].Query.Params[1]
timeParamTo := dc.Conditions[idx].Query.Params[2]
key := [2]string{timeParamFrom, timeParamTo}
timeRangesToCondIdx[key] = append(timeRangesToCondIdx[key], idx)
}
if len(timeRangesToCondIdx) == 1 {
// if all conditions share the same time range, there is no need to create a new query with a new RefID
for i := range condIdxes {
newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[i])
newRefIDsToTimeRanges[refID] = [2]string{dc.Conditions[condIdxes[i]].Query.Params[1], dc.Conditions[condIdxes[i]].Query.Params[2]}
}
continue
}
// This referenced query/refID has different time ranges, so new queries are needed for each unique time range.
timeRanges := make([][2]string, 0, len(timeRangesToCondIdx)) // a sorted list of unique time ranges for the query
for tr := range timeRangesToCondIdx {
timeRanges = append(timeRanges, tr)
}
sort.Slice(timeRanges, func(i, j int) bool {
switch {
case timeRanges[i][0] < timeRanges[j][0]:
return true
case timeRanges[i][0] > timeRanges[j][0]:
return false
default:
return timeRanges[i][1] < timeRanges[j][1]
}
})
for _, tr := range timeRanges {
idxes := timeRangesToCondIdx[tr]
for i := 0; i < len(idxes); i++ {
newLetter, err := getNewRefID(newRefIDstoCondIdx)
if err != nil {
return nil, err
}
newRefIDstoCondIdx[newLetter] = append(newRefIDstoCondIdx[newLetter], idxes[i])
newRefIDsToTimeRanges[newLetter] = [2]string{dc.Conditions[idxes[i]].Query.Params[1], dc.Conditions[idxes[i]].Query.Params[2]}
}
}
}
newRefIDs := make([]string, 0, len(newRefIDstoCondIdx)) // newRefIds is a sorted list of the unique refIds of new queries
for refID := range newRefIDstoCondIdx {
newRefIDs = append(newRefIDs, refID)
}
sort.Strings(newRefIDs)
ngCond := &ngmodels.Condition{}
condIdxToNewRefID := make(map[int]string) // a map of condition indices to the RefIDs of new queries
// build the new data source queries
for _, refID := range newRefIDs {
condIdxes := newRefIDstoCondIdx[refID]
for i, condIdx := range condIdxes {
condIdxToNewRefID[condIdx] = refID
if i > 0 {
// only create each unique query once
continue
}
var queryObj map[string]interface{} // copy the model
err := json.Unmarshal(dc.Conditions[condIdx].Query.Model, &queryObj)
if err != nil {
return nil, err
}
getDsInfo := &models.GetDataSourceQuery{
OrgId: orgID,
Id: dc.Conditions[condIdx].Query.DatasourceID,
}
if err := bus.Dispatch(getDsInfo); err != nil {
return nil, fmt.Errorf("could not find datasource: %w", err)
}
queryObj["datasource"] = getDsInfo.Result.Name
queryObj["datasourceUid"] = getDsInfo.Result.Uid
queryObj["refId"] = refID
encodedObj, err := json.Marshal(queryObj)
if err != nil {
return nil, err
}
rawFrom := newRefIDsToTimeRanges[refID][0]
rawTo := newRefIDsToTimeRanges[refID][1]
rTR, err := getRelativeDuration(rawFrom, rawTo)
if err != nil {
return nil, err
}
alertQuery := ngmodels.AlertQuery{
RefID: refID,
Model: encodedObj,
RelativeTimeRange: *rTR,
DatasourceUID: getDsInfo.Uid,
}
ngCond.QueriesAndExpressions = append(ngCond.QueriesAndExpressions, alertQuery)
}
}
// build the new classic condition pointing to our new equivalent queries
conditions := make([]classic.ClassicConditionJSON, len(dc.Conditions))
for i, cond := range dc.Conditions {
newCond := classic.ClassicConditionJSON{}
newCond.Evaluator = classic.ConditionEvalJSON{
Type: cond.Evaluator.Type,
Params: cond.Evaluator.Params,
}
newCond.Operator.Type = cond.Operator.Type
newCond.Query.Params = append(newCond.Query.Params, condIdxToNewRefID[i])
newCond.Reducer.Type = cond.Reducer.Type
conditions[i] = newCond
}
ccRefID, err := getNewRefID(newRefIDstoCondIdx) // get refID for the classic condition
if err != nil {
return nil, err
}
ngCond.RefID = ccRefID // set the alert condition to point to the classic condition
ngCond.OrgID = orgID
exprModel := struct {
Type string `json:"type"`
RefID string `json:"refId"`
Datasource string `json:"datasource"`
Conditions []classic.ClassicConditionJSON `json:"conditions"`
}{
"classic_conditions",
ccRefID,
"__expr__",
conditions,
}
exprModelJSON, err := json.Marshal(&exprModel)
if err != nil {
return nil, err
}
ccAlertQuery := ngmodels.AlertQuery{
RefID: ccRefID,
Model: exprModelJSON,
}
ngCond.QueriesAndExpressions = append(ngCond.QueriesAndExpressions, ccAlertQuery)
for i := range ngCond.QueriesAndExpressions {
err := ngCond.QueriesAndExpressions[i].PreSave() // Set query model properties
if err != nil {
return nil, err
}
}
sort.Slice(ngCond.QueriesAndExpressions, func(i, j int) bool {
return ngCond.QueriesAndExpressions[i].RefID < ngCond.QueriesAndExpressions[j].RefID
})
return ngCond, nil
}
const alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
// getNewRefID finds the first capital letter in the alphabet that is not in use
// and returns it as a new RefID. It errors if it runs out of letters.
//
// TODO: Research if there is a limit. If so, enforce it by the
// number of queries, not letters. If there is no limit, generate more types
// of refIDs.
func getNewRefID(refIDs map[string][]int) (string, error) {
for _, r := range alpha {
sR := string(r)
if _, ok := refIDs[sR]; ok {
continue
}
return sR, nil
}
return "", fmt.Errorf("ran out of letters when creating expression")
}
// getRelativeDuration turns the alerting durations for dashboard conditions
// into a relative time range.
func getRelativeDuration(rawFrom, rawTo string) (*ngmodels.RelativeTimeRange, error) {
fromD, err := getFrom(rawFrom)
if err != nil {
return nil, err
}
toD, err := getTo(rawTo)
if err != nil {
return nil, err
}
return &ngmodels.RelativeTimeRange{
From: ngmodels.Duration(fromD),
To: ngmodels.Duration(toD),
}, nil
}
func getFrom(from string) (time.Duration, error) {
fromRaw := strings.Replace(from, "now-", "", 1)
d, err := time.ParseDuration("-" + fromRaw)
if err != nil {
return 0, err
}
return -d, err
}
func getTo(to string) (time.Duration, error) {
if to == "now" {
return 0, nil
} else if strings.HasPrefix(to, "now-") {
withoutNow := strings.Replace(to, "now-", "", 1)
d, err := time.ParseDuration("-" + withoutNow)
if err != nil {
return 0, err
}
return -d, nil
}
d, err := time.ParseDuration(to)
if err != nil {
return 0, err
}
return -d, nil
}
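
To make the time-range translation concrete, here is a small standalone sketch that mirrors the getFrom/getTo behaviour above for the parameter forms that appear in the testdata; it is an illustration only, not the package code:

    package main

    import (
        "fmt"
        "strings"
        "time"
    )

    // relRange mirrors ngmodels.RelativeTimeRange: positive durations measured back from "now".
    type relRange struct {
        From time.Duration
        To   time.Duration
    }

    // toRelRange mirrors getFrom/getTo for the forms in the testdata:
    //   "5m"     -> 5 minutes back from now
    //   "now-5m" -> 5 minutes back from now
    //   "now"    -> now (zero duration)
    func toRelRange(rawFrom, rawTo string) (relRange, error) {
        from, err := time.ParseDuration(strings.TrimPrefix(rawFrom, "now-"))
        if err != nil {
            return relRange{}, err
        }
        var to time.Duration
        if rawTo != "now" {
            to, err = time.ParseDuration(strings.TrimPrefix(rawTo, "now-"))
            if err != nil {
                return relRange{}, err
            }
        }
        return relRange{From: from, To: to}, nil
    }

    func main() {
        // ("10m", "now-5m") is the range of the second condition in
        // sameQueryDifferentTimeRange.json: from 10 minutes back until 5 minutes
        // back, i.e. the 600s/300s pair asserted in translate_test.go.
        r, err := toRelRange("10m", "now-5m")
        if err != nil {
            panic(err)
        }
        fmt.Println(r.From, r.To) // 10m0s 5m0s
    }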

@@ -0,0 +1,146 @@
package translate
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"testing"
"time"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/expr/classic"
"github.com/grafana/grafana/pkg/models"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/stretchr/testify/require"
)
func TestDashboardAlertConditions(t *testing.T) {
registerGetDsInfoHandler()
var tests = []struct {
name string
// inputJSONFName, at least for now, is the "conditions" JSON as it will appear within the
// alert table's settings column, which means it has already run through the dashboard
// alerting extractor. It is the input.
inputJSONFName string
// Condition is quite large (and contains unexported things), so spot check misc attributes.
spotCheckFn func(t *testing.T, cond *ngmodels.Condition)
}{
{
name: "two conditions one query but different time ranges",
inputJSONFName: `sameQueryDifferentTimeRange.json`,
spotCheckFn: func(t *testing.T, cond *ngmodels.Condition) {
require.Equal(t, "C", cond.RefID, "unexpected refId for condition")
require.Equal(t, 3, len(cond.QueriesAndExpressions), "unexpected query/expression array length")
firstQuery := cond.QueriesAndExpressions[0]
require.Equal(t, "A", firstQuery.RefID, "unexpected refId for first query")
require.Equal(t, ngmodels.RelativeTimeRange{
From: ngmodels.Duration(time.Second * 600),
To: ngmodels.Duration(time.Second * 300),
}, firstQuery.RelativeTimeRange, "unexpected timerange for first query")
secondQuery := cond.QueriesAndExpressions[1]
require.Equal(t, "B", secondQuery.RefID, "unexpected refId for second query")
require.Equal(t, ngmodels.RelativeTimeRange{
From: ngmodels.Duration(time.Second * 300),
To: ngmodels.Duration(0),
}, secondQuery.RelativeTimeRange, "unexpected timerange for second query")
condQuery := cond.QueriesAndExpressions[2]
require.Equal(t, "C", condQuery.RefID, "unexpected refId for second query")
isExpr, err := condQuery.IsExpression()
require.NoError(t, err)
require.Equal(t, true, isExpr, "third query should be an expression")
c := struct {
Conditions []classic.ClassicConditionJSON `json:"conditions"`
}{}
err = json.Unmarshal(condQuery.Model, &c)
require.NoError(t, err)
require.Equal(t, 2, len(c.Conditions), "expected 2 conditions in classic condition")
// This is "correct" in that the condition gets the correct time range,
// but a bit odd that it creates B then A, can look into changing that
// later.
firstCond := c.Conditions[0]
require.Equal(t, "lt", firstCond.Evaluator.Type, "expected first cond to use lt")
require.Equal(t, "B", firstCond.Query.Params[0], "expected first cond to reference B")
secondCond := c.Conditions[1]
require.Equal(t, "gt", secondCond.Evaluator.Type, "expected second cond to use gt")
require.Equal(t, "A", secondCond.Query.Params[0], "expected second cond to reference A")
},
},
{
name: "mixed shared and unshared time ranges",
inputJSONFName: `mixedSharedUnsharedTimeRange.json`,
spotCheckFn: func(t *testing.T, cond *ngmodels.Condition) {
require.Equal(t, "G", cond.RefID, "unexpected refId for condition")
require.Equal(t, 7, len(cond.QueriesAndExpressions), "unexpected query/expression array length")
condQuery := cond.QueriesAndExpressions[6]
isExpr, err := condQuery.IsExpression()
require.NoError(t, err)
require.Equal(t, true, isExpr, "expected last query to be an expression")
c := struct {
Conditions []classic.ClassicConditionJSON `json:"conditions"`
}{}
err = json.Unmarshal(condQuery.Model, &c)
require.NoError(t, err)
require.Equal(t, 8, len(c.Conditions), "expected 8 conditions in classic condition")
firstCond := c.Conditions[0]
require.Equal(t, "gt", firstCond.Evaluator.Type, "expected first cond to use gt")
require.Equal(t, "avg", firstCond.Reducer.Type, "expected first cond to use reducer avg")
firstCondRefID := firstCond.Query.Params[0]
aq, err := alertRuleByRefId(cond, firstCondRefID)
require.NoError(t, err)
require.Equal(t, ngmodels.Duration(300*time.Second), aq.RelativeTimeRange.From,
"expected first condition to reference a query with a from of 300 seconds")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
jsonFile := filepath.Join("testdata", tt.inputJSONFName)
//nolint:GOSEC
b, err := ioutil.ReadFile(jsonFile)
require.NoError(t, err)
cond, err := DashboardAlertConditions(b, 1)
require.NoError(t, err)
tt.spotCheckFn(t, cond)
})
}
}
func alertRuleByRefId(cond *ngmodels.Condition, refID string) (ngmodels.AlertQuery, error) {
for _, aq := range cond.QueriesAndExpressions {
if aq.RefID == refID {
return aq, nil
}
}
return ngmodels.AlertQuery{}, fmt.Errorf("query with refId %v not found", refID)
}
func registerGetDsInfoHandler() {
bus.AddHandler("test", func(query *models.GetDataSourceQuery) error {
switch {
case query.Id == 2:
query.Result = &models.DataSource{Id: 2, OrgId: 1, Uid: "000000002"}
case query.Id == 4:
query.Result = &models.DataSource{Id: 4, OrgId: 1, Uid: "000000004"}
default:
return fmt.Errorf("datasource not found")
}
return nil
})
}

@@ -16,6 +16,8 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/api/response"
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/expr/translate"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
@@ -73,6 +75,111 @@ func (api *API) RegisterAPIEndpoints() {
api.RouteRegister.Group("/api/alert-instances", func(alertInstances routing.RouteRegister) {
alertInstances.Get("", middleware.ReqSignedIn, routing.Wrap(api.listAlertInstancesEndpoint))
})
if api.Cfg.Env == setting.Dev {
api.RouteRegister.Group("/api/alert-definitions", func(alertDefinitions routing.RouteRegister) {
alertDefinitions.Post("/evalOld", middleware.ReqSignedIn, routing.Wrap(api.conditionEvalOldEndpoint))
})
api.RouteRegister.Group("/api/alert-definitions", func(alertDefinitions routing.RouteRegister) {
alertDefinitions.Get("/evalOldByID/:id", middleware.ReqSignedIn, routing.Wrap(api.conditionEvalOldEndpointByID))
})
}
}
// conditionEvalOldEndpoint handles POST /api/alert-definitions/evalOld.
func (api *API) conditionEvalOldEndpoint(c *models.ReqContext) response.Response {
b, err := c.Req.Body().Bytes()
if err != nil {
return response.Error(400, "failed to read body", err)
}
evalCond, err := translate.DashboardAlertConditions(b, c.OrgId)
if err != nil {
return response.Error(400, "Failed to translate alert conditions", err)
}
if err := api.validateCondition(*evalCond, c.SignedInUser, c.SkipCache); err != nil {
return response.Error(400, "invalid condition", err)
}
//now := cmd.Now
//if now.IsZero() {
//now := timeNow()
//}
evaluator := eval.Evaluator{Cfg: api.Cfg}
evalResults, err := evaluator.ConditionEval(evalCond, timeNow(), api.DataService)
if err != nil {
return response.Error(400, "Failed to evaluate conditions", err)
}
frame := evalResults.AsDataFrame()
df := plugins.NewDecodedDataFrames([]*data.Frame{&frame})
instances, err := df.Encoded()
if err != nil {
return response.Error(400, "Failed to encode result dataframes", err)
}
return response.JSON(200, util.DynMap{
"instances": instances,
})
}
// conditionEvalOldEndpointByID handles GET /api/alert-definitions/evalOldByID/:id.
func (api *API) conditionEvalOldEndpointByID(c *models.ReqContext) response.Response {
id := c.ParamsInt64("id")
if id == 0 {
return response.Error(400, "missing id", nil)
}
getAlert := &models.GetAlertByIdQuery{
Id: id,
}
if err := bus.Dispatch(getAlert); err != nil {
return response.Error(400, fmt.Sprintf("could not find alert with id %v", id), err)
}
if getAlert.Result.OrgId != c.SignedInUser.OrgId {
return response.Error(403, "alert does not match organization of user", nil)
}
settings := getAlert.Result.Settings
sb, err := settings.ToDB()
if err != nil {
return response.Error(400, "failed to marshal alert settings", err)
}
evalCond, err := translate.DashboardAlertConditions(sb, c.OrgId)
if err != nil {
return response.Error(400, "Failed to translate alert conditions", err)
}
if err := api.validateCondition(*evalCond, c.SignedInUser, c.SkipCache); err != nil {
return response.Error(400, "invalid condition", err)
}
//now := cmd.Now
//if now.IsZero() {
//now := timeNow()
//}
evaluator := eval.Evaluator{Cfg: api.Cfg}
evalResults, err := evaluator.ConditionEval(evalCond, timeNow(), api.DataService)
if err != nil {
return response.Error(400, "Failed to evaluate conditions", err)
}
frame := evalResults.AsDataFrame()
df := plugins.NewDecodedDataFrames([]*data.Frame{&frame})
instances, err := df.Encoded()
if err != nil {
return response.Error(400, "Failed to encode result dataframes", err)
}
return response.JSON(200, util.DynMap{
"instances": instances,
})
}
// conditionEvalEndpoint handles POST /api/alert-definitions/eval.
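
A rough sketch of exercising the temporary dev-only endpoint registered above. The route and method come from the diff; the host, port, and basic-auth credentials are placeholders, and the endpoint exists only when Grafana runs in dev mode:

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        // conditions.json holds the same "conditions" payload the translator expects
        // (see the testdata files in this change); the file name is a placeholder.
        body, err := ioutil.ReadFile("conditions.json")
        if err != nil {
            panic(err)
        }

        // POST the dashboard conditions to the temporary endpoint. It translates them
        // into SSE queries plus a classic condition, evaluates the result, and returns
        // the instances as an encoded data frame.
        req, err := http.NewRequest(http.MethodPost,
            "http://localhost:3000/api/alert-definitions/evalOld", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/json")
        req.SetBasicAuth("admin", "admin") // assumed local dev credentials

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        out, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            panic(err)
        }
        fmt.Println(resp.Status)
        fmt.Println(string(out))
    }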

@@ -5,6 +5,7 @@ package eval
import (
"context"
"fmt"
"sort"
"time"
"github.com/grafana/grafana/pkg/services/ngalert/models"
@@ -71,12 +72,20 @@ const (
Normal state = iota
// Alerting is the eval state for an alert instance condition
// that evaluated to false.
// that evaluated to true (Alerting).
Alerting
// NoData is the eval state for an alert rule condition
// that evaluated to NoData.
NoData
// Error is the eval state for an alert rule condition
// that evaluated to Error.
Error
)
func (s state) String() string {
return [...]string{"Normal", "Alerting"}[s]
return [...]string{"Normal", "Alerting", "NoData", "Error"}[s]
}
// AlertExecCtx is the context provided for executing an alert condition.
@@ -87,9 +96,8 @@ type AlertExecCtx struct {
Ctx context.Context
}
// execute runs the Condition's expressions or queries.
func execute(ctx AlertExecCtx, c *models.Condition, now time.Time, dataService *tsdb.Service) (*ExecutionResults, error) {
result := ExecutionResults{}
// GetQueryDataRequest validates the condition and creates a backend.QueryDataRequest from it.
func GetQueryDataRequest(ctx AlertExecCtx, c *models.Condition, now time.Time) (*backend.QueryDataRequest, error) {
if !c.IsValid() {
return nil, fmt.Errorf("invalid conditions")
// TODO: Things probably
@@ -127,6 +135,17 @@ func execute(ctx AlertExecCtx, c *models.Condition, now time.Time, dataService *
TimeRange: q.RelativeTimeRange.ToTimeRange(now),
})
}
return queryDataReq, nil
}
// execute runs the Condition's expressions or queries.
func execute(ctx AlertExecCtx, c *models.Condition, now time.Time, dataService *tsdb.Service) (*ExecutionResults, error) {
result := ExecutionResults{}
queryDataReq, err := GetQueryDataRequest(ctx, c, now)
if err != nil {
return &result, err
}
exprService := expr.Service{
Cfg: &setting.Cfg{ExpressionsEnabled: ctx.ExpressionsEnabled},
@@ -182,9 +201,20 @@ func evaluateExecutionResult(results *ExecutionResults) (Results, error) {
}
labels[labelsStr] = true
state := Normal
val, err := f.Fields[0].FloatAt(0)
if err != nil || val != 0 {
val, ok := f.Fields[0].At(0).(*float64)
if !ok {
return nil, &invalidEvalResultFormatError{refID: f.RefID, reason: fmt.Sprintf("expected nullable float64 but got type %T", f.Fields[0].Type())}
}
var state state
switch {
case err != nil:
state = Error
case val == nil:
state = NoData
case *val == 0:
state = Normal
default:
state = Alerting
}
@@ -197,15 +227,38 @@ func evaluateExecutionResult(results *ExecutionResults) (Results, error) {
}
// AsDataFrame forms the EvalResults in Frame suitable for displaying in the table panel of the front end.
// This may be temporary, as there might be a fair amount we want to display in the frontend, and it might not make sense to store that in data.Frame.
// For the first pass, I would expect a Frame with a single row, and a column for each instance with a boolean value.
// It displays one row per alert instance, with a column for each label and one for the alerting state.
func (evalResults Results) AsDataFrame() data.Frame {
fields := make([]*data.Field, 0)
fieldLen := len(evalResults)
uniqueLabelKeys := make(map[string]struct{})
for _, evalResult := range evalResults {
fields = append(fields, data.NewField("", evalResult.Instance, []bool{evalResult.State != Normal}))
for k := range evalResult.Instance {
uniqueLabelKeys[k] = struct{}{}
}
}
labelColumns := make([]string, 0, len(uniqueLabelKeys))
for k := range uniqueLabelKeys {
labelColumns = append(labelColumns, k)
}
sort.Strings(labelColumns)
frame := data.NewFrame("evaluation results")
for _, lKey := range labelColumns {
frame.Fields = append(frame.Fields, data.NewField(lKey, nil, make([]string, fieldLen)))
}
frame.Fields = append(frame.Fields, data.NewField("State", nil, make([]string, fieldLen)))
for evalIdx, evalResult := range evalResults {
for lIdx, v := range labelColumns {
frame.Set(lIdx, evalIdx, evalResult.Instance[v])
}
frame.Set(len(labelColumns), evalIdx, evalResult.State.String())
}
f := data.NewFrame("", fields...)
return *f
return *frame
}
// ConditionEval executes conditions and evaluates the result.
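
A small standalone sketch summarising the new state mapping introduced above (an illustration of the rule in evaluateExecutionResult, not the package code):

    package main

    import "fmt"

    type state int

    const (
        normal state = iota
        alerting
        noData
        errState
    )

    func (s state) String() string {
        return [...]string{"Normal", "Alerting", "NoData", "Error"}[s]
    }

    // classify mirrors the switch in evaluateExecutionResult: a query/expression
    // error maps to Error, a nil value to NoData, zero to Normal, and any other
    // value to Alerting.
    func classify(val *float64, execErr error) state {
        switch {
        case execErr != nil:
            return errState
        case val == nil:
            return noData
        case *val == 0:
            return normal
        default:
            return alerting
        }
    }

    func main() {
        zero, one := 0.0, 1.0
        fmt.Println(classify(nil, nil))                        // NoData
        fmt.Println(classify(&zero, nil))                      // Normal
        fmt.Println(classify(&one, nil))                       // Alerting
        fmt.Println(classify(nil, fmt.Errorf("query failed"))) // Error
    }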
