mirror of https://github.com/grafana/grafana
Alerting: rule backtesting API (#57318)
* Implement backtesting engine that can process regular rule specification (with queries to datasource) as well as special kind of rules that have data frame instead of query. * declare a new API endpoint and model * add feature toggle `alertingBacktesting`pull/60163/head
parent
258696409d
commit
ad09feed83
@ -0,0 +1,158 @@ |
|||||||
|
package backtesting |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"net/url" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/benbjohnson/clock" |
||||||
|
"github.com/grafana/grafana-plugin-sdk-go/data" |
||||||
|
"github.com/hashicorp/go-multierror" |
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/infra/log" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/models" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/state" |
||||||
|
"github.com/grafana/grafana/pkg/services/user" |
||||||
|
) |
||||||
|
|
||||||
|
var (
	// ErrInvalidInputData is returned (wrapped) when the caller supplies an
	// invalid backtesting request, e.g. a bad time range or condition.
	ErrInvalidInputData = errors.New("invalid input data")

	logger = log.New("ngalert.backtesting.engine")
	// backtestingEvaluatorFactory creates the evaluator for a condition.
	// It is a package-level variable so tests can substitute a fake.
	backtestingEvaluatorFactory = newBacktestingEvaluator
)
||||||
|
|
||||||
|
// callbackFunc is invoked once per evaluation tick with the tick time and the results.
type callbackFunc = func(now time.Time, results eval.Results) error
||||||
|
|
||||||
|
// backtestingEvaluator evaluates a rule condition over the interval [from, to)
// at the given step, calling callback once per evaluation tick.
type backtestingEvaluator interface {
	Eval(ctx context.Context, from, to time.Time, interval time.Duration, callback callbackFunc) error
}
||||||
|
|
||||||
|
// stateManager is the subset of the ngalert state manager used by the engine;
// declared locally so tests can provide a fake implementation.
type stateManager interface {
	ProcessEvalResults(ctx context.Context, evaluatedAt time.Time, alertRule *models.AlertRule, results eval.Results, extraLabels data.Labels) []state.StateTransition
}
||||||
|
|
||||||
|
// Engine runs alert-rule backtesting: it replays rule evaluations over a
// historical interval and records the resulting state transitions.
type Engine struct {
	evalFactory eval.EvaluatorFactory
	// createStateManager builds a fresh state manager per test run so that
	// state from one backtest does not leak into another.
	createStateManager func() stateManager
}
||||||
|
|
||||||
|
// NewEngine creates a backtesting Engine that uses the provided evaluator
// factory and builds an isolated state manager (with a no-op image service
// and no persistence) for every test run.
func NewEngine(appUrl *url.URL, evalFactory eval.EvaluatorFactory) *Engine {
	return &Engine{
		evalFactory: evalFactory,
		createStateManager: func() stateManager {
			return state.NewManager(nil, appUrl, nil, &NoopImageService{}, clock.New(), nil)
		},
	}
}
||||||
|
|
||||||
|
func (e *Engine) Test(ctx context.Context, user *user.SignedInUser, rule *models.AlertRule, from, to time.Time) (*data.Frame, error) { |
||||||
|
ruleCtx := models.WithRuleKey(ctx, rule.GetKey()) |
||||||
|
logger := logger.FromContext(ctx) |
||||||
|
|
||||||
|
if !from.Before(to) { |
||||||
|
return nil, fmt.Errorf("%w: invalid interval of the backtesting [%d,%d]", ErrInvalidInputData, from.Unix(), to.Unix()) |
||||||
|
} |
||||||
|
if to.Sub(from).Seconds() < float64(rule.IntervalSeconds) { |
||||||
|
return nil, fmt.Errorf("%w: interval of the backtesting [%d,%d] is less than evaluation interval [%ds]", ErrInvalidInputData, from.Unix(), to.Unix(), rule.IntervalSeconds) |
||||||
|
} |
||||||
|
length := int(to.Sub(from).Seconds()) / int(rule.IntervalSeconds) |
||||||
|
|
||||||
|
evaluator, err := backtestingEvaluatorFactory(ruleCtx, e.evalFactory, user, rule.GetEvalCondition()) |
||||||
|
if err != nil { |
||||||
|
return nil, multierror.Append(ErrInvalidInputData, err) |
||||||
|
} |
||||||
|
|
||||||
|
stateManager := e.createStateManager() |
||||||
|
|
||||||
|
logger.Info("Start testing alert rule", "from", from, "to", to, "interval", rule.IntervalSeconds, "evaluations", length) |
||||||
|
|
||||||
|
start := time.Now() |
||||||
|
|
||||||
|
tsField := data.NewField("Time", nil, make([]time.Time, length)) |
||||||
|
valueFields := make(map[string]*data.Field) |
||||||
|
|
||||||
|
err = evaluator.Eval(ruleCtx, from, to, time.Duration(rule.IntervalSeconds)*time.Second, func(currentTime time.Time, results eval.Results) error { |
||||||
|
idx := int(currentTime.Sub(from).Seconds()) / int(rule.IntervalSeconds) |
||||||
|
states := stateManager.ProcessEvalResults(ruleCtx, currentTime, rule, results, nil) |
||||||
|
tsField.Set(idx, currentTime) |
||||||
|
for _, s := range states { |
||||||
|
field, ok := valueFields[s.CacheID] |
||||||
|
if !ok { |
||||||
|
field = data.NewField("", s.Labels, make([]*string, length)) |
||||||
|
valueFields[s.CacheID] = field |
||||||
|
} |
||||||
|
if s.State.State != eval.NoData { // set nil if NoData
|
||||||
|
value := s.State.State.String() |
||||||
|
if s.StateReason != "" { |
||||||
|
value += " (" + s.StateReason + ")" |
||||||
|
} |
||||||
|
field.Set(idx, &value) |
||||||
|
continue |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
}) |
||||||
|
fields := make([]*data.Field, 0, len(valueFields)+1) |
||||||
|
fields = append(fields, tsField) |
||||||
|
for _, f := range valueFields { |
||||||
|
fields = append(fields, f) |
||||||
|
} |
||||||
|
result := data.NewFrame("Backtesting results", fields...) |
||||||
|
|
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
logger.Info("Rule testing finished successfully", "duration", time.Since(start)) |
||||||
|
return result, nil |
||||||
|
} |
||||||
|
|
||||||
|
// newBacktestingEvaluator builds a backtestingEvaluator for the condition.
//
// If any query has datasource UID or query type "__data__", the condition is
// treated as a "data" condition: it must consist of exactly that one query,
// the condition refID must point at it, and its model must contain a JSON
// "data" field holding a data frame. In that case a dataEvaluator replaying
// the frame is returned. Otherwise a regular queryEvaluator backed by the
// eval factory is created.
func newBacktestingEvaluator(ctx context.Context, evalFactory eval.EvaluatorFactory, user *user.SignedInUser, condition models.Condition) (backtestingEvaluator, error) {
	for _, q := range condition.Data {
		if q.DatasourceUID == "__data__" || q.QueryType == "__data__" {
			// A data query must be the only query in the condition.
			if len(condition.Data) != 1 {
				return nil, errors.New("data queries are not supported with other expressions or data queries")
			}
			if condition.Condition == "" {
				return nil, fmt.Errorf("condition must not be empty and be set to the data query %s", q.RefID)
			}
			if condition.Condition != q.RefID {
				return nil, fmt.Errorf("condition must be set to the data query %s", q.RefID)
			}
			// The query model carries the frame under the "data" key.
			model := struct {
				DataFrame *data.Frame `json:"data"`
			}{}
			err := json.Unmarshal(q.Model, &model)
			if err != nil {
				return nil, fmt.Errorf("failed to parse data frame: %w", err)
			}
			if model.DataFrame == nil {
				return nil, errors.New("the data field must not be empty")
			}
			return newDataEvaluator(condition.Condition, model.DataFrame)
		}
	}

	// Regular condition: delegate evaluation to the standard evaluator factory.
	evaluator, err := evalFactory.Create(eval.EvaluationContext{Ctx: ctx,
		User: user,
	}, condition)

	if err != nil {
		return nil, err
	}

	return &queryEvaluator{
		eval: evaluator,
	}, nil
}
||||||
|
|
||||||
|
// NoopImageService is a no-op image service. Backtesting never renders
// notification images, so the state manager gets this stub instead.
type NoopImageService struct{}

// NewImage returns an empty image and never fails.
func (s *NoopImageService) NewImage(_ context.Context, _ *models.AlertRule) (*models.Image, error) {
	return &models.Image{}, nil
}
@ -0,0 +1,376 @@ |
|||||||
|
package backtesting |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"math/rand" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/grafana/grafana-plugin-sdk-go/data" |
||||||
|
"github.com/stretchr/testify/require" |
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval/eval_mocks" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/models" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/state" |
||||||
|
"github.com/grafana/grafana/pkg/services/user" |
||||||
|
"github.com/grafana/grafana/pkg/util" |
||||||
|
) |
||||||
|
|
||||||
|
func TestNewBacktestingEvaluator(t *testing.T) { |
||||||
|
t.Run("creates data evaluator", func(t *testing.T) { |
||||||
|
frame := GenerateWideSeriesFrame(10, time.Second) |
||||||
|
d := struct { |
||||||
|
Data *data.Frame `json:"data"` |
||||||
|
}{ |
||||||
|
Data: frame, |
||||||
|
} |
||||||
|
validData, err := json.Marshal(d) |
||||||
|
require.NoError(t, err) |
||||||
|
refID := util.GenerateShortUID() |
||||||
|
|
||||||
|
evalFactory := eval_mocks.NewEvaluatorFactory(&eval_mocks.ConditionEvaluatorMock{}) |
||||||
|
|
||||||
|
testCases := []struct { |
||||||
|
name string |
||||||
|
condition models.Condition |
||||||
|
error bool |
||||||
|
expectedEval backtestingEvaluator |
||||||
|
}{ |
||||||
|
{ |
||||||
|
name: "creates data evaluator when there is one query with type __data__", |
||||||
|
condition: models.Condition{ |
||||||
|
Condition: refID, |
||||||
|
Data: []models.AlertQuery{ |
||||||
|
{ |
||||||
|
RefID: refID, |
||||||
|
QueryType: "__data__", |
||||||
|
RelativeTimeRange: models.RelativeTimeRange{}, |
||||||
|
DatasourceUID: "", |
||||||
|
Model: json.RawMessage(validData), |
||||||
|
}, |
||||||
|
}, |
||||||
|
}, |
||||||
|
expectedEval: &dataEvaluator{}, |
||||||
|
}, |
||||||
|
{ |
||||||
|
name: "creates data evaluator when there is one query with datasource UID __data__", |
||||||
|
condition: models.Condition{ |
||||||
|
Condition: refID, |
||||||
|
Data: []models.AlertQuery{ |
||||||
|
{ |
||||||
|
RefID: refID, |
||||||
|
QueryType: "", |
||||||
|
RelativeTimeRange: models.RelativeTimeRange{}, |
||||||
|
DatasourceUID: "__data__", |
||||||
|
Model: json.RawMessage(validData), |
||||||
|
}, |
||||||
|
}, |
||||||
|
}, |
||||||
|
expectedEval: &dataEvaluator{}, |
||||||
|
}, { |
||||||
|
name: "fails if queries contain data and other queries", |
||||||
|
condition: models.Condition{ |
||||||
|
Condition: refID, |
||||||
|
Data: []models.AlertQuery{ |
||||||
|
{ |
||||||
|
RefID: refID, |
||||||
|
QueryType: "__data__", |
||||||
|
RelativeTimeRange: models.RelativeTimeRange{}, |
||||||
|
DatasourceUID: "", |
||||||
|
Model: json.RawMessage(validData), |
||||||
|
}, |
||||||
|
{ |
||||||
|
RefID: "D", |
||||||
|
QueryType: "", |
||||||
|
RelativeTimeRange: models.RelativeTimeRange{}, |
||||||
|
DatasourceUID: util.GenerateShortUID(), |
||||||
|
}, |
||||||
|
}, |
||||||
|
}, |
||||||
|
error: true, |
||||||
|
}, |
||||||
|
{ |
||||||
|
name: "fails if data query does not contain data", |
||||||
|
condition: models.Condition{ |
||||||
|
Condition: refID, |
||||||
|
Data: []models.AlertQuery{ |
||||||
|
{ |
||||||
|
RefID: refID, |
||||||
|
QueryType: "__data__", |
||||||
|
RelativeTimeRange: models.RelativeTimeRange{}, |
||||||
|
DatasourceUID: "", |
||||||
|
Model: json.RawMessage(nil), |
||||||
|
}, |
||||||
|
}, |
||||||
|
}, |
||||||
|
error: true, |
||||||
|
}, |
||||||
|
{ |
||||||
|
name: "fails if data query does not contain frame in data", |
||||||
|
condition: models.Condition{ |
||||||
|
Condition: refID, |
||||||
|
Data: []models.AlertQuery{ |
||||||
|
{ |
||||||
|
RefID: refID, |
||||||
|
QueryType: "__data__", |
||||||
|
RelativeTimeRange: models.RelativeTimeRange{}, |
||||||
|
DatasourceUID: "", |
||||||
|
Model: json.RawMessage(`{ "data": "test"}`), |
||||||
|
}, |
||||||
|
}, |
||||||
|
}, |
||||||
|
error: true, |
||||||
|
}, { |
||||||
|
name: "fails if condition refID and data refID does not match", |
||||||
|
condition: models.Condition{ |
||||||
|
Condition: refID, |
||||||
|
Data: []models.AlertQuery{ |
||||||
|
{ |
||||||
|
RefID: "B", |
||||||
|
QueryType: "__data__", |
||||||
|
RelativeTimeRange: models.RelativeTimeRange{}, |
||||||
|
DatasourceUID: "", |
||||||
|
Model: json.RawMessage(validData), |
||||||
|
}, |
||||||
|
}, |
||||||
|
}, |
||||||
|
error: true, |
||||||
|
}, |
||||||
|
} |
||||||
|
|
||||||
|
for _, testCase := range testCases { |
||||||
|
t.Run(testCase.name, func(t *testing.T) { |
||||||
|
e, err := newBacktestingEvaluator(context.Background(), evalFactory, nil, testCase.condition) |
||||||
|
if testCase.error { |
||||||
|
require.Error(t, err) |
||||||
|
return |
||||||
|
} |
||||||
|
require.NoError(t, err) |
||||||
|
require.IsType(t, &dataEvaluator{}, e) |
||||||
|
}) |
||||||
|
} |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
func TestEvaluatorTest(t *testing.T) { |
||||||
|
states := []eval.State{eval.Normal, eval.Alerting, eval.Pending} |
||||||
|
generateState := func(prefix string) *state.State { |
||||||
|
return &state.State{ |
||||||
|
CacheID: "state-" + prefix, |
||||||
|
Labels: models.GenerateAlertLabels(rand.Intn(5)+1, prefix+"-"), |
||||||
|
State: states[rand.Intn(len(states))], |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
randomResultCallback := func(now time.Time) (eval.Results, error) { |
||||||
|
return eval.GenerateResults(rand.Intn(5)+1, eval.ResultGen()), nil |
||||||
|
} |
||||||
|
evaluator := &fakeBacktestingEvaluator{ |
||||||
|
evalCallback: randomResultCallback, |
||||||
|
} |
||||||
|
manager := &fakeStateManager{} |
||||||
|
|
||||||
|
backtestingEvaluatorFactory = func(ctx context.Context, evalFactory eval.EvaluatorFactory, user *user.SignedInUser, condition models.Condition) (backtestingEvaluator, error) { |
||||||
|
return evaluator, nil |
||||||
|
} |
||||||
|
|
||||||
|
t.Cleanup(func() { |
||||||
|
backtestingEvaluatorFactory = newBacktestingEvaluator |
||||||
|
}) |
||||||
|
|
||||||
|
engine := &Engine{ |
||||||
|
evalFactory: nil, |
||||||
|
createStateManager: func() stateManager { |
||||||
|
return manager |
||||||
|
}, |
||||||
|
} |
||||||
|
rule := models.AlertRuleGen(models.WithInterval(time.Second))() |
||||||
|
ruleInterval := time.Duration(rule.IntervalSeconds) * time.Second |
||||||
|
|
||||||
|
t.Run("should return data frame in specific format", func(t *testing.T) { |
||||||
|
from := time.Unix(0, 0) |
||||||
|
to := from.Add(5 * ruleInterval) |
||||||
|
allStates := [...]eval.State{eval.Normal, eval.Alerting, eval.Pending, eval.NoData, eval.Error} |
||||||
|
|
||||||
|
var states []state.StateTransition |
||||||
|
|
||||||
|
for _, s := range allStates { |
||||||
|
states = append(states, state.StateTransition{ |
||||||
|
State: &state.State{ |
||||||
|
CacheID: "state-" + s.String(), |
||||||
|
Labels: models.GenerateAlertLabels(rand.Intn(5)+1, s.String()+"-"), |
||||||
|
State: s, |
||||||
|
StateReason: util.GenerateShortUID(), |
||||||
|
}, |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
manager.stateCallback = func(now time.Time) []state.StateTransition { |
||||||
|
return states |
||||||
|
} |
||||||
|
|
||||||
|
frame, err := engine.Test(context.Background(), nil, rule, from, to) |
||||||
|
|
||||||
|
require.NoError(t, err) |
||||||
|
require.Len(t, frame.Fields, len(states)+1) // +1 - timestamp
|
||||||
|
|
||||||
|
t.Run("should contain field Time", func(t *testing.T) { |
||||||
|
timestampField, _ := frame.FieldByName("Time") |
||||||
|
require.NotNil(t, timestampField, "frame does not contain field 'Time'") |
||||||
|
require.Equal(t, data.FieldTypeTime, timestampField.Type()) |
||||||
|
}) |
||||||
|
|
||||||
|
fieldByState := make(map[string]*data.Field, len(states)) |
||||||
|
|
||||||
|
t.Run("should contain a field per state", func(t *testing.T) { |
||||||
|
for _, s := range states { |
||||||
|
var f *data.Field |
||||||
|
for _, field := range frame.Fields { |
||||||
|
if field.Labels.String() == s.Labels.String() { |
||||||
|
f = field |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
require.NotNilf(t, f, "Cannot find a field by state labels") |
||||||
|
fieldByState[s.CacheID] = f |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("should be populated with correct values", func(t *testing.T) { |
||||||
|
timestampField, _ := frame.FieldByName("Time") |
||||||
|
expectedLength := timestampField.Len() |
||||||
|
for _, field := range frame.Fields { |
||||||
|
require.Equalf(t, expectedLength, field.Len(), "Field %s should have the size %d", field.Name, expectedLength) |
||||||
|
} |
||||||
|
for i := 0; i < expectedLength; i++ { |
||||||
|
expectedTime := from.Add(time.Duration(int64(i)*rule.IntervalSeconds) * time.Second) |
||||||
|
require.Equal(t, expectedTime, timestampField.At(i).(time.Time)) |
||||||
|
for _, s := range states { |
||||||
|
f := fieldByState[s.CacheID] |
||||||
|
if s.State.State == eval.NoData { |
||||||
|
require.Nil(t, f.At(i)) |
||||||
|
} else { |
||||||
|
v := f.At(i).(*string) |
||||||
|
require.NotNilf(t, v, "Field [%s] value at index %d should not be nil", s.CacheID, i) |
||||||
|
require.Equal(t, fmt.Sprintf("%s (%s)", s.State.State, s.StateReason), *v) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("should backfill field with nulls if a new dimension created in the middle", func(t *testing.T) { |
||||||
|
from := time.Unix(0, 0) |
||||||
|
|
||||||
|
state1 := state.StateTransition{ |
||||||
|
State: generateState("1"), |
||||||
|
} |
||||||
|
state2 := state.StateTransition{ |
||||||
|
State: generateState("2"), |
||||||
|
} |
||||||
|
state3 := state.StateTransition{ |
||||||
|
State: generateState("3"), |
||||||
|
} |
||||||
|
stateByTime := map[time.Time][]state.StateTransition{ |
||||||
|
from: {state1, state2}, |
||||||
|
from.Add(1 * ruleInterval): {state1, state2}, |
||||||
|
from.Add(2 * ruleInterval): {state1, state2}, |
||||||
|
from.Add(3 * ruleInterval): {state1, state2, state3}, |
||||||
|
from.Add(4 * ruleInterval): {state1, state2, state3}, |
||||||
|
} |
||||||
|
to := from.Add(time.Duration(len(stateByTime)) * ruleInterval) |
||||||
|
|
||||||
|
manager.stateCallback = func(now time.Time) []state.StateTransition { |
||||||
|
return stateByTime[now] |
||||||
|
} |
||||||
|
|
||||||
|
frame, err := engine.Test(context.Background(), nil, rule, from, to) |
||||||
|
require.NoError(t, err) |
||||||
|
|
||||||
|
var field3 *data.Field |
||||||
|
for _, field := range frame.Fields { |
||||||
|
if field.Labels.String() == state3.Labels.String() { |
||||||
|
field3 = field |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
require.NotNilf(t, field3, "Result for state 3 was not found") |
||||||
|
require.Equalf(t, len(stateByTime), field3.Len(), "State3 result has unexpected number of values") |
||||||
|
|
||||||
|
idx := 0 |
||||||
|
for curTime, states := range stateByTime { |
||||||
|
value := field3.At(idx).(*string) |
||||||
|
if len(states) == 2 { |
||||||
|
require.Nilf(t, value, "The result should be nil if state3 was not available for time %v", curTime) |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("should fail", func(t *testing.T) { |
||||||
|
manager.stateCallback = func(now time.Time) []state.StateTransition { |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
t.Run("when interval is not correct", func(t *testing.T) { |
||||||
|
from := time.Now() |
||||||
|
t.Run("when from=to", func(t *testing.T) { |
||||||
|
to := from |
||||||
|
_, err := engine.Test(context.Background(), nil, rule, from, to) |
||||||
|
require.ErrorIs(t, err, ErrInvalidInputData) |
||||||
|
}) |
||||||
|
t.Run("when from > to", func(t *testing.T) { |
||||||
|
to := from.Add(-ruleInterval) |
||||||
|
_, err := engine.Test(context.Background(), nil, rule, from, to) |
||||||
|
require.ErrorIs(t, err, ErrInvalidInputData) |
||||||
|
}) |
||||||
|
t.Run("when to-from < interval", func(t *testing.T) { |
||||||
|
to := from.Add(ruleInterval).Add(-time.Millisecond) |
||||||
|
_, err := engine.Test(context.Background(), nil, rule, from, to) |
||||||
|
require.ErrorIs(t, err, ErrInvalidInputData) |
||||||
|
}) |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("when evalution fails", func(t *testing.T) { |
||||||
|
expectedError := errors.New("test-error") |
||||||
|
evaluator.evalCallback = func(now time.Time) (eval.Results, error) { |
||||||
|
return nil, expectedError |
||||||
|
} |
||||||
|
from := time.Now() |
||||||
|
to := from.Add(ruleInterval) |
||||||
|
_, err := engine.Test(context.Background(), nil, rule, from, to) |
||||||
|
require.ErrorIs(t, err, expectedError) |
||||||
|
}) |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
// fakeStateManager is a test double for stateManager; it delegates to a
// per-test callback keyed by the evaluation timestamp.
type fakeStateManager struct {
	stateCallback func(now time.Time) []state.StateTransition
}

// ProcessEvalResults forwards the evaluation time to the configured callback.
func (f *fakeStateManager) ProcessEvalResults(_ context.Context, evaluatedAt time.Time, _ *models.AlertRule, _ eval.Results, _ data.Labels) []state.StateTransition {
	return f.stateCallback(evaluatedAt)
}
||||||
|
|
||||||
|
// fakeBacktestingEvaluator is a test double for backtestingEvaluator that
// produces results via a configurable callback.
type fakeBacktestingEvaluator struct {
	evalCallback func(now time.Time) (eval.Results, error)
}
||||||
|
|
||||||
|
func (f *fakeBacktestingEvaluator) Eval(_ context.Context, from, to time.Time, interval time.Duration, callback callbackFunc) error { |
||||||
|
idx := 0 |
||||||
|
for now := from; now.Before(to); now = now.Add(interval) { |
||||||
|
results, err := f.evalCallback(now) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
err = callback(now, results) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
idx++ |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
@ -0,0 +1,96 @@ |
|||||||
|
package backtesting |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"errors" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/grafana/grafana-plugin-sdk-go/data" |
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/expr" |
||||||
|
"github.com/grafana/grafana/pkg/expr/mathexp" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval" |
||||||
|
) |
||||||
|
|
||||||
|
// dataEvaluator is a backtestingEvaluator that replays a user-supplied data
// frame instead of querying a datasource. The frame is converted to series
// once at construction and resampled to the evaluation interval on each Eval.
type dataEvaluator struct {
	refID string
	data  []mathexp.Series
	// downsampleFunction/upsampleFunction are the resampling strategies passed
	// to Series.Resample ("last" keeps the most recent point when reducing
	// resolution; "pad" repeats the previous point when increasing it).
	downsampleFunction string
	upsampleFunction   string
}
||||||
|
|
||||||
|
// newDataEvaluator converts a wide-format time-series frame into sorted
// series and wraps them in a dataEvaluator bound to the given refID.
// It fails if the frame cannot be split into many series (e.g. it is not
// in wide time-series format).
func newDataEvaluator(refID string, frame *data.Frame) (*dataEvaluator, error) {
	series, err := expr.WideToMany(frame)
	if err != nil {
		return nil, err
	}
	// Sort each series by time ascending so resampling sees ordered points.
	for _, s := range series {
		s.SortByTime(false)
	}

	return &dataEvaluator{
		refID:              refID,
		data:               series,
		downsampleFunction: "last",
		upsampleFunction:   "pad",
	}, nil
}
||||||
|
|
||||||
|
// Eval resamples the stored series to the evaluation interval over [from, to)
// and invokes callback once per resampled timestamp. Each non-nil value maps
// to a Result (Alerting when the value is non-zero, Normal otherwise); a tick
// where every series is nil yields a single NoData result.
func (d *dataEvaluator) Eval(_ context.Context, from, to time.Time, interval time.Duration, callback callbackFunc) error {
	var resampled = make([]mathexp.Series, 0, len(d.data))

	// iterations ends up as the length of the last resampled series; all
	// series come from one frame so they are expected to have equal length.
	iterations := 0
	for _, s := range d.data {
		// making sure the input data frame is aligned with the interval
		r, err := s.Resample(d.refID, interval, d.downsampleFunction, d.upsampleFunction, from, to.Add(-interval)) // we want to query [from,to)
		if err != nil {
			return err
		}
		resampled = append(resampled, r)
		iterations = r.Len()
	}

	for i := 0; i < iterations; i++ {
		result := make([]eval.Result, 0, len(resampled))
		var now time.Time
		for _, series := range resampled {
			snow := series.GetTime(i)
			// NOTE(review): comparing time.Time with != rather than !Equal;
			// safe here only if resampled timestamps share wall/monotonic
			// representation — confirm, or switch to !now.Equal(snow).
			if !now.IsZero() && now != snow { // this should not happen because all series' belong to a single data frame
				return errors.New("failed to resample input data. timestamps are not aligned")
			}
			now = snow
			value := series.GetValue(i)
			var state = eval.Normal
			if value == nil {
				// nil point: this series contributes no result at this tick.
				continue
			} else if *value != 0 {
				state = eval.Alerting
			}
			result = append(result, eval.Result{
				Instance: series.GetLabels(),
				State:    state,
				Results:  nil,
				Values: map[string]eval.NumberValueCapture{
					d.refID: {
						Var:    d.refID,
						Labels: series.GetLabels(),
						Value:  value,
					},
				},
				EvaluatedAt: now,
			})
		}
		// All series were nil at this tick: report a single NoData result.
		if len(result) == 0 {
			result = append(result, eval.Result{
				State:       eval.NoData,
				EvaluatedAt: now,
			})
		}
		err := callback(now, result)
		if err != nil {
			return err
		}
	}
	return nil
}
@ -0,0 +1,295 @@ |
|||||||
|
package backtesting |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"math/rand" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/grafana/grafana-plugin-sdk-go/data" |
||||||
|
"github.com/stretchr/testify/assert" |
||||||
|
"github.com/stretchr/testify/require" |
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/models" |
||||||
|
"github.com/grafana/grafana/pkg/util" |
||||||
|
) |
||||||
|
|
||||||
|
// GenerateWideSeriesFrame builds a wide time-series test frame with `size`
// rows at the given resolution: one "time" field plus 1-4 random int64 value
// fields with random labels. Timestamps are aligned to the resolution and end
// just before now.
func GenerateWideSeriesFrame(size int, resolution time.Duration) *data.Frame {
	fields := make(data.Fields, 0, rand.Intn(4)+2)
	fields = append(fields, data.NewField("time", nil, make([]time.Time, size)))
	for i := 1; i < cap(fields); i++ {
		name := fmt.Sprintf("values-%d", i)
		fields = append(fields, data.NewField(name, models.GenerateAlertLabels(rand.Intn(4)+1, name), make([]int64, size)))
	}
	frame := data.NewFrame("test", fields...)

	// Truncate "now" to a multiple of the resolution so rows align to it.
	tmili := time.Now().UnixMilli()
	tmili = tmili - tmili%resolution.Milliseconds()
	current := time.UnixMilli(tmili).Add(-resolution * time.Duration(size))
	for i := 0; i < size; i++ {
		vals := make([]interface{}, 0, len(frame.Fields))
		vals = append(vals, current)
		for i := 1; i < cap(vals); i++ {
			vals = append(vals, rand.Int63n(2)-1) // random value in {-1, 0}; NOTE(review): original comment said [-1,1] — if 1 is intended, use rand.Int63n(3)-1
		}
		frame.SetRow(i, vals...)
		current = current.Add(resolution)
	}
	return frame
}
||||||
|
|
||||||
|
func TestDataEvaluator_New(t *testing.T) { |
||||||
|
t.Run("should fail if frame is not TimeSeriesTypeWide", func(t *testing.T) { |
||||||
|
t.Run("but TimeSeriesTypeNot", func(t *testing.T) { |
||||||
|
frameTimeSeriesTypeNot := data.NewFrame("test") |
||||||
|
require.Equal(t, data.TimeSeriesTypeNot, frameTimeSeriesTypeNot.TimeSeriesSchema().Type) |
||||||
|
_, err := newDataEvaluator(util.GenerateShortUID(), frameTimeSeriesTypeNot) |
||||||
|
require.Error(t, err) |
||||||
|
}) |
||||||
|
t.Run("but TimeSeriesTypeLong", func(t *testing.T) { |
||||||
|
frameTimeSeriesTypeLong := data.NewFrame("test", data.NewField("time", nil, make([]time.Time, 0)), data.NewField("data", nil, make([]string, 0)), data.NewField("value", nil, make([]int64, 0))) |
||||||
|
require.Equal(t, data.TimeSeriesTypeLong, frameTimeSeriesTypeLong.TimeSeriesSchema().Type) |
||||||
|
_, err := newDataEvaluator(util.GenerateShortUID(), frameTimeSeriesTypeLong) |
||||||
|
require.Error(t, err) |
||||||
|
}) |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("should convert fame to series and sort it", func(t *testing.T) { |
||||||
|
refID := util.GenerateShortUID() |
||||||
|
frameSize := rand.Intn(100) + 100 |
||||||
|
frame := GenerateWideSeriesFrame(frameSize, time.Second) |
||||||
|
rand.Shuffle(frameSize, func(i, j int) { |
||||||
|
rowi := frame.RowCopy(i) |
||||||
|
rowj := frame.RowCopy(j) |
||||||
|
frame.SetRow(i, rowj...) |
||||||
|
frame.SetRow(j, rowi...) |
||||||
|
}) |
||||||
|
e, err := newDataEvaluator(refID, frame) |
||||||
|
require.NoError(t, err) |
||||||
|
require.Equal(t, refID, e.refID) |
||||||
|
require.Len(t, e.data, len(frame.Fields)-1) // timestamp is not counting
|
||||||
|
for idx, series := range e.data { |
||||||
|
assert.Equalf(t, series.Len(), frameSize, "Length of the series %d is %d but expected to be %d", idx, series.Len(), frameSize) |
||||||
|
assert.Equalf(t, frame.Fields[idx+1].Labels, series.GetLabels(), "Labels of series %d does not match with original field labels", idx) |
||||||
|
assert.Lessf(t, series.GetTime(0), series.GetTime(1), "Series %d is expected to be sorted in ascending order", idx) |
||||||
|
} |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
func TestDataEvaluator_Eval(t *testing.T) { |
||||||
|
type results struct { |
||||||
|
time time.Time |
||||||
|
results eval.Results |
||||||
|
} |
||||||
|
|
||||||
|
refID := util.GenerateShortUID() |
||||||
|
frameSize := rand.Intn(100) + 100 |
||||||
|
frame := GenerateWideSeriesFrame(frameSize, time.Second) |
||||||
|
from := frame.At(0, 0).(time.Time) |
||||||
|
to := frame.At(0, frame.Rows()-1).(time.Time) |
||||||
|
evaluator, err := newDataEvaluator(refID, frame) |
||||||
|
require.NoErrorf(t, err, "Frame %v", frame) |
||||||
|
|
||||||
|
t.Run("should use data points when frame resolution matches evaluation interval", func(t *testing.T) { |
||||||
|
r := make([]results, 0, frame.Rows()) |
||||||
|
|
||||||
|
invterval := time.Second |
||||||
|
|
||||||
|
resultsCount := int(to.Sub(from).Seconds() / invterval.Seconds()) |
||||||
|
|
||||||
|
err = evaluator.Eval(context.Background(), from, to, time.Second, func(now time.Time, res eval.Results) error { |
||||||
|
r = append(r, results{ |
||||||
|
now, res, |
||||||
|
}) |
||||||
|
return nil |
||||||
|
}) |
||||||
|
require.NoError(t, err) |
||||||
|
|
||||||
|
require.Len(t, r, resultsCount) |
||||||
|
|
||||||
|
t.Run("results should be in the same refID", func(t *testing.T) { |
||||||
|
for _, res := range r { |
||||||
|
for _, result := range res.results { |
||||||
|
require.Contains(t, result.Values, refID) |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("should be Alerting if value is not 0", func(t *testing.T) { |
||||||
|
for _, res := range r { |
||||||
|
for _, result := range res.results { |
||||||
|
v := result.Values[refID].Value |
||||||
|
require.NotNil(t, v) |
||||||
|
if *v == 0 { |
||||||
|
require.Equalf(t, eval.Normal, result.State, "Result value is %d", *v) |
||||||
|
} else { |
||||||
|
require.Equalf(t, eval.Alerting, result.State, "Result value is %d", *v) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("results should be in ascending order", func(t *testing.T) { |
||||||
|
var prev = results{} |
||||||
|
for i := 0; i < len(r); i++ { |
||||||
|
current := r[i] |
||||||
|
if i > 0 { |
||||||
|
require.Less(t, prev.time, current.time) |
||||||
|
} else { |
||||||
|
require.Equal(t, from, current.time) |
||||||
|
} |
||||||
|
prev = current |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("results should be in the same order as fields in frame", func(t *testing.T) { |
||||||
|
for i := 0; i < len(r); i++ { |
||||||
|
current := r[i] |
||||||
|
for idx, result := range current.results { |
||||||
|
field := frame.Fields[idx+1] |
||||||
|
require.Equal(t, field.Labels, result.Instance) |
||||||
|
expected, err := field.FloatAt(i) |
||||||
|
require.NoError(t, err) |
||||||
|
require.EqualValues(t, expected, *result.Values[refID].Value) |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
}) |
||||||
|
t.Run("when frame resolution does not match evaluation interval", func(t *testing.T) { |
||||||
|
t.Run("should closest timestamp if interval is smaller than frame resolution", func(t *testing.T) { |
||||||
|
interval := 300 * time.Millisecond |
||||||
|
size := to.Sub(from).Milliseconds() / interval.Milliseconds() |
||||||
|
r := make([]results, 0, size) |
||||||
|
|
||||||
|
err = evaluator.Eval(context.Background(), from, to, interval, func(now time.Time, res eval.Results) error { |
||||||
|
r = append(r, results{ |
||||||
|
now, res, |
||||||
|
}) |
||||||
|
return nil |
||||||
|
}) |
||||||
|
|
||||||
|
currentRowIdx := 0 |
||||||
|
nextTime := frame.At(0, currentRowIdx+1).(time.Time) |
||||||
|
for id, current := range r { |
||||||
|
if !current.time.Before(nextTime) { |
||||||
|
currentRowIdx++ |
||||||
|
if frame.Rows() > currentRowIdx+1 { |
||||||
|
nextTime = frame.At(0, currentRowIdx+1).(time.Time) |
||||||
|
} |
||||||
|
} |
||||||
|
for idx, result := range current.results { |
||||||
|
field := frame.Fields[idx+1] |
||||||
|
require.Equal(t, field.Labels, result.Instance) |
||||||
|
expected, err := field.FloatAt(currentRowIdx) |
||||||
|
require.NoError(t, err) |
||||||
|
require.EqualValuesf(t, expected, *result.Values[refID].Value, "Time %d", id) |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("should downscale series if interval is smaller using previous value", func(t *testing.T) { |
||||||
|
interval := 5 * time.Second |
||||||
|
size := int(to.Sub(from).Seconds() / interval.Seconds()) |
||||||
|
r := make([]results, 0, size) |
||||||
|
|
||||||
|
err = evaluator.Eval(context.Background(), from, to, interval, func(now time.Time, res eval.Results) error { |
||||||
|
r = append(r, results{ |
||||||
|
now, res, |
||||||
|
}) |
||||||
|
return nil |
||||||
|
}) |
||||||
|
|
||||||
|
currentRowIdx := 0 |
||||||
|
var frameDate time.Time |
||||||
|
for resultNum, current := range r { |
||||||
|
for i := currentRowIdx; i < frame.Rows(); i++ { |
||||||
|
d := frame.At(0, i).(time.Time) |
||||||
|
if d.Equal(current.time) { |
||||||
|
currentRowIdx = i |
||||||
|
frameDate = d |
||||||
|
break |
||||||
|
} |
||||||
|
if d.After(current.time) { |
||||||
|
require.Fail(t, "Interval is not aligned") |
||||||
|
} |
||||||
|
} |
||||||
|
for idx, result := range current.results { |
||||||
|
field := frame.Fields[idx+1] |
||||||
|
require.Equal(t, field.Labels, result.Instance) |
||||||
|
expected, err := field.FloatAt(currentRowIdx) |
||||||
|
require.NoError(t, err) |
||||||
|
require.EqualValuesf(t, expected, *result.Values[refID].Value, "Current time [%v] frame time [%v]. Result #%d", current.time, frameDate, resultNum) |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
}) |
||||||
|
t.Run("when eval interval is larger than data", func(t *testing.T) { |
||||||
|
t.Run("should be noData until the frame interval", func(t *testing.T) { |
||||||
|
newFrom := from.Add(-10 * time.Second) |
||||||
|
r := make([]results, 0, int(to.Sub(newFrom).Seconds())) |
||||||
|
err = evaluator.Eval(context.Background(), newFrom, to, time.Second, func(now time.Time, res eval.Results) error { |
||||||
|
r = append(r, results{ |
||||||
|
now, res, |
||||||
|
}) |
||||||
|
return nil |
||||||
|
}) |
||||||
|
|
||||||
|
rowIdx := 0 |
||||||
|
for _, current := range r { |
||||||
|
if current.time.Before(from) { |
||||||
|
require.Len(t, current.results, 1) |
||||||
|
require.Equal(t, eval.NoData, current.results[0].State) |
||||||
|
} else { |
||||||
|
for idx, result := range current.results { |
||||||
|
field := frame.Fields[idx+1] |
||||||
|
require.Equal(t, field.Labels, result.Instance) |
||||||
|
expected, err := field.FloatAt(rowIdx) |
||||||
|
require.NoError(t, err) |
||||||
|
require.EqualValues(t, expected, *result.Values[refID].Value) |
||||||
|
} |
||||||
|
rowIdx++ |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("should be the last value after the frame interval", func(t *testing.T) { |
||||||
|
newTo := to.Add(10 * time.Second) |
||||||
|
r := make([]results, 0, int(newTo.Sub(from).Seconds())) |
||||||
|
err = evaluator.Eval(context.Background(), from, newTo, time.Second, func(now time.Time, res eval.Results) error { |
||||||
|
r = append(r, results{ |
||||||
|
now, res, |
||||||
|
}) |
||||||
|
return nil |
||||||
|
}) |
||||||
|
|
||||||
|
rowIdx := 0 |
||||||
|
for _, current := range r { |
||||||
|
for idx, result := range current.results { |
||||||
|
field := frame.Fields[idx+1] |
||||||
|
require.Equal(t, field.Labels, result.Instance) |
||||||
|
expected, err := field.FloatAt(rowIdx) |
||||||
|
require.NoError(t, err) |
||||||
|
require.EqualValues(t, expected, *result.Values[refID].Value) |
||||||
|
} |
||||||
|
if current.time.Before(to) { |
||||||
|
rowIdx++ |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
}) |
||||||
|
t.Run("should stop if callback error", func(t *testing.T) { |
||||||
|
expectedError := errors.New("error") |
||||||
|
evals := 0 |
||||||
|
err = evaluator.Eval(context.Background(), from, to, time.Second, func(now time.Time, res eval.Results) error { |
||||||
|
if evals > 5 { |
||||||
|
return expectedError |
||||||
|
} |
||||||
|
evals++ |
||||||
|
return nil |
||||||
|
}) |
||||||
|
require.ErrorIs(t, err, expectedError) |
||||||
|
}) |
||||||
|
} |
@ -0,0 +1,27 @@ |
|||||||
|
package backtesting |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval" |
||||||
|
) |
||||||
|
|
||||||
|
// queryEvaluator is the backtestingEvaluator implementation for regular
// alert rules: it re-runs the rule's query condition at historical points
// in time via the wrapped eval.ConditionEvaluator.
type queryEvaluator struct {
	// eval executes the rule condition for an arbitrary evaluation time.
	eval eval.ConditionEvaluator
}
||||||
|
|
||||||
|
func (d *queryEvaluator) Eval(ctx context.Context, from, to time.Time, interval time.Duration, callback callbackFunc) error { |
||||||
|
for now := from; now.Before(to); now = now.Add(interval) { |
||||||
|
results, err := d.eval.Evaluate(ctx, now) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
err = callback(now, results) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
@ -0,0 +1,89 @@ |
|||||||
|
package backtesting |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"errors" |
||||||
|
"math/rand" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/stretchr/testify/mock" |
||||||
|
"github.com/stretchr/testify/require" |
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval" |
||||||
|
"github.com/grafana/grafana/pkg/services/ngalert/eval/eval_mocks" |
||||||
|
) |
||||||
|
|
||||||
|
// TestQueryEvaluator_Eval checks that queryEvaluator.Eval calls the
// condition evaluator once per interval step and stops on the first error
// from either the evaluation or the callback.
func TestQueryEvaluator_Eval(t *testing.T) {
	ctx := context.Background()
	// Randomized interval (1-9s) and step count (5-15); from/to are chosen
	// so the [from, to) range contains exactly `times` interval steps.
	interval := time.Duration(rand.Int63n(9)+1) * time.Second
	times := rand.Intn(11) + 5
	to := time.Now()
	from := to.Add(-time.Duration(times) * interval)

	t.Run("should evaluate query", func(t *testing.T) {
		m := &eval_mocks.ConditionEvaluatorMock{}
		expectedResults := eval.Results{}
		m.EXPECT().Evaluate(mock.Anything, mock.Anything).Return(expectedResults, nil)
		evaluator := queryEvaluator{
			eval: m,
		}

		// Collects the evaluation timestamps passed to the callback.
		intervals := make([]time.Time, 0, times)

		err := evaluator.Eval(ctx, from, to, interval, func(now time.Time, results eval.Results) error {
			intervals = append(intervals, now)
			return nil
		})
		require.NoError(t, err)
		require.Len(t, intervals, times)

		// Evaluate must have been called exactly once per step, with the
		// same timestamps that were handed to the callback.
		m.AssertNumberOfCalls(t, "Evaluate", times)
		for _, now := range intervals {
			m.AssertCalled(t, "Evaluate", ctx, now)
		}
	})

	t.Run("should stop evaluation if error", func(t *testing.T) {
		t.Run("when evaluation fails", func(t *testing.T) {
			m := &eval_mocks.ConditionEvaluatorMock{}
			expectedResults := eval.Results{}
			expectedError := errors.New("test")
			// Expectation order matters: first 3 calls succeed, the 4th fails.
			m.EXPECT().Evaluate(mock.Anything, mock.Anything).Return(expectedResults, nil).Times(3)
			m.EXPECT().Evaluate(mock.Anything, mock.Anything).Return(nil, expectedError).Once()
			evaluator := queryEvaluator{
				eval: m,
			}

			intervals := make([]time.Time, 0, times)

			err := evaluator.Eval(ctx, from, to, interval, func(now time.Time, results eval.Results) error {
				intervals = append(intervals, now)
				return nil
			})
			require.ErrorIs(t, err, expectedError)
			// Only the 3 successful evaluations reach the callback.
			require.Len(t, intervals, 3)
		})

		t.Run("when callback fails", func(t *testing.T) {
			m := &eval_mocks.ConditionEvaluatorMock{}
			expectedResults := eval.Results{}
			expectedError := errors.New("test")
			m.EXPECT().Evaluate(mock.Anything, mock.Anything).Return(expectedResults, nil)
			evaluator := queryEvaluator{
				eval: m,
			}

			intervals := make([]time.Time, 0, times)

			// The callback errors after 4 successful invocations; Eval must
			// propagate that error and stop iterating.
			err := evaluator.Eval(ctx, from, to, interval, func(now time.Time, results eval.Results) error {
				if len(intervals) > 3 {
					return expectedError
				}
				intervals = append(intervals, now)
				return nil
			})
			require.ErrorIs(t, err, expectedError)
		})
	})
}
@ -0,0 +1,306 @@ |
|||||||
|
{ |
||||||
|
"data": { |
||||||
|
"from": "2022-10-19T18:44:00Z", |
||||||
|
"to": "2022-10-19T19:44:00Z", |
||||||
|
"interval": "1m", |
||||||
|
"for": "0", |
||||||
|
"labels": { |
||||||
|
"templatable-label": "test" |
||||||
|
}, |
||||||
|
"annotations": { |
||||||
|
"anno-test": "test" |
||||||
|
}, |
||||||
|
"condition": "A", |
||||||
|
"no_data_state": "Alerting", |
||||||
|
"data": [ |
||||||
|
{ |
||||||
|
"refId": "A", |
||||||
|
"queryType": "", |
||||||
|
"datasourceUid": "__data__", |
||||||
|
"model": { |
||||||
|
"data": { |
||||||
|
"schema": { |
||||||
|
"name": "A-series", |
||||||
|
"refId": "A", |
||||||
|
"fields": [ |
||||||
|
{ |
||||||
|
"name": "Time", |
||||||
|
"type": "time", |
||||||
|
"typeInfo": { |
||||||
|
"frame": "time.Time", |
||||||
|
"nullable": true |
||||||
|
} |
||||||
|
}, |
||||||
|
{ |
||||||
|
"name": "A-series", |
||||||
|
"type": "number", |
||||||
|
"typeInfo": { |
||||||
|
"frame": "float64", |
||||||
|
"nullable": true |
||||||
|
}, |
||||||
|
"labels": { |
||||||
|
"label": "2", |
||||||
|
"test": "1" |
||||||
|
} |
||||||
|
} |
||||||
|
] |
||||||
|
}, |
||||||
|
"data": { |
||||||
|
"values": [ |
||||||
|
[ |
||||||
|
1666205040000, |
||||||
|
1666205100000, |
||||||
|
1666205160000, |
||||||
|
1666205220000, |
||||||
|
1666205280000, |
||||||
|
1666205340000, |
||||||
|
1666205400000, |
||||||
|
1666205460000, |
||||||
|
1666205520000, |
||||||
|
1666205580000, |
||||||
|
1666205640000, |
||||||
|
1666205700000, |
||||||
|
1666205760000, |
||||||
|
1666205820000, |
||||||
|
1666205880000, |
||||||
|
1666205940000, |
||||||
|
1666206000000, |
||||||
|
1666206060000, |
||||||
|
1666206120000, |
||||||
|
1666206180000, |
||||||
|
1666206240000, |
||||||
|
1666206300000, |
||||||
|
1666206360000, |
||||||
|
1666206420000, |
||||||
|
1666206480000, |
||||||
|
1666206540000, |
||||||
|
1666206600000, |
||||||
|
1666206660000, |
||||||
|
1666206720000, |
||||||
|
1666206780000, |
||||||
|
1666206840000, |
||||||
|
1666206900000, |
||||||
|
1666206960000, |
||||||
|
1666207020000, |
||||||
|
1666207080000, |
||||||
|
1666207140000, |
||||||
|
1666207200000, |
||||||
|
1666207260000, |
||||||
|
1666207320000, |
||||||
|
1666207380000, |
||||||
|
1666207440000, |
||||||
|
1666207500000, |
||||||
|
1666207560000, |
||||||
|
1666207620000, |
||||||
|
1666207680000, |
||||||
|
1666207740000, |
||||||
|
1666207800000, |
||||||
|
1666207860000, |
||||||
|
1666207920000, |
||||||
|
1666207980000, |
||||||
|
1666208040000, |
||||||
|
1666208100000, |
||||||
|
1666208160000, |
||||||
|
1666208220000, |
||||||
|
1666208280000, |
||||||
|
1666208340000, |
||||||
|
1666208400000, |
||||||
|
1666208460000, |
||||||
|
1666208520000, |
||||||
|
1666208580000, |
||||||
|
1666208640000 |
||||||
|
], |
||||||
|
[ |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
1, |
||||||
|
1, |
||||||
|
1, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
1, |
||||||
|
1, |
||||||
|
1, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
1, |
||||||
|
1, |
||||||
|
1, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
1, |
||||||
|
1, |
||||||
|
1, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0, |
||||||
|
0 |
||||||
|
] |
||||||
|
] |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
] |
||||||
|
}, |
||||||
|
"query": { |
||||||
|
"from": "2022-10-19T18:44:00Z", |
||||||
|
"to": "2022-10-19T19:44:00Z", |
||||||
|
"interval": "1m", |
||||||
|
"for": "5m", |
||||||
|
"labels": { |
||||||
|
"templatable-label": "TMPL-{{.Labels.state}}" |
||||||
|
}, |
||||||
|
"annotations": { |
||||||
|
"anno-test": "TMPL-ANNO-{{.Labels.state}}" |
||||||
|
}, |
||||||
|
"condition": "C", |
||||||
|
"no_data_state": "Alerting", |
||||||
|
"data": [ |
||||||
|
{ |
||||||
|
"refId": "A", |
||||||
|
"datasourceUid": "testdata", |
||||||
|
"queryType": "", |
||||||
|
"relativeTimeRange": { |
||||||
|
"from": 600, |
||||||
|
"to": 0 |
||||||
|
}, |
||||||
|
"model": { |
||||||
|
"refId": "A", |
||||||
|
"hide": false, |
||||||
|
"scenarioId": "usa", |
||||||
|
"usa": { |
||||||
|
"mode": "timeseries", |
||||||
|
"period": "1m", |
||||||
|
"states": [ |
||||||
|
"GA", "FL", "AL", "AZ" |
||||||
|
], |
||||||
|
"fields": [ |
||||||
|
"baz" |
||||||
|
] |
||||||
|
} |
||||||
|
} |
||||||
|
}, |
||||||
|
{ |
||||||
|
"refId": "B", |
||||||
|
"datasourceUid": "-100", |
||||||
|
"queryType": "", |
||||||
|
"model": { |
||||||
|
"refId": "B", |
||||||
|
"hide": false, |
||||||
|
"type": "reduce", |
||||||
|
"datasource": { |
||||||
|
"uid": "-100", |
||||||
|
"type": "__expr__" |
||||||
|
}, |
||||||
|
"conditions": [ |
||||||
|
{ |
||||||
|
"type": "query", |
||||||
|
"evaluator": { |
||||||
|
"params": [], |
||||||
|
"type": "gt" |
||||||
|
}, |
||||||
|
"operator": { |
||||||
|
"type": "and" |
||||||
|
}, |
||||||
|
"query": { |
||||||
|
"params": [ |
||||||
|
"B" |
||||||
|
] |
||||||
|
}, |
||||||
|
"reducer": { |
||||||
|
"params": [], |
||||||
|
"type": "last" |
||||||
|
} |
||||||
|
} |
||||||
|
], |
||||||
|
"reducer": "last", |
||||||
|
"expression": "A" |
||||||
|
}, |
||||||
|
"relativeTimeRange": { |
||||||
|
"from": 600, |
||||||
|
"to": 0 |
||||||
|
} |
||||||
|
}, |
||||||
|
{ |
||||||
|
"refId": "C", |
||||||
|
"datasourceUid": "-100", |
||||||
|
"queryType": "", |
||||||
|
"model": { |
||||||
|
"refId": "C", |
||||||
|
"hide": false, |
||||||
|
"type": "threshold", |
||||||
|
"datasource": { |
||||||
|
"uid": "-100", |
||||||
|
"type": "__expr__" |
||||||
|
}, |
||||||
|
"conditions": [ |
||||||
|
{ |
||||||
|
"type": "query", |
||||||
|
"evaluator": { |
||||||
|
"params": [ |
||||||
|
0 |
||||||
|
], |
||||||
|
"type": "gt" |
||||||
|
}, |
||||||
|
"operator": { |
||||||
|
"type": "and" |
||||||
|
}, |
||||||
|
"query": { |
||||||
|
"params": [ |
||||||
|
"C" |
||||||
|
] |
||||||
|
}, |
||||||
|
"reducer": { |
||||||
|
"params": [], |
||||||
|
"type": "last" |
||||||
|
} |
||||||
|
} |
||||||
|
], |
||||||
|
"expression": "B" |
||||||
|
}, |
||||||
|
"relativeTimeRange": { |
||||||
|
"from": 600, |
||||||
|
"to": 0 |
||||||
|
} |
||||||
|
} |
||||||
|
] |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,136 @@ |
|||||||
|
package alerting |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"encoding/json" |
||||||
|
"net/http" |
||||||
|
"os" |
||||||
|
"path/filepath" |
||||||
|
"testing" |
||||||
|
|
||||||
|
"github.com/grafana/grafana-plugin-sdk-go/data" |
||||||
|
"github.com/stretchr/testify/require" |
||||||
|
|
||||||
|
"github.com/grafana/grafana/pkg/services/accesscontrol" |
||||||
|
"github.com/grafana/grafana/pkg/services/accesscontrol/resourcepermissions" |
||||||
|
"github.com/grafana/grafana/pkg/services/datasources" |
||||||
|
"github.com/grafana/grafana/pkg/services/featuremgmt" |
||||||
|
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" |
||||||
|
"github.com/grafana/grafana/pkg/services/org" |
||||||
|
"github.com/grafana/grafana/pkg/services/user" |
||||||
|
"github.com/grafana/grafana/pkg/setting" |
||||||
|
"github.com/grafana/grafana/pkg/tests/testinfra" |
||||||
|
) |
||||||
|
|
||||||
|
func TestBacktesting(t *testing.T) { |
||||||
|
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{ |
||||||
|
DisableLegacyAlerting: true, |
||||||
|
EnableUnifiedAlerting: true, |
||||||
|
DisableAnonymous: true, |
||||||
|
AppModeProduction: true, |
||||||
|
EnableFeatureToggles: []string{ |
||||||
|
featuremgmt.FlagAlertingBacktesting, |
||||||
|
}, |
||||||
|
EnableLog: false, |
||||||
|
}) |
||||||
|
|
||||||
|
grafanaListedAddr, env := testinfra.StartGrafanaEnv(t, dir, path) |
||||||
|
|
||||||
|
userId := createUser(t, env.SQLStore, user.CreateUserCommand{ |
||||||
|
DefaultOrgRole: string(org.RoleAdmin), |
||||||
|
Password: "admin", |
||||||
|
Login: "admin", |
||||||
|
}) |
||||||
|
|
||||||
|
apiCli := newAlertingApiClient(grafanaListedAddr, "admin", "admin") |
||||||
|
|
||||||
|
input, err := os.ReadFile(filepath.Join("api_backtesting_data.json")) |
||||||
|
require.NoError(t, err) |
||||||
|
var testData map[string]apimodels.BacktestConfig |
||||||
|
require.NoError(t, json.Unmarshal(input, &testData)) |
||||||
|
|
||||||
|
queryRequest, ok := testData["query"] |
||||||
|
require.Truef(t, ok, "The data file does not contain a field `query`") |
||||||
|
|
||||||
|
for _, query := range queryRequest.Data { |
||||||
|
isExpr, _ := query.IsExpression() |
||||||
|
if isExpr { |
||||||
|
continue |
||||||
|
} |
||||||
|
t.Logf("Creating a new test data source with UID %s", query.DatasourceUID) |
||||||
|
dsCmd := &datasources.AddDataSourceCommand{ |
||||||
|
Name: "Backtesting-TestDatasource", |
||||||
|
Type: "testdata", |
||||||
|
Access: datasources.DS_ACCESS_PROXY, |
||||||
|
Uid: query.DatasourceUID, |
||||||
|
UserId: userId, |
||||||
|
OrgId: 1, |
||||||
|
} |
||||||
|
err := env.Server.HTTPServer.DataSourcesService.AddDataSource(context.Background(), dsCmd) |
||||||
|
require.NoError(t, err) |
||||||
|
break |
||||||
|
} |
||||||
|
|
||||||
|
t.Run("and request contains data", func(t *testing.T) { |
||||||
|
t.Run("should accept request", func(t *testing.T) { |
||||||
|
request, ok := testData["data"] |
||||||
|
require.Truef(t, ok, "The data file does not contain a field `data`") |
||||||
|
|
||||||
|
status, body := apiCli.SubmitRuleForBacktesting(t, request) |
||||||
|
require.Equal(t, http.StatusOK, status) |
||||||
|
var result data.Frame |
||||||
|
require.NoErrorf(t, json.Unmarshal([]byte(body), &result), "cannot parse response to data frame") |
||||||
|
}) |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("and request contains query", func(t *testing.T) { |
||||||
|
t.Run("should accept request with query", func(t *testing.T) { |
||||||
|
status, body := apiCli.SubmitRuleForBacktesting(t, queryRequest) |
||||||
|
require.Equalf(t, http.StatusOK, status, "Response: %s", body) |
||||||
|
var result data.Frame |
||||||
|
require.NoErrorf(t, json.Unmarshal([]byte(body), &result), "cannot parse response to data frame") |
||||||
|
}) |
||||||
|
}) |
||||||
|
|
||||||
|
t.Run("if user does not have permissions", func(t *testing.T) { |
||||||
|
if !setting.IsEnterprise { |
||||||
|
t.Skip("Enterprise-only test") |
||||||
|
} |
||||||
|
|
||||||
|
testUserId := createUser(t, env.SQLStore, user.CreateUserCommand{ |
||||||
|
DefaultOrgRole: "", |
||||||
|
Password: "test", |
||||||
|
Login: "test", |
||||||
|
}) |
||||||
|
|
||||||
|
testUserApiCli := newAlertingApiClient(grafanaListedAddr, "test", "test") |
||||||
|
|
||||||
|
t.Run("fail if can't read rules", func(t *testing.T) { |
||||||
|
status, body := testUserApiCli.SubmitRuleForBacktesting(t, queryRequest) |
||||||
|
require.Contains(t, body, accesscontrol.ActionAlertingRuleRead) |
||||||
|
require.Equalf(t, http.StatusForbidden, status, "Response: %s", body) |
||||||
|
}) |
||||||
|
|
||||||
|
// access control permissions store
|
||||||
|
permissionsStore := resourcepermissions.NewStore(env.SQLStore) |
||||||
|
_, err := permissionsStore.SetUserResourcePermission(context.Background(), |
||||||
|
accesscontrol.GlobalOrgID, |
||||||
|
accesscontrol.User{ID: testUserId}, |
||||||
|
resourcepermissions.SetResourcePermissionCommand{ |
||||||
|
Actions: []string{ |
||||||
|
accesscontrol.ActionAlertingRuleRead, |
||||||
|
}, |
||||||
|
Resource: "folders", |
||||||
|
ResourceID: "*", |
||||||
|
ResourceAttribute: "uid", |
||||||
|
}, nil) |
||||||
|
require.NoError(t, err) |
||||||
|
testUserApiCli.ReloadCachedPermissions(t) |
||||||
|
|
||||||
|
t.Run("fail if can't query data sources", func(t *testing.T) { |
||||||
|
status, body := testUserApiCli.SubmitRuleForBacktesting(t, queryRequest) |
||||||
|
require.Contains(t, body, "user is not authorized to query one or many data sources used by the rule") |
||||||
|
require.Equalf(t, http.StatusUnauthorized, status, "Response: %s", body) |
||||||
|
}) |
||||||
|
}) |
||||||
|
} |
Loading…
Reference in new issue