From 4d02f73e5f1936ba5a2b2e094806676d434c0788 Mon Sep 17 00:00:00 2001 From: Yuriy Tseretyan Date: Wed, 22 Jun 2022 10:52:46 -0400 Subject: [PATCH] Alerting: Persist rule position in the group (#50051) Migrations: * add a new column alert_group_idx to alert_rule table * add a new column alert_group_idx to alert_rule_version table * re-index existing rules during migration API: * set group index on update. Use the natural order of items in the array as group index * sort rules in the group on GET * update the version of all rules of all affected groups. This will make optimistic lock work in the case of multiple concurrent request touching the same groups. UI: * update UI to keep the order of alerts in a group --- pkg/services/ngalert/CHANGELOG.md | 1 + pkg/services/ngalert/api/api_prometheus.go | 2 +- .../ngalert/api/api_prometheus_test.go | 43 +++ pkg/services/ngalert/api/api_ruler.go | 58 +++- pkg/services/ngalert/api/api_ruler_test.go | 267 +++++++++++++++++- .../ngalert/api/api_ruler_validation.go | 1 + .../ngalert/api/authorization_test.go | 14 +- pkg/services/ngalert/models/alert_rule.go | 19 +- .../ngalert/models/alert_rule_test.go | 38 +++ pkg/services/ngalert/models/testing.go | 48 +++- pkg/services/ngalert/store/alert_rule.go | 4 +- .../sqlstore/migrations/migrations.go | 2 + .../sqlstore/migrations/ualert/alert_rule.go | 4 + .../sqlstore/migrations/ualert/tables.go | 20 ++ .../sqlstore/migrations/ualert/ualert.go | 91 +++++- pkg/tests/api/alerting/api_ruler_test.go | 98 +++++++ pkg/tests/api/alerting/testing.go | 14 +- .../alerting/unified/utils/rulerClient.ts | 15 +- 18 files changed, 703 insertions(+), 36 deletions(-) diff --git a/pkg/services/ngalert/CHANGELOG.md b/pkg/services/ngalert/CHANGELOG.md index e7e9577060f..2b4ab88a4d1 100644 --- a/pkg/services/ngalert/CHANGELOG.md +++ b/pkg/services/ngalert/CHANGELOG.md @@ -72,6 +72,7 @@ Scopes must have an order to ensure consistency and ease of search, this helps u - [FEATURE] Indicate whether contact 
point is provisioned when GETting Alertmanager configuration #48323 - [FEATURE] Indicate whether alert rule is provisioned when GETting the rule #48458 - [FEATURE] Alert rules with associated panels will take screenshots. #49293 #49338 #49374 #49377 #49378 #49379 #49381 #49385 #49439 #49445 +- [FEATURE] Persistent order of alert rules in a group #50051 - [BUGFIX] Migration: ignore alerts that do not belong to any existing organization\dashboard #49192 - [BUGFIX] Allow anonymous access to alerts #49203 - [BUGFIX] RBAC: replace create\update\delete actions for notification policies by alert.notifications:write #49185 diff --git a/pkg/services/ngalert/api/api_prometheus.go b/pkg/services/ngalert/api/api_prometheus.go index 663c39e96d4..0136e7cc859 100644 --- a/pkg/services/ngalert/api/api_prometheus.go +++ b/pkg/services/ngalert/api/api_prometheus.go @@ -191,7 +191,7 @@ func (srv PrometheusSrv) toRuleGroup(groupName string, folder *models.Folder, ru Name: groupName, File: folder.Title, // file is what Prometheus uses for provisioning, we replace it with namespace. 
} - + ngmodels.RulesGroup(rules).SortByGroupIndex() for _, rule := range rules { alertingRule := apimodels.AlertingRule{ State: "inactive", diff --git a/pkg/services/ngalert/api/api_prometheus_test.go b/pkg/services/ngalert/api/api_prometheus_test.go index b4b8b70d836..81448d72db3 100644 --- a/pkg/services/ngalert/api/api_prometheus_test.go +++ b/pkg/services/ngalert/api/api_prometheus_test.go @@ -419,6 +419,49 @@ func TestRouteGetRuleStatuses(t *testing.T) { `, folder.Title), string(r.Body())) }) + t.Run("with many rules in a group", func(t *testing.T) { + t.Run("should return sorted", func(t *testing.T) { + ruleStore := store.NewFakeRuleStore(t) + fakeAIM := NewFakeAlertInstanceManager(t) + groupKey := ngmodels.GenerateGroupKey(orgID) + _, rules := ngmodels.GenerateUniqueAlertRules(rand.Intn(5)+5, ngmodels.AlertRuleGen(withGroupKey(groupKey), ngmodels.WithUniqueGroupIndex())) + ruleStore.PutRule(context.Background(), rules...) + + api := PrometheusSrv{ + log: log.NewNopLogger(), + manager: fakeAIM, + store: ruleStore, + ac: acmock.New().WithDisabled(), + } + + response := api.RouteGetRuleStatuses(c) + require.Equal(t, http.StatusOK, response.Status()) + result := &apimodels.RuleResponse{} + require.NoError(t, json.Unmarshal(response.Body(), result)) + + ngmodels.RulesGroup(rules).SortByGroupIndex() + + require.Len(t, result.Data.RuleGroups, 1) + group := result.Data.RuleGroups[0] + require.Equal(t, groupKey.RuleGroup, group.Name) + require.Len(t, group.Rules, len(rules)) + for i, actual := range group.Rules { + expected := rules[i] + if actual.Name != expected.Title { + var actualNames []string + var expectedNames []string + for _, rule := range group.Rules { + actualNames = append(actualNames, rule.Name) + } + for _, rule := range rules { + expectedNames = append(expectedNames, rule.Title) + } + require.Fail(t, fmt.Sprintf("rules are not sorted by group index. Expected: %v. 
Actual: %v", expectedNames, actualNames)) + } + } + }) + }) + t.Run("when fine-grained access is enabled", func(t *testing.T) { t.Run("should return only rules if the user can query all data sources", func(t *testing.T) { ruleStore := store.NewFakeRuleStore(t) diff --git a/pkg/services/ngalert/api/api_ruler.go b/pkg/services/ngalert/api/api_ruler.go index b2f69167225..3bbe6dcf87d 100644 --- a/pkg/services/ngalert/api/api_ruler.go +++ b/pkg/services/ngalert/api/api_ruler.go @@ -184,7 +184,7 @@ func (srv RulerSrv) RouteGetNamespaceRulesConfig(c *models.ReqContext) response. return ErrResp(http.StatusInternalServerError, err, "failed to get provenance for rule group") } - ruleGroups := make(map[string][]*ngmodels.AlertRule) + ruleGroups := make(map[string]ngmodels.RulesGroup) for _, r := range q.Result { ruleGroups[r.RuleGroup] = append(ruleGroups[r.RuleGroup], r) } @@ -284,7 +284,7 @@ func (srv RulerSrv) RouteGetRulesConfig(c *models.ReqContext) response.Response return ErrResp(http.StatusInternalServerError, err, "failed to get alert rules") } - configs := make(map[ngmodels.AlertRuleGroupKey][]*ngmodels.AlertRule) + configs := make(map[ngmodels.AlertRuleGroupKey]ngmodels.RulesGroup) for _, r := range q.Result { groupKey := r.GetGroupKey() group := configs[groupKey] @@ -360,7 +360,7 @@ func (srv RulerSrv) updateAlertRulesInGroup(c *models.ReqContext, groupKey ngmod return err } - finalChanges = groupChanges + finalChanges = calculateAutomaticChanges(groupChanges) logger.Debug("updating database with the authorized changes", "add", len(finalChanges.New), "update", len(finalChanges.New), "delete", len(finalChanges.Delete)) if len(finalChanges.Update) > 0 || len(finalChanges.New) > 0 { @@ -448,7 +448,8 @@ func (srv RulerSrv) updateAlertRulesInGroup(c *models.ReqContext, groupKey ngmod return response.JSON(http.StatusAccepted, util.DynMap{"message": "rule group updated successfully"}) } -func toGettableRuleGroupConfig(groupName string, rules []*ngmodels.AlertRule, 
namespaceID int64, provenanceRecords map[string]ngmodels.Provenance) apimodels.GettableRuleGroupConfig { +func toGettableRuleGroupConfig(groupName string, rules ngmodels.RulesGroup, namespaceID int64, provenanceRecords map[string]ngmodels.Provenance) apimodels.GettableRuleGroupConfig { + rules.SortByGroupIndex() ruleNodes := make([]apimodels.GettableExtendedRuleNode, 0, len(rules)) var interval time.Duration if len(rules) > 0 { @@ -516,7 +517,7 @@ type changes struct { GroupKey ngmodels.AlertRuleGroupKey // AffectedGroups contains all rules of all groups that are affected by these changes. // For example, during moving a rule from one group to another this map will contain all rules from two groups - AffectedGroups map[ngmodels.AlertRuleGroupKey][]*ngmodels.AlertRule + AffectedGroups map[ngmodels.AlertRuleGroupKey]ngmodels.RulesGroup New []*ngmodels.AlertRule Update []ruleUpdate Delete []*ngmodels.AlertRule @@ -555,7 +556,7 @@ func verifyProvisionedRulesNotAffected(ctx context.Context, provenanceStore prov // calculateChanges calculates the difference between rules in the group in the database and the submitted rules. If a submitted rule has UID it tries to find it in the database (in other groups). // returns a list of rules that need to be added, updated and deleted. Deleted considered rules in the database that belong to the group but do not exist in the list of submitted rules. 
func calculateChanges(ctx context.Context, ruleStore store.RuleStore, groupKey ngmodels.AlertRuleGroupKey, submittedRules []*ngmodels.AlertRule) (*changes, error) { - affectedGroups := make(map[ngmodels.AlertRuleGroupKey][]*ngmodels.AlertRule) + affectedGroups := make(map[ngmodels.AlertRuleGroupKey]ngmodels.RulesGroup) q := &ngmodels.ListAlertRulesQuery{ OrgID: groupKey.OrgID, NamespaceUIDs: []string{groupKey.NamespaceUID}, @@ -636,5 +637,50 @@ func calculateChanges(ctx context.Context, ruleStore store.RuleStore, groupKey n }, nil } +// calculateAutomaticChanges scans all affected groups and creates either a noop update that will increment the version of each rule as well as re-index other groups. +// this is needed to make sure that there are no concurrent changes made to any of the affected groups. +// Returns a copy of changes enriched with either noop or group index changes for all rules in the affected groups. +func calculateAutomaticChanges(ch *changes) *changes { + updatingRules := make(map[ngmodels.AlertRuleKey]struct{}, len(ch.Delete)+len(ch.Update)) + for _, update := range ch.Update { + updatingRules[update.Existing.GetKey()] = struct{}{} + } + for _, del := range ch.Delete { + updatingRules[del.GetKey()] = struct{}{} + } + var toUpdate []ruleUpdate + for groupKey, rules := range ch.AffectedGroups { + if groupKey != ch.GroupKey { + rules.SortByGroupIndex() + } + idx := 1 + for _, rule := range rules { + if _, ok := updatingRules[rule.GetKey()]; ok { // exclude rules that are going to be either updated or deleted + continue + } + upd := ruleUpdate{ + Existing: rule, + New: rule, + } + if groupKey != ch.GroupKey { + if rule.RuleGroupIndex != idx { + upd.New = ngmodels.CopyRule(rule) + upd.New.RuleGroupIndex = idx + upd.Diff = rule.Diff(upd.New, alertRuleFieldsToIgnoreInDiff...)
+ } + idx++ + } + toUpdate = append(toUpdate, upd) + } + } + return &changes{ + GroupKey: ch.GroupKey, + AffectedGroups: ch.AffectedGroups, + New: ch.New, + Update: append(ch.Update, toUpdate...), + Delete: ch.Delete, + } +} + // alertRuleFieldsToIgnoreInDiff contains fields that the AlertRule.Diff should ignore var alertRuleFieldsToIgnoreInDiff = []string{"ID", "Version", "Updated"} diff --git a/pkg/services/ngalert/api/api_ruler_test.go b/pkg/services/ngalert/api/api_ruler_test.go index 7228bb17cdb..d624620c88f 100644 --- a/pkg/services/ngalert/api/api_ruler_test.go +++ b/pkg/services/ngalert/api/api_ruler_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "math/rand" "net/http" "net/url" @@ -75,7 +76,7 @@ func TestCalculateChanges(t *testing.T) { require.Equal(t, db, toDelete) } require.Contains(t, changes.AffectedGroups, groupKey) - require.Equal(t, inDatabase, changes.AffectedGroups[groupKey]) + require.Equal(t, models.RulesGroup(inDatabase), changes.AffectedGroups[groupKey]) }) t.Run("should detect alerts that needs to be updated", func(t *testing.T) { @@ -102,7 +103,7 @@ func TestCalculateChanges(t *testing.T) { require.Empty(t, changes.New) require.Contains(t, changes.AffectedGroups, groupKey) - require.Equal(t, inDatabase, changes.AffectedGroups[groupKey]) + require.Equal(t, models.RulesGroup(inDatabase), changes.AffectedGroups[groupKey]) }) t.Run("should include only if there are changes ignoring specific fields", func(t *testing.T) { @@ -658,6 +659,49 @@ func TestRouteGetNamespaceRulesConfig(t *testing.T) { } require.True(t, found) }) + t.Run("should enforce order of rules in the group", func(t *testing.T) { + orgID := rand.Int63() + folder := randFolder() + ruleStore := store.NewFakeRuleStore(t) + ruleStore.Folders[orgID] = append(ruleStore.Folders[orgID], folder) + groupKey := models.GenerateGroupKey(orgID) + groupKey.NamespaceUID = folder.Uid + + expectedRules := models.GenerateAlertRules(rand.Intn(5)+5, 
models.AlertRuleGen(withGroupKey(groupKey), models.WithUniqueGroupIndex())) + ruleStore.PutRule(context.Background(), expectedRules...) + ac := acMock.New().WithDisabled() + + response := createService(ac, ruleStore, nil).RouteGetNamespaceRulesConfig(createRequestContext(orgID, models2.ROLE_VIEWER, map[string]string{ + ":Namespace": folder.Title, + })) + + require.Equal(t, http.StatusAccepted, response.Status()) + result := &apimodels.NamespaceConfigResponse{} + require.NoError(t, json.Unmarshal(response.Body(), result)) + require.NotNil(t, result) + + models.RulesGroup(expectedRules).SortByGroupIndex() + + require.Contains(t, *result, folder.Title) + groups := (*result)[folder.Title] + require.Len(t, groups, 1) + group := groups[0] + require.Equal(t, groupKey.RuleGroup, group.Name) + for i, actual := range groups[0].Rules { + expected := expectedRules[i] + if actual.GrafanaManagedAlert.UID != expected.UID { + var actualUIDs []string + var expectedUIDs []string + for _, rule := range group.Rules { + actualUIDs = append(actualUIDs, rule.GrafanaManagedAlert.UID) + } + for _, rule := range expectedRules { + expectedUIDs = append(expectedUIDs, rule.UID) + } + require.Fail(t, fmt.Sprintf("rules are not sorted by group index. Expected: %v. Actual: %v", expectedUIDs, actualUIDs)) + } + } + }) } func TestRouteGetRulesConfig(t *testing.T) { @@ -698,6 +742,48 @@ func TestRouteGetRulesConfig(t *testing.T) { }) }) }) + + t.Run("should return rules in group sorted by group index", func(t *testing.T) { + orgID := rand.Int63() + folder := randFolder() + ruleStore := store.NewFakeRuleStore(t) + ruleStore.Folders[orgID] = append(ruleStore.Folders[orgID], folder) + groupKey := models.GenerateGroupKey(orgID) + groupKey.NamespaceUID = folder.Uid + + expectedRules := models.GenerateAlertRules(rand.Intn(5)+5, models.AlertRuleGen(withGroupKey(groupKey), models.WithUniqueGroupIndex())) + ruleStore.PutRule(context.Background(), expectedRules...) 
+ ac := acMock.New().WithDisabled() + + response := createService(ac, ruleStore, nil).RouteGetRulesConfig(createRequestContext(orgID, models2.ROLE_VIEWER, nil)) + + require.Equal(t, http.StatusOK, response.Status()) + result := &apimodels.NamespaceConfigResponse{} + require.NoError(t, json.Unmarshal(response.Body(), result)) + require.NotNil(t, result) + + models.RulesGroup(expectedRules).SortByGroupIndex() + + require.Contains(t, *result, folder.Title) + groups := (*result)[folder.Title] + require.Len(t, groups, 1) + group := groups[0] + require.Equal(t, groupKey.RuleGroup, group.Name) + for i, actual := range groups[0].Rules { + expected := expectedRules[i] + if actual.GrafanaManagedAlert.UID != expected.UID { + var actualUIDs []string + var expectedUIDs []string + for _, rule := range group.Rules { + actualUIDs = append(actualUIDs, rule.GrafanaManagedAlert.UID) + } + for _, rule := range expectedRules { + expectedUIDs = append(expectedUIDs, rule.UID) + } + require.Fail(t, fmt.Sprintf("rules are not sorted by group index. Expected: %v. Actual: %v", expectedUIDs, actualUIDs)) + } + } + }) } func TestRouteGetRulesGroupConfig(t *testing.T) { @@ -736,12 +822,52 @@ func TestRouteGetRulesGroupConfig(t *testing.T) { }) }) }) + + t.Run("should return rules in group sorted by group index", func(t *testing.T) { + orgID := rand.Int63() + folder := randFolder() + ruleStore := store.NewFakeRuleStore(t) + ruleStore.Folders[orgID] = append(ruleStore.Folders[orgID], folder) + groupKey := models.GenerateGroupKey(orgID) + groupKey.NamespaceUID = folder.Uid + + expectedRules := models.GenerateAlertRules(rand.Intn(5)+5, models.AlertRuleGen(withGroupKey(groupKey), models.WithUniqueGroupIndex())) + ruleStore.PutRule(context.Background(), expectedRules...) 
+ ac := acMock.New().WithDisabled() + + response := createService(ac, ruleStore, nil).RouteGetRulesGroupConfig(createRequestContext(orgID, models2.ROLE_VIEWER, map[string]string{ + ":Namespace": folder.Title, + ":Groupname": groupKey.RuleGroup, + })) + + require.Equal(t, http.StatusAccepted, response.Status()) + result := &apimodels.RuleGroupConfigResponse{} + require.NoError(t, json.Unmarshal(response.Body(), result)) + require.NotNil(t, result) + + models.RulesGroup(expectedRules).SortByGroupIndex() + + for i, actual := range result.Rules { + expected := expectedRules[i] + if actual.GrafanaManagedAlert.UID != expected.UID { + var actualUIDs []string + var expectedUIDs []string + for _, rule := range result.Rules { + actualUIDs = append(actualUIDs, rule.GrafanaManagedAlert.UID) + } + for _, rule := range expectedRules { + expectedUIDs = append(expectedUIDs, rule.UID) + } + require.Fail(t, fmt.Sprintf("rules are not sorted by group index. Expected: %v. Actual: %v", expectedUIDs, actualUIDs)) + } + } + }) } func TestVerifyProvisionedRulesNotAffected(t *testing.T) { orgID := rand.Int63() group := models.GenerateGroupKey(orgID) - affectedGroups := make(map[models.AlertRuleGroupKey][]*models.AlertRule) + affectedGroups := make(map[models.AlertRuleGroupKey]models.RulesGroup) var allRules []*models.AlertRule { rules := models.GenerateAlertRules(rand.Intn(3)+1, models.AlertRuleGen(withGroupKey(group))) @@ -799,6 +925,141 @@ func TestVerifyProvisionedRulesNotAffected(t *testing.T) { }) } +func TestCalculateAutomaticChanges(t *testing.T) { + orgID := rand.Int63() + + t.Run("should mark all rules in affected groups", func(t *testing.T) { + group := models.GenerateGroupKey(orgID) + rules := models.GenerateAlertRules(10, models.AlertRuleGen(withGroupKey(group))) + // copy rules to make sure that the function does not modify the original rules + copies := make([]*models.AlertRule, 0, len(rules)) + for _, rule := range rules { + copies = append(copies, models.CopyRule(rule)) + } 
+ + var updates []ruleUpdate + for i := 0; i < 5; i++ { + ruleCopy := models.CopyRule(copies[i]) + ruleCopy.Title += util.GenerateShortUID() + updates = append(updates, ruleUpdate{ + Existing: copies[i], + New: ruleCopy, + }) + } + + // simulate adding new rules, updating a few existing ones and deleting some from the same rule group + ch := &changes{ + GroupKey: group, + AffectedGroups: map[models.AlertRuleGroupKey]models.RulesGroup{ + group: copies, + }, + New: models.GenerateAlertRules(2, models.AlertRuleGen(withGroupKey(group))), + Update: updates, + Delete: rules[5:7], + } + + result := calculateAutomaticChanges(ch) + + require.NotEqual(t, ch, result) + require.Equal(t, ch.GroupKey, result.GroupKey) + require.Equal(t, map[models.AlertRuleGroupKey]models.RulesGroup{ + group: rules, + }, result.AffectedGroups) + require.Equal(t, ch.New, result.New) + require.Equal(t, rules[5:7], result.Delete) + var expected []ruleUpdate + expected = append(expected, updates...) + // all rules that were not updated or deleted directly by the user should be added to the update list as noop changes + for _, rule := range rules[7:] { + expected = append(expected, ruleUpdate{ + Existing: rule, + New: rule, + }) + } + require.Equal(t, expected, result.Update) + }) + + t.Run("should re-index rules in affected groups other than updated", func(t *testing.T) { + group := models.GenerateGroupKey(orgID) + rules := models.GenerateAlertRules(3, models.AlertRuleGen(withGroupKey(group), models.WithSequentialGroupIndex())) + group2 := models.GenerateGroupKey(orgID) + rules2 := models.GenerateAlertRules(4, models.AlertRuleGen(withGroupKey(group2), models.WithSequentialGroupIndex())) + + movedIndex := rand.Intn(len(rules2) - 1) + movedRule := rules2[movedIndex] + copyRule := models.CopyRule(movedRule) + copyRule.RuleGroup = group.RuleGroup + copyRule.NamespaceUID = group.NamespaceUID + copyRule.RuleGroupIndex = len(rules) + update := ruleUpdate{ + Existing: movedRule, + New: copyRule, + } + + shuffled := make([]*models.AlertRule, 0, len(rules2)) + 
copy(shuffled, rules2) + rand.Shuffle(len(shuffled), func(i, j int) { + shuffled[i], shuffled[j] = shuffled[j], shuffled[i] + }) + + // simulate moving a rule from one group to another. + ch := &changes{ + GroupKey: group, + AffectedGroups: map[models.AlertRuleGroupKey]models.RulesGroup{ + group: rules, + group2: shuffled, + }, + Update: []ruleUpdate{ + update, + }, + } + + result := calculateAutomaticChanges(ch) + + require.NotEqual(t, ch, result) + require.Equal(t, ch.GroupKey, result.GroupKey) + require.Equal(t, ch.AffectedGroups, result.AffectedGroups) + require.Equal(t, ch.New, result.New) + require.Equal(t, ch.Delete, result.Delete) + + require.Equal(t, ch.Update, result.Update[0:1]) + + require.Contains(t, result.Update, update) + for _, rule := range rules { + assert.Containsf(t, result.Update, ruleUpdate{ + Existing: rule, + New: rule, + }, "automatic changes expected to contain all rules of the updated group") + } + + // calculate expected index of the rules in the source group after the move + expectedReindex := make(map[string]int, len(rules2)-1) + idx := 1 + for _, rule := range rules2 { + if rule.UID == movedRule.UID { + continue + } + expectedReindex[rule.UID] = idx + idx++ + } + + for _, upd := range result.Update { + expectedIdx, ok := expectedReindex[upd.Existing.UID] + if !ok { + continue + } + diff := upd.Existing.Diff(upd.New) + if upd.Existing.RuleGroupIndex != expectedIdx { + require.Lenf(t, diff, 1, fmt.Sprintf("the rule in affected group should be re-indexed to %d but it still has index %d. 
Moved rule with index %d", expectedIdx, upd.Existing.RuleGroupIndex, movedIndex)) + require.Equal(t, "RuleGroupIndex", diff[0].Path) + require.Equal(t, expectedIdx, upd.New.RuleGroupIndex) + } else { + require.Empty(t, diff) + } + } + }) +} + func createService(ac *acMock.Mock, store *store.FakeRuleStore, scheduler schedule.ScheduleService) *RulerSrv { return &RulerSrv{ xactManager: store, diff --git a/pkg/services/ngalert/api/api_ruler_validation.go b/pkg/services/ngalert/api/api_ruler_validation.go index 5eed7bb8001..0ba65ee2bd1 100644 --- a/pkg/services/ngalert/api/api_ruler_validation.go +++ b/pkg/services/ngalert/api/api_ruler_validation.go @@ -177,6 +177,7 @@ func validateRuleGroup( } uids[rule.UID] = idx } + rule.RuleGroupIndex = idx + 1 result = append(result, rule) } return result, nil diff --git a/pkg/services/ngalert/api/authorization_test.go b/pkg/services/ngalert/api/authorization_test.go index b9b7d9d5c89..faa74552238 100644 --- a/pkg/services/ngalert/api/authorization_test.go +++ b/pkg/services/ngalert/api/authorization_test.go @@ -107,7 +107,7 @@ func createAllCombinationsOfPermissions(permissions map[string][]string) []map[s return permissionCombinations } -func getDatasourceScopesForRules(rules []*models.AlertRule) []string { +func getDatasourceScopesForRules(rules models.RulesGroup) []string { scopesMap := map[string]struct{}{} var result []string for _, rule := range rules { @@ -123,8 +123,8 @@ func getDatasourceScopesForRules(rules []*models.AlertRule) []string { return result } -func mapUpdates(updates []ruleUpdate, mapFunc func(ruleUpdate) *models.AlertRule) []*models.AlertRule { - result := make([]*models.AlertRule, 0, len(updates)) +func mapUpdates(updates []ruleUpdate, mapFunc func(ruleUpdate) *models.AlertRule) models.RulesGroup { + result := make(models.RulesGroup, 0, len(updates)) for _, update := range updates { result = append(result, mapFunc(update)) } @@ -172,7 +172,7 @@ func TestAuthorizeRuleChanges(t *testing.T) { rules2 := 
models.GenerateAlertRules(rand.Intn(4)+1, models.AlertRuleGen(withGroupKey(groupKey))) return &changes{ GroupKey: groupKey, - AffectedGroups: map[models.AlertRuleGroupKey][]*models.AlertRule{ + AffectedGroups: map[models.AlertRuleGroupKey]models.RulesGroup{ groupKey: append(rules, rules2...), }, New: nil, @@ -208,7 +208,7 @@ func TestAuthorizeRuleChanges(t *testing.T) { return &changes{ GroupKey: groupKey, - AffectedGroups: map[models.AlertRuleGroupKey][]*models.AlertRule{ + AffectedGroups: map[models.AlertRuleGroupKey]models.RulesGroup{ groupKey: append(rules, rules1...), }, New: nil, @@ -252,7 +252,7 @@ func TestAuthorizeRuleChanges(t *testing.T) { return &changes{ GroupKey: targetGroupKey, - AffectedGroups: map[models.AlertRuleGroupKey][]*models.AlertRule{ + AffectedGroups: map[models.AlertRuleGroupKey]models.RulesGroup{ groupKey: append(rules, rules1...), }, New: nil, @@ -317,7 +317,7 @@ func TestAuthorizeRuleChanges(t *testing.T) { return &changes{ GroupKey: targetGroupKey, - AffectedGroups: map[models.AlertRuleGroupKey][]*models.AlertRule{ + AffectedGroups: map[models.AlertRuleGroupKey]models.RulesGroup{ groupKey: sourceGroup, targetGroupKey: targetGroup, }, diff --git a/pkg/services/ngalert/models/alert_rule.go b/pkg/services/ngalert/models/alert_rule.go index 073c1c259fc..c3161282602 100644 --- a/pkg/services/ngalert/models/alert_rule.go +++ b/pkg/services/ngalert/models/alert_rule.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "sort" "time" "github.com/google/go-cmp/cmp" @@ -125,6 +126,7 @@ type AlertRule struct { DashboardUID *string `xorm:"dashboard_uid"` PanelID *int64 `xorm:"panel_id"` RuleGroup string + RuleGroupIndex int `xorm:"rule_group_idx"` NoDataState NoDataState ExecErrState ExecutionErrorState // ideally this field should have been apimodels.ApiDuration @@ -140,6 +142,9 @@ type SchedulableAlertRule struct { OrgID int64 `xorm:"org_id"` IntervalSeconds int64 Version int64 + NamespaceUID string `xorm:"namespace_uid"` + RuleGroup 
string + RuleGroupIndex int `xorm:"rule_group_idx"` } type LabelOption func(map[string]string) @@ -251,6 +256,7 @@ type AlertRuleVersion struct { RuleUID string `xorm:"rule_uid"` RuleNamespaceUID string `xorm:"rule_namespace_uid"` RuleGroup string + RuleGroupIndex int `xorm:"rule_group_idx"` ParentVersion int64 RestoredFrom int64 Version int64 @@ -297,7 +303,7 @@ type ListAlertRulesQuery struct { DashboardUID string PanelID int64 - Result []*AlertRule + Result RulesGroup } type GetAlertRulesForSchedulingQuery struct { @@ -394,3 +400,14 @@ func ValidateRuleGroupInterval(intervalSeconds, baseIntervalSeconds int64) error } return nil } + +type RulesGroup []*AlertRule + +func (g RulesGroup) SortByGroupIndex() { + sort.Slice(g, func(i, j int) bool { + if g[i].RuleGroupIndex == g[j].RuleGroupIndex { + return g[i].ID < g[j].ID + } + return g[i].RuleGroupIndex < g[j].RuleGroupIndex + }) +} diff --git a/pkg/services/ngalert/models/alert_rule_test.go b/pkg/services/ngalert/models/alert_rule_test.go index 7d346391509..6681ff40f65 100644 --- a/pkg/services/ngalert/models/alert_rule_test.go +++ b/pkg/services/ngalert/models/alert_rule_test.go @@ -3,6 +3,7 @@ package models import ( "encoding/json" "math/rand" + "sort" "strings" "testing" "time" @@ -354,6 +355,13 @@ func TestDiff(t *testing.T) { assert.Equal(t, rule2.For, diff[0].Right.Interface()) difCnt++ } + if rule1.RuleGroupIndex != rule2.RuleGroupIndex { + diff := diffs.GetDiffsForField("RuleGroupIndex") + assert.Len(t, diff, 1) + assert.Equal(t, rule1.RuleGroupIndex, diff[0].Left.Interface()) + assert.Equal(t, rule2.RuleGroupIndex, diff[0].Right.Interface()) + difCnt++ + } require.Lenf(t, diffs, difCnt, "Got some unexpected diffs. 
Either add to ignore or add assert to it") @@ -538,3 +546,33 @@ func TestDiff(t *testing.T) { }) }) } + +func TestSortByGroupIndex(t *testing.T) { + t.Run("should sort rules by GroupIndex", func(t *testing.T) { + rules := GenerateAlertRules(rand.Intn(5)+5, AlertRuleGen(WithUniqueGroupIndex())) + rand.Shuffle(len(rules), func(i, j int) { + rules[i], rules[j] = rules[j], rules[i] + }) + require.False(t, sort.SliceIsSorted(rules, func(i, j int) bool { + return rules[i].RuleGroupIndex < rules[j].RuleGroupIndex + })) + RulesGroup(rules).SortByGroupIndex() + require.True(t, sort.SliceIsSorted(rules, func(i, j int) bool { + return rules[i].RuleGroupIndex < rules[j].RuleGroupIndex + })) + }) + + t.Run("should sort by ID if same GroupIndex", func(t *testing.T) { + rules := GenerateAlertRules(rand.Intn(5)+5, AlertRuleGen(WithUniqueID(), WithGroupIndex(rand.Int()))) + rand.Shuffle(len(rules), func(i, j int) { + rules[i], rules[j] = rules[j], rules[i] + }) + require.False(t, sort.SliceIsSorted(rules, func(i, j int) bool { + return rules[i].ID < rules[j].ID + })) + RulesGroup(rules).SortByGroupIndex() + require.True(t, sort.SliceIsSorted(rules, func(i, j int) bool { + return rules[i].ID < rules[j].ID + })) + }) +} diff --git a/pkg/services/ngalert/models/testing.go b/pkg/services/ngalert/models/testing.go index b66e1aeaaf1..31492de9fe7 100644 --- a/pkg/services/ngalert/models/testing.go +++ b/pkg/services/ngalert/models/testing.go @@ -9,9 +9,11 @@ import ( "github.com/grafana/grafana/pkg/util" ) +type AlertRuleMutator func(*AlertRule) + // AlertRuleGen provides a factory function that generates a random AlertRule. 
// The mutators arguments allows changing fields of the resulting structure -func AlertRuleGen(mutators ...func(*AlertRule)) func() *AlertRule { +func AlertRuleGen(mutators ...AlertRuleMutator) func() *AlertRule { return func() *AlertRule { randNoDataState := func() NoDataState { s := [...]NoDataState{ @@ -74,6 +76,7 @@ func AlertRuleGen(mutators ...func(*AlertRule)) func() *AlertRule { DashboardUID: dashUID, PanelID: panelID, RuleGroup: "TEST-GROUP-" + util.GenerateShortUID(), + RuleGroupIndex: rand.Int(), NoDataState: randNoDataState(), ExecErrState: randErrState(), For: forInterval, @@ -88,6 +91,48 @@ func AlertRuleGen(mutators ...func(*AlertRule)) func() *AlertRule { } } +func WithUniqueID() AlertRuleMutator { + usedID := make(map[int64]struct{}) + return func(rule *AlertRule) { + for { + id := rand.Int63() + if _, ok := usedID[id]; !ok { + usedID[id] = struct{}{} + rule.ID = id + return + } + } + } +} + +func WithGroupIndex(groupIndex int) AlertRuleMutator { + return func(rule *AlertRule) { + rule.RuleGroupIndex = groupIndex + } +} + +func WithUniqueGroupIndex() AlertRuleMutator { + usedIdx := make(map[int]struct{}) + return func(rule *AlertRule) { + for { + idx := rand.Int() + if _, ok := usedIdx[idx]; !ok { + usedIdx[idx] = struct{}{} + rule.RuleGroupIndex = idx + return + } + } + } +} + +func WithSequentialGroupIndex() AlertRuleMutator { + idx := 1 + return func(rule *AlertRule) { + rule.RuleGroupIndex = idx + idx++ + } +} + func GenerateAlertQuery() AlertQuery { f := rand.Intn(10) + 5 t := rand.Intn(f) @@ -155,6 +200,7 @@ func CopyRule(r *AlertRule) *AlertRule { UID: r.UID, NamespaceUID: r.NamespaceUID, RuleGroup: r.RuleGroup, + RuleGroupIndex: r.RuleGroupIndex, NoDataState: r.NoDataState, ExecErrState: r.ExecErrState, For: r.For, diff --git a/pkg/services/ngalert/store/alert_rule.go b/pkg/services/ngalert/store/alert_rule.go index 6310078f099..c490ae8ae09 100644 --- a/pkg/services/ngalert/store/alert_rule.go +++ b/pkg/services/ngalert/store/alert_rule.go 
@@ -230,6 +230,7 @@ func (st DBstore) UpdateAlertRules(ctx context.Context, rules []UpdateRule) erro RuleUID: r.New.UID, RuleNamespaceUID: r.New.NamespaceUID, RuleGroup: r.New.RuleGroup, + RuleGroupIndex: r.New.RuleGroupIndex, ParentVersion: parentVersion, Version: r.New.Version + 1, Created: r.New.Updated, @@ -283,7 +284,7 @@ func (st DBstore) ListAlertRules(ctx context.Context, query *ngmodels.ListAlertR q = q.Where("rule_group = ?", query.RuleGroup) } - q = q.OrderBy("id ASC") + q = q.Asc("namespace_uid", "rule_group", "rule_group_idx", "id") alertRules := make([]*ngmodels.AlertRule, 0) if err := q.Find(&alertRules); err != nil { @@ -409,6 +410,7 @@ func (st DBstore) GetAlertRulesForScheduling(ctx context.Context, query *ngmodel } q = q.NotIn("org_id", excludeOrgs...) } + q = q.Asc("namespace_uid", "rule_group", "rule_group_idx", "id") if err := q.Find(&alerts); err != nil { return err } diff --git a/pkg/services/sqlstore/migrations/migrations.go b/pkg/services/sqlstore/migrations/migrations.go index 02894d18d87..2f48fca46fe 100644 --- a/pkg/services/sqlstore/migrations/migrations.go +++ b/pkg/services/sqlstore/migrations/migrations.go @@ -92,6 +92,8 @@ func (*OSSMigrations) AddMigration(mg *Migrator) { accesscontrol.AddManagedFolderAlertActionsMigration(mg) accesscontrol.AddActionNameMigrator(mg) addPlaylistUIDMigration(mg) + + ualert.UpdateRuleGroupIndexMigration(mg) } func addMigrationLogMigrations(mg *Migrator) { diff --git a/pkg/services/sqlstore/migrations/ualert/alert_rule.go b/pkg/services/sqlstore/migrations/ualert/alert_rule.go index cefa976130a..bc37fcba3aa 100644 --- a/pkg/services/sqlstore/migrations/ualert/alert_rule.go +++ b/pkg/services/sqlstore/migrations/ualert/alert_rule.go @@ -13,6 +13,7 @@ import ( ) type alertRule struct { + ID int64 `xorm:"pk autoincr 'id'"` OrgID int64 `xorm:"org_id"` Title string Condition string @@ -22,6 +23,7 @@ type alertRule struct { UID string `xorm:"uid"` NamespaceUID string `xorm:"namespace_uid"` RuleGroup string 
+ RuleGroupIndex int `xorm:"rule_group_idx"` NoDataState string ExecErrState string For duration @@ -35,6 +37,7 @@ type alertRuleVersion struct { RuleUID string `xorm:"rule_uid"` RuleNamespaceUID string `xorm:"rule_namespace_uid"` RuleGroup string + RuleGroupIndex int `xorm:"rule_group_idx"` ParentVersion int64 RestoredFrom int64 Version int64 @@ -59,6 +62,7 @@ func (a *alertRule) makeVersion() *alertRuleVersion { RuleUID: a.UID, RuleNamespaceUID: a.NamespaceUID, RuleGroup: a.RuleGroup, + RuleGroupIndex: a.RuleGroupIndex, ParentVersion: 0, RestoredFrom: 0, Version: 1, diff --git a/pkg/services/sqlstore/migrations/ualert/tables.go b/pkg/services/sqlstore/migrations/ualert/tables.go index 37127d1d099..05f4dcaca0a 100644 --- a/pkg/services/sqlstore/migrations/ualert/tables.go +++ b/pkg/services/sqlstore/migrations/ualert/tables.go @@ -241,6 +241,16 @@ func AddAlertRuleMigrations(mg *migrator.Migrator, defaultIntervalSeconds int64) Cols: []string{"org_id", "dashboard_uid", "panel_id"}, }, )) + + mg.AddMigration("add rule_group_idx column to alert_rule", migrator.NewAddColumnMigration( + migrator.Table{Name: "alert_rule"}, + &migrator.Column{ + Name: "rule_group_idx", + Type: migrator.DB_Int, + Nullable: false, + Default: "1", + }, + )) } func AddAlertRuleVersionMigrations(mg *migrator.Migrator) { @@ -284,6 +294,16 @@ func AddAlertRuleVersionMigrations(mg *migrator.Migrator) { // add labels column mg.AddMigration("add column labels to alert_rule_version", migrator.NewAddColumnMigration(alertRuleVersion, &migrator.Column{Name: "labels", Type: migrator.DB_Text, Nullable: true})) + + mg.AddMigration("add rule_group_idx column to alert_rule_version", migrator.NewAddColumnMigration( + migrator.Table{Name: "alert_rule_version"}, + &migrator.Column{ + Name: "rule_group_idx", + Type: migrator.DB_Int, + Nullable: false, + Default: "1", + }, + )) } func AddAlertmanagerConfigMigrations(mg *migrator.Migrator) { diff --git a/pkg/services/sqlstore/migrations/ualert/ualert.go 
b/pkg/services/sqlstore/migrations/ualert/ualert.go index d6dd13c71f4..6d7946bc7c6 100644 --- a/pkg/services/sqlstore/migrations/ualert/ualert.go +++ b/pkg/services/sqlstore/migrations/ualert/ualert.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strconv" "strings" + "time" pb "github.com/prometheus/alertmanager/silence/silencepb" "xorm.io/xorm" @@ -37,6 +38,7 @@ var migTitle = "move dashboard alerts to unified alerting" var rmMigTitle = "remove unified alerting data" const clearMigrationEntryTitle = "clear migration entry %q" +const codeMigration = "code migration" type MigrationError struct { AlertId int64 @@ -227,7 +229,7 @@ type migration struct { } func (m *migration) SQL(dialect migrator.Dialect) string { - return "code migration" + return codeMigration } // nolint: gocyclo @@ -511,7 +513,7 @@ type rmMigration struct { } func (m *rmMigration) SQL(dialect migrator.Dialect) string { - return "code migration" + return codeMigration } func (m *rmMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error { @@ -710,7 +712,7 @@ func (u *upgradeNgAlerting) updateAlertmanagerFiles(orgId int64, migrator *migra } func (u *upgradeNgAlerting) SQL(migrator.Dialect) string { - return "code migration" + return codeMigration } // getAlertFolderNameFromDashboard generates a folder name for alerts that belong to a dashboard. Formats the string according to DASHBOARD_FOLDER format. @@ -776,5 +778,86 @@ func (c createDefaultFoldersForAlertingMigration) Exec(sess *xorm.Session, migra } func (c createDefaultFoldersForAlertingMigration) SQL(migrator.Dialect) string { - return "code migration" + return codeMigration +} + +// UpdateRuleGroupIndexMigration updates a new field rule_group_index for alert rules that belong to a group with more than 1 alert. 
+func UpdateRuleGroupIndexMigration(mg *migrator.Migrator) { + if !mg.Cfg.UnifiedAlerting.IsEnabled() { + return + } + mg.AddMigration("update group index for alert rules", &updateRulesOrderInGroup{}) +} + +type updateRulesOrderInGroup struct { + migrator.MigrationBase +} + +func (c updateRulesOrderInGroup) SQL(migrator.Dialect) string { + return codeMigration +} + +func (c updateRulesOrderInGroup) Exec(sess *xorm.Session, migrator *migrator.Migrator) error { + var rows []*alertRule + if err := sess.Table(alertRule{}).Asc("id").Find(&rows); err != nil { + return fmt.Errorf("failed to read the list of alert rules: %w", err) + } + + if len(rows) == 0 { + migrator.Logger.Debug("No rules to migrate.") + return nil + } + + groups := map[ngmodels.AlertRuleGroupKey][]*alertRule{} + + for _, row := range rows { + groupKey := ngmodels.AlertRuleGroupKey{ + OrgID: row.OrgID, + NamespaceUID: row.NamespaceUID, + RuleGroup: row.RuleGroup, + } + groups[groupKey] = append(groups[groupKey], row) + } + + toUpdate := make([]*alertRule, 0, len(rows)) + + for _, rules := range groups { + for i, rule := range rules { + if rule.RuleGroupIndex == i+1 { + continue + } + rule.RuleGroupIndex = i + 1 + toUpdate = append(toUpdate, rule) + } + } + + if len(toUpdate) == 0 { + migrator.Logger.Debug("No rules to upgrade group index") + return nil + } + + updated := time.Now() + versions := make([]*alertRuleVersion, 0, len(toUpdate)) + + for _, rule := range toUpdate { + rule.Updated = updated + version := rule.makeVersion() + version.Version = rule.Version + 1 + version.ParentVersion = rule.Version + rule.Version++ + _, err := sess.ID(rule.ID).Cols("version", "updated", "rule_group_idx").Update(rule) + if err != nil { + migrator.Logger.Error("failed to update alert rule", "uid", rule.UID, "err", err) + return fmt.Errorf("unable to update alert rules with group index: %w", err) + } + migrator.Logger.Debug("updated group index for alert rule", "rule_uid", rule.UID) + versions = append(versions, 
version) + } + + _, err := sess.Insert(&versions) + if err != nil { + migrator.Logger.Error("failed to insert changes to alert_rule_version", "err", err) + return fmt.Errorf("unable to update alert rules with group index: %w", err) + } + return nil } diff --git a/pkg/tests/api/alerting/api_ruler_test.go b/pkg/tests/api/alerting/api_ruler_test.go index df576bdd57d..cd2296c6779 100644 --- a/pkg/tests/api/alerting/api_ruler_test.go +++ b/pkg/tests/api/alerting/api_ruler_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "math/rand" "net/http" "testing" "time" @@ -17,6 +18,7 @@ import ( apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models" "github.com/grafana/grafana/pkg/tests/testinfra" + "github.com/grafana/grafana/pkg/util" ) func TestAlertRulePermissions(t *testing.T) { @@ -719,6 +721,102 @@ func TestRulerRulesFilterByDashboard(t *testing.T) { } } +func TestRuleGroupSequence(t *testing.T) { + // Setup Grafana and its Database + dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{ + DisableLegacyAlerting: true, + EnableUnifiedAlerting: true, + DisableAnonymous: true, + AppModeProduction: true, + }) + grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path) + + // Create a user to make authenticated requests + createUser(t, store, models.CreateUserCommand{ + DefaultOrgRole: string(models.ROLE_EDITOR), + Password: "password", + Login: "grafana", + }) + + client := newAlertingApiClient(grafanaListedAddr, "grafana", "password") + folder1Title := "folder1" + client.CreateFolder(t, util.GenerateShortUID(), folder1Title) + + group1 := generateAlertRuleGroup(5, alertRuleGen()) + group2 := generateAlertRuleGroup(5, alertRuleGen()) + + status, _ := client.PostRulesGroup(t, folder1Title, &group1) + require.Equal(t, http.StatusAccepted, status) + status, _ = client.PostRulesGroup(t, folder1Title, &group2) + require.Equal(t, http.StatusAccepted, 
status) + + t.Run("should persist order of the rules in a group", func(t *testing.T) { + group1Get := client.GetRulesGroup(t, folder1Title, group1.Name) + assert.Equal(t, group1.Name, group1Get.Name) + assert.Equal(t, group1.Interval, group1Get.Interval) + assert.Len(t, group1Get.Rules, len(group1.Rules)) + for i, getRule := range group1Get.Rules { + rule := group1.Rules[i] + assert.Equal(t, getRule.GrafanaManagedAlert.Title, rule.GrafanaManagedAlert.Title) + assert.NotEmpty(t, getRule.GrafanaManagedAlert.UID) + } + + // now shuffle the rules + postableGroup1 := convertGettableRuleGroupToPostable(group1Get.GettableRuleGroupConfig) + rand.Shuffle(len(postableGroup1.Rules), func(i, j int) { + postableGroup1.Rules[i], postableGroup1.Rules[j] = postableGroup1.Rules[j], postableGroup1.Rules[i] + }) + expectedUids := make([]string, 0, len(postableGroup1.Rules)) + for _, rule := range postableGroup1.Rules { + expectedUids = append(expectedUids, rule.GrafanaManagedAlert.UID) + } + status, _ := client.PostRulesGroup(t, folder1Title, &postableGroup1) + require.Equal(t, http.StatusAccepted, status) + + group1Get = client.GetRulesGroup(t, folder1Title, group1.Name) + + require.Len(t, group1Get.Rules, len(postableGroup1.Rules)) + + actualUids := make([]string, 0, len(group1Get.Rules)) + for _, getRule := range group1Get.Rules { + actualUids = append(actualUids, getRule.GrafanaManagedAlert.UID) + } + assert.Equal(t, expectedUids, actualUids) + }) + + t.Run("should be able to move a rule from another group in a specific position", func(t *testing.T) { + group1Get := client.GetRulesGroup(t, folder1Title, group1.Name) + group2Get := client.GetRulesGroup(t, folder1Title, group2.Name) + + movedRule := convertGettableRuleToPostable(group2Get.Rules[3]) + // now shuffle the rules + postableGroup1 := convertGettableRuleGroupToPostable(group1Get.GettableRuleGroupConfig) + postableGroup1.Rules = append(append(append([]apimodels.PostableExtendedRuleNode{}, postableGroup1.Rules[0:1]...), 
movedRule), postableGroup1.Rules[2:]...) + expectedUids := make([]string, 0, len(postableGroup1.Rules)) + for _, rule := range postableGroup1.Rules { + expectedUids = append(expectedUids, rule.GrafanaManagedAlert.UID) + } + status, _ := client.PostRulesGroup(t, folder1Title, &postableGroup1) + require.Equal(t, http.StatusAccepted, status) + + group1Get = client.GetRulesGroup(t, folder1Title, group1.Name) + + require.Len(t, group1Get.Rules, len(postableGroup1.Rules)) + + actualUids := make([]string, 0, len(group1Get.Rules)) + for _, getRule := range group1Get.Rules { + actualUids = append(actualUids, getRule.GrafanaManagedAlert.UID) + } + assert.Equal(t, expectedUids, actualUids) + + group2Get = client.GetRulesGroup(t, folder1Title, group2.Name) + assert.Len(t, group2Get.Rules, len(group2.Rules)-1) + for _, rule := range group2Get.Rules { + require.NotEqual(t, movedRule.GrafanaManagedAlert.UID, rule.GrafanaManagedAlert.UID) + } + }) +} + func newTestingRuleConfig(t *testing.T) apimodels.PostableRuleGroupConfig { interval, err := model.ParseDuration("1m") require.NoError(t, err) diff --git a/pkg/tests/api/alerting/testing.go b/pkg/tests/api/alerting/testing.go index e195c5a5f8c..a7910819160 100644 --- a/pkg/tests/api/alerting/testing.go +++ b/pkg/tests/api/alerting/testing.go @@ -85,7 +85,7 @@ func getBody(t *testing.T, body io.ReadCloser) string { return string(b) } -func AlertRuleGen() func() apimodels.PostableExtendedRuleNode { +func alertRuleGen() func() apimodels.PostableExtendedRuleNode { return func() apimodels.PostableExtendedRuleNode { return apimodels.PostableExtendedRuleNode{ ApiRuleNode: &apimodels.ApiRuleNode{ @@ -115,7 +115,7 @@ func AlertRuleGen() func() apimodels.PostableExtendedRuleNode { } } -func GenerateAlertRuleGroup(rulesCount int, gen func() apimodels.PostableExtendedRuleNode) apimodels.PostableRuleGroupConfig { +func generateAlertRuleGroup(rulesCount int, gen func() apimodels.PostableExtendedRuleNode) apimodels.PostableRuleGroupConfig { rules 
:= make([]apimodels.PostableExtendedRuleNode, 0, rulesCount) for i := 0; i < rulesCount; i++ { rules = append(rules, gen()) @@ -127,10 +127,10 @@ func GenerateAlertRuleGroup(rulesCount int, gen func() apimodels.PostableExtende } } -func ConvertGettableRuleGroupToPostable(gettable apimodels.GettableRuleGroupConfig) apimodels.PostableRuleGroupConfig { +func convertGettableRuleGroupToPostable(gettable apimodels.GettableRuleGroupConfig) apimodels.PostableRuleGroupConfig { rules := make([]apimodels.PostableExtendedRuleNode, 0, len(gettable.Rules)) for _, rule := range gettable.Rules { - rules = append(rules, ConvertGettableRuleToPostable(rule)) + rules = append(rules, convertGettableRuleToPostable(rule)) } return apimodels.PostableRuleGroupConfig{ Name: gettable.Name, @@ -139,14 +139,14 @@ func ConvertGettableRuleGroupToPostable(gettable apimodels.GettableRuleGroupConf } } -func ConvertGettableRuleToPostable(gettable apimodels.GettableExtendedRuleNode) apimodels.PostableExtendedRuleNode { +func convertGettableRuleToPostable(gettable apimodels.GettableExtendedRuleNode) apimodels.PostableExtendedRuleNode { return apimodels.PostableExtendedRuleNode{ ApiRuleNode: gettable.ApiRuleNode, - GrafanaManagedAlert: ConvertGettableGrafanaRuleToPostable(gettable.GrafanaManagedAlert), + GrafanaManagedAlert: convertGettableGrafanaRuleToPostable(gettable.GrafanaManagedAlert), } } -func ConvertGettableGrafanaRuleToPostable(gettable *apimodels.GettableGrafanaRule) *apimodels.PostableGrafanaRule { +func convertGettableGrafanaRuleToPostable(gettable *apimodels.GettableGrafanaRule) *apimodels.PostableGrafanaRule { if gettable == nil { return nil } diff --git a/public/app/features/alerting/unified/utils/rulerClient.ts b/public/app/features/alerting/unified/utils/rulerClient.ts index 0398bd47872..73a7b296586 100644 --- a/public/app/features/alerting/unified/utils/rulerClient.ts +++ b/public/app/features/alerting/unified/utils/rulerClient.ts @@ -216,11 +216,16 @@ export function 
getRulerClient(rulerConfig: RulerDataSourceConfig): RulerClient // make sure our updated alert has the same UID as before copyGrafanaUID(existingRule, newRule); - // create the new array of rules we want to send to the group - const newRules = existingRule.group.rules - .filter((rule): rule is RulerGrafanaRuleDTO => isGrafanaRulerRule(rule)) - .filter((rule) => rule.grafana_alert.uid !== existingRule.rule.grafana_alert.uid) - .concat(newRule as RulerGrafanaRuleDTO); + // create the new array of rules we want to send to the group. Keep the order of alerts in the group. + const newRules = existingRule.group.rules.map((rule) => { + if (!isGrafanaRulerRule(rule)) { + return rule; + } + if (rule.grafana_alert.uid === existingRule.rule.grafana_alert.uid) { + return newRule; + } + return rule; + }); await setRulerRuleGroup(rulerConfig, existingRule.namespace, { name: existingRule.group.name,