mirror of https://github.com/grafana/grafana
Alerting: Send alerts to external Alertmanager(s) (#37298)
* Alerting: Send alerts to external Alertmanager(s)

Within this PR we're adding support for registering or unregistering sending to a set of external Alertmanagers. The main changes are:

- Introduce a new table to hold "admin" (either org or global) configuration we can change at runtime.
- Add a new periodic check that polls for this configuration and adjusts the "senders" accordingly.
- Introduce a new concept of "senders" that are responsible for shipping the alerts to the external Alertmanager(s). In a nutshell, this is the Prometheus notifier (the one in charge of sending the alert) kept in a multi-tenant map.

There are a few code movements here and there, but those are minor; I tried to keep things intact as much as possible so that we could have an easier diff.

pull/37643/head
parent 7e42bb5df0
commit f83cd401e5
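The periodic check described above is not itself shown in this commit. Below is a minimal sketch of what such a poller could look like, assuming it is handed the scheduler's SyncAndApplyConfigFromDatabase method and the AdminConfigPollingInterval variable that appear in the tests further down; the helper name and its wiring are illustrative only.

package schedule

import (
    "context"
    "time"

    "github.com/grafana/grafana/pkg/infra/log"
)

// runAdminConfigPolling is an illustrative helper, not code from this commit.
// The scheduler would call it with AdminConfigPollingInterval and its
// SyncAndApplyConfigFromDatabase method so that changes made through the
// admin_config API are picked up at runtime.
func runAdminConfigPolling(ctx context.Context, interval time.Duration, syncFn func() error, logger log.Logger) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            if err := syncFn(); err != nil {
                logger.Error("unable to sync admin configuration", "err", err)
            }
        case <-ctx.Done():
            return
        }
    }
}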
@@ -0,0 +1,74 @@
package api

import (
    "errors"
    "net/http"

    "github.com/grafana/grafana/pkg/api/response"
    "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/models"
    apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
    ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
    "github.com/grafana/grafana/pkg/services/ngalert/store"
)

type AdminSrv struct {
    store store.AdminConfigurationStore
    log   log.Logger
}

func (srv AdminSrv) RouteGetNGalertConfig(c *models.ReqContext) response.Response {
    if c.OrgRole != models.ROLE_ADMIN {
        return accessForbiddenResp()
    }

    cfg, err := srv.store.GetAdminConfiguration(c.OrgId)
    if err != nil {
        if errors.Is(err, store.ErrNoAdminConfiguration) {
            return ErrResp(http.StatusNotFound, err, "")
        }

        msg := "failed to fetch admin configuration from the database"
        srv.log.Error(msg, "err", err)
        return ErrResp(http.StatusInternalServerError, err, msg)
    }

    resp := apimodels.GettableNGalertConfig{
        Alertmanagers: cfg.Alertmanagers,
    }
    return response.JSON(http.StatusOK, resp)
}

func (srv AdminSrv) RoutePostNGalertConfig(c *models.ReqContext, body apimodels.PostableNGalertConfig) response.Response {
    if c.OrgRole != models.ROLE_ADMIN {
        return accessForbiddenResp()
    }

    cfg := &ngmodels.AdminConfiguration{
        Alertmanagers: body.Alertmanagers,
        OrgID:         c.OrgId,
    }

    cmd := store.UpdateAdminConfigurationCmd{AdminConfiguration: cfg}
    if err := srv.store.UpdateAdminConfiguration(cmd); err != nil {
        msg := "failed to save the admin configuration to the database"
        srv.log.Error(msg, "err", err)
        return ErrResp(http.StatusBadRequest, err, msg)
    }

    return response.JSON(http.StatusCreated, "admin configuration updated")
}

func (srv AdminSrv) RouteDeleteNGalertConfig(c *models.ReqContext) response.Response {
    if c.OrgRole != models.ROLE_ADMIN {
        return accessForbiddenResp()
    }

    err := srv.store.DeleteAdminConfiguration(c.OrgId)
    if err != nil {
        srv.log.Error("unable to delete configuration", "err", err)
        return ErrResp(http.StatusInternalServerError, err, "")
    }

    return response.JSON(http.StatusOK, "admin configuration deleted")
}
@@ -0,0 +1,59 @@
/*Package api contains base API implementation of unified alerting
 *
 *Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 *
 *Do not manually edit these files, please find ngalert/api/swagger-codegen/ for commands on how to generate them.
 */
package api

import (
    "net/http"

    "github.com/go-macaron/binding"

    "github.com/grafana/grafana/pkg/api/response"
    "github.com/grafana/grafana/pkg/api/routing"
    "github.com/grafana/grafana/pkg/middleware"
    "github.com/grafana/grafana/pkg/models"
    apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
    "github.com/grafana/grafana/pkg/services/ngalert/metrics"
)

type ConfigurationApiService interface {
    RouteDeleteNGalertConfig(*models.ReqContext) response.Response
    RouteGetNGalertConfig(*models.ReqContext) response.Response
    RoutePostNGalertConfig(*models.ReqContext, apimodels.PostableNGalertConfig) response.Response
}

func (api *API) RegisterConfigurationApiEndpoints(srv ConfigurationApiService, m *metrics.Metrics) {
    api.RouteRegister.Group("", func(group routing.RouteRegister) {
        group.Delete(
            toMacaronPath("/api/v1/ngalert/admin_config"),
            metrics.Instrument(
                http.MethodDelete,
                "/api/v1/ngalert/admin_config",
                srv.RouteDeleteNGalertConfig,
                m,
            ),
        )
        group.Get(
            toMacaronPath("/api/v1/ngalert/admin_config"),
            metrics.Instrument(
                http.MethodGet,
                "/api/v1/ngalert/admin_config",
                srv.RouteGetNGalertConfig,
                m,
            ),
        )
        group.Post(
            toMacaronPath("/api/v1/ngalert/admin_config"),
            binding.Bind(apimodels.PostableNGalertConfig{}),
            metrics.Instrument(
                http.MethodPost,
                "/api/v1/ngalert/admin_config",
                srv.RoutePostNGalertConfig,
                m,
            ),
        )
    }, middleware.ReqSignedIn)
}
@@ -0,0 +1,51 @@
package definitions

// swagger:route GET /api/v1/ngalert/admin_config configuration RouteGetNGalertConfig
//
// Get the NGalert configuration of the user's organization, returns 404 if no configuration is present.
//
// Produces:
// - application/json
//
// Responses:
// 200: GettableNGalertConfig
// 404: Failure
// 500: Failure

// swagger:route POST /api/v1/ngalert/admin_config configuration RoutePostNGalertConfig
//
// Creates or updates the NGalert configuration of the user's organization.
//
// Consumes:
// - application/json
//
// Responses:
// 201: Ack
// 400: ValidationError

// swagger:route DELETE /api/v1/ngalert/admin_config configuration RouteDeleteNGalertConfig
//
// Deletes the NGalert configuration of the user's organization.
//
// Consumes:
// - application/json
//
// Responses:
// 200: Ack
// 500: Failure

// swagger:parameters RoutePostNGalertConfig
type NGalertConfig struct {
    // in:body
    Body PostableNGalertConfig
}

// swagger:model
type PostableNGalertConfig struct {
    Alertmanagers []string `json:"alertmanagers"`
}

// swagger:model
type GettableNGalertConfig struct {
    Alertmanagers []string `json:"alertmanagers"`
}
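For reference, here is a rough sketch of what posting a configuration to this endpoint could look like from a standalone Go client. The path, HTTP methods, JSON body shape, and the 201 response follow the swagger definitions above; the Grafana host, the basic-auth admin credentials, and the example Alertmanager URL are placeholders.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "log"
    "net/http"
)

func main() {
    // Body mirrors PostableNGalertConfig: a list of Alertmanager URLs.
    body, err := json.Marshal(map[string][]string{
        "alertmanagers": {"http://alertmanager.example.com:9093"},
    })
    if err != nil {
        log.Fatal(err)
    }

    // Placeholder Grafana host and admin credentials; adjust for a real deployment.
    req, err := http.NewRequest(http.MethodPost, "http://localhost:3000/api/v1/ngalert/admin_config", bytes.NewReader(body))
    if err != nil {
        log.Fatal(err)
    }
    req.SetBasicAuth("admin", "admin")
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    // A 201 means the configuration was created or updated; GET and DELETE on
    // the same path read and remove it, per the swagger routes above.
    fmt.Println(resp.Status)
}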
@@ -0,0 +1,24 @@
package models

import (
    "crypto/sha256"
    "fmt"
)

// AdminConfiguration represents the ngalert administration configuration settings.
type AdminConfiguration struct {
    ID    int64 `xorm:"pk autoincr 'id'"`
    OrgID int64 `xorm:"org_id"`

    // List of Alertmanager URLs to push alerts to.
    Alertmanagers []string

    CreatedAt int64 `xorm:"created"`
    UpdatedAt int64 `xorm:"updated"`
}

func (ac *AdminConfiguration) AsSHA256() string {
    h := sha256.New()
    _, _ = h.Write([]byte(fmt.Sprintf("%v", ac.Alertmanagers)))
    return fmt.Sprintf("%x", h.Sum(nil))
}
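As a quick illustration of how this hash is meant to be used: the scheduler tests later in this diff keep one such hash per org (sendersCfgHash) and re-apply a sender only when it changes. The helper below is a sketch of that comparison, not code from this commit.

// Illustrative only: detect whether an org's sender configuration changed
// between two syncs by comparing the SHA-256 of its Alertmanager list.
func adminConfigChanged(prev, curr *AdminConfiguration) bool {
    return prev.AsSHA256() != curr.AsSHA256()
}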
@@ -0,0 +1,310 @@
package schedule

import (
    "context"
    "encoding/json"
    "fmt"
    "math/rand"
    "testing"
    "time"

    "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/registry"
    apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
    "github.com/grafana/grafana/pkg/services/ngalert/eval"
    "github.com/grafana/grafana/pkg/services/ngalert/metrics"
    "github.com/grafana/grafana/pkg/services/ngalert/models"
    "github.com/grafana/grafana/pkg/services/ngalert/state"
    "github.com/grafana/grafana/pkg/services/ngalert/store"
    "github.com/grafana/grafana/pkg/setting"

    "github.com/benbjohnson/clock"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/model"
    "github.com/stretchr/testify/require"
)

func TestSendingToExternalAlertmanager(t *testing.T) {
    t.Cleanup(registry.ClearOverrides)

    fakeAM := newFakeExternalAlertmanager(t)
    defer fakeAM.Close()
    fakeRuleStore := newFakeRuleStore(t)
    fakeInstanceStore := &fakeInstanceStore{}
    fakeAdminConfigStore := newFakeAdminConfigStore(t)

    // Create an alert rule with a one second interval.
    alertRule := CreateTestAlertRule(t, fakeRuleStore, 1, 1)

    // First, let's create an admin configuration that holds an alertmanager.
    adminConfig := &models.AdminConfiguration{OrgID: 1, Alertmanagers: []string{fakeAM.server.URL}}
    cmd := store.UpdateAdminConfigurationCmd{AdminConfiguration: adminConfig}
    require.NoError(t, fakeAdminConfigStore.UpdateAdminConfiguration(cmd))

    sched, mockedClock := setupScheduler(t, fakeRuleStore, fakeInstanceStore, fakeAdminConfigStore)

    // Make sure we sync the configuration at least once before the evaluation happens to guarantee the sender is running
    // when the first alert triggers.
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())
    sched.sendersMtx.Lock()
    require.Equal(t, 1, len(sched.senders))
    require.Equal(t, 1, len(sched.sendersCfgHash))
    sched.sendersMtx.Unlock()

    // Then, ensure we've discovered the Alertmanager.
    require.Eventually(t, func() bool {
        return len(sched.AlertmanagersFor(1)) == 1 && len(sched.DroppedAlertmanagersFor(1)) == 0
    }, 10*time.Second, 200*time.Millisecond)

    ctx, cancel := context.WithCancel(context.Background())
    t.Cleanup(func() {
        cancel()
    })
    go func() {
        AdminConfigPollingInterval = 10 * time.Minute // Do not poll in unit tests.
        err := sched.Run(ctx)
        require.NoError(t, err)
    }()

    // With everything up and running, let's advance the time to make sure we get at least one alert iteration.
    mockedClock.Add(2 * time.Second)

    // Eventually, our Alertmanager should have received at least one alert.
    require.Eventually(t, func() bool {
        return fakeAM.AlertsCount() >= 1 && fakeAM.AlertNamesCompare([]string{alertRule.Title})
    }, 10*time.Second, 200*time.Millisecond)

    // Now, let's remove the Alertmanager from the admin configuration.
    adminConfig.Alertmanagers = []string{}
    cmd = store.UpdateAdminConfigurationCmd{AdminConfiguration: adminConfig}
    require.NoError(t, fakeAdminConfigStore.UpdateAdminConfiguration(cmd))

    // Again, make sure we sync and verify the senders.
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())
    sched.sendersMtx.Lock()
    require.Equal(t, 0, len(sched.senders))
    require.Equal(t, 0, len(sched.sendersCfgHash))
    sched.sendersMtx.Unlock()

    // Then, ensure we've dropped the Alertmanager.
    require.Eventually(t, func() bool {
        return len(sched.AlertmanagersFor(1)) == 0 && len(sched.DroppedAlertmanagersFor(1)) == 0
    }, 10*time.Second, 200*time.Millisecond)
}

func TestSendingToExternalAlertmanager_WithMultipleOrgs(t *testing.T) {
    t.Cleanup(registry.ClearOverrides)

    fakeAM := newFakeExternalAlertmanager(t)
    defer fakeAM.Close()
    fakeRuleStore := newFakeRuleStore(t)
    fakeInstanceStore := &fakeInstanceStore{}
    fakeAdminConfigStore := newFakeAdminConfigStore(t)

    // Create two alert rules with a one second interval.
    alertRuleOrgOne := CreateTestAlertRule(t, fakeRuleStore, 1, 1)
    alertRuleOrgTwo := CreateTestAlertRule(t, fakeRuleStore, 1, 2)

    // First, let's create an admin configuration that holds an alertmanager.
    adminConfig := &models.AdminConfiguration{OrgID: 1, Alertmanagers: []string{fakeAM.server.URL}}
    cmd := store.UpdateAdminConfigurationCmd{AdminConfiguration: adminConfig}
    require.NoError(t, fakeAdminConfigStore.UpdateAdminConfiguration(cmd))

    sched, mockedClock := setupScheduler(t, fakeRuleStore, fakeInstanceStore, fakeAdminConfigStore)

    // Make sure we sync the configuration at least once before the evaluation happens to guarantee the sender is running
    // when the first alert triggers.
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())
    sched.sendersMtx.Lock()
    require.Equal(t, 1, len(sched.senders))
    require.Equal(t, 1, len(sched.sendersCfgHash))
    sched.sendersMtx.Unlock()

    // Then, ensure we've discovered the Alertmanager.
    require.Eventually(t, func() bool {
        return len(sched.AlertmanagersFor(1)) == 1 && len(sched.DroppedAlertmanagersFor(1)) == 0
    }, 10*time.Second, 200*time.Millisecond)

    ctx, cancel := context.WithCancel(context.Background())
    t.Cleanup(func() {
        cancel()
    })
    go func() {
        AdminConfigPollingInterval = 10 * time.Minute // Do not poll in unit tests.
        err := sched.Run(ctx)
        require.NoError(t, err)
    }()

    // 1. Now, let's assume a new org comes along.
    adminConfig2 := &models.AdminConfiguration{OrgID: 2, Alertmanagers: []string{fakeAM.server.URL}}
    cmd = store.UpdateAdminConfigurationCmd{AdminConfiguration: adminConfig2}
    require.NoError(t, fakeAdminConfigStore.UpdateAdminConfiguration(cmd))

    // If we sync again, new senders must have spawned.
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())
    sched.sendersMtx.Lock()
    require.Equal(t, 2, len(sched.senders))
    require.Equal(t, 2, len(sched.sendersCfgHash))
    sched.sendersMtx.Unlock()

    // Then, ensure we've discovered the Alertmanager for the new organization.
    require.Eventually(t, func() bool {
        return len(sched.AlertmanagersFor(2)) == 1 && len(sched.DroppedAlertmanagersFor(2)) == 0
    }, 10*time.Second, 200*time.Millisecond)

    // With everything up and running, let's advance the time to make sure we get at least one alert iteration.
    mockedClock.Add(2 * time.Second)

    // Eventually, our Alertmanager should have received at least two alerts.
    require.Eventually(t, func() bool {
        return fakeAM.AlertsCount() == 2 && fakeAM.AlertNamesCompare([]string{alertRuleOrgOne.Title, alertRuleOrgTwo.Title})
    }, 20*time.Second, 200*time.Millisecond)

    // 2. Next, let's modify the configuration of an organization by adding an extra alertmanager.
    fakeAM2 := newFakeExternalAlertmanager(t)
    adminConfig2 = &models.AdminConfiguration{OrgID: 2, Alertmanagers: []string{fakeAM.server.URL, fakeAM2.server.URL}}
    cmd = store.UpdateAdminConfigurationCmd{AdminConfiguration: adminConfig2}
    require.NoError(t, fakeAdminConfigStore.UpdateAdminConfiguration(cmd))

    // Before we sync, let's grab the existing hash of this particular org.
    sched.sendersMtx.Lock()
    currentHash := sched.sendersCfgHash[2]
    sched.sendersMtx.Unlock()

    // Now, sync again.
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())

    // The hash for org two should not be the same and we should still have two senders.
    sched.sendersMtx.Lock()
    require.NotEqual(t, sched.sendersCfgHash[2], currentHash)
    require.Equal(t, 2, len(sched.senders))
    require.Equal(t, 2, len(sched.sendersCfgHash))
    sched.sendersMtx.Unlock()

    // Wait for the discovery of the new Alertmanager for orgID = 2.
    require.Eventually(t, func() bool {
        return len(sched.AlertmanagersFor(2)) == 2 && len(sched.DroppedAlertmanagersFor(2)) == 0
    }, 10*time.Second, 200*time.Millisecond)

    // 3. Now, let's provide a configuration that fails for OrgID = 1.
    adminConfig2 = &models.AdminConfiguration{OrgID: 1, Alertmanagers: []string{"123://invalid.org"}}
    cmd = store.UpdateAdminConfigurationCmd{AdminConfiguration: adminConfig2}
    require.NoError(t, fakeAdminConfigStore.UpdateAdminConfiguration(cmd))

    // Before we sync, let's get the current config hash.
    sched.sendersMtx.Lock()
    currentHash = sched.sendersCfgHash[1]
    sched.sendersMtx.Unlock()

    // Now, sync again.
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())

    // The old configuration should still be running.
    sched.sendersMtx.Lock()
    require.Equal(t, sched.sendersCfgHash[1], currentHash)
    sched.sendersMtx.Unlock()
    require.Equal(t, 1, len(sched.AlertmanagersFor(1)))

    // If we fix it, it should be applied.
    adminConfig2 = &models.AdminConfiguration{OrgID: 1, Alertmanagers: []string{"notarealalertmanager:3030"}}
    cmd = store.UpdateAdminConfigurationCmd{AdminConfiguration: adminConfig2}
    require.NoError(t, fakeAdminConfigStore.UpdateAdminConfiguration(cmd))
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())
    sched.sendersMtx.Lock()
    require.NotEqual(t, sched.sendersCfgHash[1], currentHash)
    sched.sendersMtx.Unlock()

    // Finally, remove everything.
    require.NoError(t, fakeAdminConfigStore.DeleteAdminConfiguration(1))
    require.NoError(t, fakeAdminConfigStore.DeleteAdminConfiguration(2))
    require.NoError(t, sched.SyncAndApplyConfigFromDatabase())
    sched.sendersMtx.Lock()
    require.Equal(t, 0, len(sched.senders))
    require.Equal(t, 0, len(sched.sendersCfgHash))
    sched.sendersMtx.Unlock()

    require.Eventually(t, func() bool {
        NoAlertmanagerOrgOne := len(sched.AlertmanagersFor(1)) == 0 && len(sched.DroppedAlertmanagersFor(1)) == 0
        NoAlertmanagerOrgTwo := len(sched.AlertmanagersFor(2)) == 0 && len(sched.DroppedAlertmanagersFor(2)) == 0

        return NoAlertmanagerOrgOne && NoAlertmanagerOrgTwo
    }, 10*time.Second, 200*time.Millisecond)
}

func setupScheduler(t *testing.T, rs store.RuleStore, is store.InstanceStore, acs store.AdminConfigurationStore) (*schedule, *clock.Mock) {
    t.Helper()

    mockedClock := clock.NewMock()
    logger := log.New("ngalert schedule test")
    nilMetrics := metrics.NewMetrics(nil)
    schedCfg := SchedulerCfg{
        C:                mockedClock,
        BaseInterval:     time.Second,
        MaxAttempts:      1,
        Evaluator:        eval.Evaluator{Cfg: &setting.Cfg{ExpressionsEnabled: true}, Log: logger},
        RuleStore:        rs,
        InstanceStore:    is,
        AdminConfigStore: acs,
        Notifier:         &fakeNotifier{},
        Logger:           logger,
        Metrics:          metrics.NewMetrics(prometheus.NewRegistry()),
    }
    st := state.NewManager(schedCfg.Logger, nilMetrics, rs, is)
    return NewScheduler(schedCfg, nil, "http://localhost", st), mockedClock
}

// CreateTestAlertRule creates a dummy alert definition to be used by the tests.
func CreateTestAlertRule(t *testing.T, dbstore *fakeRuleStore, intervalSeconds int64, orgID int64) *models.AlertRule {
    t.Helper()

    d := rand.Intn(1000)
    ruleGroup := fmt.Sprintf("ruleGroup-%d", d)
    err := dbstore.UpdateRuleGroup(store.UpdateRuleGroupCmd{
        OrgID:        orgID,
        NamespaceUID: "namespace",
        RuleGroupConfig: apimodels.PostableRuleGroupConfig{
            Name:     ruleGroup,
            Interval: model.Duration(time.Duration(intervalSeconds) * time.Second),
            Rules: []apimodels.PostableExtendedRuleNode{
                {
                    ApiRuleNode: &apimodels.ApiRuleNode{
                        Annotations: map[string]string{"testAnnoKey": "testAnnoValue"},
                    },
                    GrafanaManagedAlert: &apimodels.PostableGrafanaRule{
                        Title:     fmt.Sprintf("an alert definition %d", d),
                        Condition: "A",
                        Data: []models.AlertQuery{
                            {
                                DatasourceUID: "-100",
                                Model: json.RawMessage(`{
                                    "datasourceUid": "-100",
                                    "type":"math",
                                    "expression":"2 + 2 > 1"
                                }`),
                                RelativeTimeRange: models.RelativeTimeRange{
                                    From: models.Duration(5 * time.Hour),
                                    To:   models.Duration(3 * time.Hour),
                                },
                                RefID: "A",
                            },
                        },
                    },
                },
            },
        },
    })
    require.NoError(t, err)

    q := models.ListRuleGroupAlertRulesQuery{
        OrgID:        orgID,
        NamespaceUID: "namespace",
        RuleGroup:    ruleGroup,
    }
    err = dbstore.GetRuleGroupAlertRules(&q)
    require.NoError(t, err)
    require.NotEmpty(t, q.Result)

    rule := q.Result[0]
    t.Logf("alert definition: %v with interval: %d created", rule.GetKey(), rule.IntervalSeconds)
    return rule
}
@@ -0,0 +1,307 @@
package schedule

import (
    "encoding/json"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "sync"
    "testing"
    "time"

    models2 "github.com/grafana/grafana/pkg/models"
    apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
    "github.com/grafana/grafana/pkg/services/ngalert/models"
    "github.com/grafana/grafana/pkg/services/ngalert/store"
    "github.com/grafana/grafana/pkg/util"

    amv2 "github.com/prometheus/alertmanager/api/v2/models"
    "github.com/prometheus/common/model"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func newFakeRuleStore(t *testing.T) *fakeRuleStore {
    return &fakeRuleStore{t: t, rules: map[int64]map[string]map[string][]*models.AlertRule{}}
}

// fakeRuleStore mocks the RuleStore of the scheduler.
type fakeRuleStore struct {
    t     *testing.T
    mtx   sync.Mutex
    rules map[int64]map[string]map[string][]*models.AlertRule
}

func (f *fakeRuleStore) DeleteAlertRuleByUID(_ int64, _ string) error { return nil }
func (f *fakeRuleStore) DeleteNamespaceAlertRules(_ int64, _ string) ([]string, error) {
    return []string{}, nil
}
func (f *fakeRuleStore) DeleteRuleGroupAlertRules(_ int64, _ string, _ string) ([]string, error) {
    return []string{}, nil
}
func (f *fakeRuleStore) DeleteAlertInstancesByRuleUID(_ int64, _ string) error { return nil }
func (f *fakeRuleStore) GetAlertRuleByUID(q *models.GetAlertRuleByUIDQuery) error {
    f.mtx.Lock()
    defer f.mtx.Unlock()

    rgs, ok := f.rules[q.OrgID]
    if !ok {
        return nil
    }

    for _, rg := range rgs {
        for _, rules := range rg {
            for _, r := range rules {
                if r.UID == q.UID {
                    q.Result = r
                    break
                }
            }
        }
    }

    return nil
}

// For now, we're not implementing namespace filtering.
func (f *fakeRuleStore) GetAlertRulesForScheduling(q *models.ListAlertRulesQuery) error {
    f.mtx.Lock()
    defer f.mtx.Unlock()

    for _, rg := range f.rules {
        for _, n := range rg {
            for _, r := range n {
                q.Result = append(q.Result, r...)
            }
        }
    }

    return nil
}
func (f *fakeRuleStore) GetOrgAlertRules(_ *models.ListAlertRulesQuery) error { return nil }
func (f *fakeRuleStore) GetNamespaceAlertRules(_ *models.ListNamespaceAlertRulesQuery) error {
    return nil
}
func (f *fakeRuleStore) GetRuleGroupAlertRules(q *models.ListRuleGroupAlertRulesQuery) error {
    f.mtx.Lock()
    defer f.mtx.Unlock()
    rgs, ok := f.rules[q.OrgID]
    if !ok {
        return nil
    }

    rg, ok := rgs[q.RuleGroup]
    if !ok {
        return nil
    }

    if q.NamespaceUID != "" {
        r, ok := rg[q.NamespaceUID]
        if !ok {
            return nil
        }
        q.Result = r
        return nil
    }

    for _, r := range rg {
        q.Result = append(q.Result, r...)
    }

    return nil
}
func (f *fakeRuleStore) GetNamespaces(_ int64, _ *models2.SignedInUser) (map[string]*models2.Folder, error) {
    return nil, nil
}
func (f *fakeRuleStore) GetNamespaceByTitle(_ string, _ int64, _ *models2.SignedInUser, _ bool) (*models2.Folder, error) {
    return nil, nil
}
func (f *fakeRuleStore) GetOrgRuleGroups(_ *models.ListOrgRuleGroupsQuery) error { return nil }
func (f *fakeRuleStore) UpsertAlertRules(_ []store.UpsertRule) error { return nil }
func (f *fakeRuleStore) UpdateRuleGroup(cmd store.UpdateRuleGroupCmd) error {
    f.mtx.Lock()
    defer f.mtx.Unlock()
    rgs, ok := f.rules[cmd.OrgID]
    if !ok {
        f.rules[cmd.OrgID] = map[string]map[string][]*models.AlertRule{}
    }

    rg, ok := rgs[cmd.RuleGroupConfig.Name]
    if !ok {
        f.rules[cmd.OrgID][cmd.RuleGroupConfig.Name] = map[string][]*models.AlertRule{}
    }

    _, ok = rg[cmd.NamespaceUID]
    if !ok {
        f.rules[cmd.OrgID][cmd.RuleGroupConfig.Name][cmd.NamespaceUID] = []*models.AlertRule{}
    }

    rules := []*models.AlertRule{}
    for _, r := range cmd.RuleGroupConfig.Rules {
        // TODO: Not sure why this is not being set properly, where is the code that sets this?
        for i := range r.GrafanaManagedAlert.Data {
            r.GrafanaManagedAlert.Data[i].DatasourceUID = "-100"
        }

        new := &models.AlertRule{
            OrgID:           cmd.OrgID,
            Title:           r.GrafanaManagedAlert.Title,
            Condition:       r.GrafanaManagedAlert.Condition,
            Data:            r.GrafanaManagedAlert.Data,
            UID:             util.GenerateShortUID(),
            IntervalSeconds: int64(time.Duration(cmd.RuleGroupConfig.Interval).Seconds()),
            NamespaceUID:    cmd.NamespaceUID,
            RuleGroup:       cmd.RuleGroupConfig.Name,
            NoDataState:     models.NoDataState(r.GrafanaManagedAlert.NoDataState),
            ExecErrState:    models.ExecutionErrorState(r.GrafanaManagedAlert.ExecErrState),
            Version:         1,
        }

        if r.ApiRuleNode != nil {
            new.For = time.Duration(r.ApiRuleNode.For)
            new.Annotations = r.ApiRuleNode.Annotations
            new.Labels = r.ApiRuleNode.Labels
        }

        if new.NoDataState == "" {
            new.NoDataState = models.NoData
        }

        if new.ExecErrState == "" {
            new.ExecErrState = models.AlertingErrState
        }

        err := new.PreSave(time.Now)
        require.NoError(f.t, err)

        rules = append(rules, new)
    }

    f.rules[cmd.OrgID][cmd.RuleGroupConfig.Name][cmd.NamespaceUID] = rules
    return nil
}

type fakeInstanceStore struct{}

func (f *fakeInstanceStore) GetAlertInstance(_ *models.GetAlertInstanceQuery) error { return nil }
func (f *fakeInstanceStore) ListAlertInstances(_ *models.ListAlertInstancesQuery) error { return nil }
func (f *fakeInstanceStore) SaveAlertInstance(_ *models.SaveAlertInstanceCommand) error { return nil }
func (f *fakeInstanceStore) FetchOrgIds() ([]int64, error) { return []int64{}, nil }
func (f *fakeInstanceStore) DeleteAlertInstance(_ int64, _, _ string) error { return nil }

func newFakeAdminConfigStore(t *testing.T) *fakeAdminConfigStore {
    t.Helper()
    return &fakeAdminConfigStore{configs: map[int64]*models.AdminConfiguration{}}
}

type fakeAdminConfigStore struct {
    mtx     sync.Mutex
    configs map[int64]*models.AdminConfiguration
}

func (f *fakeAdminConfigStore) GetAdminConfiguration(orgID int64) (*models.AdminConfiguration, error) {
    f.mtx.Lock()
    defer f.mtx.Unlock()
    return f.configs[orgID], nil
}

func (f *fakeAdminConfigStore) GetAdminConfigurations() ([]*models.AdminConfiguration, error) {
    f.mtx.Lock()
    defer f.mtx.Unlock()
    acs := make([]*models.AdminConfiguration, 0, len(f.configs))
    for _, ac := range f.configs {
        acs = append(acs, ac)
    }

    return acs, nil
}

func (f *fakeAdminConfigStore) DeleteAdminConfiguration(orgID int64) error {
    f.mtx.Lock()
    defer f.mtx.Unlock()
    delete(f.configs, orgID)
    return nil
}
func (f *fakeAdminConfigStore) UpdateAdminConfiguration(cmd store.UpdateAdminConfigurationCmd) error {
    f.mtx.Lock()
    defer f.mtx.Unlock()
    f.configs[cmd.AdminConfiguration.OrgID] = cmd.AdminConfiguration

    return nil
}

// fakeNotifier represents a fake internal Alertmanager.
type fakeNotifier struct{}

func (n *fakeNotifier) PutAlerts(alerts apimodels.PostableAlerts) error {
    return nil
}

type fakeExternalAlertmanager struct {
    t      *testing.T
    mtx    sync.Mutex
    alerts amv2.PostableAlerts
    server *httptest.Server
}

func newFakeExternalAlertmanager(t *testing.T) *fakeExternalAlertmanager {
    t.Helper()

    am := &fakeExternalAlertmanager{
        t:      t,
        alerts: amv2.PostableAlerts{},
    }
    am.server = httptest.NewServer(http.HandlerFunc(am.Handler()))

    return am
}

func (am *fakeExternalAlertmanager) AlertNamesCompare(expected []string) bool {
    n := []string{}
    alerts := am.Alerts()

    if len(expected) != len(alerts) {
        return false
    }

    for _, a := range am.Alerts() {
        for k, v := range a.Alert.Labels {
            if k == model.AlertNameLabel {
                n = append(n, v)
            }
        }
    }

    return assert.ObjectsAreEqual(expected, n)
}

func (am *fakeExternalAlertmanager) AlertsCount() int {
    am.mtx.Lock()
    defer am.mtx.Unlock()

    return len(am.alerts)
}

func (am *fakeExternalAlertmanager) Alerts() amv2.PostableAlerts {
    am.mtx.Lock()
    defer am.mtx.Unlock()
    return am.alerts
}

func (am *fakeExternalAlertmanager) Handler() func(w http.ResponseWriter, r *http.Request) {
    return func(w http.ResponseWriter, r *http.Request) {
        b, err := ioutil.ReadAll(r.Body)
        require.NoError(am.t, err)

        a := amv2.PostableAlerts{}
        require.NoError(am.t, json.Unmarshal(b, &a))

        am.mtx.Lock()
        am.alerts = append(am.alerts, a...)
        am.mtx.Unlock()
    }
}

func (am *fakeExternalAlertmanager) Close() {
    am.server.Close()
}
@@ -0,0 +1,202 @@
package sender

import (
    "context"
    "net/url"
    "strings"
    "sync"
    "time"

    "github.com/grafana/grafana/pkg/infra/log"
    apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
    "github.com/grafana/grafana/pkg/services/ngalert/logging"
    "github.com/grafana/grafana/pkg/services/ngalert/metrics"
    ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"

    gokit_log "github.com/go-kit/kit/log"
    "github.com/prometheus/alertmanager/api/v2/models"
    "github.com/prometheus/client_golang/prometheus"
    common_config "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"
    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/notifier"
    "github.com/prometheus/prometheus/pkg/labels"
)

const (
    defaultMaxQueueCapacity = 10000
    defaultTimeout          = 10 * time.Second
)

// Sender is responsible for dispatching alert notifications to an external Alertmanager service.
type Sender struct {
    logger      log.Logger
    gokitLogger gokit_log.Logger
    wg          sync.WaitGroup

    manager *notifier.Manager

    sdCancel  context.CancelFunc
    sdManager *discovery.Manager
}

func New(metrics *metrics.Metrics) (*Sender, error) {
    l := log.New("sender")
    sdCtx, sdCancel := context.WithCancel(context.Background())
    s := &Sender{
        logger:      l,
        gokitLogger: gokit_log.NewLogfmtLogger(logging.NewWrapper(l)),
        sdCancel:    sdCancel,
    }

    s.manager = notifier.NewManager(
        &notifier.Options{QueueCapacity: defaultMaxQueueCapacity, Registerer: prometheus.NewRegistry()},
        s.gokitLogger,
    )

    s.sdManager = discovery.NewManager(sdCtx, s.gokitLogger)

    return s, nil
}

// ApplyConfig syncs a configuration with the sender.
func (s *Sender) ApplyConfig(cfg *ngmodels.AdminConfiguration) error {
    notifierCfg, err := buildNotifierConfig(cfg)
    if err != nil {
        return err
    }

    if err := s.manager.ApplyConfig(notifierCfg); err != nil {
        return err
    }

    sdCfgs := make(map[string]discovery.Configs)
    for k, v := range notifierCfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
        sdCfgs[k] = v.ServiceDiscoveryConfigs
    }

    return s.sdManager.ApplyConfig(sdCfgs)
}

func (s *Sender) Run() {
    s.wg.Add(2)

    go func() {
        if err := s.sdManager.Run(); err != nil {
            s.logger.Error("failed to start the sender service discovery manager", "err", err)
        }
        s.wg.Done()
    }()

    go func() {
        s.manager.Run(s.sdManager.SyncCh())
        s.wg.Done()
    }()
}

// SendAlerts sends a set of alerts to the configured Alertmanager(s).
func (s *Sender) SendAlerts(alerts apimodels.PostableAlerts) {
    if len(alerts.PostableAlerts) == 0 {
        s.logger.Debug("no alerts to send to external Alertmanager(s)")
        return
    }
    as := make([]*notifier.Alert, 0, len(alerts.PostableAlerts))
    for _, a := range alerts.PostableAlerts {
        na := alertToNotifierAlert(a)
        as = append(as, na)
    }

    s.logger.Debug("sending alerts to the external Alertmanager(s)", "am_count", len(s.manager.Alertmanagers()), "alert_count", len(as))
    s.manager.Send(as...)
}

// Stop shuts down the sender.
func (s *Sender) Stop() {
    s.sdCancel()
    s.manager.Stop()
    s.wg.Wait()
}

// Alertmanagers returns a list of the discovered Alertmanager(s).
func (s *Sender) Alertmanagers() []*url.URL {
    return s.manager.Alertmanagers()
}

// DroppedAlertmanagers returns a list of Alertmanager(s) we no longer send alerts to.
func (s *Sender) DroppedAlertmanagers() []*url.URL {
    return s.manager.DroppedAlertmanagers()
}

func buildNotifierConfig(cfg *ngmodels.AdminConfiguration) (*config.Config, error) {
    amConfigs := make([]*config.AlertmanagerConfig, 0, len(cfg.Alertmanagers))
    for _, amURL := range cfg.Alertmanagers {
        u, err := url.Parse(amURL)
        if err != nil {
            return nil, err
        }

        sdConfig := discovery.Configs{
            discovery.StaticConfig{
                {
                    Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(u.Host)}},
                },
            },
        }

        amConfig := &config.AlertmanagerConfig{
            APIVersion:              config.AlertmanagerAPIVersionV2,
            Scheme:                  u.Scheme,
            PathPrefix:              u.Path,
            Timeout:                 model.Duration(defaultTimeout),
            ServiceDiscoveryConfigs: sdConfig,
        }

        // Check the URL for basic authentication information first.
        if u.User != nil {
            amConfig.HTTPClientConfig.BasicAuth = &common_config.BasicAuth{
                Username: u.User.Username(),
            }

            if password, isSet := u.User.Password(); isSet {
                amConfig.HTTPClientConfig.BasicAuth.Password = common_config.Secret(password)
            }
        }
        amConfigs = append(amConfigs, amConfig)
    }

    notifierConfig := &config.Config{
        AlertingConfig: config.AlertingConfig{
            AlertmanagerConfigs: amConfigs,
        },
    }

    return notifierConfig, nil
}

func alertToNotifierAlert(alert models.PostableAlert) *notifier.Alert {
    ls := make(labels.Labels, 0, len(alert.Alert.Labels))
    a := make(labels.Labels, 0, len(alert.Annotations))

    // Prometheus does not allow spaces in labels or annotations while Grafana does, so we need to make sure we
    // remove them before sending the alerts.
    for k, v := range alert.Alert.Labels {
        ls = append(ls, labels.Label{Name: removeSpaces(k), Value: v})
    }

    for k, v := range alert.Annotations {
        a = append(a, labels.Label{Name: removeSpaces(k), Value: v})
    }

    return &notifier.Alert{
        Labels:       ls,
        Annotations:  a,
        StartsAt:     time.Time(alert.StartsAt),
        EndsAt:       time.Time(alert.EndsAt),
        GeneratorURL: alert.Alert.GeneratorURL.String(),
    }
}

func removeSpaces(labelName string) string {
    return strings.Join(strings.Fields(labelName), "")
}
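To make the sender's lifecycle concrete, here is a rough usage sketch of how the scheduler is expected to drive it: build it, apply an admin configuration, run discovery and the notifier, hand it alerts, and stop it on shutdown. It relies only on the functions defined in the file above (New, ApplyConfig, Run, SendAlerts, Stop) and the imports already present there; the wrapper function itself and the error handling are illustrative, not part of this commit.

// exampleSenderLifecycle is an illustrative sketch, not code from this commit.
func exampleSenderLifecycle(cfg *ngmodels.AdminConfiguration, alerts apimodels.PostableAlerts) error {
    s, err := New(metrics.NewMetrics(prometheus.NewRegistry()))
    if err != nil {
        return err
    }

    // Translate the admin configuration (the list of Alertmanager URLs) into
    // a Prometheus notifier and service-discovery configuration.
    if err := s.ApplyConfig(cfg); err != nil {
        return err
    }

    // Run starts the service discovery manager and the notifier manager.
    s.Run()
    defer s.Stop()

    // Alerts are converted to Prometheus notifier alerts and queued for delivery
    // to every discovered Alertmanager.
    s.SendAlerts(alerts)
    return nil
}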
@@ -0,0 +1,92 @@
package store

import (
    "context"
    "fmt"

    ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
    "github.com/grafana/grafana/pkg/services/sqlstore"
)

var (
    // ErrNoAdminConfiguration is an error for when no admin configuration is found.
    ErrNoAdminConfiguration = fmt.Errorf("no admin configuration available")
)

type UpdateAdminConfigurationCmd struct {
    AdminConfiguration *ngmodels.AdminConfiguration
}

type AdminConfigurationStore interface {
    GetAdminConfiguration(orgID int64) (*ngmodels.AdminConfiguration, error)
    GetAdminConfigurations() ([]*ngmodels.AdminConfiguration, error)
    DeleteAdminConfiguration(orgID int64) error
    UpdateAdminConfiguration(UpdateAdminConfigurationCmd) error
}

func (st *DBstore) GetAdminConfiguration(orgID int64) (*ngmodels.AdminConfiguration, error) {
    cfg := &ngmodels.AdminConfiguration{}
    err := st.SQLStore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
        ok, err := sess.Table("ngalert_configuration").Where("org_id = ?", orgID).Get(cfg)
        if err != nil {
            return err
        }

        if !ok {
            return ErrNoAdminConfiguration
        }

        return nil
    })

    if err != nil {
        return nil, err
    }

    return cfg, nil
}

func (st DBstore) GetAdminConfigurations() ([]*ngmodels.AdminConfiguration, error) {
    var cfg []*ngmodels.AdminConfiguration
    err := st.SQLStore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
        if err := sess.Table("ngalert_configuration").Find(&cfg); err != nil {
            return err
        }

        return nil
    })

    if err != nil {
        return nil, err
    }

    return cfg, nil
}

func (st DBstore) DeleteAdminConfiguration(orgID int64) error {
    return st.SQLStore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
        _, err := sess.Exec("DELETE FROM ngalert_configuration WHERE org_id = ?", orgID)
        if err != nil {
            return err
        }

        return nil
    })
}

func (st DBstore) UpdateAdminConfiguration(cmd UpdateAdminConfigurationCmd) error {
    return st.SQLStore.WithTransactionalDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
        has, err := sess.Table("ngalert_configuration").Where("org_id = ?", cmd.AdminConfiguration.OrgID).Exist()
        if err != nil {
            return err
        }

        if !has {
            _, err := sess.Table("ngalert_configuration").Insert(cmd.AdminConfiguration)
            return err
        }

        _, err = sess.Table("ngalert_configuration").AllCols().Update(cmd.AdminConfiguration)
        return err
    })
}