Mirror of https://github.com/grafana/grafana.git (synced 2025-02-25 18:55:37 -06:00)
Chore: Remove result fields from ngalert (#65410)
* remove result fields from ngalert
* remove duplicate imports
This commit is contained in:
parent 42b58fbca6
commit 0beb768427
@@ -55,7 +55,7 @@ type Alertmanager interface {
 }
 
 type AlertingStore interface {
-	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error
+	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (*models.AlertConfiguration, error)
 }
 
 // API handlers.
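Every hunk in this change follows the same shape: the store method stops writing into a Result field on the query struct and instead returns the value alongside the error. A minimal runnable sketch of the new calling convention, with the real models reduced to hypothetical stand-in types:

package main

import (
	"context"
	"fmt"
)

// Stand-ins for models.GetLatestAlertmanagerConfigurationQuery and
// models.AlertConfiguration, trimmed to what the pattern needs.
type getLatestConfigQuery struct{ OrgID int64 }
type alertConfiguration struct{ AlertmanagerConfiguration string }

// store mimics the new AlertingStore shape: the query stays input-only
// and the configuration comes back as a return value.
type store struct{ configs map[int64]*alertConfiguration }

func (s *store) GetLatestAlertmanagerConfiguration(_ context.Context, q *getLatestConfigQuery) (*alertConfiguration, error) {
	c, ok := s.configs[q.OrgID]
	if !ok {
		return nil, fmt.Errorf("no alertmanager configuration")
	}
	return c, nil
}

func main() {
	s := &store{configs: map[int64]*alertConfiguration{1: {AlertmanagerConfiguration: "my-config"}}}
	q := getLatestConfigQuery{OrgID: 1}
	// Previously callers ran the method for its side effect and then read
	// q.Result; now the result arrives directly.
	amConfig, err := s.GetLatestAlertmanagerConfiguration(context.Background(), &q)
	if err != nil {
		panic(err)
	}
	fmt.Println(amConfig.AlertmanagerConfiguration)
}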
@@ -150,7 +150,8 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
 		DashboardUID: dashboardUID,
 		PanelID:      panelID,
 	}
-	if err := srv.store.ListAlertRules(c.Req.Context(), &alertRuleQuery); err != nil {
+	ruleList, err := srv.store.ListAlertRules(c.Req.Context(), &alertRuleQuery)
+	if err != nil {
 		ruleResponse.DiscoveryBase.Status = "error"
 		ruleResponse.DiscoveryBase.Error = fmt.Sprintf("failure getting rules: %s", err.Error())
 		ruleResponse.DiscoveryBase.ErrorType = apiv1.ErrServer
@@ -161,7 +162,7 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
 	}
 
 	groupedRules := make(map[ngmodels.AlertRuleGroupKey][]*ngmodels.AlertRule)
-	for _, rule := range alertRuleQuery.Result {
+	for _, rule := range ruleList {
 		key := rule.GetGroupKey()
 		rulesInGroup := groupedRules[key]
 		rulesInGroup = append(rulesInGroup, rule)
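The repeated one-line-to-two-line split above is forced by Go scoping: with a single error return the call fits in the if statement's init clause, but once the method returns (result, error) the result would be scoped to the if block, so the call has to move onto its own line. A small sketch with a hypothetical stand-in for srv.store.ListAlertRules:

package main

import (
	"errors"
	"fmt"
)

// listAlertRules stands in for srv.store.ListAlertRules.
func listAlertRules() ([]string, error) { return nil, errors.New("boom") }

func main() {
	// Writing "if ruleList, err := listAlertRules(); err != nil { ... }"
	// would make ruleList invisible after the if, so the call gets its
	// own line, exactly as in the hunks above.
	ruleList, err := listAlertRules()
	if err != nil {
		fmt.Println("failure getting rules:", err)
		return
	}
	fmt.Println(len(ruleList), "rules")
}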
@@ -83,21 +83,22 @@ func (srv RulerSrv) RouteDeleteAlertRules(c *contextmodel.ReqContext, namespaceT
 		NamespaceUIDs: []string{namespace.UID},
 		RuleGroup:     ruleGroup,
 	}
-	if err = srv.store.ListAlertRules(ctx, &q); err != nil {
+	ruleList, err := srv.store.ListAlertRules(ctx, &q)
+	if err != nil {
 		return err
 	}
 
-	if len(q.Result) == 0 {
+	if len(ruleList) == 0 {
 		logger.Debug("no alert rules to delete from namespace/group")
 		return nil
 	}
 
 	var deletionCandidates = make(map[ngmodels.AlertRuleGroupKey][]*ngmodels.AlertRule)
-	for _, rule := range q.Result {
+	for _, rule := range ruleList {
 		key := rule.GetGroupKey()
 		deletionCandidates[key] = append(deletionCandidates[key], rule)
 	}
-	rulesToDelete := make([]string, 0, len(q.Result))
+	rulesToDelete := make([]string, 0, len(ruleList))
 	for groupKey, rules := range deletionCandidates {
 		if !authorizeAccessToRuleGroup(rules, hasAccess) {
 			unauthz = true
@@ -156,7 +157,8 @@ func (srv RulerSrv) RouteGetNamespaceRulesConfig(c *contextmodel.ReqContext, nam
 		OrgID:         c.SignedInUser.OrgID,
 		NamespaceUIDs: []string{namespace.UID},
 	}
-	if err := srv.store.ListAlertRules(c.Req.Context(), &q); err != nil {
+	ruleList, err := srv.store.ListAlertRules(c.Req.Context(), &q)
+	if err != nil {
 		return ErrResp(http.StatusInternalServerError, err, "failed to update rule group")
 	}
 
@@ -172,7 +174,7 @@ func (srv RulerSrv) RouteGetNamespaceRulesConfig(c *contextmodel.ReqContext, nam
 	}
 
 	ruleGroups := make(map[string]ngmodels.RulesGroup)
-	for _, r := range q.Result {
+	for _, r := range ruleList {
 		ruleGroups[r.RuleGroup] = append(ruleGroups[r.RuleGroup], r)
 	}
 
@@ -199,7 +201,8 @@ func (srv RulerSrv) RouteGetRulesGroupConfig(c *contextmodel.ReqContext, namespa
 		NamespaceUIDs: []string{namespace.UID},
 		RuleGroup:     ruleGroup,
 	}
-	if err := srv.store.ListAlertRules(c.Req.Context(), &q); err != nil {
+	ruleList, err := srv.store.ListAlertRules(c.Req.Context(), &q)
+	if err != nil {
 		return ErrResp(http.StatusInternalServerError, err, "failed to get group alert rules")
 	}
 
@@ -212,12 +215,12 @@ func (srv RulerSrv) RouteGetRulesGroupConfig(c *contextmodel.ReqContext, namespa
 		return ErrResp(http.StatusInternalServerError, err, "failed to get group alert rules")
 	}
 
-	if !authorizeAccessToRuleGroup(q.Result, hasAccess) {
+	if !authorizeAccessToRuleGroup(ruleList, hasAccess) {
 		return ErrResp(http.StatusUnauthorized, fmt.Errorf("%w to access the group because it does not have access to one or many data sources one or many rules in the group use", ErrAuthorization), "")
 	}
 
 	result := apimodels.RuleGroupConfigResponse{
-		GettableRuleGroupConfig: toGettableRuleGroupConfig(ruleGroup, q.Result, namespace.ID, provenanceRecords),
+		GettableRuleGroupConfig: toGettableRuleGroupConfig(ruleGroup, ruleList, namespace.ID, provenanceRecords),
 	}
 	return response.JSON(http.StatusAccepted, result)
 }
@@ -256,7 +259,8 @@ func (srv RulerSrv) RouteGetRulesConfig(c *contextmodel.ReqContext) response.Res
 		PanelID:      panelID,
 	}
 
-	if err := srv.store.ListAlertRules(c.Req.Context(), &q); err != nil {
+	ruleList, err := srv.store.ListAlertRules(c.Req.Context(), &q)
+	if err != nil {
 		return ErrResp(http.StatusInternalServerError, err, "failed to get alert rules")
 	}
 
@@ -270,7 +274,7 @@ func (srv RulerSrv) RouteGetRulesConfig(c *contextmodel.ReqContext) response.Res
 	}
 
 	configs := make(map[ngmodels.AlertRuleGroupKey]ngmodels.RulesGroup)
-	for _, r := range q.Result {
+	for _, r := range ruleList {
 		groupKey := r.GetGroupKey()
 		group := configs[groupKey]
 		group = append(group, r)
@@ -12,8 +12,8 @@ import (
 type RuleStore interface {
 	GetUserVisibleNamespaces(context.Context, int64, *user.SignedInUser) (map[string]*folder.Folder, error)
 	GetNamespaceByTitle(context.Context, string, int64, *user.SignedInUser, bool) (*folder.Folder, error)
-	GetAlertRulesGroupByRuleUID(ctx context.Context, query *ngmodels.GetAlertRulesGroupByRuleUIDQuery) error
-	ListAlertRules(ctx context.Context, query *ngmodels.ListAlertRulesQuery) error
+	GetAlertRulesGroupByRuleUID(ctx context.Context, query *ngmodels.GetAlertRulesGroupByRuleUIDQuery) ([]*ngmodels.AlertRule, error)
+	ListAlertRules(ctx context.Context, query *ngmodels.ListAlertRulesQuery) (ngmodels.RulesGroup, error)
 
 	// InsertAlertRules will insert all alert rules passed into the function
 	// and return the map of uuid to id.
@@ -362,16 +362,12 @@ type AlertRuleVersion struct {
 type GetAlertRuleByUIDQuery struct {
 	UID   string
 	OrgID int64
-
-	Result *AlertRule
 }
 
 // GetAlertRulesGroupByRuleUIDQuery is the query for retrieving a group of alerts by UID of a rule that belongs to that group
 type GetAlertRulesGroupByRuleUIDQuery struct {
 	UID   string
 	OrgID int64
-
-	Result []*AlertRule
 }
 
 // ListAlertRulesQuery is the query for listing alert rules
@@ -385,8 +381,6 @@ type ListAlertRulesQuery struct {
 	// to return just those for a dashboard and panel.
 	DashboardUID string
 	PanelID      int64
-
-	Result RulesGroup
 }
 
 // CountAlertRulesQuery is the query for counting alert rules
@@ -407,8 +401,6 @@ type ListNamespaceAlertRulesQuery struct {
 	OrgID int64
 	// Namespace is the folder slug
 	NamespaceUID string
-
-	Result []*AlertRule
 }
 
 // ListOrgRuleGroupsQuery is the query for listing unique rule groups
@@ -421,8 +413,6 @@ type ListOrgRuleGroupsQuery struct {
 	// to return just those for a dashboard and panel.
 	DashboardUID string
 	PanelID      int64
-
-	Result [][]string
 }
 
 type UpdateRule struct {
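With the Result fields gone, the remaining query structs are pure filter inputs. A reduced, runnable sketch of what that buys callers (types are hypothetical stand-ins mirroring ListAlertRulesQuery above):

package main

import "fmt"

// listAlertRulesQuery mirrors the slimmed-down struct: input fields only,
// no mutable output slot.
type listAlertRulesQuery struct {
	OrgID        int64
	DashboardUID string
	PanelID      int64
}

type alertRule struct{ UID string }

// listAlertRules stands in for the store method; results come back as a
// return value, so the same query value can be reused or shared freely.
func listAlertRules(q listAlertRulesQuery) ([]*alertRule, error) {
	if q.OrgID <= 0 {
		return nil, fmt.Errorf("org id required")
	}
	return []*alertRule{{UID: "abc"}}, nil
}

func main() {
	q := listAlertRulesQuery{OrgID: 1}
	rules, err := listAlertRules(q) // the query can even be passed by value now
	if err != nil {
		panic(err)
	}
	fmt.Println(len(rules))
}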
@@ -25,8 +25,7 @@ type HistoricAlertConfiguration struct {
 
 // GetLatestAlertmanagerConfigurationQuery is the query to get the latest alertmanager configuration.
 type GetLatestAlertmanagerConfigurationQuery struct {
-	OrgID  int64
-	Result *AlertConfiguration
+	OrgID int64
 }
 
 // SaveAlertmanagerConfigurationCmd is the command to save an alertmanager configuration.
@@ -47,8 +46,7 @@ type MarkConfigurationAsAppliedCmd struct {
 
 // GetAppliedConfigurationsQuery is the query for getting configurations that have been previously applied with no errors.
 type GetAppliedConfigurationsQuery struct {
-	OrgID  int64
-	Result []*HistoricAlertConfiguration
+	OrgID int64
 }
 
 func HistoricConfigFromAlertConfig(config AlertConfiguration) HistoricAlertConfiguration {
@@ -52,8 +52,6 @@ func (i InstanceStateType) IsValid() bool {
 type ListAlertInstancesQuery struct {
 	RuleUID   string
 	RuleOrgID int64 `json:"-"`
-
-	Result []*AlertInstance
 }
 
 // ValidateAlertInstance validates that the alert instance contains an alert rule id,
@@ -28,16 +28,16 @@ func (e AlertmanagerConfigRejectedError) Error() string {
 }
 
 type configurationStore interface {
-	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error
+	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (*models.AlertConfiguration, error)
 }
 
 func (moa *MultiOrgAlertmanager) GetAlertmanagerConfiguration(ctx context.Context, org int64) (definitions.GettableUserConfig, error) {
 	query := models.GetLatestAlertmanagerConfigurationQuery{OrgID: org}
-	err := moa.configStore.GetLatestAlertmanagerConfiguration(ctx, &query)
+	amConfig, err := moa.configStore.GetLatestAlertmanagerConfiguration(ctx, &query)
 	if err != nil {
 		return definitions.GettableUserConfig{}, fmt.Errorf("failed to get latest configuration: %w", err)
 	}
-	cfg, err := Load([]byte(query.Result.AlertmanagerConfiguration))
+	cfg, err := Load([]byte(amConfig.AlertmanagerConfiguration))
 	if err != nil {
 		return definitions.GettableUserConfig{}, fmt.Errorf("failed to unmarshal alertmanager configuration: %w", err)
 	}
@@ -93,7 +93,8 @@ func (moa *MultiOrgAlertmanager) GetAlertmanagerConfiguration(ctx context.Contex
 func (moa *MultiOrgAlertmanager) ApplyAlertmanagerConfiguration(ctx context.Context, org int64, config definitions.PostableUserConfig) error {
 	// Get the last known working configuration
 	query := models.GetLatestAlertmanagerConfigurationQuery{OrgID: org}
-	if err := moa.configStore.GetLatestAlertmanagerConfiguration(ctx, &query); err != nil {
+	_, err := moa.configStore.GetLatestAlertmanagerConfiguration(ctx, &query)
+	if err != nil {
 		// If we don't have a configuration there's nothing for us to know and we should just continue saving the new one
 		if !errors.Is(err, store.ErrNoAlertmanagerConfiguration) {
 			return fmt.Errorf("failed to get latest configuration %w", err)
@@ -40,7 +40,8 @@ func NewCrypto(secrets secrets.Service, configs configurationStore, log log.Logg
 func (c *alertmanagerCrypto) LoadSecureSettings(ctx context.Context, orgId int64, receivers []*definitions.PostableApiReceiver) error {
 	// Get the last known working configuration.
 	query := models.GetLatestAlertmanagerConfigurationQuery{OrgID: orgId}
-	if err := c.configs.GetLatestAlertmanagerConfiguration(ctx, &query); err != nil {
+	amConfig, err := c.configs.GetLatestAlertmanagerConfiguration(ctx, &query)
+	if err != nil {
 		// If we don't have a configuration there's nothing for us to know and we should just continue saving the new one.
 		if !errors.Is(err, store.ErrNoAlertmanagerConfiguration) {
 			return fmt.Errorf("failed to get latest configuration: %w", err)
@@ -48,8 +49,8 @@ func (c *alertmanagerCrypto) LoadSecureSettings(ctx context.Context, orgId int64
 	}
 
 	currentReceiverMap := make(map[string]*definitions.PostableGrafanaReceiver)
-	if query.Result != nil {
-		currentConfig, err := Load([]byte(query.Result.AlertmanagerConfiguration))
+	if amConfig != nil {
+		currentConfig, err := Load([]byte(amConfig.AlertmanagerConfiguration))
 		// If the current config is un-loadable, treat it as if it never existed. Providing a new, valid config should be able to "fix" this state.
 		if err != nil {
 			c.log.Warn("last known alertmanager configuration was invalid. Overwriting...")
@@ -51,14 +51,12 @@ func (f *fakeConfigStore) GetAllLatestAlertmanagerConfiguration(context.Context)
 	return result, nil
 }
 
-func (f *fakeConfigStore) GetLatestAlertmanagerConfiguration(_ context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
-	var ok bool
-	query.Result, ok = f.configs[query.OrgID]
+func (f *fakeConfigStore) GetLatestAlertmanagerConfiguration(_ context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (*models.AlertConfiguration, error) {
+	config, ok := f.configs[query.OrgID]
 	if !ok {
-		return store.ErrNoAlertmanagerConfiguration
+		return nil, store.ErrNoAlertmanagerConfiguration
 	}
 
-	return nil
+	return config, nil
 }
 
 func (f *fakeConfigStore) SaveAlertmanagerConfiguration(ctx context.Context, cmd *models.SaveAlertmanagerConfigurationCmd) error {
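The deleted "var ok bool" in the fake above is a side effect of the old shape: a comma-ok assignment into a struct field cannot use :=, while a plain local can. A tiny illustration with hypothetical types:

package main

import "fmt"

type queryWithResult struct{ Result string }

func main() {
	configs := map[int64]string{1: "cfg"}
	q := queryWithResult{}

	// Old shape: q.Result is a field, so := is not allowed and ok must be
	// declared separately; this is the dance the fake store just dropped.
	var ok bool
	q.Result, ok = configs[1]
	fmt.Println(q.Result, ok)

	// New shape: plain locals permit the idiomatic comma-ok form.
	config, found := configs[1]
	fmt.Println(config, found)
}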
@@ -51,12 +51,12 @@ func (service *AlertRuleService) GetAlertRules(ctx context.Context, orgID int64)
 	q := models.ListAlertRulesQuery{
 		OrgID: orgID,
 	}
-	err := service.ruleStore.ListAlertRules(ctx, &q)
+	rules, err := service.ruleStore.ListAlertRules(ctx, &q)
 	if err != nil {
 		return nil, err
 	}
 	// TODO: GET provenance
-	return q.Result, nil
+	return rules, nil
 }
 
 func (service *AlertRuleService) GetAlertRule(ctx context.Context, orgID int64, ruleUID string) (models.AlertRule, models.Provenance, error) {
@@ -64,15 +64,15 @@ func (service *AlertRuleService) GetAlertRule(ctx context.Context, orgID int64,
 		OrgID: orgID,
 		UID:   ruleUID,
 	}
-	err := service.ruleStore.GetAlertRuleByUID(ctx, query)
+	rules, err := service.ruleStore.GetAlertRuleByUID(ctx, query)
 	if err != nil {
 		return models.AlertRule{}, models.ProvenanceNone, err
 	}
-	provenance, err := service.provenanceStore.GetProvenance(ctx, query.Result, orgID)
+	provenance, err := service.provenanceStore.GetProvenance(ctx, rules, orgID)
 	if err != nil {
 		return models.AlertRule{}, models.ProvenanceNone, err
 	}
-	return *query.Result, provenance, nil
+	return *rules, provenance, nil
 }
 
 type AlertRuleWithFolderTitle struct {
@@ -86,14 +86,14 @@ func (service *AlertRuleService) GetAlertRuleWithFolderTitle(ctx context.Context
 		OrgID: orgID,
 		UID:   ruleUID,
 	}
-	err := service.ruleStore.GetAlertRuleByUID(ctx, query)
+	rule, err := service.ruleStore.GetAlertRuleByUID(ctx, query)
 	if err != nil {
 		return AlertRuleWithFolderTitle{}, err
 	}
 
 	dq := dashboards.GetDashboardQuery{
 		OrgID: orgID,
-		UID:   query.Result.NamespaceUID,
+		UID:   rule.NamespaceUID,
 	}
 
 	dash, err := service.dashboardService.GetDashboard(ctx, &dq)
@@ -102,7 +102,7 @@ func (service *AlertRuleService) GetAlertRuleWithFolderTitle(ctx context.Context
 	}
 
 	return AlertRuleWithFolderTitle{
-		AlertRule:   *query.Result,
+		AlertRule:   *rule,
 		FolderTitle: dash.Title,
 	}, nil
 }
@@ -158,19 +158,20 @@ func (service *AlertRuleService) GetRuleGroup(ctx context.Context, orgID int64,
 		NamespaceUIDs: []string{namespaceUID},
 		RuleGroup:     group,
 	}
-	if err := service.ruleStore.ListAlertRules(ctx, &q); err != nil {
+	ruleList, err := service.ruleStore.ListAlertRules(ctx, &q)
+	if err != nil {
 		return models.AlertRuleGroup{}, err
 	}
-	if len(q.Result) == 0 {
+	if len(ruleList) == 0 {
 		return models.AlertRuleGroup{}, store.ErrAlertRuleGroupNotFound
 	}
 	res := models.AlertRuleGroup{
-		Title:     q.Result[0].RuleGroup,
-		FolderUID: q.Result[0].NamespaceUID,
-		Interval:  q.Result[0].IntervalSeconds,
+		Title:     ruleList[0].RuleGroup,
+		FolderUID: ruleList[0].NamespaceUID,
+		Interval:  ruleList[0].IntervalSeconds,
 		Rules:     []models.AlertRule{},
 	}
-	for _, r := range q.Result {
+	for _, r := range ruleList {
 		if r != nil {
 			res.Rules = append(res.Rules, *r)
 		}
@@ -189,12 +190,12 @@ func (service *AlertRuleService) UpdateRuleGroup(ctx context.Context, orgID int6
 		NamespaceUIDs: []string{namespaceUID},
 		RuleGroup:     ruleGroup,
 	}
-	err := service.ruleStore.ListAlertRules(ctx, query)
+	ruleList, err := service.ruleStore.ListAlertRules(ctx, query)
 	if err != nil {
 		return fmt.Errorf("failed to list alert rules: %w", err)
 	}
-	updateRules := make([]models.UpdateRule, 0, len(query.Result))
-	for _, rule := range query.Result {
+	updateRules := make([]models.UpdateRule, 0, len(ruleList))
+	for _, rule := range ruleList {
 		if rule.IntervalSeconds == intervalSeconds {
 			continue
 		}
@@ -222,11 +223,12 @@ func (service *AlertRuleService) ReplaceRuleGroup(ctx context.Context, orgID int
 		NamespaceUIDs: []string{group.FolderUID},
 		RuleGroup:     group.Title,
 	}
-	if err := service.ruleStore.ListAlertRules(ctx, &listRulesQuery); err != nil {
+	ruleList, err := service.ruleStore.ListAlertRules(ctx, &listRulesQuery)
+	if err != nil {
 		return fmt.Errorf("failed to list alert rules: %w", err)
 	}
-	group.Rules = make([]models.AlertRule, 0, len(listRulesQuery.Result))
-	for _, r := range listRulesQuery.Result {
+	group.Rules = make([]models.AlertRule, 0, len(ruleList))
+	for _, r := range ruleList {
 		if r != nil {
 			group.Rules = append(group.Rules, *r)
 		}
@@ -411,10 +413,11 @@ func (service *AlertRuleService) GetAlertRuleGroupWithFolderTitle(ctx context.Co
 		NamespaceUIDs: []string{namespaceUID},
 		RuleGroup:     group,
 	}
-	if err := service.ruleStore.ListAlertRules(ctx, &q); err != nil {
+	ruleList, err := service.ruleStore.ListAlertRules(ctx, &q)
+	if err != nil {
 		return file.AlertRuleGroupWithFolderTitle{}, err
 	}
-	if len(q.Result) == 0 {
+	if len(ruleList) == 0 {
 		return file.AlertRuleGroupWithFolderTitle{}, store.ErrAlertRuleGroupNotFound
 	}
 
@@ -429,15 +432,15 @@ func (service *AlertRuleService) GetAlertRuleGroupWithFolderTitle(ctx context.Co
 
 	res := file.AlertRuleGroupWithFolderTitle{
 		AlertRuleGroup: &models.AlertRuleGroup{
-			Title:     q.Result[0].RuleGroup,
-			FolderUID: q.Result[0].NamespaceUID,
-			Interval:  q.Result[0].IntervalSeconds,
+			Title:     ruleList[0].RuleGroup,
+			FolderUID: ruleList[0].NamespaceUID,
+			Interval:  ruleList[0].IntervalSeconds,
 			Rules:     []models.AlertRule{},
 		},
 		OrgID:       orgID,
 		FolderTitle: dash.Title,
 	}
-	for _, r := range q.Result {
+	for _, r := range ruleList {
 		if r != nil {
 			res.AlertRuleGroup.Rules = append(res.AlertRuleGroup.Rules, *r)
 		}
@@ -451,13 +454,14 @@ func (service *AlertRuleService) GetAlertGroupsWithFolderTitle(ctx context.Conte
 		OrgID: orgID,
 	}
 
-	if err := service.ruleStore.ListAlertRules(ctx, &q); err != nil {
+	ruleList, err := service.ruleStore.ListAlertRules(ctx, &q)
+	if err != nil {
 		return nil, err
 	}
 
 	groups := make(map[models.AlertRuleGroupKey][]models.AlertRule)
 	namespaces := make(map[string][]*models.AlertRuleGroupKey)
-	for _, r := range q.Result {
+	for _, r := range ruleList {
 		groupKey := r.GetGroupKey()
 		group := groups[groupKey]
 		group = append(group, *r)
@@ -31,16 +31,17 @@ func getLastConfiguration(ctx context.Context, orgID int64, store AMConfigStore)
 	q := models.GetLatestAlertmanagerConfigurationQuery{
 		OrgID: orgID,
 	}
-	if err := store.GetLatestAlertmanagerConfiguration(ctx, &q); err != nil {
+	alertManagerConfig, err := store.GetLatestAlertmanagerConfiguration(ctx, &q)
+	if err != nil {
 		return nil, err
 	}
 
-	if q.Result == nil {
+	if alertManagerConfig == nil {
 		return nil, fmt.Errorf("no alertmanager configuration present in this org")
 	}
 
-	concurrencyToken := q.Result.ConfigurationHash
-	cfg, err := deserializeAlertmanagerConfig([]byte(q.Result.AlertmanagerConfiguration))
+	concurrencyToken := alertManagerConfig.ConfigurationHash
+	cfg, err := deserializeAlertmanagerConfig([]byte(alertManagerConfig.AlertmanagerConfiguration))
 	if err != nil {
 		return nil, err
 	}
@@ -48,6 +49,6 @@ func getLastConfiguration(ctx context.Context, orgID int64, store AMConfigStore)
 	return &cfgRevision{
 		cfg:              cfg,
 		concurrencyToken: concurrencyToken,
-		version:          q.Result.ConfigurationVersion,
+		version:          alertManagerConfig.ConfigurationVersion,
 	}, nil
 }
@@ -224,9 +224,9 @@ func TestContactPointService(t *testing.T) {
 		q := models.GetLatestAlertmanagerConfigurationQuery{
 			OrgID: 1,
 		}
-		err := sut.amStore.GetLatestAlertmanagerConfiguration(context.Background(), &q)
+		config, err := sut.amStore.GetLatestAlertmanagerConfiguration(context.Background(), &q)
 		require.NoError(t, err)
-		expectedConcurrencyToken := q.Result.ConfigurationHash
+		expectedConcurrencyToken := config.ConfigurationHash
 
 		_, err = sut.CreateContactPoint(context.Background(), 1, newCp, models.ProvenanceAPI)
 		require.NoError(t, err)
@@ -47,7 +47,7 @@ func TestMuteTimingService(t *testing.T) {
 		sut := createMuteTimingSvcSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(fmt.Errorf("failed"))
+			Return(nil, fmt.Errorf("failed"))
 
 		_, err := sut.GetMuteTimings(context.Background(), 1)
 
@@ -70,7 +70,7 @@ func TestMuteTimingService(t *testing.T) {
 		sut := createMuteTimingSvcSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(nil)
+			Return(nil, nil)
 
 		_, err := sut.GetMuteTimings(context.Background(), 1)
 
@@ -98,7 +98,7 @@ func TestMuteTimingService(t *testing.T) {
 		timing := createMuteTiming()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(fmt.Errorf("failed"))
+			Return(nil, fmt.Errorf("failed"))
 
 		_, err := sut.CreateMuteTiming(context.Background(), timing, 1)
 
@@ -123,7 +123,7 @@ func TestMuteTimingService(t *testing.T) {
 		timing := createMuteTiming()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(nil)
+			Return(nil, nil)
 
 		_, err := sut.CreateMuteTiming(context.Background(), timing, 1)
 
@@ -204,7 +204,7 @@ func TestMuteTimingService(t *testing.T) {
 		timing.Name = "asdf"
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(fmt.Errorf("failed"))
+			Return(nil, fmt.Errorf("failed"))
 
 		_, err := sut.UpdateMuteTiming(context.Background(), timing, 1)
 
@@ -231,7 +231,7 @@ func TestMuteTimingService(t *testing.T) {
 		timing.Name = "asdf"
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(nil)
+			Return(nil, nil)
 
 		_, err := sut.UpdateMuteTiming(context.Background(), timing, 1)
 
@@ -296,7 +296,7 @@ func TestMuteTimingService(t *testing.T) {
 		sut := createMuteTimingSvcSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(fmt.Errorf("failed"))
+			Return(nil, fmt.Errorf("failed"))
 
 		err := sut.DeleteMuteTiming(context.Background(), "asdf", 1)
 
@@ -319,7 +319,7 @@ func TestMuteTimingService(t *testing.T) {
 		sut := createMuteTimingSvcSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(nil)
+			Return(nil, nil)
 
 		err := sut.DeleteMuteTiming(context.Background(), "asdf", 1)
@@ -37,12 +37,12 @@ func (nps *NotificationPolicyService) GetPolicyTree(ctx context.Context, orgID i
 	q := models.GetLatestAlertmanagerConfigurationQuery{
 		OrgID: orgID,
 	}
-	err := nps.amStore.GetLatestAlertmanagerConfiguration(ctx, &q)
+	alertManagerConfig, err := nps.amStore.GetLatestAlertmanagerConfiguration(ctx, &q)
 	if err != nil {
 		return definitions.Route{}, err
 	}
 
-	cfg, err := deserializeAlertmanagerConfig([]byte(q.Result.AlertmanagerConfiguration))
+	cfg, err := deserializeAlertmanagerConfig([]byte(alertManagerConfig.AlertmanagerConfiguration))
 	if err != nil {
 		return definitions.Route{}, err
 	}
@@ -29,22 +29,16 @@ func TestNotificationPolicyService(t *testing.T) {
 	t.Run("error if referenced mute time interval is not existing", func(t *testing.T) {
 		sut := createNotificationPolicyServiceSut()
 		sut.amStore = &MockAMConfigStore{}
+		cfg := createTestAlertingConfig()
+		cfg.AlertmanagerConfig.MuteTimeIntervals = []config.MuteTimeInterval{
+			{
+				Name:          "not-the-one-we-need",
+				TimeIntervals: []timeinterval.TimeInterval{},
+			},
+		}
+		data, _ := serializeAlertmanagerConfig(*cfg)
 		sut.amStore.(*MockAMConfigStore).On("GetLatestAlertmanagerConfiguration", mock.Anything, mock.Anything).
-			Return(
-				func(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
-					cfg := createTestAlertingConfig()
-					cfg.AlertmanagerConfig.MuteTimeIntervals = []config.MuteTimeInterval{
-						{
-							Name:          "not-the-one-we-need",
-							TimeIntervals: []timeinterval.TimeInterval{},
-						},
-					}
-					data, _ := serializeAlertmanagerConfig(*cfg)
-					query.Result = &models.AlertConfiguration{
-						AlertmanagerConfiguration: string(data),
-					}
-					return nil
-				})
+			Return(&models.AlertConfiguration{AlertmanagerConfiguration: string(data)}, nil)
 		sut.amStore.(*MockAMConfigStore).EXPECT().
 			UpdateAlertmanagerConfiguration(mock.Anything, mock.Anything).
 			Return(nil)
@@ -61,22 +55,16 @@ func TestNotificationPolicyService(t *testing.T) {
 	t.Run("pass if referenced mute time interval is existing", func(t *testing.T) {
 		sut := createNotificationPolicyServiceSut()
 		sut.amStore = &MockAMConfigStore{}
+		cfg := createTestAlertingConfig()
+		cfg.AlertmanagerConfig.MuteTimeIntervals = []config.MuteTimeInterval{
+			{
+				Name:          "existing",
+				TimeIntervals: []timeinterval.TimeInterval{},
+			},
+		}
+		data, _ := serializeAlertmanagerConfig(*cfg)
 		sut.amStore.(*MockAMConfigStore).On("GetLatestAlertmanagerConfiguration", mock.Anything, mock.Anything).
-			Return(
-				func(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
-					cfg := createTestAlertingConfig()
-					cfg.AlertmanagerConfig.MuteTimeIntervals = []config.MuteTimeInterval{
-						{
-							Name:          "existing",
-							TimeIntervals: []timeinterval.TimeInterval{},
-						},
-					}
-					data, _ := serializeAlertmanagerConfig(*cfg)
-					query.Result = &models.AlertConfiguration{
-						AlertmanagerConfiguration: string(data),
-					}
-					return nil
-				})
+			Return(&models.AlertConfiguration{AlertmanagerConfiguration: string(data)}, nil)
 		sut.amStore.(*MockAMConfigStore).EXPECT().
 			UpdateAlertmanagerConfiguration(mock.Anything, mock.Anything).
 			Return(nil)
@@ -118,16 +106,10 @@ func TestNotificationPolicyService(t *testing.T) {
 	t.Run("existing receiver reference will pass", func(t *testing.T) {
 		sut := createNotificationPolicyServiceSut()
 		sut.amStore = &MockAMConfigStore{}
+		cfg := createTestAlertingConfig()
+		data, _ := serializeAlertmanagerConfig(*cfg)
 		sut.amStore.(*MockAMConfigStore).On("GetLatestAlertmanagerConfiguration", mock.Anything, mock.Anything).
-			Return(
-				func(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
-					cfg := createTestAlertingConfig()
-					data, _ := serializeAlertmanagerConfig(*cfg)
-					query.Result = &models.AlertConfiguration{
-						AlertmanagerConfiguration: string(data),
-					}
-					return nil
-				})
+			Return(&models.AlertConfiguration{AlertmanagerConfiguration: string(data)}, nil)
 		sut.amStore.(*MockAMConfigStore).EXPECT().
 			UpdateAlertmanagerConfiguration(mock.Anything, mock.Anything).
 			Return(nil)
@@ -167,9 +149,9 @@ func TestNotificationPolicyService(t *testing.T) {
 		q := models.GetLatestAlertmanagerConfigurationQuery{
 			OrgID: 1,
 		}
-		err := sut.GetAMConfigStore().GetLatestAlertmanagerConfiguration(context.Background(), &q)
+		config, err := sut.GetAMConfigStore().GetLatestAlertmanagerConfiguration(context.Background(), &q)
 		require.NoError(t, err)
-		expectedConcurrencyToken := q.Result.ConfigurationHash
+		expectedConcurrencyToken := config.ConfigurationHash
 
 		err = sut.UpdatePolicyTree(context.Background(), 1, newRoute, models.ProvenanceAPI)
 		require.NoError(t, err)
@@ -205,27 +187,21 @@ func TestNotificationPolicyService(t *testing.T) {
 	t.Run("deleting route with missing default receiver restores receiver", func(t *testing.T) {
 		sut := createNotificationPolicyServiceSut()
 		sut.amStore = &MockAMConfigStore{}
+		cfg := createTestAlertingConfig()
+		cfg.AlertmanagerConfig.Route = &definitions.Route{
+			Receiver: "a new receiver",
+		}
+		cfg.AlertmanagerConfig.Receivers = []*definitions.PostableApiReceiver{
+			{
+				Receiver: config.Receiver{
+					Name: "a new receiver",
+				},
+			},
+			// No default receiver! Only our custom one.
+		}
+		data, _ := serializeAlertmanagerConfig(*cfg)
 		sut.amStore.(*MockAMConfigStore).On("GetLatestAlertmanagerConfiguration", mock.Anything, mock.Anything).
-			Return(
-				func(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
-					cfg := createTestAlertingConfig()
-					cfg.AlertmanagerConfig.Route = &definitions.Route{
-						Receiver: "a new receiver",
-					}
-					cfg.AlertmanagerConfig.Receivers = []*definitions.PostableApiReceiver{
-						{
-							Receiver: config.Receiver{
-								Name: "a new receiver",
-							},
-						},
-						// No default receiver! Only our custom one.
-					}
-					data, _ := serializeAlertmanagerConfig(*cfg)
-					query.Result = &models.AlertConfiguration{
-						AlertmanagerConfiguration: string(data),
-					}
-					return nil
-				})
+			Return(&models.AlertConfiguration{AlertmanagerConfiguration: string(data)}, nil)
 		var interceptedSave = models.SaveAlertmanagerConfigurationCmd{}
 		sut.amStore.(*MockAMConfigStore).EXPECT().SaveSucceedsIntercept(&interceptedSave)
 
@@ -14,7 +14,7 @@ import (
 //
 //go:generate mockery --name AMConfigStore --structname MockAMConfigStore --inpackage --filename persist_mock.go --with-expecter
 type AMConfigStore interface {
-	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error
+	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (*models.AlertConfiguration, error)
 	UpdateAlertmanagerConfiguration(ctx context.Context, cmd *models.SaveAlertmanagerConfigurationCmd) error
 }
 
@@ -35,13 +35,13 @@ type TransactionManager interface {
 
 // RuleStore represents the ability to persist and query alert rules.
 type RuleStore interface {
-	GetAlertRuleByUID(ctx context.Context, query *models.GetAlertRuleByUIDQuery) error
-	ListAlertRules(ctx context.Context, query *models.ListAlertRulesQuery) error
+	GetAlertRuleByUID(ctx context.Context, query *models.GetAlertRuleByUIDQuery) (*models.AlertRule, error)
+	ListAlertRules(ctx context.Context, query *models.ListAlertRulesQuery) (models.RulesGroup, error)
 	GetRuleGroupInterval(ctx context.Context, orgID int64, namespaceUID string, ruleGroup string) (int64, error)
 	InsertAlertRules(ctx context.Context, rule []models.AlertRule) (map[string]int64, error)
 	UpdateAlertRules(ctx context.Context, rule []models.UpdateRule) error
 	DeleteAlertRulesByUID(ctx context.Context, orgID int64, ruleUID ...string) error
-	GetAlertRulesGroupByRuleUID(ctx context.Context, query *models.GetAlertRulesGroupByRuleUIDQuery) error
+	GetAlertRulesGroupByRuleUID(ctx context.Context, query *models.GetAlertRulesGroupByRuleUIDQuery) ([]*models.AlertRule, error)
 }
 
 // QuotaChecker represents the ability to evaluate whether quotas are met.
@@ -1,14 +1,12 @@
-// Code generated by mockery v2.12.0. DO NOT EDIT.
+// Code generated by mockery v2.16.0. DO NOT EDIT.
 
 package provisioning
 
 import (
 	context "context"
-	testing "testing"
-
-	mock "github.com/stretchr/testify/mock"
 
 	models "github.com/grafana/grafana/pkg/services/ngalert/models"
+	mock "github.com/stretchr/testify/mock"
 )
 
 // MockAMConfigStore is an autogenerated mock type for the AMConfigStore type
@@ -25,17 +23,26 @@ func (_m *MockAMConfigStore) EXPECT() *MockAMConfigStore_Expecter {
 }
 
 // GetLatestAlertmanagerConfiguration provides a mock function with given fields: ctx, query
-func (_m *MockAMConfigStore) GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
+func (_m *MockAMConfigStore) GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (*models.AlertConfiguration, error) {
 	ret := _m.Called(ctx, query)
 
-	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, *models.GetLatestAlertmanagerConfigurationQuery) error); ok {
+	var r0 *models.AlertConfiguration
+	if rf, ok := ret.Get(0).(func(context.Context, *models.GetLatestAlertmanagerConfigurationQuery) *models.AlertConfiguration); ok {
 		r0 = rf(ctx, query)
 	} else {
-		r0 = ret.Error(0)
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*models.AlertConfiguration)
+		}
 	}
 
-	return r0
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, *models.GetLatestAlertmanagerConfigurationQuery) error); ok {
+		r1 = rf(ctx, query)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
 }
 
 // MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestAlertmanagerConfiguration'
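The nil-check mockery now generates around ret.Get(0) exists because a Return(nil, err) expectation stores an untyped nil, and a direct type assertion on a nil interface value panics. A small sketch of that mechanic using testify's mock.Arguments directly (the only assumed dependency), with a hypothetical alertConfiguration type:

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type alertConfiguration struct{}

func main() {
	// What Return(nil, fmt.Errorf("failed")) records on the expectation.
	args := mock.Arguments{nil, fmt.Errorf("failed")}

	var r0 *alertConfiguration
	if args.Get(0) != nil { // args.Get(0).(*alertConfiguration) would panic on nil
		r0 = args.Get(0).(*alertConfiguration)
	}
	r1 := args.Error(1)
	fmt.Println(r0, r1)
}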
@@ -44,8 +51,8 @@ type MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call struct {
 }
 
 // GetLatestAlertmanagerConfiguration is a helper method to define mock.On call
-//  - ctx context.Context
-//  - query *models.GetLatestAlertmanagerConfigurationQuery
+//   - ctx context.Context
+//   - query *models.GetLatestAlertmanagerConfigurationQuery
 func (_e *MockAMConfigStore_Expecter) GetLatestAlertmanagerConfiguration(ctx interface{}, query interface{}) *MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call {
 	return &MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call{Call: _e.mock.On("GetLatestAlertmanagerConfiguration", ctx, query)}
 }
@@ -57,8 +64,8 @@ func (_c *MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call) Run(run fun
 	return _c
 }
 
-func (_c *MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call) Return(_a0 error) *MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call {
-	_c.Call.Return(_a0)
+func (_c *MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call) Return(_a0 *models.AlertConfiguration, _a1 error) *MockAMConfigStore_GetLatestAlertmanagerConfiguration_Call {
+	_c.Call.Return(_a0, _a1)
 	return _c
 }
 
@@ -82,8 +89,8 @@ type MockAMConfigStore_UpdateAlertmanagerConfiguration_Call struct {
 }
 
 // UpdateAlertmanagerConfiguration is a helper method to define mock.On call
-//  - ctx context.Context
-//  - cmd *models.SaveAlertmanagerConfigurationCmd
+//   - ctx context.Context
+//   - cmd *models.SaveAlertmanagerConfigurationCmd
 func (_e *MockAMConfigStore_Expecter) UpdateAlertmanagerConfiguration(ctx interface{}, cmd interface{}) *MockAMConfigStore_UpdateAlertmanagerConfiguration_Call {
 	return &MockAMConfigStore_UpdateAlertmanagerConfiguration_Call{Call: _e.mock.On("UpdateAlertmanagerConfiguration", ctx, cmd)}
 }
@@ -100,8 +107,13 @@ func (_c *MockAMConfigStore_UpdateAlertmanagerConfiguration_Call) Return(_a0 err
 	return _c
 }
 
-// NewMockAMConfigStore creates a new instance of MockAMConfigStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
-func NewMockAMConfigStore(t testing.TB) *MockAMConfigStore {
+type mockConstructorTestingTNewMockAMConfigStore interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewMockAMConfigStore creates a new instance of MockAMConfigStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewMockAMConfigStore(t mockConstructorTestingTNewMockAMConfigStore) *MockAMConfigStore {
 	mock := &MockAMConfigStore{}
 	mock.Mock.Test(t)
 
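The constructor change above is the mockery v2.16 convention visible in the regenerated file: instead of testing.TB it asks for the minimal interface it actually uses, which *testing.T still satisfies. A reduced sketch of why that works (assumes only github.com/stretchr/testify/mock; fakeT is a hypothetical stand-in for *testing.T):

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// The generated constructor wants mock.TestingT plus Cleanup; *testing.T
// provides both, so call sites keep passing t unchanged.
type constructorTestingT interface {
	mock.TestingT
	Cleanup(func())
}

// fakeT is a minimal stand-in for *testing.T, for this sketch only.
type fakeT struct{}

func (fakeT) Logf(format string, args ...interface{})   {}
func (fakeT) Errorf(format string, args ...interface{}) {}
func (fakeT) FailNow()                                  {}
func (fakeT) Cleanup(f func())                          { f() }

func newMock(t constructorTestingT) *mock.Mock {
	m := &mock.Mock{}
	m.Test(t)
	t.Cleanup(func() { m.AssertExpectations(t) })
	return m
}

func main() {
	fmt.Println(newMock(fakeT{}) != nil)
}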
@@ -46,7 +46,7 @@ func TestTemplateService(t *testing.T) {
 		sut := createTemplateServiceSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(fmt.Errorf("failed"))
+			Return(nil, fmt.Errorf("failed"))
 
 		_, err := sut.GetTemplates(context.Background(), 1)
 
@@ -69,7 +69,7 @@ func TestTemplateService(t *testing.T) {
 		sut := createTemplateServiceSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(nil)
+			Return(nil, nil)
 
 		_, err := sut.GetTemplates(context.Background(), 1)
 
@@ -96,7 +96,7 @@ func TestTemplateService(t *testing.T) {
 		tmpl := createNotificationTemplate()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(fmt.Errorf("failed"))
+			Return(nil, fmt.Errorf("failed"))
 
 		_, err := sut.SetTemplate(context.Background(), 1, tmpl)
 
@@ -121,7 +121,7 @@ func TestTemplateService(t *testing.T) {
 		tmpl := createNotificationTemplate()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(nil)
+			Return(nil, nil)
 
 		_, err := sut.SetTemplate(context.Background(), 1, tmpl)
 
@@ -273,7 +273,7 @@ func TestTemplateService(t *testing.T) {
 		sut := createTemplateServiceSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(fmt.Errorf("failed"))
+			Return(nil, fmt.Errorf("failed"))
 
 		err := sut.DeleteTemplate(context.Background(), 1, "template")
 
@@ -296,7 +296,7 @@ func TestTemplateService(t *testing.T) {
 		sut := createTemplateServiceSut()
 		sut.config.(*MockAMConfigStore).EXPECT().
 			GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-			Return(nil)
+			Return(nil, nil)
 
 		err := sut.DeleteTemplate(context.Background(), 1, "template")
@@ -72,11 +72,11 @@ func newFakeAMConfigStore() *fakeAMConfigStore {
 	}
 }
 
-func (f *fakeAMConfigStore) GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
-	query.Result = &f.config
-	query.Result.OrgID = query.OrgID
-	query.Result.ConfigurationHash = fmt.Sprintf("%x", md5.Sum([]byte(f.config.AlertmanagerConfiguration)))
-	return nil
+func (f *fakeAMConfigStore) GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (*models.AlertConfiguration, error) {
+	result := &f.config
+	result.OrgID = query.OrgID
+	result.ConfigurationHash = fmt.Sprintf("%x", md5.Sum([]byte(f.config.AlertmanagerConfiguration)))
+	return result, nil
}
 
 func (f *fakeAMConfigStore) UpdateAlertmanagerConfiguration(ctx context.Context, cmd *models.SaveAlertmanagerConfigurationCmd) error {
@@ -148,11 +148,7 @@ func (n *NopTransactionManager) InTransaction(ctx context.Context, work func(ctx
 }
 
 func (m *MockAMConfigStore_Expecter) GetsConfig(ac models.AlertConfiguration) *MockAMConfigStore_Expecter {
-	m.GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).
-		Run(func(ctx context.Context, q *models.GetLatestAlertmanagerConfigurationQuery) {
-			q.Result = &ac
-		}).
-		Return(nil)
+	m.GetLatestAlertmanagerConfiguration(mock.Anything, mock.Anything).Return(&ac, nil)
 	return m
 }
@@ -34,7 +34,7 @@ type AnnotationBackend struct {
 }
 
 type RuleStore interface {
-	GetAlertRuleByUID(ctx context.Context, query *ngmodels.GetAlertRuleByUIDQuery) error
+	GetAlertRuleByUID(ctx context.Context, query *ngmodels.GetAlertRuleByUIDQuery) (*ngmodels.AlertRule, error)
 }
 
 type AnnotationStore interface {
@@ -89,16 +89,16 @@ func (h *AnnotationBackend) Query(ctx context.Context, query ngmodels.HistoryQue
 		UID:   query.RuleUID,
 		OrgID: query.OrgID,
 	}
-	err := h.rules.GetAlertRuleByUID(ctx, &rq)
+	rule, err := h.rules.GetAlertRuleByUID(ctx, &rq)
 	if err != nil {
 		return nil, fmt.Errorf("failed to look up the requested rule")
 	}
-	if rq.Result == nil {
+	if rule == nil {
 		return nil, fmt.Errorf("no such rule exists")
 	}
 
 	q := annotations.ItemQuery{
-		AlertID: rq.Result.ID,
+		AlertID: rule.ID,
 		OrgID:   query.OrgID,
 		From:    query.From.Unix(),
 		To:      query.To.Unix(),
@@ -103,12 +103,13 @@ func (st *Manager) Warm(ctx context.Context, rulesReader RuleReader) {
 		ruleCmd := ngModels.ListAlertRulesQuery{
 			OrgID: orgId,
 		}
-		if err := rulesReader.ListAlertRules(ctx, &ruleCmd); err != nil {
+		alertRules, err := rulesReader.ListAlertRules(ctx, &ruleCmd)
+		if err != nil {
 			st.log.Error("Unable to fetch previous state", "error", err)
 		}
 
-		ruleByUID := make(map[string]*ngModels.AlertRule, len(ruleCmd.Result))
-		for _, rule := range ruleCmd.Result {
+		ruleByUID := make(map[string]*ngModels.AlertRule, len(alertRules))
+		for _, rule := range alertRules {
 			ruleByUID[rule.UID] = rule
 		}
 
@@ -119,11 +120,12 @@ func (st *Manager) Warm(ctx context.Context, rulesReader RuleReader) {
 		cmd := ngModels.ListAlertInstancesQuery{
 			RuleOrgID: orgId,
 		}
-		if err := st.instanceStore.ListAlertInstances(ctx, &cmd); err != nil {
+		alertInstances, err := st.instanceStore.ListAlertInstances(ctx, &cmd)
+		if err != nil {
 			st.log.Error("Unable to fetch previous state", "error", err)
 		}
 
-		for _, entry := range cmd.Result {
+		for _, entry := range alertInstances {
 			ruleForEntry, ok := ruleByUID[entry.RuleUID]
 			if !ok {
 				// TODO Should we delete the orphaned state from the db?
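Worth noting about Warm: both fetches log the error and keep going, so on failure alertRules and alertInstances are nil. That stays safe because ranging over a nil slice is a no-op in Go, matching the old behaviour where the Result field simply remained empty. A minimal sketch with a hypothetical failing reader:

package main

import (
	"errors"
	"fmt"
)

// listAlertRules stands in for rulesReader.ListAlertRules failing.
func listAlertRules() ([]string, error) { return nil, errors.New("db down") }

func main() {
	alertRules, err := listAlertRules()
	if err != nil {
		fmt.Println("Unable to fetch previous state", "error", err)
		// no return here: Warm continues, as in the hunk above
	}
	for _, rule := range alertRules {
		_ = rule // never entered when alertRules is nil
	}
	fmt.Println("warmed with", len(alertRules), "rules")
}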
@@ -2639,12 +2639,12 @@ func TestDeleteStateByRuleUID(t *testing.T) {
 			st := state.NewManager(cfg)
 			st.Warm(ctx, dbstore)
 			q := &models.ListAlertInstancesQuery{RuleOrgID: rule.OrgID, RuleUID: rule.UID}
-			_ = dbstore.ListAlertInstances(ctx, q)
+			alerts, _ := dbstore.ListAlertInstances(ctx, q)
 			existingStatesForRule := st.GetStatesForRuleUID(rule.OrgID, rule.UID)
 
 			// We have loaded the expected number of entries from the db
 			assert.Equal(t, tc.startingStateCacheCount, len(existingStatesForRule))
-			assert.Equal(t, tc.startingInstanceDBCount, len(q.Result))
+			assert.Equal(t, tc.startingInstanceDBCount, len(alerts))
 
 			expectedReason := util.GenerateShortUID()
 			transitions := st.DeleteStateByRuleUID(ctx, rule.GetKey(), expectedReason)
@@ -2671,12 +2671,12 @@ func TestDeleteStateByRuleUID(t *testing.T) {
 			}
 
 			q = &models.ListAlertInstancesQuery{RuleOrgID: rule.OrgID, RuleUID: rule.UID}
-			_ = dbstore.ListAlertInstances(ctx, q)
+			alertInstances, _ := dbstore.ListAlertInstances(ctx, q)
 			existingStatesForRule = st.GetStatesForRuleUID(rule.OrgID, rule.UID)
 
 			// The expected number of state entries remains after states are deleted
 			assert.Equal(t, tc.finalStateCacheCount, len(existingStatesForRule))
-			assert.Equal(t, tc.finalInstanceDBCount, len(q.Result))
+			assert.Equal(t, tc.finalInstanceDBCount, len(alertInstances))
 		})
 	}
 }
@@ -2776,12 +2776,12 @@ func TestResetStateByRuleUID(t *testing.T) {
 			st := state.NewManager(cfg)
 			st.Warm(ctx, dbstore)
 			q := &models.ListAlertInstancesQuery{RuleOrgID: rule.OrgID, RuleUID: rule.UID}
-			_ = dbstore.ListAlertInstances(ctx, q)
+			alerts, _ := dbstore.ListAlertInstances(ctx, q)
 			existingStatesForRule := st.GetStatesForRuleUID(rule.OrgID, rule.UID)
 
 			// We have loaded the expected number of entries from the db
 			assert.Equal(t, tc.startingStateCacheCount, len(existingStatesForRule))
-			assert.Equal(t, tc.startingInstanceDBCount, len(q.Result))
+			assert.Equal(t, tc.startingInstanceDBCount, len(alerts))
 
 			transitions := st.ResetStateByRuleUID(ctx, rule, models.StateReasonPaused)
 
@@ -2811,12 +2811,12 @@ func TestResetStateByRuleUID(t *testing.T) {
 			assert.Equal(t, transitions, fakeHistorian.StateTransitions)
 
 			q = &models.ListAlertInstancesQuery{RuleOrgID: rule.OrgID, RuleUID: rule.UID}
-			_ = dbstore.ListAlertInstances(ctx, q)
+			alertInstances, _ := dbstore.ListAlertInstances(ctx, q)
 			existingStatesForRule = st.GetStatesForRuleUID(rule.OrgID, rule.UID)
 
 			// The expected number of state entries remains after states are deleted
 			assert.Equal(t, tc.finalStateCacheCount, len(existingStatesForRule))
-			assert.Equal(t, tc.finalInstanceDBCount, len(q.Result))
+			assert.Equal(t, tc.finalInstanceDBCount, len(alertInstances))
 		})
 	}
 }
@@ -10,7 +10,7 @@ import (
 // InstanceStore represents the ability to fetch and write alert instances.
 type InstanceStore interface {
 	FetchOrgIds(ctx context.Context) ([]int64, error)
-	ListAlertInstances(ctx context.Context, cmd *models.ListAlertInstancesQuery) error
+	ListAlertInstances(ctx context.Context, cmd *models.ListAlertInstancesQuery) ([]*models.AlertInstance, error)
 	SaveAlertInstances(ctx context.Context, cmd ...models.AlertInstance) error
 	DeleteAlertInstances(ctx context.Context, keys ...models.AlertInstanceKey) error
 	DeleteAlertInstancesByRule(ctx context.Context, key models.AlertRuleKey) error
@@ -18,7 +18,7 @@ type InstanceStore interface {
 
 // RuleReader represents the ability to fetch alert rules.
 type RuleReader interface {
-	ListAlertRules(ctx context.Context, query *models.ListAlertRulesQuery) error
+	ListAlertRules(ctx context.Context, query *models.ListAlertRulesQuery) (models.RulesGroup, error)
 }
 
 // Historian maintains an audit log of alert state history.
@@ -21,11 +21,11 @@ type FakeInstanceStoreOp struct {
 	Args []interface{}
 }
 
-func (f *FakeInstanceStore) ListAlertInstances(_ context.Context, q *models.ListAlertInstancesQuery) error {
+func (f *FakeInstanceStore) ListAlertInstances(_ context.Context, q *models.ListAlertInstancesQuery) ([]*models.AlertInstance, error) {
 	f.mtx.Lock()
 	defer f.mtx.Unlock()
 	f.RecordedOps = append(f.RecordedOps, *q)
-	return nil
+	return nil, nil
 }
 
 func (f *FakeInstanceStore) SaveAlertInstances(_ context.Context, q ...models.AlertInstance) error {
@@ -57,8 +57,8 @@ func (f *FakeInstanceStore) DeleteAlertInstancesByRule(ctx context.Context, key
 
 type FakeRuleReader struct{}
 
-func (f *FakeRuleReader) ListAlertRules(_ context.Context, q *models.ListAlertRulesQuery) error {
-	return nil
+func (f *FakeRuleReader) ListAlertRules(_ context.Context, q *models.ListAlertRulesQuery) (models.RulesGroup, error) {
+	return nil, nil
 }
 
 type FakeHistorian struct {
@@ -83,31 +83,33 @@ func (st DBstore) IncreaseVersionForAllRulesInNamespace(ctx context.Context, org
 
 // GetAlertRuleByUID is a handler for retrieving an alert rule from that database by its UID and organisation ID.
 // It returns ngmodels.ErrAlertRuleNotFound if no alert rule is found for the provided ID.
-func (st DBstore) GetAlertRuleByUID(ctx context.Context, query *ngmodels.GetAlertRuleByUIDQuery) error {
-	return st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
+func (st DBstore) GetAlertRuleByUID(ctx context.Context, query *ngmodels.GetAlertRuleByUIDQuery) (result *ngmodels.AlertRule, err error) {
+	err = st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
 		alertRule, err := getAlertRuleByUID(sess, query.UID, query.OrgID)
 		if err != nil {
 			return err
 		}
-		query.Result = alertRule
+		result = alertRule
 		return nil
 	})
+	return result, err
 }
 
 // GetAlertRulesGroupByRuleUID is a handler for retrieving a group of alert rules from that database by UID and organisation ID of one of rules that belong to that group.
-func (st DBstore) GetAlertRulesGroupByRuleUID(ctx context.Context, query *ngmodels.GetAlertRulesGroupByRuleUIDQuery) error {
-	return st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
-		var result []*ngmodels.AlertRule
+func (st DBstore) GetAlertRulesGroupByRuleUID(ctx context.Context, query *ngmodels.GetAlertRulesGroupByRuleUIDQuery) (result []*ngmodels.AlertRule, err error) {
+	err = st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
+		var rules []*ngmodels.AlertRule
 		err := sess.Table("alert_rule").Alias("a").Join(
 			"INNER",
 			"alert_rule AS b", "a.org_id = b.org_id AND a.namespace_uid = b.namespace_uid AND a.rule_group = b.rule_group AND b.uid = ?", query.UID,
-		).Where("a.org_id = ?", query.OrgID).Select("a.*").Find(&result)
+		).Where("a.org_id = ?", query.OrgID).Select("a.*").Find(&rules)
 		if err != nil {
 			return err
 		}
-		query.Result = result
+		result = rules
 		return nil
 	})
+	return result, err
 }
 
 // InsertAlertRules is a handler for creating/updating alert rules.
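The DBstore methods adopt named return values so the WithDbSession closure can assign the result while the wrapper keeps a single exit path. A self-contained sketch of that shape, where withDbSession is a hypothetical stand-in for st.SQLStore.WithDbSession:

package main

import "fmt"

type session struct{}

// withDbSession stands in for st.SQLStore.WithDbSession: it runs the
// callback with a session and returns the callback's error.
func withDbSession(fn func(sess *session) error) error { return fn(&session{}) }

type alertRule struct{ UID string }

func getAlertRuleByUID(_ *session, uid string) (*alertRule, error) {
	return &alertRule{UID: uid}, nil
}

// GetAlertRuleByUID mirrors the new DBstore shape: the named result is
// assigned inside the closure and returned together with the session error.
func GetAlertRuleByUID(uid string) (result *alertRule, err error) {
	err = withDbSession(func(sess *session) error {
		rule, err := getAlertRuleByUID(sess, uid)
		if err != nil {
			return err
		}
		result = rule
		return nil
	})
	return result, err
}

func main() {
	rule, err := GetAlertRuleByUID("abc")
	fmt.Println(rule.UID, err)
}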
@@ -243,8 +245,8 @@ func (st DBstore) CountAlertRulesInFolder(ctx context.Context, query *ngmodels.C
 }
 
 // ListAlertRules is a handler for retrieving alert rules of specific organisation.
-func (st DBstore) ListAlertRules(ctx context.Context, query *ngmodels.ListAlertRulesQuery) error {
-	return st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
+func (st DBstore) ListAlertRules(ctx context.Context, query *ngmodels.ListAlertRulesQuery) (result ngmodels.RulesGroup, err error) {
+	err = st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
 		q := sess.Table("alert_rule")
 
 		if query.OrgID >= 0 {
@@ -295,9 +297,10 @@ func (st DBstore) ListAlertRules(ctx context.Context, query *ngmodels.ListAlertR
 			alertRules = append(alertRules, rule)
 		}
 
-		query.Result = alertRules
+		result = alertRules
 		return nil
 	})
+	return result, err
 }
 
 // Count returns either the number of the alert rules under a specific org (if orgID is not zero)
@@ -24,8 +24,8 @@ var (
 
 // GetLatestAlertmanagerConfiguration returns the lastest version of the alertmanager configuration.
 // It returns ErrNoAlertmanagerConfiguration if no configuration is found.
-func (st *DBstore) GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error {
-	return st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
+func (st *DBstore) GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (result *models.AlertConfiguration, err error) {
+	err = st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
 		c := &models.AlertConfiguration{}
 		// The ID is already an auto incremental column, using the ID as an order should guarantee the latest.
 		ok, err := sess.Table("alert_configuration").Where("org_id = ?", query.OrgID).Get(c)
@@ -37,9 +37,10 @@ func (st *DBstore) GetLatestAlertmanagerConfiguration(ctx context.Context, query
 			return ErrNoAlertmanagerConfiguration
 		}
 
-		query.Result = c
+		result = c
 		return nil
 	})
+	return result, err
 }
 
 // GetAllLatestAlertmanagerConfiguration returns the latest configuration of every organization
@@ -162,8 +163,8 @@ func (st *DBstore) MarkConfigurationAsApplied(ctx context.Context, cmd *models.M
 }
 
 // GetAppliedConfigurations returns all configurations that have been marked as applied, ordered newest -> oldest by id.
-func (st *DBstore) GetAppliedConfigurations(ctx context.Context, query *models.GetAppliedConfigurationsQuery) error {
-	return st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
+func (st *DBstore) GetAppliedConfigurations(ctx context.Context, query *models.GetAppliedConfigurationsQuery) (result []*models.HistoricAlertConfiguration, err error) {
+	err = st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
 		cfgs := []*models.HistoricAlertConfiguration{}
 		err := sess.Table("alert_configuration_history").
 			Desc("id").
@@ -174,9 +175,10 @@ func (st *DBstore) GetAppliedConfigurations(ctx context.Context, query *models.G
 			return err
 		}
 
-		query.Result = cfgs
+		result = cfgs
 		return nil
 	})
+	return result, err
 }
 
 func (st *DBstore) deleteOldConfigurations(ctx context.Context, orgID int64, limit int) (int64, error) {
@@ -30,10 +30,10 @@ func TestIntegrationAlertmanagerStore(t *testing.T) {
 			OrgID: 1234,
 		}
 
-		err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
+		config, err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
 
 		require.ErrorIs(t, err, ErrNoAlertmanagerConfiguration)
-		require.Nil(t, req.Result)
+		require.Nil(t, config)
 	})
 
 	t.Run("GetLatestAlertmanagerConfiguration return the right config", func(t *testing.T) {
@@ -42,12 +42,12 @@ func TestIntegrationAlertmanagerStore(t *testing.T) {
 			OrgID: 1,
 		}
 
-		err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
+		config, err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
 
 		require.NoError(t, err)
-		require.NotNil(t, req.Result)
-		require.Equal(t, "my-config", req.Result.AlertmanagerConfiguration)
-		require.Equal(t, configMD5, req.Result.ConfigurationHash)
+		require.NotNil(t, config)
+		require.Equal(t, "my-config", config.AlertmanagerConfiguration)
+		require.Equal(t, configMD5, config.ConfigurationHash)
 	})
 
 	t.Run("GetLatestAlertmanagerConfiguration after saving multiple times should return the latest config", func(t *testing.T) {
@ -58,12 +58,12 @@ func TestIntegrationAlertmanagerStore(t *testing.T) {
|
||||
OrgID: 1,
|
||||
}
|
||||
|
||||
err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
config, err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, req.Result)
|
||||
require.Equal(t, "my-config3", req.Result.AlertmanagerConfiguration)
|
||||
require.Equal(t, configMD5, req.Result.ConfigurationHash)
|
||||
require.NotNil(t, config)
|
||||
require.Equal(t, "my-config3", config.AlertmanagerConfiguration)
|
||||
require.Equal(t, configMD5, config.ConfigurationHash)
|
||||
})
|
||||
|
||||
t.Run("GetAllLatestAlertmanagerConfiguration gets latest config for all orgs", func(t *testing.T) {
|
||||
@ -109,9 +109,9 @@ func TestIntegrationAlertmanagerStore(t *testing.T) {
|
||||
require.ErrorContains(t, err, "callback failed")
|
||||
// Assert that we rolled back the transaction.
|
||||
get := &models.GetLatestAlertmanagerConfigurationQuery{OrgID: 1}
|
||||
err = store.GetLatestAlertmanagerConfiguration(context.Background(), get)
|
||||
config, err := store.GetLatestAlertmanagerConfiguration(context.Background(), get)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, get.Result.AlertmanagerConfiguration, "my-config")
|
||||
require.Equal(t, config.AlertmanagerConfiguration, "my-config")
|
||||
})
|
||||
|
||||
t.Run("UpdateAlertmanagerConfiguration returns error if existing config does not exist", func(t *testing.T) {
|
||||
@ -138,9 +138,9 @@ func TestIntegrationAlertmanagerHash(t *testing.T) {
|
||||
req := &models.GetLatestAlertmanagerConfigurationQuery{
|
||||
OrgID: 1,
|
||||
}
|
||||
err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
config, err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, configMD5, req.Result.ConfigurationHash)
|
||||
require.Equal(t, configMD5, config.ConfigurationHash)
|
||||
newConfig, newConfigMD5 := "my-config-new", fmt.Sprintf("%x", md5.Sum([]byte("my-config-new")))
|
||||
err = store.UpdateAlertmanagerConfiguration(context.Background(), &models.SaveAlertmanagerConfigurationCmd{
|
||||
AlertmanagerConfiguration: newConfig,
|
||||
@ -150,10 +150,10 @@ func TestIntegrationAlertmanagerHash(t *testing.T) {
|
||||
OrgID: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
config, err = store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, newConfig, req.Result.AlertmanagerConfiguration)
|
||||
require.Equal(t, newConfigMD5, req.Result.ConfigurationHash)
|
||||
require.Equal(t, newConfig, config.AlertmanagerConfiguration)
|
||||
require.Equal(t, newConfigMD5, config.ConfigurationHash)
|
||||
})
|
||||
|
||||
t.Run("When passing the wrong hash the update should error", func(t *testing.T) {
|
||||
@ -161,9 +161,9 @@ func TestIntegrationAlertmanagerHash(t *testing.T) {
|
||||
req := &models.GetLatestAlertmanagerConfigurationQuery{
|
||||
OrgID: 1,
|
||||
}
|
||||
err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
amConfig, err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, configMD5, req.Result.ConfigurationHash)
|
||||
require.Equal(t, configMD5, amConfig.ConfigurationHash)
|
||||
err = store.UpdateAlertmanagerConfiguration(context.Background(), &models.SaveAlertmanagerConfigurationCmd{
|
||||
AlertmanagerConfiguration: config,
|
||||
FetchedConfigurationHash: "the-wrong-hash",
|
||||
@ -221,9 +221,9 @@ func TestIntegrationAlertmanagerConfigCleanup(t *testing.T) {
|
||||
req := &models.GetLatestAlertmanagerConfigurationQuery{
|
||||
OrgID: orgID,
|
||||
}
|
||||
err = store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
amConfig, err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "newest-record", req.Result.AlertmanagerConfiguration)
|
||||
require.Equal(t, "newest-record", amConfig.AlertmanagerConfiguration)
|
||||
})
|
||||
t.Run("when calling the cleanup only the oldest records surpassing the limit should be deleted", func(t *testing.T) {
|
||||
var orgID int64 = 2
|
||||
@ -261,9 +261,9 @@ func TestIntegrationAlertmanagerConfigCleanup(t *testing.T) {
|
||||
req := &models.GetLatestAlertmanagerConfigurationQuery{
|
||||
OrgID: orgID,
|
||||
}
|
||||
err = store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
amConfig, err := store.GetLatestAlertmanagerConfiguration(context.Background(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "newest-record", req.Result.AlertmanagerConfiguration)
|
||||
require.Equal(t, "newest-record", amConfig.AlertmanagerConfiguration)
|
||||
})
|
||||
t.Run("limit set to 0 should fail", func(t *testing.T) {
|
||||
_, err := store.deleteOldConfigurations(context.Background(), 1, 0)
|
||||
@ -311,19 +311,19 @@ func TestIntegrationMarkConfigurationAsApplied(t *testing.T) {
|
||||
appliedCfgsQuery := models.GetAppliedConfigurationsQuery{
|
||||
OrgID: orgID,
|
||||
}
|
||||
err = store.GetAppliedConfigurations(ctx, &appliedCfgsQuery)
|
||||
configs, err := store.GetAppliedConfigurations(ctx, &appliedCfgsQuery)
|
||||
require.NoError(tt, err)
|
||||
require.Len(tt, appliedCfgsQuery.Result, 0)
|
||||
require.Len(tt, configs, 0)
|
||||
|
||||
query := models.GetLatestAlertmanagerConfigurationQuery{
|
||||
OrgID: orgID,
|
||||
}
|
||||
err = store.GetLatestAlertmanagerConfiguration(ctx, &query)
|
||||
amConfig, err := store.GetLatestAlertmanagerConfiguration(ctx, &query)
|
||||
require.NoError(tt, err)
|
||||
|
||||
cmd := models.MarkConfigurationAsAppliedCmd{
|
||||
OrgID: orgID,
|
||||
ConfigurationHash: query.Result.ConfigurationHash,
|
||||
ConfigurationHash: amConfig.ConfigurationHash,
|
||||
}
|
||||
err = store.MarkConfigurationAsApplied(ctx, &cmd)
|
||||
require.NoError(tt, err)
|
||||
@ -332,9 +332,9 @@ func TestIntegrationMarkConfigurationAsApplied(t *testing.T) {
|
||||
appliedCfgsQuery = models.GetAppliedConfigurationsQuery{
|
||||
OrgID: orgID,
|
||||
}
|
||||
err = store.GetAppliedConfigurations(ctx, &appliedCfgsQuery)
|
||||
configs, err = store.GetAppliedConfigurations(ctx, &appliedCfgsQuery)
|
||||
require.NoError(tt, err)
|
||||
require.Len(tt, appliedCfgsQuery.Result, 1)
|
||||
require.Len(tt, configs, 1)
|
||||
})
|
||||
}
|
||||
|
||||
|
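All of the test edits above are the same mechanical move: bind the returned value, drop the req.Result lookups. A condensed, self-contained sketch of that shape against a hypothetical fake store (the string-typed signature is a simplification for illustration):

package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

type fakeStore struct{ cfg string }

func (f *fakeStore) GetLatestAlertmanagerConfiguration(_ context.Context, _ int64) (string, error) {
	return f.cfg, nil
}

func TestReturnedConfig(t *testing.T) {
	store := &fakeStore{cfg: "my-config"}
	config, err := store.GetLatestAlertmanagerConfiguration(context.Background(), 1)
	require.NoError(t, err)
	require.Equal(t, "my-config", config) // assert on the return value; there is no req.Result any more
}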
@ -22,7 +22,7 @@ const AlertDefinitionMaxTitleLength = 190

// AlertingStore is the database interface used by the Alertmanager service.
type AlertingStore interface {
	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) error
	GetLatestAlertmanagerConfiguration(ctx context.Context, query *models.GetLatestAlertmanagerConfigurationQuery) (*models.AlertConfiguration, error)
	GetAllLatestAlertmanagerConfiguration(ctx context.Context) ([]*models.AlertConfiguration, error)
	SaveAlertmanagerConfiguration(ctx context.Context, cmd *models.SaveAlertmanagerConfigurationCmd) error
	SaveAlertmanagerConfigurationWithCallback(ctx context.Context, cmd *models.SaveAlertmanagerConfigurationCmd, callback SaveCallback) error
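After a signature change like this, a compile-time conformance check is a cheap way to catch implementations that were not updated. A sketch with stand-in types; the real AlertingStore and DBstore live in the ngalert store and models packages:

package example

type AlertConfiguration struct{ OrgID int64 }

type AlertingStore interface {
	GetLatestAlertmanagerConfiguration(orgID int64) (*AlertConfiguration, error)
}

type DBstore struct{}

func (DBstore) GetLatestAlertmanagerConfiguration(orgID int64) (*AlertConfiguration, error) {
	return &AlertConfiguration{OrgID: orgID}, nil
}

// Fails to compile if DBstore ever drifts from the interface.
var _ AlertingStore = DBstore{}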
@ -32,8 +32,8 @@ func (c *GroupDelta) IsEmpty() bool {
}

type RuleReader interface {
	ListAlertRules(ctx context.Context, query *models.ListAlertRulesQuery) error
	GetAlertRulesGroupByRuleUID(ctx context.Context, query *models.GetAlertRulesGroupByRuleUIDQuery) error
	ListAlertRules(ctx context.Context, query *models.ListAlertRulesQuery) (models.RulesGroup, error)
	GetAlertRulesGroupByRuleUID(ctx context.Context, query *models.GetAlertRulesGroupByRuleUIDQuery) ([]*models.AlertRule, error)
}

// CalculateChanges calculates the difference between rules in the group in the database and the submitted rules. If a submitted rule has UID it tries to find it in the database (in other groups).
@ -45,10 +45,10 @@ func CalculateChanges(ctx context.Context, ruleReader RuleReader, groupKey model
		NamespaceUIDs: []string{groupKey.NamespaceUID},
		RuleGroup:     groupKey.RuleGroup,
	}
	if err := ruleReader.ListAlertRules(ctx, q); err != nil {
	existingGroupRules, err := ruleReader.ListAlertRules(ctx, q)
	if err != nil {
		return nil, fmt.Errorf("failed to query database for rules in the group %s: %w", groupKey, err)
	}
	existingGroupRules := q.Result
	if len(existingGroupRules) > 0 {
		affectedGroups[groupKey] = existingGroupRules
	}
@ -76,10 +76,11 @@ func CalculateChanges(ctx context.Context, ruleReader RuleReader, groupKey model
		} else if existing, ok = loadedRulesByUID[r.UID]; !ok { // check the "cache" and if there is no hit, query the database
			// Rule can be from other group or namespace
			q := &models.GetAlertRulesGroupByRuleUIDQuery{OrgID: groupKey.OrgID, UID: r.UID}
			if err := ruleReader.GetAlertRulesGroupByRuleUID(ctx, q); err != nil {
			ruleList, err := ruleReader.GetAlertRulesGroupByRuleUID(ctx, q)
			if err != nil {
				return nil, fmt.Errorf("failed to query database for a group of alert rules: %w", err)
			}
			for _, rule := range q.Result {
			for _, rule := range ruleList {
				if rule.UID == r.UID {
					existing = rule
				}
@ -88,7 +89,7 @@ func CalculateChanges(ctx context.Context, ruleReader RuleReader, groupKey model
			if existing == nil {
				return nil, fmt.Errorf("failed to update rule with UID %s because %w", r.UID, models.ErrAlertRuleNotFound)
			}
			affectedGroups[existing.GetGroupKey()] = q.Result
			affectedGroups[existing.GetGroupKey()] = ruleList
		}
	}
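CalculateChanges consults the loadedRulesByUID cache first and only then queries the store, receiving the whole group and picking the matching rule out of it. A stripped-down sketch of that lookup-then-fetch pattern; fetch stands in for ruleReader.GetAlertRulesGroupByRuleUID:

package example

import "context"

type rule struct{ UID string }

// findExisting checks an in-memory cache before hitting storage,
// mirroring the loadedRulesByUID logic in the hunk above.
func findExisting(ctx context.Context, cache map[string]*rule, uid string,
	fetch func(context.Context, string) ([]*rule, error)) (*rule, error) {
	if r, ok := cache[uid]; ok {
		return r, nil
	}
	group, err := fetch(ctx, uid) // the whole group comes back
	if err != nil {
		return nil, err
	}
	for _, r := range group {
		if r.UID == uid {
			return r, nil
		}
	}
	return nil, nil // not found; the caller above maps this to ErrAlertRuleNotFound
}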
@ -13,8 +13,8 @@ import (

// ListAlertInstances is a handler for retrieving alert instances within specific organisation
// based on various filters.
func (st DBstore) ListAlertInstances(ctx context.Context, cmd *models.ListAlertInstancesQuery) error {
	return st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
func (st DBstore) ListAlertInstances(ctx context.Context, cmd *models.ListAlertInstancesQuery) (result []*models.AlertInstance, err error) {
	err = st.SQLStore.WithDbSession(ctx, func(sess *db.Session) error {
		alertInstances := make([]*models.AlertInstance, 0)

		s := strings.Builder{}
@ -37,9 +37,10 @@ func (st DBstore) ListAlertInstances(ctx context.Context, cmd *models.ListAlertI
			return err
		}

		cmd.Result = alertInstances
		result = alertInstances
		return nil
	})
	return result, err
}

// SaveAlertInstances saves all the provided alert instances to the store.
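ListAlertInstances assembles its filter incrementally with a strings.Builder, keeping the SQL fragment and its arguments in step. A self-contained sketch of that technique; the column names echo the alert_instance table but the helper itself is illustrative:

package example

import "strings"

// buildWhere grows a WHERE clause and its argument list together, in the
// spirit of the strings.Builder usage in ListAlertInstances.
func buildWhere(ruleOrgID int64, ruleUID string) (string, []any) {
	s := strings.Builder{}
	args := []any{}
	s.WriteString("rule_org_id = ?")
	args = append(args, ruleOrgID)
	if ruleUID != "" {
		s.WriteString(" AND rule_uid = ?")
		args = append(args, ruleUID)
	}
	return s.String(), args
}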
@ -99,9 +99,9 @@ func TestIntegrationAlertInstanceBulkWrite(t *testing.T) {
			q := &models.ListAlertInstancesQuery{
				RuleOrgID: id,
			}
			err = dbstore.ListAlertInstances(ctx, q)
			alerts, err := dbstore.ListAlertInstances(ctx, q)
			require.NoError(t, err)
			require.Equal(t, counts[i], len(q.Result), "Org %v: Expected %v instances but got %v", id, counts[i], len(q.Result))
			require.Equal(t, counts[i], len(alerts), "Org %v: Expected %v instances but got %v", id, counts[i], len(alerts))
		}
		t.Log("Finished database read")

@ -113,9 +113,9 @@ func TestIntegrationAlertInstanceBulkWrite(t *testing.T) {
			q := &models.ListAlertInstancesQuery{
				RuleOrgID: id,
			}
			err = dbstore.ListAlertInstances(ctx, q)
			alerts, err := dbstore.ListAlertInstances(ctx, q)
			require.NoError(t, err)
			require.Zero(t, len(q.Result), "Org %v: Deleted instances but still had %v", id, len(q.Result))
			require.Zero(t, len(alerts), "Org %v: Deleted instances but still had %v", id, len(alerts))
		}
	}
}
@ -171,14 +171,14 @@ func TestIntegrationAlertInstanceOperations(t *testing.T) {
			RuleOrgID: instance.RuleOrgID,
			RuleUID:   instance.RuleUID,
		}
		err = dbstore.ListAlertInstances(ctx, listCmd)
		alerts, err := dbstore.ListAlertInstances(ctx, listCmd)
		require.NoError(t, err)

		require.Len(t, listCmd.Result, 1)
		require.Equal(t, instance.Labels, listCmd.Result[0].Labels)
		require.Equal(t, alertRule1.OrgID, listCmd.Result[0].RuleOrgID)
		require.Equal(t, alertRule1.UID, listCmd.Result[0].RuleUID)
		require.Equal(t, instance.CurrentReason, listCmd.Result[0].CurrentReason)
		require.Len(t, alerts, 1)
		require.Equal(t, instance.Labels, alerts[0].Labels)
		require.Equal(t, alertRule1.OrgID, alerts[0].RuleOrgID)
		require.Equal(t, alertRule1.UID, alerts[0].RuleUID)
		require.Equal(t, instance.CurrentReason, alerts[0].CurrentReason)
	})

	t.Run("can save and read new alert instance with no labels", func(t *testing.T) {
@ -201,13 +201,13 @@ func TestIntegrationAlertInstanceOperations(t *testing.T) {
			RuleUID:   instance.RuleUID,
		}

		err = dbstore.ListAlertInstances(ctx, listCmd)
		alerts, err := dbstore.ListAlertInstances(ctx, listCmd)
		require.NoError(t, err)

		require.Len(t, listCmd.Result, 1)
		require.Equal(t, alertRule2.OrgID, listCmd.Result[0].RuleOrgID)
		require.Equal(t, alertRule2.UID, listCmd.Result[0].RuleUID)
		require.Equal(t, instance.Labels, listCmd.Result[0].Labels)
		require.Len(t, alerts, 1)
		require.Equal(t, alertRule2.OrgID, alerts[0].RuleOrgID)
		require.Equal(t, alertRule2.UID, alerts[0].RuleUID)
		require.Equal(t, instance.Labels, alerts[0].Labels)
	})

	t.Run("can save two instances with same org_id, uid and different labels", func(t *testing.T) {
@ -245,10 +245,10 @@ func TestIntegrationAlertInstanceOperations(t *testing.T) {
			RuleUID:   instance1.RuleUID,
		}

		err = dbstore.ListAlertInstances(ctx, listQuery)
		alerts, err := dbstore.ListAlertInstances(ctx, listQuery)
		require.NoError(t, err)

		require.Len(t, listQuery.Result, 2)
		require.Len(t, alerts, 2)
	})

	t.Run("can list all added instances in org", func(t *testing.T) {
@ -256,10 +256,10 @@ func TestIntegrationAlertInstanceOperations(t *testing.T) {
			RuleOrgID: orgID,
		}

		err := dbstore.ListAlertInstances(ctx, listQuery)
		alerts, err := dbstore.ListAlertInstances(ctx, listQuery)
		require.NoError(t, err)

		require.Len(t, listQuery.Result, 4)
		require.Len(t, alerts, 4)
	})

	t.Run("should ignore Normal state with no reason if feature flag is enabled", func(t *testing.T) {
@ -291,10 +291,10 @@ func TestIntegrationAlertInstanceOperations(t *testing.T) {
			RuleOrgID: orgID,
		}

		err = dbstore.ListAlertInstances(ctx, listQuery)
		alerts, err := dbstore.ListAlertInstances(ctx, listQuery)
		require.NoError(t, err)

		containsHash(t, listQuery.Result, instance1.LabelsHash)
		containsHash(t, alerts, instance1.LabelsHash)

		f := dbstore.FeatureToggles
		dbstore.FeatureToggles = featuremgmt.WithFeatures(featuremgmt.FlagAlertingNoNormalState)
@ -302,12 +302,12 @@ func TestIntegrationAlertInstanceOperations(t *testing.T) {
			dbstore.FeatureToggles = f
		})

		err = dbstore.ListAlertInstances(ctx, listQuery)
		alerts, err = dbstore.ListAlertInstances(ctx, listQuery)
		require.NoError(t, err)

		containsHash(t, listQuery.Result, instance2.LabelsHash)
		containsHash(t, alerts, instance2.LabelsHash)

		for _, instance := range listQuery.Result {
		for _, instance := range alerts {
			if instance.CurrentState == models.InstanceStateNormal && instance.CurrentReason == "" {
				require.Fail(t, "List operation expected to return all states except Normal but the result contains Normal states")
			}
@ -347,14 +347,14 @@ func TestIntegrationAlertInstanceOperations(t *testing.T) {
			RuleUID:   alertRule4.UID,
		}

		err = dbstore.ListAlertInstances(ctx, listQuery)
		alerts, err := dbstore.ListAlertInstances(ctx, listQuery)
		require.NoError(t, err)

		require.Len(t, listQuery.Result, 1)
		require.Len(t, alerts, 1)

		require.Equal(t, instance2.RuleOrgID, listQuery.Result[0].RuleOrgID)
		require.Equal(t, instance2.RuleUID, listQuery.Result[0].RuleUID)
		require.Equal(t, instance2.Labels, listQuery.Result[0].Labels)
		require.Equal(t, instance2.CurrentState, listQuery.Result[0].CurrentState)
		require.Equal(t, instance2.RuleOrgID, alerts[0].RuleOrgID)
		require.Equal(t, instance2.RuleUID, alerts[0].RuleUID)
		require.Equal(t, instance2.Labels, alerts[0].Labels)
		require.Equal(t, instance2.CurrentState, alerts[0].CurrentState)
	})
}
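The containsHash assertions above scan the returned slice for a labels hash. A minimal version of such a helper, assuming an instance type with a LabelsHash field (the real one takes []*models.AlertInstance):

package example

import "testing"

type alertInstance struct{ LabelsHash string }

// containsHash fails the test when no instance carries the wanted hash,
// matching how the tests above inspect the returned slice directly.
func containsHash(t *testing.T, instances []*alertInstance, hash string) {
	t.Helper()
	for _, i := range instances {
		if i.LabelsHash == hash {
			return
		}
	}
	t.Fatalf("no instance with labels hash %q", hash)
}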
@ -120,37 +120,36 @@ func (f *RuleStore) DeleteAlertRulesByUID(_ context.Context, orgID int64, UIDs .
	return nil
}

func (f *RuleStore) GetAlertRuleByUID(_ context.Context, q *models.GetAlertRuleByUIDQuery) error {
func (f *RuleStore) GetAlertRuleByUID(_ context.Context, q *models.GetAlertRuleByUIDQuery) (*models.AlertRule, error) {
	f.mtx.Lock()
	defer f.mtx.Unlock()
	f.RecordedOps = append(f.RecordedOps, *q)
	if err := f.Hook(*q); err != nil {
		return err
		return nil, err
	}
	rules, ok := f.Rules[q.OrgID]
	if !ok {
		return nil
		return nil, nil
	}

	for _, rule := range rules {
		if rule.UID == q.UID {
			q.Result = rule
			break
			return rule, nil
		}
	}
	return nil
	return nil, nil
}

func (f *RuleStore) GetAlertRulesGroupByRuleUID(_ context.Context, q *models.GetAlertRulesGroupByRuleUIDQuery) error {
func (f *RuleStore) GetAlertRulesGroupByRuleUID(_ context.Context, q *models.GetAlertRulesGroupByRuleUIDQuery) ([]*models.AlertRule, error) {
	f.mtx.Lock()
	defer f.mtx.Unlock()
	f.RecordedOps = append(f.RecordedOps, *q)
	if err := f.Hook(*q); err != nil {
		return err
		return nil, err
	}
	rules, ok := f.Rules[q.OrgID]
	if !ok {
		return nil
		return nil, nil
	}

	var selected *models.AlertRule
@ -161,24 +160,25 @@ func (f *RuleStore) GetAlertRulesGroupByRuleUID(_ context.Context, q *models.Get
		}
	}
	if selected == nil {
		return nil
		return nil, nil
	}

	ruleList := []*models.AlertRule{}
	for _, rule := range rules {
		if rule.GetGroupKey() == selected.GetGroupKey() {
			q.Result = append(q.Result, rule)
			ruleList = append(ruleList, rule)
		}
	}
	return nil
	return ruleList, nil
}

func (f *RuleStore) ListAlertRules(_ context.Context, q *models.ListAlertRulesQuery) error {
func (f *RuleStore) ListAlertRules(_ context.Context, q *models.ListAlertRulesQuery) (models.RulesGroup, error) {
	f.mtx.Lock()
	defer f.mtx.Unlock()
	f.RecordedOps = append(f.RecordedOps, *q)

	if err := f.Hook(*q); err != nil {
		return err
		return nil, err
	}

	hasDashboard := func(r *models.AlertRule, dashboardUID string, panelID int64) bool {
@ -211,6 +211,7 @@ func (f *RuleStore) ListAlertRules(_ context.Context, q *models.ListAlertRulesQu
		return true
	}

	ruleList := models.RulesGroup{}
	for _, r := range f.Rules[q.OrgID] {
		if !hasDashboard(r, q.DashboardUID, q.PanelID) {
			continue
@ -221,10 +222,10 @@ func (f *RuleStore) ListAlertRules(_ context.Context, q *models.ListAlertRulesQu
		if q.RuleGroup != "" && r.RuleGroup != q.RuleGroup {
			continue
		}
		q.Result = append(q.Result, r)
		ruleList = append(ruleList, r)
	}

	return nil
	return ruleList, nil
}

func (f *RuleStore) GetUserVisibleNamespaces(_ context.Context, orgID int64, _ *user.SignedInUser) (map[string]*folder.Folder, error) {
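The fake RuleStore records every query and lets a test inject failures through Hook before a result is produced; with this change the injected error simply becomes the second return value. A minimal sketch of that recording-fake pattern with illustrative types:

package example

import "sync"

type query struct{ OrgID int64 }

// recordingFake mirrors the RecordedOps + Hook structure of the fake above.
type recordingFake struct {
	mtx         sync.Mutex
	RecordedOps []query
	Hook        func(query) error // tests set this to simulate store failures
	rules       map[int64][]string
}

func (f *recordingFake) ListAlertRules(q query) ([]string, error) {
	f.mtx.Lock()
	defer f.mtx.Unlock()
	f.RecordedOps = append(f.RecordedOps, q)
	if f.Hook != nil {
		if err := f.Hook(q); err != nil {
			return nil, err
		}
	}
	return f.rules[q.OrgID], nil
}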
@ -168,11 +168,11 @@ func CreateTestAlertRuleWithLabels(t testing.TB, ctx context.Context, dbstore *s
		NamespaceUIDs: []string{folderUID},
		RuleGroup:     ruleGroup,
	}
	err = dbstore.ListAlertRules(ctx, &q)
	ruleList, err := dbstore.ListAlertRules(ctx, &q)
	require.NoError(t, err)
	require.NotEmpty(t, q.Result)
	require.NotEmpty(t, ruleList)

	rule := q.Result[0]
	rule := ruleList[0]
	t.Logf("alert definition: %v with title: %q interval: %d folder: %s created", rule.GetKey(), rule.Title, rule.IntervalSeconds, folderUID)
	return rule
}