Mirror of https://github.com/grafana/grafana.git, synced 2025-02-25 18:55:37 -06:00
Chore: capitalise messages for alerting (#74335)
Parent: bd12ce0cbc · Commit: 58f6648505
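
The diff below applies a single logging convention across the alerting packages: the message passed to Grafana's structured logger now starts with a capital letter, while the lowercase key/value context pairs ("error", "org", "uid", ...) are left untouched. A minimal, self-contained sketch of the before/after pattern follows; it uses the standard library's log/slog as a stand-in for Grafana's logger, and the notify helper and the example uid are illustrative assumptions, not code from this commit.

package main

import (
	"errors"
	"log/slog"
)

// notify illustrates the message style this commit standardizes on:
// the log message begins with a capital letter, while the structured
// key/value context ("uid", "error") keeps its lowercase keys.
// Hypothetical helper for illustration only.
func notify(logger *slog.Logger, uid string) {
	err := errors.New("connection refused")

	// Before: the message started lowercase.
	//   logger.Error("failed to send notification", "uid", uid, "error", err)

	// After: capitalised message, identical keys and values.
	logger.Error("Failed to send notification", "uid", uid, "error", err)
}

func main() {
	notify(slog.Default(), "abc123")
}

Only the first character of each message changes; keys, values, log levels, and control flow are untouched, so the change is purely cosmetic for consumers that match on structured fields rather than message text.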
@@ -50,7 +50,7 @@ func (e *AlertEngine) mapRulesToUsageStats(ctx context.Context, rules []*models.
 for _, a := range rules {
 dss, err := e.parseAlertRuleModel(a.Settings)
 if err != nil {
-e.log.Debug("could not parse settings for alert rule", "id", a.ID)
+e.log.Debug("Could not parse settings for alert rule", "id", a.ID)
 continue
 }

@@ -142,11 +142,11 @@ func (n *notificationService) sendAndMarkAsComplete(evalContext *EvalContext, no
 metrics.MAlertingNotificationSent.WithLabelValues(notifier.GetType()).Inc()

 if err := evalContext.evaluateNotificationTemplateFields(); err != nil {
-n.log.Error("failed trying to evaluate notification template fields", "uid", notifier.GetNotifierUID(), "error", err)
+n.log.Error("Failed trying to evaluate notification template fields", "uid", notifier.GetNotifierUID(), "error", err)
 }

 if err := notifier.Notify(evalContext); err != nil {
-n.log.Error("failed to send notification", "uid", notifier.GetNotifierUID(), "error", err)
+n.log.Error("Failed to send notification", "uid", notifier.GetNotifierUID(), "error", err)
 metrics.MAlertingNotificationFailed.WithLabelValues(notifier.GetType()).Inc()
 return err
 }
@@ -192,7 +192,7 @@ func (n *notificationService) sendNotifications(evalContext *EvalContext, notifi
 for _, notifierState := range notifierStates {
 err := n.sendNotification(evalContext, notifierState)
 if err != nil {
-n.log.Error("failed to send notification", "uid", notifierState.notifier.GetNotifierUID(), "error", err)
+n.log.Error("Failed to send notification", "uid", notifierState.notifier.GetNotifierUID(), "error", err)
 if evalContext.IsTestRun {
 return err
 }
@@ -109,7 +109,7 @@ func (dd *DingDingNotifier) genBody(evalContext *alerting.EvalContext, messageUR
 // Refer: https://open-doc.dingtalk.com/docs/doc.htm?treeId=385&articleId=104972&docType=1#s9
 messageURL = "dingtalk://dingtalkclient/page/link?" + q.Encode()

-dd.log.Info("messageUrl:" + messageURL)
+dd.log.Info("MessageUrl:" + messageURL)

 message := evalContext.Rule.Message
 picURL := evalContext.ImagePublicURL
@@ -172,7 +172,7 @@ func (dn *DiscordNotifier) Notify(evalContext *alerting.EvalContext) error {
 } else {
 err := dn.embedImage(cmd, evalContext.ImageOnDiskPath, json)
 if err != nil {
-dn.log.Error("failed to embed image", "error", err)
+dn.log.Error("Failed to embed image", "error", err)
 return err
 }
 }
@@ -129,7 +129,7 @@ func (gcn *GoogleChatNotifier) Notify(evalContext *alerting.EvalContext) error {

 ruleURL, err := evalContext.GetRuleURL()
 if err != nil {
-gcn.log.Error("evalContext returned an invalid rule URL")
+gcn.log.Error("EvalContext returned an invalid rule URL")
 }

 widgets := []widget{}
@@ -103,11 +103,11 @@ func (handler *defaultResultHandler) handle(evalContext *EvalContext) error {
 if err := handler.notifier.SendIfNeeded(evalContext); err != nil {
 switch {
 case errors.Is(err, context.Canceled):
-handler.log.Debug("handler.notifier.SendIfNeeded returned context.Canceled")
+handler.log.Debug("Handler.notifier.SendIfNeeded returned context.Canceled")
 case errors.Is(err, context.DeadlineExceeded):
-handler.log.Debug("handler.notifier.SendIfNeeded returned context.DeadlineExceeded")
+handler.log.Debug("Handler.notifier.SendIfNeeded returned context.DeadlineExceeded")
 default:
-handler.log.Error("handler.notifier.SendIfNeeded failed", "err", err)
+handler.log.Error("Handler.notifier.SendIfNeeded failed", "err", err)
 }
 }

@@ -493,7 +493,7 @@ func (ss *sqlStore) SetAlertNotificationStateToCompleteCommand(ctx context.Conte
 }

 if current.Version != version {
-ss.log.Error("notification state out of sync. the notification is marked as complete but has been modified between set as pending and completion.", "notifierId", current.NotifierID)
+ss.log.Error("Notification state out of sync. the notification is marked as complete but has been modified between set as pending and completion.", "notifierId", current.NotifierID)
 }

 return nil
@@ -54,7 +54,7 @@ func (srv AlertmanagerSrv) RouteGetAMStatus(c *contextmodel.ReqContext) response
 func (srv AlertmanagerSrv) RouteCreateSilence(c *contextmodel.ReqContext, postableSilence apimodels.PostableSilence) response.Response {
 err := postableSilence.Validate(strfmt.Default)
 if err != nil {
-srv.log.Error("silence failed validation", "error", err)
+srv.log.Error("Silence failed validation", "error", err)
 return ErrResp(http.StatusBadRequest, err, "silence failed validation")
 }

@@ -99,7 +99,7 @@ func (srv AlertmanagerSrv) RouteDeleteAlertingConfig(c *contextmodel.ReqContext)
 }

 if err := am.SaveAndApplyDefaultConfig(c.Req.Context()); err != nil {
-srv.log.Error("unable to save and apply default alertmanager configuration", "error", err)
+srv.log.Error("Unable to save and apply default alertmanager configuration", "error", err)
 return ErrResp(http.StatusInternalServerError, err, "failed to save and apply default Alertmanager configuration")
 }

@@ -471,6 +471,6 @@ func (srv AlertmanagerSrv) AlertmanagerFor(orgID int64) (Alertmanager, *response
 return am, response.Error(http.StatusConflict, err.Error(), err)
 }

-srv.log.Error("unable to obtain the org's Alertmanager", "error", err)
+srv.log.Error("Unable to obtain the org's Alertmanager", "error", err)
 return nil, response.Error(http.StatusInternalServerError, "unable to obtain org's Alertmanager", err)
 }
@@ -106,7 +106,7 @@ func (srv ConfigSrv) RouteDeleteNGalertConfig(c *contextmodel.ReqContext) respon

 err := srv.store.DeleteAdminConfiguration(c.OrgID)
 if err != nil {
-srv.log.Error("unable to delete configuration", "error", err)
+srv.log.Error("Unable to delete configuration", "error", err)
 return ErrResp(http.StatusInternalServerError, err, "")
 }

@@ -191,7 +191,7 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
 }

 if len(namespaceMap) == 0 {
-srv.log.Debug("user does not have access to any namespaces")
+srv.log.Debug("User does not have access to any namespaces")
 return response.JSON(http.StatusOK, ruleResponse)
 }

@@ -236,7 +236,7 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
 for groupKey, rules := range groupedRules {
 folder := namespaceMap[groupKey.NamespaceUID]
 if folder == nil {
-srv.log.Warn("query returned rules that belong to folder the user does not have access to. All rules that belong to that namespace will not be added to the response", "folder_uid", groupKey.NamespaceUID)
+srv.log.Warn("Query returned rules that belong to folder the user does not have access to. All rules that belong to that namespace will not be added to the response", "folder_uid", groupKey.NamespaceUID)
 continue
 }
 if !authorizeAccessToRuleGroup(rules, hasAccess) {
@@ -443,7 +443,7 @@ func ruleToQuery(logger log.Logger, rule *ngmodels.AlertRule) string {
 }

 // For any other type of error, it is unexpected abort and return the whole JSON.
-logger.Debug("failed to parse a query", "error", err)
+logger.Debug("Failed to parse a query", "error", err)
 queryErr = err
 break
 }
@@ -90,7 +90,7 @@ func (srv RulerSrv) RouteDeleteAlertRules(c *contextmodel.ReqContext, namespaceT
 }

 if len(ruleList) == 0 {
-logger.Debug("no alert rules to delete from namespace/group")
+logger.Debug("No alert rules to delete from namespace/group")
 return nil
 }

@@ -235,7 +235,7 @@ func (srv RulerSrv) RouteGetRulesConfig(c *contextmodel.ReqContext) response.Res
 result := apimodels.NamespaceConfigResponse{}

 if len(namespaceMap) == 0 {
-srv.log.Debug("user has no access to any namespaces")
+srv.log.Debug("User has no access to any namespaces")
 return response.JSON(http.StatusOK, result)
 }

@@ -285,7 +285,7 @@ func (srv RulerSrv) RouteGetRulesConfig(c *contextmodel.ReqContext) response.Res
 for groupKey, rules := range configs {
 folder, ok := namespaceMap[groupKey.NamespaceUID]
 if !ok {
-srv.log.Error("namespace not visible to the user", "user", c.SignedInUser.UserID, "namespace", groupKey.NamespaceUID)
+srv.log.Error("Namespace not visible to the user", "user", c.SignedInUser.UserID, "namespace", groupKey.NamespaceUID)
 continue
 }
 if !authorizeAccessToRuleGroup(rules, hasAccess) {
@@ -331,7 +331,7 @@ func (srv RulerSrv) updateAlertRulesInGroup(c *contextmodel.ReqContext, groupKey

 if groupChanges.IsEmpty() {
 finalChanges = groupChanges
-logger.Info("no changes detected in the request. Do nothing")
+logger.Info("No changes detected in the request. Do nothing")
 return nil
 }

@@ -351,7 +351,7 @@ func (srv RulerSrv) updateAlertRulesInGroup(c *contextmodel.ReqContext, groupKey
 }

 finalChanges = store.UpdateCalculatedRuleFields(groupChanges)
-logger.Debug("updating database with the authorized changes", "add", len(finalChanges.New), "update", len(finalChanges.New), "delete", len(finalChanges.Delete))
+logger.Debug("Updating database with the authorized changes", "add", len(finalChanges.New), "update", len(finalChanges.New), "delete", len(finalChanges.Delete))

 // Delete first as this could prevent future unique constraint violations.
 if len(finalChanges.Delete) > 0 {
@@ -368,7 +368,7 @@ func (srv RulerSrv) updateAlertRulesInGroup(c *contextmodel.ReqContext, groupKey
 if len(finalChanges.Update) > 0 {
 updates := make([]ngmodels.UpdateRule, 0, len(finalChanges.Update))
 for _, update := range finalChanges.Update {
-logger.Debug("updating rule", "rule_uid", update.New.UID, "diff", update.Diff.String())
+logger.Debug("Updating rule", "rule_uid", update.New.UID, "diff", update.Diff.String())
 updates = append(updates, ngmodels.UpdateRule{
 Existing: update.Existing,
 New: *update.New,
@@ -25,7 +25,7 @@ func NewHooks(logger log.Logger) *Hooks {
 // Add creates a new request hook for a path, causing requests to the path to
 // be handled by the hook function, and not the original handler.
 func (h *Hooks) Set(path string, hook RequestHandlerFunc) {
-h.logger.Info("setting hook override for the specified route", "path", path)
+h.logger.Info("Setting hook override for the specified route", "path", path)
 h.hooks[path] = hook
 }

@@ -35,7 +35,7 @@ func (h *Hooks) Set(path string, hook RequestHandlerFunc) {
 func (h *Hooks) Wrap(next RequestHandlerFunc) RequestHandlerFunc {
 return func(req *contextmodel.ReqContext) response.Response {
 if hook, ok := h.hooks[req.Context.Req.URL.Path]; ok {
-h.logger.Debug("hook defined - invoking new handler", "path", req.Context.Req.URL.Path)
+h.logger.Debug("Hook defined - invoking new handler", "path", req.Context.Req.URL.Path)
 return hook(req)
 }
 return next(req)
@@ -55,7 +55,7 @@ type conditionEvaluator struct {
 func (r *conditionEvaluator) EvaluateRaw(ctx context.Context, now time.Time) (resp *backend.QueryDataResponse, err error) {
 defer func() {
 if e := recover(); e != nil {
-logger.FromContext(ctx).Error("alert rule panic", "error", e, "stack", string(debug.Stack()))
+logger.FromContext(ctx).Error("Alert rule panic", "error", e, "stack", string(debug.Stack()))
 panicErr := fmt.Errorf("alert rule panic; please check the logs for the full stack")
 if err != nil {
 err = fmt.Errorf("queries and expressions execution failed: %w; %v", err, panicErr.Error())
@@ -66,10 +66,10 @@ func (moa *MultiOrgAlertmanager) ActivateHistoricalConfiguration(ctx context.Con
 }

 if err := am.SaveAndApplyConfig(ctx, cfg); err != nil {
-moa.logger.Error("unable to save and apply historical alertmanager configuration", "error", err, "org", orgId, "id", id)
+moa.logger.Error("Unable to save and apply historical alertmanager configuration", "error", err, "org", orgId, "id", id)
 return AlertmanagerConfigRejectedError{err}
 }
-moa.logger.Info("applied historical alertmanager configuration", "org", orgId, "id", id)
+moa.logger.Info("Applied historical alertmanager configuration", "org", orgId, "id", id)

 return nil
 }
@@ -184,7 +184,7 @@ func (moa *MultiOrgAlertmanager) ApplyAlertmanagerConfiguration(ctx context.Cont
 }

 if err := am.SaveAndApplyConfig(ctx, &config); err != nil {
-moa.logger.Error("unable to save and apply alertmanager configuration", "error", err)
+moa.logger.Error("Unable to save and apply alertmanager configuration", "error", err)
 return AlertmanagerConfigRejectedError{err}
 }

@@ -129,7 +129,7 @@ func (moa *MultiOrgAlertmanager) setupClustering(cfg *setting.Cfg) error {

 err = peer.Join(cluster.DefaultReconnectInterval, cluster.DefaultReconnectTimeout)
 if err != nil {
-moa.logger.Error("msg", "Unable to join gossip mesh while initializing cluster for high availability mode", "error", err)
+moa.logger.Error("Msg", "Unable to join gossip mesh while initializing cluster for high availability mode", "error", err)
 }
 // Attempt to verify the number of peers for 30s every 2s. The risk here is what we send a notification "too soon".
 // Which should _never_ happen given we share the notification log via the database so the risk of double notification is very low.
@@ -152,14 +152,14 @@ func (moa *MultiOrgAlertmanager) Run(ctx context.Context) error {
 return nil
 case <-time.After(moa.settings.UnifiedAlerting.AlertmanagerConfigPollInterval):
 if err := moa.LoadAndSyncAlertmanagersForOrgs(ctx); err != nil {
-moa.logger.Error("error while synchronizing Alertmanager orgs", "error", err)
+moa.logger.Error("Error while synchronizing Alertmanager orgs", "error", err)
 }
 }
 }
 }

 func (moa *MultiOrgAlertmanager) LoadAndSyncAlertmanagersForOrgs(ctx context.Context) error {
-moa.logger.Debug("synchronizing Alertmanagers for orgs")
+moa.logger.Debug("Synchronizing Alertmanagers for orgs")
 // First, load all the organizations from the database.
 orgIDs, err := moa.orgStore.GetOrgs(ctx)
 if err != nil {
@@ -170,7 +170,7 @@ func (moa *MultiOrgAlertmanager) LoadAndSyncAlertmanagersForOrgs(ctx context.Con
 moa.metrics.DiscoveredConfigurations.Set(float64(len(orgIDs)))
 moa.SyncAlertmanagersForOrgs(ctx, orgIDs)

-moa.logger.Debug("done synchronizing Alertmanagers for orgs")
+moa.logger.Debug("Done synchronizing Alertmanagers for orgs")

 return nil
 }
@@ -195,13 +195,13 @@ func (moa *MultiOrgAlertmanager) SyncAlertmanagersForOrgs(ctx context.Context, o
 orgsFound := make(map[int64]struct{}, len(orgIDs))
 dbConfigs, err := moa.getLatestConfigs(ctx)
 if err != nil {
-moa.logger.Error("failed to load Alertmanager configurations", "error", err)
+moa.logger.Error("Failed to load Alertmanager configurations", "error", err)
 return
 }
 moa.alertmanagersMtx.Lock()
 for _, orgID := range orgIDs {
 if _, isDisabledOrg := moa.settings.UnifiedAlerting.DisabledOrgs[orgID]; isDisabledOrg {
-moa.logger.Debug("skipping syncing Alertmanager for disabled org", "org", orgID)
+moa.logger.Debug("Skipping syncing Alertmanager for disabled org", "org", orgID)
 continue
 }
 orgsFound[orgID] = struct{}{}
@@ -215,7 +215,7 @@ func (moa *MultiOrgAlertmanager) SyncAlertmanagersForOrgs(ctx context.Context, o
 m := metrics.NewAlertmanagerMetrics(moa.metrics.GetOrCreateOrgRegistry(orgID))
 am, err := newAlertmanager(ctx, orgID, moa.settings, moa.configStore, moa.kvStore, moa.peer, moa.decryptFn, moa.ns, m)
 if err != nil {
-moa.logger.Error("unable to create Alertmanager for org", "org", orgID, "error", err)
+moa.logger.Error("Unable to create Alertmanager for org", "org", orgID, "error", err)
 }
 moa.alertmanagers[orgID] = am
 alertmanager = am
@@ -229,7 +229,7 @@ func (moa *MultiOrgAlertmanager) SyncAlertmanagersForOrgs(ctx context.Context, o
 }
 err := alertmanager.SaveAndApplyDefaultConfig(ctx)
 if err != nil {
-moa.logger.Error("failed to apply the default Alertmanager configuration", "org", orgID)
+moa.logger.Error("Failed to apply the default Alertmanager configuration", "org", orgID)
 continue
 }
 moa.alertmanagers[orgID] = alertmanager
@@ -238,7 +238,7 @@ func (moa *MultiOrgAlertmanager) SyncAlertmanagersForOrgs(ctx context.Context, o

 err := alertmanager.ApplyConfig(ctx, dbConfig)
 if err != nil {
-moa.logger.Error("failed to apply Alertmanager config for org", "org", orgID, "id", dbConfig.ID, "error", err)
+moa.logger.Error("Failed to apply Alertmanager config for org", "org", orgID, "id", dbConfig.ID, "error", err)
 continue
 }
 moa.alertmanagers[orgID] = alertmanager
@@ -257,9 +257,9 @@ func (moa *MultiOrgAlertmanager) SyncAlertmanagersForOrgs(ctx context.Context, o

 // Now, we can stop the Alertmanagers without having to hold a lock.
 for orgID, am := range amsToStop {
-moa.logger.Info("stopping Alertmanager", "org", orgID)
+moa.logger.Info("Stopping Alertmanager", "org", orgID)
 am.StopAndWait()
-moa.logger.Info("stopped Alertmanager", "org", orgID)
+moa.logger.Info("Stopped Alertmanager", "org", orgID)
 // Cleanup all the remaining resources from this alertmanager.
 am.fileStore.CleanUp()
 }
@@ -278,22 +278,22 @@ func (moa *MultiOrgAlertmanager) cleanupOrphanLocalOrgState(ctx context.Context,
 dataDir := filepath.Join(moa.settings.DataPath, workingDir)
 files, err := os.ReadDir(dataDir)
 if err != nil {
-moa.logger.Error("failed to list local working directory", "dir", dataDir, "error", err)
+moa.logger.Error("Failed to list local working directory", "dir", dataDir, "error", err)
 return
 }
 for _, file := range files {
 if !file.IsDir() {
-moa.logger.Warn("ignoring unexpected file while scanning local working directory", "filename", filepath.Join(dataDir, file.Name()))
+moa.logger.Warn("Ignoring unexpected file while scanning local working directory", "filename", filepath.Join(dataDir, file.Name()))
 continue
 }
 orgID, err := strconv.ParseInt(file.Name(), 10, 64)
 if err != nil {
-moa.logger.Error("unable to parse orgID from directory name", "name", file.Name(), "error", err)
+moa.logger.Error("Unable to parse orgID from directory name", "name", file.Name(), "error", err)
 continue
 }
 _, exists := activeOrganizations[orgID]
 if !exists {
-moa.logger.Info("found orphan organization directory", "orgID", orgID)
+moa.logger.Info("Found orphan organization directory", "orgID", orgID)
 workingDirPath := filepath.Join(dataDir, strconv.FormatInt(orgID, 10))
 fileStore := NewFileStore(orgID, moa.kvStore, workingDirPath)
 // Cleanup all the remaining resources from this alertmanager.
@@ -307,7 +307,7 @@ func (moa *MultiOrgAlertmanager) cleanupOrphanLocalOrgState(ctx context.Context,
 for _, fileName := range storedFiles {
 keys, err := moa.kvStore.Keys(ctx, kvstore.AllOrganizations, KVNamespace, fileName)
 if err != nil {
-moa.logger.Error("failed to fetch items from kvstore", "error", err,
+moa.logger.Error("Failed to fetch items from kvstore", "error", err,
 "namespace", KVNamespace, "key", fileName)
 }
 for _, key := range keys {
@@ -316,7 +316,7 @@ func (moa *MultiOrgAlertmanager) cleanupOrphanLocalOrgState(ctx context.Context,
 }
 err = moa.kvStore.Del(ctx, key.OrgId, key.Namespace, key.Key)
 if err != nil {
-moa.logger.Error("failed to delete item from kvstore", "error", err,
+moa.logger.Error("Failed to delete item from kvstore", "error", err,
 "orgID", key.OrgId, "namespace", KVNamespace, "key", key.Key)
 }
 }
@@ -335,7 +335,7 @@ func (moa *MultiOrgAlertmanager) StopAndWait() {
 if ok {
 moa.settleCancel()
 if err := p.Leave(10 * time.Second); err != nil {
-moa.logger.Warn("unable to leave the gossip mesh", "error", err)
+moa.logger.Warn("Unable to leave the gossip mesh", "error", err)
 }
 }
 r, ok := moa.peer.(*redisPeer)
@@ -40,7 +40,7 @@ func (c *RedisChannel) handleMessages() {
 // The state will eventually be propagated to other members by the full sync.
 if pub.Err() != nil {
 c.p.messagesPublishFailures.WithLabelValues(c.msgType, reasonRedisIssue).Inc()
-c.p.logger.Error("error publishing a message to redis", "err", pub.Err(), "channel", c.channel)
+c.p.logger.Error("Error publishing a message to redis", "err", pub.Err(), "channel", c.channel)
 continue
 }
 c.p.messagesSent.WithLabelValues(c.msgType).Inc()
@@ -52,7 +52,7 @@ func (c *RedisChannel) handleMessages() {
 func (c *RedisChannel) Broadcast(b []byte) {
 b, err := proto.Marshal(&clusterpb.Part{Key: c.key, Data: b})
 if err != nil {
-c.p.logger.Error("error marshalling broadcast into proto", "err", err, "channel", c.channel)
+c.p.logger.Error("Error marshalling broadcast into proto", "err", err, "channel", c.channel)
 return
 }
 select {
@@ -60,6 +60,6 @@ func (c *RedisChannel) Broadcast(b []byte) {
 default:
 // This is not the end of the world, we will catch up when we do a full state sync.
 c.p.messagesPublishFailures.WithLabelValues(c.msgType, reasonBufferOverflow).Inc()
-c.p.logger.Warn("buffer full, droping message", "channel", c.channel)
+c.p.logger.Warn("Buffer full, droping message", "channel", c.channel)
 }
 }
@@ -222,7 +222,7 @@ func (p *redisPeer) heartbeatLoop() {
 reqDur := time.Since(startTime)
 if cmd.Err() != nil {
 p.nodePingFailures.Inc()
-p.logger.Error("error setting the heartbeat key", "err", cmd.Err(), "peer", p.withPrefix(p.name))
+p.logger.Error("Error setting the heartbeat key", "err", cmd.Err(), "peer", p.withPrefix(p.name))
 continue
 }
 p.nodePingDuration.WithLabelValues(redisServerLabel).Observe(reqDur.Seconds())
@@ -250,7 +250,7 @@ func (p *redisPeer) membersSync() {
 startTime := time.Now()
 members, err := p.membersScan()
 if err != nil {
-p.logger.Error("error getting keys from redis", "err", err, "pattern", p.withPrefix(peerPattern))
+p.logger.Error("Error getting keys from redis", "err", err, "pattern", p.withPrefix(peerPattern))
 // To prevent a spike of duplicate messages, we return for the duration of
 // membersValidFor the last known members and only empty the list if we do
 // not eventually recover.
@@ -260,7 +260,7 @@ func (p *redisPeer) membersSync() {
 p.membersMtx.Unlock()
 return
 }
-p.logger.Warn("fetching members from redis failed, falling back to last known members", "last_known", p.members)
+p.logger.Warn("Fetching members from redis failed, falling back to last known members", "last_known", p.members)
 return
 }
 // This might happen on startup, when no value is in the store yet.
@@ -272,7 +272,7 @@ func (p *redisPeer) membersSync() {
 }
 values := p.redis.MGet(context.Background(), members...)
 if values.Err() != nil {
-p.logger.Error("error getting values from redis", "err", values.Err(), "keys", members)
+p.logger.Error("Error getting values from redis", "err", values.Err(), "keys", members)
 }
 // After getting the list of possible members from redis, we filter
 // those out that have failed to send a heartbeat during the heartbeatTimeout.
@@ -284,7 +284,7 @@ func (p *redisPeer) membersSync() {
 peers = slices.Compact(peers)

 dur := time.Since(startTime)
-p.logger.Debug("membership sync done", "duration_ms", dur.Milliseconds())
+p.logger.Debug("Membership sync done", "duration_ms", dur.Milliseconds())
 p.membersMtx.Lock()
 p.members = peers
 p.membersMtx.Unlock()
@@ -326,7 +326,7 @@ func (p *redisPeer) filterUnhealthyMembers(members []string, values []any) []str
 }
 ts, err := strconv.ParseInt(val.(string), 10, 64)
 if err != nil {
-p.logger.Error("error parsing timestamp value", "err", err, "peer", peer, "val", val)
+p.logger.Error("Error parsing timestamp value", "err", err, "peer", peer, "val", val)
 continue
 }
 tm := time.Unix(ts, 0)
@@ -341,11 +341,11 @@ func (p *redisPeer) filterUnhealthyMembers(members []string, values []any) []str
 func (p *redisPeer) Position() int {
 for i, peer := range p.Members() {
 if peer == p.withPrefix(p.name) {
-p.logger.Debug("cluster position found", "name", p.name, "position", i)
+p.logger.Debug("Cluster position found", "name", p.name, "position", i)
 return i
 }
 }
-p.logger.Warn("failed to look up position, falling back to position 0")
+p.logger.Warn("Failed to look up position, falling back to position 0")
 return 0
 }

@@ -354,7 +354,7 @@ func (p *redisPeer) Position() int {
 func (p *redisPeer) ClusterSize() int {
 members, err := p.membersScan()
 if err != nil {
-p.logger.Error("error getting keys from redis", "err", err, "pattern", p.withPrefix(peerPattern))
+p.logger.Error("Error getting keys from redis", "err", err, "pattern", p.withPrefix(peerPattern))
 return 0
 }
 return len(members)
@@ -400,7 +400,7 @@ func (p *redisPeer) Settle(ctx context.Context, interval time.Duration) {
 select {
 case <-ctx.Done():
 elapsed := time.Since(start)
-p.logger.Info("gossip not settled but continuing anyway", "polls", totalPolls, "elapsed", elapsed)
+p.logger.Info("Gossip not settled but continuing anyway", "polls", totalPolls, "elapsed", elapsed)
 close(p.readyc)
 return
 case <-time.After(interval):
@@ -408,15 +408,15 @@ func (p *redisPeer) Settle(ctx context.Context, interval time.Duration) {
 elapsed := time.Since(start)
 n := len(p.Members())
 if nOkay >= NumOkayRequired {
-p.logger.Info("gossip settled; proceeding", "elapsed", elapsed)
+p.logger.Info("Gossip settled; proceeding", "elapsed", elapsed)
 break
 }
 if n == nPeers {
 nOkay++
-p.logger.Debug("gossip looks settled", "elapsed", elapsed)
+p.logger.Debug("Gossip looks settled", "elapsed", elapsed)
 } else {
 nOkay = 0
-p.logger.Info("gossip not settled", "polls", totalPolls, "before", nPeers, "now", n, "elapsed", elapsed)
+p.logger.Info("Gossip not settled", "polls", totalPolls, "before", nPeers, "now", n, "elapsed", elapsed)
 }
 nPeers = n
 totalPolls++
@@ -455,7 +455,7 @@ func (p *redisPeer) mergePartialState(buf []byte) {

 var part clusterpb.Part
 if err := proto.Unmarshal(buf, &part); err != nil {
-p.logger.Warn("error decoding the received broadcast message", "err", err)
+p.logger.Warn("Error decoding the received broadcast message", "err", err)
 return
 }

@@ -467,10 +467,10 @@ func (p *redisPeer) mergePartialState(buf []byte) {
 return
 }
 if err := s.Merge(part.Data); err != nil {
-p.logger.Warn("error merging the received broadcast message", "err", err, "key", part.Key)
+p.logger.Warn("Error merging the received broadcast message", "err", err, "key", part.Key)
 return
 }
-p.logger.Debug("partial state was successfully merged", "key", part.Key)
+p.logger.Debug("Partial state was successfully merged", "key", part.Key)
 }

 func (p *redisPeer) fullStateReqReceiveLoop() {
@@ -512,7 +512,7 @@ func (p *redisPeer) mergeFullState(buf []byte) {

 var fs clusterpb.FullState
 if err := proto.Unmarshal(buf, &fs); err != nil {
-p.logger.Warn("error unmarshaling the received remote state", "err", err)
+p.logger.Warn("Error unmarshaling the received remote state", "err", err)
 return
 }

@@ -521,22 +521,22 @@ func (p *redisPeer) mergeFullState(buf []byte) {
 for _, part := range fs.Parts {
 s, ok := p.states[part.Key]
 if !ok {
-p.logger.Warn("received", "unknown state key", "len", len(buf), "key", part.Key)
+p.logger.Warn("Received", "unknown state key", "len", len(buf), "key", part.Key)
 continue
 }
 if err := s.Merge(part.Data); err != nil {
-p.logger.Warn("error merging the received remote state", "err", err, "key", part.Key)
+p.logger.Warn("Error merging the received remote state", "err", err, "key", part.Key)
 return
 }
 }
-p.logger.Debug("full state was successfully merged")
+p.logger.Debug("Full state was successfully merged")
 }

 func (p *redisPeer) fullStateSyncPublish() {
 pub := p.redis.Publish(context.Background(), p.withPrefix(fullStateChannel), p.LocalState())
 if pub.Err() != nil {
 p.messagesPublishFailures.WithLabelValues(fullState, reasonRedisIssue).Inc()
-p.logger.Error("error publishing a message to redis", "err", pub.Err(), "channel", p.withPrefix(fullStateChannel))
+p.logger.Error("Error publishing a message to redis", "err", pub.Err(), "channel", p.withPrefix(fullStateChannel))
 }
 }

@@ -557,7 +557,7 @@ func (p *redisPeer) requestFullState() {
 pub := p.redis.Publish(context.Background(), p.withPrefix(fullStateChannelReq), p.name)
 if pub.Err() != nil {
 p.messagesPublishFailures.WithLabelValues(fullState, reasonRedisIssue).Inc()
-p.logger.Error("error publishing a message to redis", "err", pub.Err(), "channel", p.withPrefix(fullStateChannelReq))
+p.logger.Error("Error publishing a message to redis", "err", pub.Err(), "channel", p.withPrefix(fullStateChannelReq))
 }
 }

@@ -571,13 +571,13 @@ func (p *redisPeer) LocalState() []byte {
 for key, s := range p.states {
 b, err := s.MarshalBinary()
 if err != nil {
-p.logger.Warn("error encoding the local state", "err", err, "key", key)
+p.logger.Warn("Error encoding the local state", "err", err, "key", key)
 }
 all.Parts = append(all.Parts, clusterpb.Part{Key: key, Data: b})
 }
 b, err := proto.Marshal(all)
 if err != nil {
-p.logger.Warn("error encoding the local state to proto", "err", err)
+p.logger.Warn("Error encoding the local state to proto", "err", err)
 }
 p.messagesSent.WithLabelValues(fullState).Inc()
 p.messagesSentSize.WithLabelValues(fullState).Add(float64(len(b)))
@@ -592,6 +592,6 @@ func (p *redisPeer) Shutdown() {
 defer cancel()
 del := p.redis.Del(ctx, p.withPrefix(p.name))
 if del.Err() != nil {
-p.logger.Error("error deleting the redis key on shutdown", "err", del.Err(), "key", p.withPrefix(p.name))
+p.logger.Error("Error deleting the redis key on shutdown", "err", del.Err(), "key", p.withPrefix(p.name))
 }
 }
@@ -15,7 +15,7 @@ func (am *Alertmanager) GetStatus() apimodels.GettableStatus {
 }

 if err := json.Unmarshal(status, config); err != nil {
-am.logger.Error("unable to unmarshall alertmanager config", "Err", err)
+am.logger.Error("Unable to unmarshall alertmanager config", "Err", err)
 }

 return *apimodels.NewGettableStatus(&config.AlertmanagerConfig)
@@ -404,7 +404,7 @@ func (service *AlertRuleService) deleteRules(ctx context.Context, orgID int64, t
 for _, uid := range uids {
 if err := service.provenanceStore.DeleteProvenance(ctx, &models.AlertRule{UID: uid}, orgID); err != nil {
 // We failed to clean up the record, but this doesn't break things. Log it and move on.
-service.log.Warn("failed to delete provenance record for rule: %w", err)
+service.log.Warn("Failed to delete provenance record for rule: %w", err)
 }
 }
 return nil
@@ -99,7 +99,7 @@ func (ecp *ContactPointService) GetContactPoints(ctx context.Context, q ContactP
 for k, v := range contactPoint.SecureSettings {
 decryptedValue, err := ecp.decryptValue(v)
 if err != nil {
-ecp.log.Warn("decrypting value failed", "error", err.Error())
+ecp.log.Warn("Decrypting value failed", "error", err.Error())
 continue
 }
 if decryptedValue == "" {
@@ -151,7 +151,7 @@ func (ecp *ContactPointService) getContactPointDecrypted(ctx context.Context, or
 for k, v := range receiver.SecureSettings {
 decryptedValue, err := ecp.decryptValue(v)
 if err != nil {
-ecp.log.Warn("decrypting value failed", "error", err.Error())
+ecp.log.Warn("Decrypting value failed", "error", err.Error())
 continue
 }
 if decryptedValue == "" {
@@ -126,7 +126,7 @@ func (nps *NotificationPolicyService) UpdatePolicyTree(ctx context.Context, orgI
 func (nps *NotificationPolicyService) ResetPolicyTree(ctx context.Context, orgID int64) (definitions.Route, error) {
 defaultCfg, err := deserializeAlertmanagerConfig([]byte(nps.settings.DefaultConfiguration))
 if err != nil {
-nps.log.Error("failed to parse default alertmanager config: %w", err)
+nps.log.Error("Failed to parse default alertmanager config: %w", err)
 return definitions.Route{}, fmt.Errorf("failed to parse default alertmanager config: %w", err)
 }
 route := defaultCfg.AlertmanagerConfig.Route
@@ -53,19 +53,19 @@ func (st DBstore) DeleteAlertRulesByUID(ctx context.Context, orgID int64, ruleUI
 if err != nil {
 return err
 }
-logger.Debug("deleted alert rules", "count", rows)
+logger.Debug("Deleted alert rules", "count", rows)

 rows, err = sess.Table("alert_rule_version").Where("rule_org_id = ?", orgID).In("rule_uid", ruleUID).Delete(ngmodels.AlertRule{})
 if err != nil {
 return err
 }
-logger.Debug("deleted alert rule versions", "count", rows)
+logger.Debug("Deleted alert rule versions", "count", rows)

 rows, err = sess.Table("alert_instance").Where("rule_org_id = ?", orgID).In("rule_uid", ruleUID).Delete(ngmodels.AlertRule{})
 if err != nil {
 return err
 }
-logger.Debug("deleted alert instances", "count", rows)
+logger.Debug("Deleted alert instances", "count", rows)
 return nil
 })
 }
@@ -265,7 +265,7 @@ func (st DBstore) preventIntermediateUniqueConstraintViolations(sess *db.Session
 if !newTitlesOverlapExisting(titleUpdates) {
 return nil
 }
-st.Logger.Debug("detected possible intermediate unique constraint violation, creating temporary title updates", "updates", len(titleUpdates))
+st.Logger.Debug("Detected possible intermediate unique constraint violation, creating temporary title updates", "updates", len(titleUpdates))

 for _, update := range titleUpdates {
 r := update.Existing
@@ -537,7 +537,7 @@ func (st DBstore) GetAlertRulesForScheduling(ctx context.Context, query *ngmodel
 }
 defer func() {
 if err := rows.Close(); err != nil {
-st.Logger.Error("unable to close rows session", "error", err)
+st.Logger.Error("Unable to close rows session", "error", err)
 }
 }()
 lokiRangeToInstantEnabled := st.FeatureToggles.IsEnabled(featuremgmt.FlagAlertingLokiRangeToInstant)
@@ -96,7 +96,7 @@ func (st DBstore) SaveAlertmanagerConfigurationWithCallback(ctx context.Context,
 }

 if _, err := st.deleteOldConfigurations(ctx, cmd.OrgID, ConfigRecordsLimit); err != nil {
-st.Logger.Warn("failed to delete old am configs", "org", cmd.OrgID, "error", err)
+st.Logger.Warn("Failed to delete old am configs", "org", cmd.OrgID, "error", err)
 }

 if err := callback(); err != nil {
@@ -133,7 +133,7 @@ func (st *DBstore) UpdateAlertmanagerConfiguration(ctx context.Context, cmd *mod
 return err
 }
 if _, err := st.deleteOldConfigurations(ctx, cmd.OrgID, ConfigRecordsLimit); err != nil {
-st.Logger.Warn("failed to delete old am configs", "org", cmd.OrgID, "error", err)
+st.Logger.Warn("Failed to delete old am configs", "org", cmd.OrgID, "error", err)
 }
 return nil
 })
@@ -255,7 +255,7 @@ func (st *DBstore) deleteOldConfigurations(ctx context.Context, orgID int64, lim
 }
 affectedRows = rows
 if affectedRows > 0 {
-st.Logger.Info("deleted old alert_configuration(s)", "org", orgID, "limit", limit, "delete_count", affectedRows)
+st.Logger.Info("Deleted old alert_configuration(s)", "org", orgID, "limit", limit, "delete_count", affectedRows)
 }
 return nil
 })
@@ -153,11 +153,11 @@ func (m *migration) makeAlertRule(l log.Logger, cond condition, da dashAlert, fo
 ar.Labels[n] = v

 if err := m.addErrorSilence(da, ar); err != nil {
-m.mg.Logger.Error("alert migration error: failed to create silence for Error", "rule_name", ar.Title, "err", err)
+m.mg.Logger.Error("Alert migration error: failed to create silence for Error", "rule_name", ar.Title, "err", err)
 }

 if err := m.addNoDataSilence(da, ar); err != nil {
-m.mg.Logger.Error("alert migration error: failed to create silence for NoData", "rule_name", ar.Title, "err", err)
+m.mg.Logger.Error("Alert migration error: failed to create silence for NoData", "rule_name", ar.Title, "err", err)
 }

 return ar, nil
@@ -76,7 +76,7 @@ func (m *migration) setupAlertmanagerConfigs(rulesPerOrg map[int64]map[*alertRul

 // No need to create an Alertmanager configuration if there are no receivers left that aren't obsolete.
 if len(receivers) == 0 {
-m.mg.Logger.Warn("no available receivers", "orgId", orgID)
+m.mg.Logger.Warn("No available receivers", "orgId", orgID)
 continue
 }

@@ -177,7 +177,7 @@ func (m *migration) getNotificationChannelMap() (channelsPerOrg, defaultChannels
 defaultChannelsMap := make(defaultChannelsPerOrg)
 for i, c := range allChannels {
 if c.Type == "hipchat" || c.Type == "sensu" {
-m.mg.Logger.Error("alert migration error: discontinued notification channel found", "type", c.Type, "name", c.Name, "uid", c.Uid)
+m.mg.Logger.Error("Alert migration error: discontinued notification channel found", "type", c.Type, "name", c.Name, "uid", c.Uid)
 continue
 }

@@ -230,7 +230,7 @@ func (m *migration) createReceivers(allChannels []*notificationChannel) (map[uid
 // There can be name collisions after we sanitize. We check for this and attempt to make the name unique again using a short hash of the original name.
 if _, ok := set[sanitizedName]; ok {
 sanitizedName = sanitizedName + fmt.Sprintf("_%.3x", md5.Sum([]byte(c.Name)))
-m.mg.Logger.Warn("alert contains duplicate contact name after sanitization, appending unique suffix", "type", c.Type, "name", c.Name, "new_name", sanitizedName, "uid", c.Uid)
+m.mg.Logger.Warn("Alert contains duplicate contact name after sanitization, appending unique suffix", "type", c.Type, "name", c.Name, "new_name", sanitizedName, "uid", c.Uid)
 }
 notifier.Name = sanitizedName

@@ -351,7 +351,7 @@ func (m *migration) filterReceiversForAlert(name string, channelIDs []uidOrID, r
 if ok {
 filteredReceiverNames[recv.Name] = struct{}{} // Deduplicate on contact point name.
 } else {
-m.mg.Logger.Warn("alert linked to obsolete notification channel, ignoring", "alert", name, "uid", uidOrId)
+m.mg.Logger.Warn("Alert linked to obsolete notification channel, ignoring", "alert", name, "uid", uidOrId)
 }
 }

@@ -57,7 +57,7 @@ func (e *MigrationError) Unwrap() error { return e.Err }
 func AddDashAlertMigration(mg *migrator.Migrator) {
 logs, err := mg.GetMigrationLog()
 if err != nil {
-mg.Logger.Error("alert migration failure: could not get migration log", "error", err)
+mg.Logger.Error("Alert migration failure: could not get migration log", "error", err)
 os.Exit(1)
 }

@@ -72,7 +72,7 @@ func AddDashAlertMigration(mg *migrator.Migrator) {
 migrationID: rmMigTitle,
 })
 if err != nil {
-mg.Logger.Error("alert migration error: could not clear alert migration for removing data", "error", err)
+mg.Logger.Error("Alert migration error: could not clear alert migration for removing data", "error", err)
 }
 mg.AddMigration(migTitle, &migration{
 // We deduplicate for case-insensitive matching in MySQL-compatible backend flavours because they use case-insensitive collation.
@@ -96,7 +96,7 @@ func AddDashAlertMigration(mg *migrator.Migrator) {
 migrationID: migTitle,
 })
 if err != nil {
-mg.Logger.Error("alert migration error: could not clear dashboard alert migration", "error", err)
+mg.Logger.Error("Alert migration error: could not clear dashboard alert migration", "error", err)
 }
 mg.AddMigration(rmMigTitle, &rmMigration{})
 }
@@ -107,7 +107,7 @@ func AddDashAlertMigration(mg *migrator.Migrator) {
 func RerunDashAlertMigration(mg *migrator.Migrator) {
 logs, err := mg.GetMigrationLog()
 if err != nil {
-mg.Logger.Error("alert migration failure: could not get migration log", "error", err)
+mg.Logger.Error("Alert migration failure: could not get migration log", "error", err)
 os.Exit(1)
 }

@@ -128,7 +128,7 @@ func RerunDashAlertMigration(mg *migrator.Migrator) {
 func AddDashboardUIDPanelIDMigration(mg *migrator.Migrator) {
 logs, err := mg.GetMigrationLog()
 if err != nil {
-mg.Logger.Error("alert migration failure: could not get migration log", "error", err)
+mg.Logger.Error("Alert migration failure: could not get migration log", "error", err)
 os.Exit(1)
 }

@@ -245,7 +245,7 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
 if err != nil {
 return err
 }
-mg.Logger.Info("alerts found to migrate", "alerts", len(dashAlerts))
+mg.Logger.Info("Alerts found to migrate", "alerts", len(dashAlerts))

 // [orgID, dataSourceId] -> UID
 dsIDMap, err := m.slurpDSIDs()
@@ -293,7 +293,7 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {

 for _, da := range dashAlerts {
 l := mg.Logger.New("ruleID", da.Id, "ruleName", da.Name, "dashboardUID", da.DashboardUID, "orgID", da.OrgId)
-l.Debug("migrating alert rule to Unified Alerting")
+l.Debug("Migrating alert rule to Unified Alerting")
 newCond, err := transConditions(*da.ParsedSettings, da.OrgId, dsIDMap)
 if err != nil {
 return err
@@ -323,7 +323,7 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
 folderName := getAlertFolderNameFromDashboard(&dash)
 f, ok := folderCache[folderName]
 if !ok {
-l.Info("create a new folder for alerts that belongs to dashboard because it has custom permissions", "folder", folderName)
+l.Info("Create a new folder for alerts that belongs to dashboard because it has custom permissions", "folder", folderName)
 // create folder and assign the permissions of the dashboard (included default and inherited)
 f, err = folderHelper.createFolder(dash.OrgId, folderName)
 if err != nil {
@@ -395,7 +395,7 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {

 for orgID := range rulesPerOrg {
 if err := m.writeSilencesFile(orgID); err != nil {
-m.mg.Logger.Error("alert migration error: failed to write silence file", "err", err)
+m.mg.Logger.Error("Alert migration error: failed to write silence file", "err", err)
 }
 }

@@ -500,7 +500,7 @@ func (m *migration) validateAlertmanagerConfig(config *PostableUserConfig) error
 if value, ok := sjd[key]; ok {
 decryptedData, err := util.Decrypt(value, setting.SecretKey)
 if err != nil {
-m.mg.Logger.Warn("unable to decrypt key '%s' for %s receiver with uid %s, returning fallback.", key, gr.Type, gr.UID)
+m.mg.Logger.Warn("Unable to decrypt key '%s' for %s receiver with uid %s, returning fallback.", key, gr.Type, gr.UID)
 return fallback
 }
 return string(decryptedData)
@@ -591,7 +591,7 @@ func (m *rmMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
 }
 for _, f := range files {
 if err := os.Remove(f); err != nil {
-mg.Logger.Error("alert migration error: failed to remove silence file", "file", f, "err", err)
+mg.Logger.Error("Alert migration error: failed to remove silence file", "file", f, "err", err)
 }
 }

@@ -793,7 +793,7 @@ func (c createDefaultFoldersForAlertingMigration) Exec(sess *xorm.Session, migra
 if err != nil {
 return fmt.Errorf("failed to create the default alerting folder for organization %s (ID: %d): %w", row.Name, row.Id, err)
 }
-migrator.Logger.Info("created the default folder for alerting", "org_id", row.Id, "folder_name", folder.Title, "folder_uid", folder.Uid)
+migrator.Logger.Info("Created the default folder for alerting", "org_id", row.Id, "folder_name", folder.Title, "folder_uid", folder.Uid)
 }
 return nil
 }
@@ -868,15 +868,15 @@ func (c updateRulesOrderInGroup) Exec(sess *xorm.Session, migrator *migrator.Mig
 rule.Version++
 _, err := sess.ID(rule.ID).Cols("version", "updated", "rule_group_idx").Update(rule)
 if err != nil {
-migrator.Logger.Error("failed to update alert rule", "uid", rule.UID, "err", err)
+migrator.Logger.Error("Failed to update alert rule", "uid", rule.UID, "err", err)
 return fmt.Errorf("unable to update alert rules with group index: %w", err)
 }
-migrator.Logger.Debug("updated group index for alert rule", "rule_uid", rule.UID)
+migrator.Logger.Debug("Updated group index for alert rule", "rule_uid", rule.UID)
 versions = append(versions, version)
 }
 _, err := sess.Insert(versions...)
 if err != nil {
-migrator.Logger.Error("failed to insert changes to alert_rule_version", "err", err)
+migrator.Logger.Error("Failed to insert changes to alert_rule_version", "err", err)
 return fmt.Errorf("unable to update alert rules with group index: %w", err)
 }
 return nil