Alerting: Improve log messages (#67688)

* Rename base logger and capitalize messages
* Remove cfglogger from config.go
Sladyn 2023-05-25 08:55:01 -07:00 committed by GitHub
parent e00260465b
commit a06a5a7393
6 changed files with 15 additions and 15 deletions
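
The change follows one pattern throughout: drop the package-level logger in config.go and have callers inject a logger that already carries their scope (component name, org ID), then capitalize the message text. A minimal sketch of that pattern, using the standard library's log/slog as a stand-in for Grafana's infra/log package (all names below are illustrative, not the actual Grafana API):

package notifier

import "log/slog"

// Before: a shared package-level logger, blind to caller context.
//
//	var cfglogger = slog.With("logger", "notifier.config")
//
// After: the caller injects its own logger, so every message inherits
// whatever context (component name, org ID) the caller attached.
func persistTemplates(logger *slog.Logger, path string) error {
	// Logs go through the injected logger only; no package-level state.
	logger.Error("Unable to read directory for deleting Alertmanager templates", "path", path)
	return nil
}

func newAlertmanagerExample(orgID int64) {
	// Scope the logger once, at construction time.
	l := slog.Default().With("logger", "ngalert.notifier.alertmanager", "org", orgID)
	_ = persistTemplates(l, "/tmp/alerting/templates")
}

With the logger injected, the template-persistence errors below are attributed to the org-scoped alertmanager logger rather than a global notifier.config logger.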

View File

@@ -125,7 +125,7 @@ func newAlertmanager(ctx context.Context, orgID int64, cfg *setting.Cfg, store A
Nflog: nflogOptions,
}
- l := log.New("alertmanager", "org", orgID)
+ l := log.New("ngalert.notifier.alertmanager", orgID)
gam, err := alertingNotify.NewGrafanaAlertmanager("orgID", orgID, amcfg, peer, l, alertingNotify.NewGrafanaAlertmanagerMetrics(m.Registerer))
if err != nil {
return nil, err
@@ -263,14 +263,14 @@ func (am *Alertmanager) applyConfig(cfg *apimodels.PostableUserConfig, rawConfig
cfg.AlertmanagerConfig.Templates = append(cfg.AlertmanagerConfig.Templates, "__default__.tmpl")
// next, we need to make sure we persist the templates to disk.
- _, templatesChanged, err := PersistTemplates(cfg, am.Base.WorkingDirectory())
+ _, templatesChanged, err := PersistTemplates(am.logger, cfg, am.Base.WorkingDirectory())
if err != nil {
return false, err
}
// If neither the configuration nor templates have changed, we've got nothing to do.
if !amConfigChanged && !templatesChanged {
- am.logger.Debug("neither config nor template have changed, skipping configuration sync.")
+ am.logger.Debug("Neither config nor template have changed, skipping configuration sync.")
return false, nil
}

View File

@@ -14,9 +14,7 @@ import (
api "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
)
- var cfglogger = log.New("notifier.config")
- func PersistTemplates(cfg *api.PostableUserConfig, path string) ([]string, bool, error) {
+ func PersistTemplates(logger log.Logger, cfg *api.PostableUserConfig, path string) ([]string, bool, error) {
if len(cfg.TemplateFiles) < 1 {
return nil, false, nil
}
@@ -58,7 +56,7 @@ func PersistTemplates(cfg *api.PostableUserConfig, path string) ([]string, bool,
// Now that we have the list of _actual_ templates, let's remove the ones that we don't need.
existingFiles, err := os.ReadDir(path)
if err != nil {
- cfglogger.Error("unable to read directory for deleting Alertmanager templates", "error", err, "path", path)
+ logger.Error("Unable to read directory for deleting Alertmanager templates", "error", err, "path", path)
}
for _, existingFile := range existingFiles {
p := filepath.Join(path, existingFile.Name())
@@ -67,7 +65,7 @@ func PersistTemplates(cfg *api.PostableUserConfig, path string) ([]string, bool,
templatesChanged = true
err := os.Remove(p)
if err != nil {
- cfglogger.Error("unable to delete template", "error", err, "file", p)
+ logger.Error("Unable to delete template", "error", err, "file", p)
}
}
}

View File

@@ -6,6 +6,7 @@ import (
"path/filepath"
"testing"
+ "github.com/grafana/grafana/pkg/infra/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -77,7 +78,8 @@ func TestPersistTemplates(t *testing.T) {
}
c := &api.PostableUserConfig{TemplateFiles: tt.templates}
- paths, changed, persistErr := PersistTemplates(c, dir)
+ testLogger := logtest.Fake{}
+ paths, changed, persistErr := PersistTemplates(&testLogger, c, dir)
files := map[string]string{}
readFiles, err := os.ReadDir(dir)
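
Injecting the logger is also what lets the test above swap in logtest.Fake. A sketch of the same idea with a hand-rolled recorder (fakeLogger is hypothetical, not Grafana's actual logtest API):

package notifier

import "testing"

// fakeLogger is a hypothetical minimal recorder standing in for
// logtest.Fake: it counts Error calls and keeps the last message.
type fakeLogger struct {
	errorCalls int
	lastMsg    string
}

func (f *fakeLogger) Error(msg string, _ ...any) {
	f.errorCalls++
	f.lastMsg = msg
}

func TestPersistTemplatesHappyPathLogsNoErrors(t *testing.T) {
	f := &fakeLogger{}

	// ... call the code under test with f injected, e.g.
	// persistTemplates(f, dir) against a valid directory ...

	if f.errorCalls != 0 {
		t.Fatalf("unexpected error log: %q", f.lastMsg)
	}
}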

View File

@@ -53,7 +53,7 @@ func (c *alertmanagerCrypto) LoadSecureSettings(ctx context.Context, orgId int64
currentConfig, err := Load([]byte(amConfig.AlertmanagerConfiguration))
// If the current config is un-loadable, treat it as if it never existed. Providing a new, valid config should be able to "fix" this state.
if err != nil {
- c.log.Warn("last known alertmanager configuration was invalid. Overwriting...")
+ c.log.Warn("Last known alertmanager configuration was invalid. Overwriting...")
} else {
currentReceiverMap = currentConfig.GetGrafanaReceiverMap()
}

View File

@@ -29,7 +29,7 @@ func NewFileStore(orgID int64, store kvstore.KVStore, workingDirPath string) *Fi
workingDirPath: workingDirPath,
orgID: orgID,
kv: kvstore.WithNamespace(store, orgID, KVNamespace),
- logger: log.New("filestore", "org", orgID),
+ logger: log.New("ngalert.notifier.alertmanager.file_store", orgID),
}
}
@@ -92,11 +92,11 @@ func (fileStore *FileStore) WriteFileToDisk(fn string, content []byte) error {
// CleanUp will remove the working directory from disk.
func (fileStore *FileStore) CleanUp() {
if err := os.RemoveAll(fileStore.workingDirPath); err != nil {
- fileStore.logger.Warn("unable to delete the local working directory", "dir", fileStore.workingDirPath,
+ fileStore.logger.Warn("Unable to delete the local working directory", "dir", fileStore.workingDirPath,
"error", err)
return
}
- fileStore.logger.Info("successfully deleted working directory", "dir", fileStore.workingDirPath)
+ fileStore.logger.Info("Successfully deleted working directory", "dir", fileStore.workingDirPath)
}
func (fileStore *FileStore) pathFor(fn string) string {

View File

@@ -129,7 +129,7 @@ func (moa *MultiOrgAlertmanager) setupClustering(cfg *setting.Cfg) error {
err = peer.Join(cluster.DefaultReconnectInterval, cluster.DefaultReconnectTimeout)
if err != nil {
- moa.logger.Error("msg", "unable to join gossip mesh while initializing cluster for high availability mode", "error", err)
+ moa.logger.Error("msg", "Unable to join gossip mesh while initializing cluster for high availability mode", "error", err)
}
// Attempt to verify the number of peers for 30s every 2s. The risk here is what we send a notification "too soon".
// Which should _never_ happen given we share the notification log via the database so the risk of double notification is very low.
@@ -143,7 +143,7 @@ func (moa *MultiOrgAlertmanager) setupClustering(cfg *setting.Cfg) error {
}
func (moa *MultiOrgAlertmanager) Run(ctx context.Context) error {
- moa.logger.Info("starting MultiOrg Alertmanager")
+ moa.logger.Info("Starting MultiOrg Alertmanager")
for {
select {