Alerting: Improve logs in state manager and historian (#57374)

* Touch up log statements, fix casing, add and normalize contexts

* Dedicated logger for dashboard resolver

* Avoid injecting logger into historian

* More minor log touch-ups

* Dedicated logger for state manager

* Use rule context in annotation creator

* Rename base logger and avoid redundant contextual loggers
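
The same pattern runs through every file in this change: each component builds one dedicated, named base logger in its constructor, then derives a per-rule child logger from the rule key's log context so individual statements no longer repeat the rule UID. A minimal sketch of that shape, reusing the infra/log and ngalert models packages seen in the diff (the component, package, and logger names below are illustrative, not taken from the commit):

package example

import (
	"github.com/grafana/grafana/pkg/infra/log"

	ngModels "github.com/grafana/grafana/pkg/services/ngalert/models"
)

// component owns its logger instead of having one injected by the caller.
type component struct {
	log log.Logger
}

// newComponent creates the dedicated base logger, named after the component.
func newComponent() *component {
	return &component{log: log.New("ngalert.example")} // illustrative name
}

// processRule derives a contextual logger once from the rule key, so every
// statement below carries the rule's org ID and UID without listing them.
func (c *component) processRule(ruleKey ngModels.AlertRuleKey) {
	logger := c.log.New(ruleKey.LogContext()...)
	logger.Debug("Processing rule")
	logger.Info("Finished processing rule", "states", 0)
}
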
Alexander Weaver, 2022-10-21 16:16:51 -05:00, committed by GitHub
parent 910154c102
commit de46c1b002
10 changed files with 54 additions and 59 deletions


@@ -189,8 +189,8 @@ func (ng *AlertNG) init() error {
AlertSender: alertsRouter,
}
- historian := historian.NewAnnotationHistorian(ng.annotationsRepo, ng.dashboardService, ng.Log)
- stateManager := state.NewManager(ng.Log, ng.Metrics.GetStateMetrics(), appUrl, store, store, ng.imageService, clk, historian)
+ historian := historian.NewAnnotationHistorian(ng.annotationsRepo, ng.dashboardService)
+ stateManager := state.NewManager(ng.Metrics.GetStateMetrics(), appUrl, store, store, ng.imageService, clk, historian)
scheduler := schedule.NewScheduler(schedCfg, appUrl, stateManager)
// if it is required to include folder title to the alerts, we need to subscribe to changes of alert title


@@ -18,7 +18,6 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/ngalert/eval"
"github.com/grafana/grafana/pkg/services/ngalert/image"
"github.com/grafana/grafana/pkg/services/ngalert/metrics"
@@ -107,7 +106,7 @@ func TestWarmStateCache(t *testing.T) {
Labels: labels,
}
_ = dbstore.SaveAlertInstances(ctx, instance2)
st := state.NewManager(log.New("test"), testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.NewMock(), &state.FakeHistorian{})
st := state.NewManager(testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.NewMock(), &state.FakeHistorian{})
st.Warm(ctx)
t.Run("instance cache has expected entries", func(t *testing.T) {
@@ -158,7 +157,7 @@ func TestAlertingTicker(t *testing.T) {
Metrics: testMetrics.GetSchedulerMetrics(),
AlertSender: notifier,
}
st := state.NewManager(log.New("test"), testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.NewMock(), &state.FakeHistorian{})
st := state.NewManager(testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.NewMock(), &state.FakeHistorian{})
appUrl := &url.URL{
Scheme: "http",
Host: "localhost",


@@ -527,7 +527,7 @@ func setupScheduler(t *testing.T, rs *fakeRulesStore, is *state.FakeInstanceStor
}
stateRs := state.FakeRuleReader{}
st := state.NewManager(log.New("test"), m.GetStateMetrics(), nil, &stateRs, is, &image.NoopImageService{}, mockedClock, &state.FakeHistorian{})
st := state.NewManager(m.GetStateMetrics(), nil, &stateRs, is, &image.NoopImageService{}, mockedClock, &state.FakeHistorian{})
return NewScheduler(schedCfg, appUrl, st)
}


@@ -90,13 +90,13 @@ func (rs *ruleStates) getOrCreate(ctx context.Context, log log.Logger, alertRule
}
}
if len(dupes) > 0 {
log.Warn("evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored", "labels", dupes)
log.Warn("Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored", "labels", dupes)
}
il := ngModels.InstanceLabels(lbs)
id, err := il.StringKey()
if err != nil {
log.Error("error getting cacheId for entry", "error", err.Error())
log.Error("Error getting cacheId for entry", "error", err)
}
if state, ok := rs.states[id]; ok {
@@ -145,7 +145,7 @@ func (rs *ruleStates) expandRuleLabelsAndAnnotations(ctx context.Context, log lo
ev, err := expandTemplate(ctx, alertRule.Title, v, templateLabels, alertInstance, externalURL)
expanded[k] = ev
if err != nil {
log.Error("error in expanding template", "name", k, "value", v, "error", err.Error())
log.Error("Error in expanding template", "name", k, "value", v, "error", err)
// Store the original template on error.
expanded[k] = v
}


@@ -22,16 +22,17 @@ type AnnotationStateHistorian struct {
log log.Logger
}
- func NewAnnotationHistorian(annotations annotations.Repository, dashboards dashboards.DashboardService, log log.Logger) *AnnotationStateHistorian {
+ func NewAnnotationHistorian(annotations annotations.Repository, dashboards dashboards.DashboardService) *AnnotationStateHistorian {
return &AnnotationStateHistorian{
annotations: annotations,
- dashboards: newDashboardResolver(dashboards, log, defaultDashboardCacheExpiry),
- log: log,
+ dashboards: newDashboardResolver(dashboards, defaultDashboardCacheExpiry),
+ log: log.New("ngalert.state.historian"),
}
}
func (h *AnnotationStateHistorian) RecordState(ctx context.Context, rule *ngmodels.AlertRule, labels data.Labels, evaluatedAt time.Time, currentData, previousData state.InstanceStateAndReason) {
h.log.Debug("alert state changed creating annotation", "alertRuleUID", rule.UID, "newState", currentData.String(), "oldState", previousData.String())
logger := h.log.New(rule.GetKey().LogContext()...)
logger.Debug("Alert state changed creating annotation", "newState", currentData.String(), "oldState", previousData.String())
labels = removePrivateLabels(labels)
annotationText := fmt.Sprintf("%s {%s} - %s", rule.Title, labels.String(), currentData.String())
@@ -51,13 +52,13 @@ func (h *AnnotationStateHistorian) RecordState(ctx context.Context, rule *ngmode
panelId, err := strconv.ParseInt(panelUid, 10, 64)
if err != nil {
h.log.Error("error parsing panelUID for alert annotation", "panelUID", panelUid, "alertRuleUID", rule.UID, "error", err.Error())
logger.Error("Error parsing panelUID for alert annotation", "panelUID", panelUid, "error", err)
return
}
dashID, err := h.dashboards.getID(ctx, rule.OrgID, dashUid)
if err != nil {
h.log.Error("error getting dashboard for alert annotation", "dashboardUID", dashUid, "alertRuleUID", rule.UID, "error", err.Error())
logger.Error("Error getting dashboard for alert annotation", "dashboardUID", dashUid, "error", err)
return
}
@@ -66,7 +67,7 @@ func (h *AnnotationStateHistorian) RecordState(ctx context.Context, rule *ngmode
}
if err := h.annotations.Save(ctx, item); err != nil {
h.log.Error("error saving alert annotation", "alertRuleUID", rule.UID, "error", err.Error())
logger.Error("Error saving alert annotation", "error", err)
return
}
}


@@ -27,12 +27,12 @@ type dashboardResolver struct {
log log.Logger
}
- func newDashboardResolver(dbs dashboards.DashboardService, log log.Logger, expiry time.Duration) *dashboardResolver {
+ func newDashboardResolver(dbs dashboards.DashboardService, expiry time.Duration) *dashboardResolver {
return &dashboardResolver{
dashboards: dbs,
cache: cache.New(expiry, maxDuration(2*expiry, minCleanupInterval)),
singleflight: singleflight.Group{},
- log: log,
+ log: log.New("ngalert.dashboard-resolver"),
}
}
@@ -48,7 +48,7 @@ func (r *dashboardResolver) getID(ctx context.Context, orgID int64, uid string)
}
id, err, _ := r.singleflight.Do(key, func() (interface{}, error) {
r.log.Debug("dashboard cache miss, querying dashboards", "dashboardUID", uid)
r.log.Debug("Dashboard cache miss, querying dashboards", "dashboardUID", uid)
var result interface{}
query := &models.GetDashboardQuery{


@@ -5,7 +5,6 @@ import (
"testing"
"time"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/stretchr/testify/mock"
@@ -42,5 +41,5 @@ func TestDashboardResolver(t *testing.T) {
}
func createDashboardResolverSut(dbs *dashboards.FakeDashboardService) *dashboardResolver {
- return newDashboardResolver(dbs, log.NewNopLogger(), 1*time.Nanosecond)
+ return newDashboardResolver(dbs, 1*time.Nanosecond)
}


@@ -43,13 +43,13 @@ type Manager struct {
externalURL *url.URL
}
- func NewManager(logger log.Logger, metrics *metrics.State, externalURL *url.URL,
+ func NewManager(metrics *metrics.State, externalURL *url.URL,
ruleStore RuleReader, instanceStore InstanceStore, imageService image.ImageService, clock clock.Clock, historian Historian) *Manager {
manager := &Manager{
cache: newCache(),
quit: make(chan struct{}),
ResendDelay: ResendDelay, // TODO: make this configurable
- log: logger,
+ log: log.New("ngalert.state.manager"),
metrics: metrics,
ruleStore: ruleStore,
instanceStore: instanceStore,
@@ -72,7 +72,7 @@ func (st *Manager) Warm(ctx context.Context) {
orgIds, err := st.instanceStore.FetchOrgIds(ctx)
if err != nil {
st.log.Error("unable to fetch orgIds", "error", err.Error())
st.log.Error("Unable to fetch orgIds", "error", err)
}
statesCount := 0
@@ -83,7 +83,7 @@ func (st *Manager) Warm(ctx context.Context) {
OrgID: orgId,
}
if err := st.ruleStore.ListAlertRules(ctx, &ruleCmd); err != nil {
st.log.Error("unable to fetch previous state", "msg", err.Error())
st.log.Error("Unable to fetch previous state", "error", err)
}
ruleByUID := make(map[string]*ngModels.AlertRule, len(ruleCmd.Result))
@@ -99,7 +99,7 @@ func (st *Manager) Warm(ctx context.Context) {
RuleOrgID: orgId,
}
if err := st.instanceStore.ListAlertInstances(ctx, &cmd); err != nil {
st.log.Error("unable to fetch previous state", "msg", err.Error())
st.log.Error("Unable to fetch previous state", "error", err)
}
for _, entry := range cmd.Result {
@@ -118,7 +118,7 @@ func (st *Manager) Warm(ctx context.Context) {
lbs := map[string]string(entry.Labels)
cacheID, err := entry.Labels.StringKey()
if err != nil {
st.log.Error("error getting cacheId for entry", "msg", err.Error())
st.log.Error("Error getting cacheId for entry", "error", err)
}
rulesStates.states[cacheID] = &State{
AlertRuleUID: entry.RuleUID,
@@ -137,7 +137,7 @@ func (st *Manager) Warm(ctx context.Context) {
}
}
st.cache.setAllStates(states)
st.log.Info("State cache has been initialized", "loaded_states", statesCount, "duration", time.Since(startTime))
st.log.Info("State cache has been initialized", "states", statesCount, "duration", time.Since(startTime))
}
func (st *Manager) Get(orgID int64, alertRuleUID, stateId string) *State {
@@ -147,15 +147,15 @@ func (st *Manager) Get(orgID int64, alertRuleUID, stateId string) *State {
// ResetStateByRuleUID deletes all entries in the state manager that match the given rule UID.
func (st *Manager) ResetStateByRuleUID(ctx context.Context, ruleKey ngModels.AlertRuleKey) []*State {
logger := st.log.New(ruleKey.LogContext()...)
logger.Debug("resetting state of the rule")
logger.Debug("Resetting state of the rule")
states := st.cache.removeByRuleUID(ruleKey.OrgID, ruleKey.UID)
if len(states) > 0 {
err := st.instanceStore.DeleteAlertInstancesByRule(ctx, ruleKey)
if err != nil {
logger.Error("failed to delete states that belong to a rule from database", ruleKey.LogContext()...)
logger.Error("Failed to delete states that belong to a rule from database", "error", err)
}
}
logger.Info("rules state was reset", "deleted_states", len(states))
logger.Info("Rules state was reset", "states", len(states))
return states
}
@@ -163,17 +163,17 @@ func (st *Manager) ResetStateByRuleUID(ctx context.Context, ruleKey ngModels.Ale
// if extraLabels is not empty, those labels will be added to every state. The extraLabels take precedence over rule labels and result labels
func (st *Manager) ProcessEvalResults(ctx context.Context, evaluatedAt time.Time, alertRule *ngModels.AlertRule, results eval.Results, extraLabels data.Labels) []*State {
logger := st.log.New(alertRule.GetKey().LogContext()...)
logger.Debug("state manager processing evaluation results", "resultCount", len(results))
logger.Debug("State manager processing evaluation results", "resultCount", len(results))
var states []*State
processedResults := make(map[string]*State, len(results))
for _, result := range results {
- s := st.setNextState(ctx, alertRule, result, extraLabels)
+ s := st.setNextState(ctx, alertRule, result, extraLabels, logger)
states = append(states, s)
processedResults[s.CacheID] = s
}
- resolvedStates := st.staleResultsHandler(ctx, evaluatedAt, alertRule, processedResults)
+ resolvedStates := st.staleResultsHandler(ctx, evaluatedAt, alertRule, processedResults, logger)
if len(states) > 0 {
logger.Debug("saving new states to the database", "count", len(states))
logger.Debug("Saving new states to the database", "count", len(states))
_, _ = st.saveAlertStates(ctx, states...)
}
return append(states, resolvedStates...)
@@ -211,7 +211,7 @@ func (st *Manager) maybeTakeScreenshot(
}
// Set the current state based on evaluation results
- func (st *Manager) setNextState(ctx context.Context, alertRule *ngModels.AlertRule, result eval.Result, extraLabels data.Labels) *State {
+ func (st *Manager) setNextState(ctx context.Context, alertRule *ngModels.AlertRule, result eval.Result, extraLabels data.Labels, logger log.Logger) *State {
currentState := st.cache.getOrCreate(ctx, st.log, alertRule, result, extraLabels, st.externalURL)
currentState.LastEvaluationTime = result.EvaluatedAt
@@ -227,7 +227,7 @@ func (st *Manager) setNextState(ctx context.Context, alertRule *ngModels.AlertRu
oldState := currentState.State
oldReason := currentState.StateReason
st.log.Debug("setting alert state", "uid", alertRule.UID)
logger.Debug("Setting alert state")
switch result.State {
case eval.Normal:
currentState.resultNormal(alertRule, result)
@@ -255,8 +255,7 @@ func (st *Manager) setNextState(ctx context.Context, alertRule *ngModels.AlertRu
err := st.maybeTakeScreenshot(ctx, alertRule, currentState, oldState)
if err != nil {
st.log.Warn("failed to generate a screenshot for an alert instance",
"alert_rule", alertRule.UID,
logger.Warn("Failed to generate a screenshot for an alert instance",
"dashboard", alertRule.DashboardUID,
"panel", alertRule.PanelID,
"error", err)
@@ -287,10 +286,10 @@ func (st *Manager) recordMetrics() {
for {
select {
case <-ticker.C:
st.log.Debug("recording state cache metrics", "now", st.clock.Now())
st.log.Debug("Recording state cache metrics", "now", st.clock.Now())
st.cache.recordMetrics(st.metrics)
case <-st.quit:
st.log.Debug("stopping state cache metrics recording", "now", st.clock.Now())
st.log.Debug("Stopping state cache metrics recording", "now", st.clock.Now())
ticker.Stop()
return
}
@@ -305,7 +304,7 @@ func (st *Manager) Put(states []*State) {
// TODO: Is the `State` type necessary? Should it embed the instance?
func (st *Manager) saveAlertStates(ctx context.Context, states ...*State) (saved, failed int) {
st.log.Debug("saving alert states", "count", len(states))
st.log.Debug("Saving alert states", "count", len(states))
instances := make([]ngModels.AlertInstance, 0, len(states))
type debugInfo struct {
@@ -321,7 +320,7 @@ func (st *Manager) saveAlertStates(ctx context.Context, states ...*State) (saved
_, hash, err := labels.StringAndHash()
if err != nil {
debug = append(debug, debugInfo{s.OrgID, s.AlertRuleUID, s.State.String(), s.Labels.String()})
st.log.Error("failed to save alert instance with invalid labels", "orgID", s.OrgID, "ruleUID", s.AlertRuleUID, "error", err)
st.log.Error("Failed to save alert instance with invalid labels", "orgID", s.OrgID, "rule", s.AlertRuleUID, "error", err)
continue
}
fields := ngModels.AlertInstance{
@@ -344,7 +343,7 @@ func (st *Manager) saveAlertStates(ctx context.Context, states ...*State) (saved
for _, inst := range instances {
debug = append(debug, debugInfo{inst.RuleOrgID, inst.RuleUID, string(inst.CurrentState), data.Labels(inst.Labels).String()})
}
st.log.Error("failed to save alert states", "states", debug, "error", err)
st.log.Error("Failed to save alert states", "states", debug, "error", err)
return 0, len(debug)
}
@@ -377,7 +376,7 @@ func (i InstanceStateAndReason) String() string {
return s
}
- func (st *Manager) staleResultsHandler(ctx context.Context, evaluatedAt time.Time, alertRule *ngModels.AlertRule, states map[string]*State) []*State {
+ func (st *Manager) staleResultsHandler(ctx context.Context, evaluatedAt time.Time, alertRule *ngModels.AlertRule, states map[string]*State, logger log.Logger) []*State {
var resolvedStates []*State
allStates := st.GetStatesForRuleUID(alertRule.OrgID, alertRule.UID)
toDelete := make([]ngModels.AlertInstanceKey, 0)
@@ -385,12 +384,12 @@ func (st *Manager) staleResultsHandler(ctx context.Context, evaluatedAt time.Tim
for _, s := range allStates {
// Is the cached state in our recently processed results? If not, is it stale?
if _, ok := states[s.CacheID]; !ok && stateIsStale(evaluatedAt, s.LastEvaluationTime, alertRule.IntervalSeconds) {
st.log.Debug("removing stale state entry", "orgID", s.OrgID, "alertRuleUID", s.AlertRuleUID, "cacheID", s.CacheID)
logger.Info("Removing stale state entry", "cacheID", s.CacheID, "state", s.State, "reason", s.StateReason)
st.cache.deleteEntry(s.OrgID, s.AlertRuleUID, s.CacheID)
ilbs := ngModels.InstanceLabels(s.Labels)
_, labelsHash, err := ilbs.StringAndHash()
if err != nil {
st.log.Error("unable to get labelsHash", "error", err.Error(), "orgID", s.OrgID, "alertRuleUID", s.AlertRuleUID)
logger.Error("Unable to get labelsHash", "error", err.Error(), s.AlertRuleUID)
}
toDelete = append(toDelete, ngModels.AlertInstanceKey{RuleOrgID: s.OrgID, RuleUID: s.AlertRuleUID, LabelsHash: labelsHash})
@@ -411,8 +410,7 @@ func (st *Manager) staleResultsHandler(ctx context.Context, evaluatedAt time.Tim
}
if err := st.instanceStore.DeleteAlertInstances(ctx, toDelete...); err != nil {
st.log.Error("unable to delete stale instances from database", "error", err.Error(),
"orgID", alertRule.OrgID, "alertRuleUID", alertRule.UID, "count", len(toDelete))
logger.Error("Unable to delete stale instances from database", "error", err, "count", len(toDelete))
}
return resolvedStates
}


@@ -11,7 +11,6 @@ import (
"github.com/benbjohnson/clock"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/ngalert/eval"
"github.com/grafana/grafana/pkg/services/ngalert/metrics"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
@@ -91,7 +90,7 @@ func Test_maybeNewImage(t *testing.T) {
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
imageService := &CountingImageService{}
- mgr := NewManager(log.NewNopLogger(), &metrics.State{}, nil,
+ mgr := NewManager(&metrics.State{}, nil,
&FakeRuleReader{}, &FakeInstanceStore{},
imageService, clock.NewMock(), &FakeHistorian{})
err := mgr.maybeTakeScreenshot(context.Background(), &ngmodels.AlertRule{}, test.state, test.oldState)


@@ -16,7 +16,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/expr"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/annotations"
"github.com/grafana/grafana/pkg/services/annotations/annotationstest"
"github.com/grafana/grafana/pkg/services/dashboards"
@@ -39,8 +38,8 @@ func TestDashboardAnnotations(t *testing.T) {
_, dbstore := tests.SetupTestEnv(t, 1)
fakeAnnoRepo := annotationstest.NewFakeAnnotationsRepo()
hist := historian.NewAnnotationHistorian(fakeAnnoRepo, &dashboards.FakeDashboardService{}, log.NewNopLogger())
st := state.NewManager(log.New("test_stale_results_handler"), testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.New(), hist)
hist := historian.NewAnnotationHistorian(fakeAnnoRepo, &dashboards.FakeDashboardService{})
st := state.NewManager(testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.New(), hist)
const mainOrgID int64 = 1
@@ -2012,8 +2011,8 @@ func TestProcessEvalResults(t *testing.T) {
for _, tc := range testCases {
fakeAnnoRepo := annotationstest.NewFakeAnnotationsRepo()
hist := historian.NewAnnotationHistorian(fakeAnnoRepo, &dashboards.FakeDashboardService{}, log.NewNopLogger())
st := state.NewManager(log.New("test_state_manager"), testMetrics.GetStateMetrics(), nil, nil, &state.FakeInstanceStore{}, &image.NotAvailableImageService{}, clock.New(), hist)
hist := historian.NewAnnotationHistorian(fakeAnnoRepo, &dashboards.FakeDashboardService{})
st := state.NewManager(testMetrics.GetStateMetrics(), nil, nil, &state.FakeInstanceStore{}, &image.NotAvailableImageService{}, clock.New(), hist)
t.Run(tc.desc, func(t *testing.T) {
for _, res := range tc.evalResults {
_ = st.ProcessEvalResults(context.Background(), evaluationTime, tc.alertRule, res, data.Labels{
@@ -2040,7 +2039,7 @@ func TestProcessEvalResults(t *testing.T) {
t.Run("should save state to database", func(t *testing.T) {
instanceStore := &state.FakeInstanceStore{}
clk := clock.New()
st := state.NewManager(log.New("test_state_manager"), testMetrics.GetStateMetrics(), nil, nil, instanceStore, &image.NotAvailableImageService{}, clk, &state.FakeHistorian{})
st := state.NewManager(testMetrics.GetStateMetrics(), nil, nil, instanceStore, &image.NotAvailableImageService{}, clk, &state.FakeHistorian{})
rule := models.AlertRuleGen()()
var results = eval.GenerateResults(rand.Intn(4)+1, eval.ResultGen(eval.WithEvaluatedAt(clk.Now())))
@@ -2169,7 +2168,7 @@ func TestStaleResultsHandler(t *testing.T) {
for _, tc := range testCases {
ctx := context.Background()
st := state.NewManager(log.New("test_stale_results_handler"), testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.New(), &state.FakeHistorian{})
st := state.NewManager(testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clock.New(), &state.FakeHistorian{})
st.Warm(ctx)
existingStatesForRule := st.GetStatesForRuleUID(rule.OrgID, rule.UID)
@@ -2231,7 +2230,7 @@ func TestStaleResults(t *testing.T) {
clk := clock.NewMock()
clk.Set(time.Now())
st := state.NewManager(log.New("test_stale_results_handler"), testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clk, &state.FakeHistorian{})
st := state.NewManager(testMetrics.GetStateMetrics(), nil, dbstore, dbstore, &image.NoopImageService{}, clk, &state.FakeHistorian{})
orgID := rand.Int63()
rule := tests.CreateTestAlertRule(t, ctx, dbstore, 10, orgID)