mirror of
https://github.com/grafana/grafana.git
synced 2025-02-25 18:55:37 -06:00
Revert "Alerting: Remove vendored models in migration service" (#76387)
Revert "Alerting: Remove vendored models in migration service (#74503)"
This reverts commit 6a8649d544.
This commit is contained in:
@@ -3,16 +3,14 @@ package migrations
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"xorm.io/xorm"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrations/ualert"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
@@ -77,9 +75,9 @@ func (e externalAlertmanagerToDatasources) Exec(sess *xorm.Session, mg *migrator
|
||||
ds.BasicAuth = true
|
||||
ds.BasicAuthUser = u.User.Username()
|
||||
if password, ok := u.User.Password(); ok {
|
||||
ds.SecureJsonData = getEncryptedJsonData(map[string]string{
|
||||
ds.SecureJsonData = ualert.GetEncryptedJsonData(map[string]string{
|
||||
"basicAuthPassword": password,
|
||||
}, log.New("securejsondata"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -126,22 +124,3 @@ func generateNewDatasourceUid(sess *xorm.Session, orgId int64) (string, error) {
|
||||
|
||||
return "", datasources.ErrDataSourceFailedGenerateUniqueUid
|
||||
}
|
||||
|
||||
// SecureJsonData is used to store encrypted data (for example in data_source table). Only values are separately
|
||||
// encrypted.
|
||||
type secureJsonData map[string][]byte
|
||||
|
||||
// getEncryptedJsonData returns map where all keys are encrypted.
|
||||
func getEncryptedJsonData(sjd map[string]string, log log.Logger) secureJsonData {
|
||||
encrypted := make(secureJsonData)
|
||||
for key, data := range sjd {
|
||||
encryptedData, err := util.Encrypt([]byte(data), setting.SecretKey)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
encrypted[key] = encryptedData
|
||||
}
|
||||
return encrypted
|
||||
}
|
||||
|
||||
@@ -54,9 +54,10 @@ func (*OSSMigrations) AddMigration(mg *Migrator) {
|
||||
addCacheMigration(mg)
|
||||
addShortURLMigrations(mg)
|
||||
ualert.AddTablesMigrations(mg)
|
||||
ualert.AddDashAlertMigration(mg)
|
||||
addLibraryElementsMigrations(mg)
|
||||
|
||||
ualert.FixEarlyMigration(mg)
|
||||
ualert.RerunDashAlertMigration(mg)
|
||||
addSecretsMigration(mg)
|
||||
addKVStoreMigrations(mg)
|
||||
ualert.AddDashboardUIDPanelIDMigration(mg)
|
||||
@@ -75,6 +76,7 @@ func (*OSSMigrations) AddMigration(mg *Migrator) {
|
||||
addEntityEventsTableMigration(mg)
|
||||
|
||||
addPublicDashboardMigration(mg)
|
||||
ualert.CreateDefaultFoldersForAlertingMigration(mg)
|
||||
addDbFileStorageMigration(mg)
|
||||
|
||||
accesscontrol.AddManagedPermissionsMigration(mg, accesscontrol.ManagedPermissionsMigrationID)
|
||||
@@ -99,9 +101,6 @@ func (*OSSMigrations) AddMigration(mg *Migrator) {
|
||||
|
||||
anonservice.AddMigration(mg)
|
||||
signingkeys.AddMigration(mg)
|
||||
|
||||
ualert.MigrationServiceMigration(mg)
|
||||
ualert.CreatedFoldersMigration(mg)
|
||||
}
|
||||
|
||||
func addStarMigrations(mg *Migrator) {
|
||||
|
||||
393
pkg/services/sqlstore/migrations/ualert/alert_rule.go
Normal file
393
pkg/services/sqlstore/migrations/ualert/alert_rule.go
Normal file
@@ -0,0 +1,393 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
|
||||
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
"github.com/grafana/grafana/pkg/tsdb/graphite"
|
||||
)
|
||||
|
||||
const (
|
||||
// ContactLabel is a private label created during migration and used in notification policies.
|
||||
// It stores a string array of all contact point names an alert rule should send to.
|
||||
// It was created as a means to simplify post-migration notification policies.
|
||||
ContactLabel = "__contacts__"
|
||||
)
|
||||
|
||||
type alertRule struct {
|
||||
ID int64 `xorm:"pk autoincr 'id'"`
|
||||
OrgID int64 `xorm:"org_id"`
|
||||
Title string
|
||||
Condition string
|
||||
Data []alertQuery
|
||||
IntervalSeconds int64
|
||||
Version int64
|
||||
UID string `xorm:"uid"`
|
||||
NamespaceUID string `xorm:"namespace_uid"`
|
||||
RuleGroup string
|
||||
RuleGroupIndex int `xorm:"rule_group_idx"`
|
||||
NoDataState string
|
||||
ExecErrState string
|
||||
For duration
|
||||
Updated time.Time
|
||||
Annotations map[string]string
|
||||
Labels map[string]string
|
||||
IsPaused bool
|
||||
}
|
||||
|
||||
type alertRuleVersion struct {
|
||||
RuleOrgID int64 `xorm:"rule_org_id"`
|
||||
RuleUID string `xorm:"rule_uid"`
|
||||
RuleNamespaceUID string `xorm:"rule_namespace_uid"`
|
||||
RuleGroup string
|
||||
RuleGroupIndex int `xorm:"rule_group_idx"`
|
||||
ParentVersion int64
|
||||
RestoredFrom int64
|
||||
Version int64
|
||||
|
||||
Created time.Time
|
||||
Title string
|
||||
Condition string
|
||||
Data []alertQuery
|
||||
IntervalSeconds int64
|
||||
NoDataState string
|
||||
ExecErrState string
|
||||
// ideally this field should have been apimodels.ApiDuration
|
||||
// but this is currently not possible because of circular dependencies
|
||||
For duration
|
||||
Annotations map[string]string
|
||||
Labels map[string]string
|
||||
IsPaused bool
|
||||
}
|
||||
|
||||
func (a *alertRule) makeVersion() *alertRuleVersion {
|
||||
return &alertRuleVersion{
|
||||
RuleOrgID: a.OrgID,
|
||||
RuleUID: a.UID,
|
||||
RuleNamespaceUID: a.NamespaceUID,
|
||||
RuleGroup: a.RuleGroup,
|
||||
RuleGroupIndex: a.RuleGroupIndex,
|
||||
ParentVersion: 0,
|
||||
RestoredFrom: 0,
|
||||
Version: 1,
|
||||
|
||||
Created: time.Now().UTC(),
|
||||
Title: a.Title,
|
||||
Condition: a.Condition,
|
||||
Data: a.Data,
|
||||
IntervalSeconds: a.IntervalSeconds,
|
||||
NoDataState: a.NoDataState,
|
||||
ExecErrState: a.ExecErrState,
|
||||
For: a.For,
|
||||
Annotations: a.Annotations,
|
||||
Labels: map[string]string{},
|
||||
IsPaused: a.IsPaused,
|
||||
}
|
||||
}
|
||||
|
||||
func addMigrationInfo(da *dashAlert) (map[string]string, map[string]string) {
|
||||
tagsMap := simplejson.NewFromAny(da.ParsedSettings.AlertRuleTags).MustMap()
|
||||
lbls := make(map[string]string, len(tagsMap))
|
||||
|
||||
for k, v := range tagsMap {
|
||||
lbls[k] = simplejson.NewFromAny(v).MustString()
|
||||
}
|
||||
|
||||
annotations := make(map[string]string, 3)
|
||||
annotations[ngmodels.DashboardUIDAnnotation] = da.DashboardUID
|
||||
annotations[ngmodels.PanelIDAnnotation] = fmt.Sprintf("%v", da.PanelId)
|
||||
annotations["__alertId__"] = fmt.Sprintf("%v", da.Id)
|
||||
|
||||
return lbls, annotations
|
||||
}
|
||||
|
||||
func (m *migration) makeAlertRule(l log.Logger, cond condition, da dashAlert, folderUID string) (*alertRule, error) {
|
||||
lbls, annotations := addMigrationInfo(&da)
|
||||
|
||||
message := MigrateTmpl(l.New("field", "message"), da.Message)
|
||||
annotations["message"] = message
|
||||
|
||||
data, err := migrateAlertRuleQueries(l, cond.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to migrate alert rule queries: %w", err)
|
||||
}
|
||||
|
||||
uid, err := m.seenUIDs.generateUid()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to migrate alert rule: %w", err)
|
||||
}
|
||||
|
||||
name := normalizeRuleName(da.Name, uid)
|
||||
|
||||
isPaused := false
|
||||
if da.State == "paused" {
|
||||
isPaused = true
|
||||
}
|
||||
|
||||
ar := &alertRule{
|
||||
OrgID: da.OrgId,
|
||||
Title: name, // TODO: Make sure all names are unique, make new name on constraint insert error.
|
||||
UID: uid,
|
||||
Condition: cond.Condition,
|
||||
Data: data,
|
||||
IntervalSeconds: ruleAdjustInterval(da.Frequency),
|
||||
Version: 1,
|
||||
NamespaceUID: folderUID, // Folder already created, comes from env var.
|
||||
RuleGroup: name,
|
||||
For: duration(da.For),
|
||||
Updated: time.Now().UTC(),
|
||||
Annotations: annotations,
|
||||
Labels: lbls,
|
||||
RuleGroupIndex: 1,
|
||||
IsPaused: isPaused,
|
||||
NoDataState: transNoData(l, da.ParsedSettings.NoDataState),
|
||||
ExecErrState: transExecErr(l, da.ParsedSettings.ExecutionErrorState),
|
||||
}
|
||||
|
||||
// Label for routing and silences.
|
||||
n, v := getLabelForSilenceMatching(ar.UID)
|
||||
ar.Labels[n] = v
|
||||
|
||||
if err := m.addErrorSilence(da, ar); err != nil {
|
||||
m.mg.Logger.Error("Alert migration error: failed to create silence for Error", "rule_name", ar.Title, "err", err)
|
||||
}
|
||||
|
||||
if err := m.addNoDataSilence(da, ar); err != nil {
|
||||
m.mg.Logger.Error("Alert migration error: failed to create silence for NoData", "rule_name", ar.Title, "err", err)
|
||||
}
|
||||
|
||||
return ar, nil
|
||||
}
|
||||
|
||||
// migrateAlertRuleQueries attempts to fix alert rule queries so they can work in unified alerting. Queries of some data sources are not compatible with unified alerting.
|
||||
func migrateAlertRuleQueries(l log.Logger, data []alertQuery) ([]alertQuery, error) {
|
||||
result := make([]alertQuery, 0, len(data))
|
||||
for _, d := range data {
|
||||
// queries that are expression are not relevant, skip them.
|
||||
if d.DatasourceUID == expressionDatasourceUID {
|
||||
result = append(result, d)
|
||||
continue
|
||||
}
|
||||
var fixedData map[string]json.RawMessage
|
||||
err := json.Unmarshal(d.Model, &fixedData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// remove hidden tag from the query (if exists)
|
||||
delete(fixedData, "hide")
|
||||
fixedData = fixGraphiteReferencedSubQueries(fixedData)
|
||||
fixedData = fixPrometheusBothTypeQuery(l, fixedData)
|
||||
updatedModel, err := json.Marshal(fixedData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.Model = updatedModel
|
||||
result = append(result, d)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// fixGraphiteReferencedSubQueries attempts to fix graphite referenced sub queries, given unified alerting does not support this.
|
||||
// targetFull of Graphite data source contains the expanded version of field 'target', so let's copy that.
|
||||
func fixGraphiteReferencedSubQueries(queryData map[string]json.RawMessage) map[string]json.RawMessage {
|
||||
fullQuery, ok := queryData[graphite.TargetFullModelField]
|
||||
if ok {
|
||||
delete(queryData, graphite.TargetFullModelField)
|
||||
queryData[graphite.TargetModelField] = fullQuery
|
||||
}
|
||||
|
||||
return queryData
|
||||
}
|
||||
|
||||
// fixPrometheusBothTypeQuery converts Prometheus 'Both' type queries to range queries.
|
||||
func fixPrometheusBothTypeQuery(l log.Logger, queryData map[string]json.RawMessage) map[string]json.RawMessage {
|
||||
// There is the possibility to support this functionality by:
|
||||
// - Splitting the query into two: one for instant and one for range.
|
||||
// - Splitting the condition into two: one for each query, separated by OR.
|
||||
// However, relying on a 'Both' query instead of multiple conditions to do this in legacy is likely
|
||||
// to be unintentional. In addition, this would require more robust operator precedence in classic conditions.
|
||||
// Given these reasons, we opt to convert them to range queries and log a warning.
|
||||
|
||||
var instant bool
|
||||
if instantRaw, ok := queryData["instant"]; ok {
|
||||
if err := json.Unmarshal(instantRaw, &instant); err != nil {
|
||||
// Nothing to do here, we can't parse the instant field.
|
||||
if isPrometheus, _ := isPrometheusQuery(queryData); isPrometheus {
|
||||
l.Info("Failed to parse instant field on Prometheus query", "instant", string(instantRaw), "err", err)
|
||||
}
|
||||
return queryData
|
||||
}
|
||||
}
|
||||
var rng bool
|
||||
if rangeRaw, ok := queryData["range"]; ok {
|
||||
if err := json.Unmarshal(rangeRaw, &rng); err != nil {
|
||||
// Nothing to do here, we can't parse the range field.
|
||||
if isPrometheus, _ := isPrometheusQuery(queryData); isPrometheus {
|
||||
l.Info("Failed to parse range field on Prometheus query", "range", string(rangeRaw), "err", err)
|
||||
}
|
||||
return queryData
|
||||
}
|
||||
}
|
||||
|
||||
if !instant || !rng {
|
||||
// Only apply this fix to 'Both' type queries.
|
||||
return queryData
|
||||
}
|
||||
|
||||
isPrometheus, err := isPrometheusQuery(queryData)
|
||||
if err != nil {
|
||||
l.Info("Unable to convert alert rule that resembles a Prometheus 'Both' type query to 'Range'", "err", err)
|
||||
return queryData
|
||||
}
|
||||
if !isPrometheus {
|
||||
// Only apply this fix to Prometheus.
|
||||
return queryData
|
||||
}
|
||||
|
||||
// Convert 'Both' type queries to `Range` queries by disabling the `Instant` portion.
|
||||
l.Warn("Prometheus 'Both' type queries are not supported in unified alerting. Converting to range query.")
|
||||
queryData["instant"] = []byte("false")
|
||||
|
||||
return queryData
|
||||
}
|
||||
|
||||
// isPrometheusQuery checks if the query is for Prometheus.
|
||||
func isPrometheusQuery(queryData map[string]json.RawMessage) (bool, error) {
|
||||
ds, ok := queryData["datasource"]
|
||||
if !ok {
|
||||
return false, fmt.Errorf("missing datasource field")
|
||||
}
|
||||
var datasource struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
if err := json.Unmarshal(ds, &datasource); err != nil {
|
||||
return false, fmt.Errorf("failed to parse datasource '%s': %w", string(ds), err)
|
||||
}
|
||||
if datasource.Type == "" {
|
||||
return false, fmt.Errorf("missing type field '%s'", string(ds))
|
||||
}
|
||||
return datasource.Type == "prometheus", nil
|
||||
}
|
||||
|
||||
type alertQuery struct {
|
||||
// RefID is the unique identifier of the query, set by the frontend call.
|
||||
RefID string `json:"refId"`
|
||||
|
||||
// QueryType is an optional identifier for the type of query.
|
||||
// It can be used to distinguish different types of queries.
|
||||
QueryType string `json:"queryType"`
|
||||
|
||||
// RelativeTimeRange is the relative Start and End of the query as sent by the frontend.
|
||||
RelativeTimeRange relativeTimeRange `json:"relativeTimeRange"`
|
||||
|
||||
DatasourceUID string `json:"datasourceUid"`
|
||||
|
||||
// JSON is the raw JSON query and includes the above properties as well as custom properties.
|
||||
Model json.RawMessage `json:"model"`
|
||||
}
|
||||
|
||||
// RelativeTimeRange is the per query start and end time
|
||||
// for requests.
|
||||
type relativeTimeRange struct {
|
||||
From duration `json:"from"`
|
||||
To duration `json:"to"`
|
||||
}
|
||||
|
||||
// duration is a type used for marshalling durations.
|
||||
type duration time.Duration
|
||||
|
||||
func (d duration) String() string {
|
||||
return time.Duration(d).String()
|
||||
}
|
||||
|
||||
func (d duration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(time.Duration(d).Seconds())
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalJSON(b []byte) error {
|
||||
var v any
|
||||
if err := json.Unmarshal(b, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
switch value := v.(type) {
|
||||
case float64:
|
||||
*d = duration(time.Duration(value) * time.Second)
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("invalid duration %v", v)
|
||||
}
|
||||
}
|
||||
|
||||
func ruleAdjustInterval(freq int64) int64 {
|
||||
// 10 corresponds to the SchedulerCfg, but TODO not worrying about fetching for now.
|
||||
var baseFreq int64 = 10
|
||||
if freq <= baseFreq {
|
||||
return 10
|
||||
}
|
||||
return freq - (freq % baseFreq)
|
||||
}
|
||||
|
||||
func transNoData(l log.Logger, s string) string {
|
||||
switch legacymodels.NoDataOption(s) {
|
||||
case legacymodels.NoDataSetOK:
|
||||
return string(ngmodels.OK) // values from ngalert/models/rule
|
||||
case "", legacymodels.NoDataSetNoData:
|
||||
return string(ngmodels.NoData)
|
||||
case legacymodels.NoDataSetAlerting:
|
||||
return string(ngmodels.Alerting)
|
||||
case legacymodels.NoDataKeepState:
|
||||
return string(ngmodels.NoData) // "keep last state" translates to no data because we now emit a special alert when the state is "noData". The result is that the evaluation will not return firing and instead we'll raise the special alert.
|
||||
default:
|
||||
l.Warn("Unable to translate execution of NoData state. Using default execution", "old", s, "new", ngmodels.NoData)
|
||||
return string(ngmodels.NoData)
|
||||
}
|
||||
}
|
||||
|
||||
func transExecErr(l log.Logger, s string) string {
|
||||
switch legacymodels.ExecutionErrorOption(s) {
|
||||
case "", legacymodels.ExecutionErrorSetAlerting:
|
||||
return string(ngmodels.AlertingErrState)
|
||||
case legacymodels.ExecutionErrorKeepState:
|
||||
// Keep last state is translated to error as we now emit a
|
||||
// DatasourceError alert when the state is error
|
||||
return string(ngmodels.ErrorErrState)
|
||||
case legacymodels.ExecutionErrorSetOk:
|
||||
return string(ngmodels.OkErrState)
|
||||
default:
|
||||
l.Warn("Unable to translate execution of Error state. Using default execution", "old", s, "new", ngmodels.ErrorErrState)
|
||||
return string(ngmodels.ErrorErrState)
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeRuleName(daName string, uid string) string {
|
||||
// If we have to truncate, we're losing data and so there is higher risk of uniqueness conflicts.
|
||||
// Append the UID to the suffix to forcibly break any collisions.
|
||||
if len(daName) > DefaultFieldMaxLength {
|
||||
trunc := DefaultFieldMaxLength - 1 - len(uid)
|
||||
daName = daName[:trunc] + "_" + uid
|
||||
}
|
||||
|
||||
return daName
|
||||
}
|
||||
|
||||
func extractChannelIDs(d dashAlert) (channelUids []uidOrID) {
|
||||
// Extracting channel UID/ID.
|
||||
for _, ui := range d.ParsedSettings.Notifications {
|
||||
if ui.UID != "" {
|
||||
channelUids = append(channelUids, ui.UID)
|
||||
continue
|
||||
}
|
||||
// In certain circumstances, id is used instead of uid.
|
||||
// We add this if there was no uid.
|
||||
if ui.ID > 0 {
|
||||
channelUids = append(channelUids, ui.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return channelUids
|
||||
}
|
||||
226
pkg/services/sqlstore/migrations/ualert/alert_rule_test.go
Normal file
226
pkg/services/sqlstore/migrations/ualert/alert_rule_test.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/infra/log/logtest"
|
||||
"github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
)
|
||||
|
||||
func TestMigrateAlertRuleQueries(t *testing.T) {
|
||||
tc := []struct {
|
||||
name string
|
||||
input *simplejson.Json
|
||||
expected string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "when a query has a sub query - it is extracted",
|
||||
input: simplejson.NewFromAny(map[string]any{"targetFull": "thisisafullquery", "target": "ahalfquery"}),
|
||||
expected: `{"target":"thisisafullquery"}`,
|
||||
},
|
||||
{
|
||||
name: "when a query does not have a sub query - it no-ops",
|
||||
input: simplejson.NewFromAny(map[string]any{"target": "ahalfquery"}),
|
||||
expected: `{"target":"ahalfquery"}`,
|
||||
},
|
||||
{
|
||||
name: "when query was hidden, it removes the flag",
|
||||
input: simplejson.NewFromAny(map[string]any{"hide": true}),
|
||||
expected: `{}`,
|
||||
},
|
||||
{
|
||||
name: "when prometheus both type query, convert to range",
|
||||
input: simplejson.NewFromAny(map[string]any{
|
||||
"datasource": map[string]string{
|
||||
"type": "prometheus",
|
||||
},
|
||||
"instant": true,
|
||||
"range": true,
|
||||
}),
|
||||
expected: `{"datasource":{"type":"prometheus"},"instant":false,"range":true}`,
|
||||
},
|
||||
{
|
||||
name: "when prometheus instant type query, do nothing",
|
||||
input: simplejson.NewFromAny(map[string]any{
|
||||
"datasource": map[string]string{
|
||||
"type": "prometheus",
|
||||
},
|
||||
"instant": true,
|
||||
}),
|
||||
expected: `{"datasource":{"type":"prometheus"},"instant":true}`,
|
||||
},
|
||||
{
|
||||
name: "when non-prometheus with instant and range, do nothing",
|
||||
input: simplejson.NewFromAny(map[string]any{
|
||||
"datasource": map[string]string{
|
||||
"type": "something",
|
||||
},
|
||||
"instant": true,
|
||||
"range": true,
|
||||
}),
|
||||
expected: `{"datasource":{"type":"something"},"instant":true,"range":true}`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
model, err := tt.input.Encode()
|
||||
require.NoError(t, err)
|
||||
queries, err := migrateAlertRuleQueries(&logtest.Fake{}, []alertQuery{{Model: model}})
|
||||
if tt.err != nil {
|
||||
require.Error(t, err)
|
||||
require.EqualError(t, err, tt.err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
r, err := queries[0].Model.MarshalJSON()
|
||||
require.NoError(t, err)
|
||||
require.JSONEq(t, tt.expected, string(r))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddMigrationInfo(t *testing.T) {
|
||||
tt := []struct {
|
||||
name string
|
||||
tagsJSON string
|
||||
expectedLabels map[string]string
|
||||
expectedAnnotations map[string]string
|
||||
}{
|
||||
{
|
||||
name: "when alert rule tags are a JSON array, they're ignored.",
|
||||
tagsJSON: `{ "alertRuleTags": ["one", "two", "three", "four"] }`,
|
||||
expectedLabels: map[string]string{},
|
||||
expectedAnnotations: map[string]string{"__alertId__": "0", "__dashboardUid__": "", "__panelId__": "0"},
|
||||
},
|
||||
{
|
||||
name: "when alert rule tags are a JSON object",
|
||||
tagsJSON: `{ "alertRuleTags": { "key": "value", "key2": "value2" } }`,
|
||||
expectedLabels: map[string]string{"key": "value", "key2": "value2"},
|
||||
expectedAnnotations: map[string]string{"__alertId__": "0", "__dashboardUid__": "", "__panelId__": "0"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var settings dashAlertSettings
|
||||
require.NoError(t, json.Unmarshal([]byte(tc.tagsJSON), &settings))
|
||||
|
||||
labels, annotations := addMigrationInfo(&dashAlert{ParsedSettings: &settings})
|
||||
require.Equal(t, tc.expectedLabels, labels)
|
||||
require.Equal(t, tc.expectedAnnotations, annotations)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMakeAlertRule(t *testing.T) {
|
||||
t.Run("when mapping rule names", func(t *testing.T) {
|
||||
t.Run("leaves basic names untouched", func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
da := createTestDashAlert()
|
||||
cnd := createTestDashAlertCondition()
|
||||
|
||||
ar, err := m.makeAlertRule(&logtest.Fake{}, cnd, da, "folder")
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, da.Name, ar.Title)
|
||||
require.Equal(t, ar.Title, ar.RuleGroup)
|
||||
})
|
||||
|
||||
t.Run("truncates very long names to max length", func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
da := createTestDashAlert()
|
||||
da.Name = strings.Repeat("a", DefaultFieldMaxLength+1)
|
||||
cnd := createTestDashAlertCondition()
|
||||
|
||||
ar, err := m.makeAlertRule(&logtest.Fake{}, cnd, da, "folder")
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Len(t, ar.Title, DefaultFieldMaxLength)
|
||||
parts := strings.SplitN(ar.Title, "_", 2)
|
||||
require.Len(t, parts, 2)
|
||||
require.Greater(t, len(parts[1]), 8, "unique identifier should be longer than 9 characters")
|
||||
require.Equal(t, DefaultFieldMaxLength-1, len(parts[0])+len(parts[1]), "truncated name + underscore + unique identifier should together be DefaultFieldMaxLength")
|
||||
require.Equal(t, ar.Title, ar.RuleGroup)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("alert is not paused", func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
da := createTestDashAlert()
|
||||
cnd := createTestDashAlertCondition()
|
||||
|
||||
ar, err := m.makeAlertRule(&logtest.Fake{}, cnd, da, "folder")
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsPaused)
|
||||
})
|
||||
|
||||
t.Run("paused dash alert is paused", func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
da := createTestDashAlert()
|
||||
da.State = "paused"
|
||||
cnd := createTestDashAlertCondition()
|
||||
|
||||
ar, err := m.makeAlertRule(&logtest.Fake{}, cnd, da, "folder")
|
||||
require.NoError(t, err)
|
||||
require.True(t, ar.IsPaused)
|
||||
})
|
||||
|
||||
t.Run("use default if execution of NoData is not known", func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
da := createTestDashAlert()
|
||||
da.ParsedSettings.NoDataState = uuid.NewString()
|
||||
cnd := createTestDashAlertCondition()
|
||||
|
||||
ar, err := m.makeAlertRule(&logtest.Fake{}, cnd, da, "folder")
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, string(models.NoData), ar.NoDataState)
|
||||
})
|
||||
|
||||
t.Run("use default if execution of Error is not known", func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
da := createTestDashAlert()
|
||||
da.ParsedSettings.ExecutionErrorState = uuid.NewString()
|
||||
cnd := createTestDashAlertCondition()
|
||||
|
||||
ar, err := m.makeAlertRule(&logtest.Fake{}, cnd, da, "folder")
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, string(models.ErrorErrState), ar.ExecErrState)
|
||||
})
|
||||
|
||||
t.Run("migrate message template", func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
da := createTestDashAlert()
|
||||
da.Message = "Instance ${instance} is down"
|
||||
cnd := createTestDashAlertCondition()
|
||||
|
||||
ar, err := m.makeAlertRule(&logtest.Fake{}, cnd, da, "folder")
|
||||
require.Nil(t, err)
|
||||
expected :=
|
||||
"{{- $mergedLabels := mergeLabelValues $values -}}\n" +
|
||||
"Instance {{$mergedLabels.instance}} is down"
|
||||
require.Equal(t, expected, ar.Annotations["message"])
|
||||
})
|
||||
}
|
||||
|
||||
func createTestDashAlert() dashAlert {
|
||||
return dashAlert{
|
||||
Id: 1,
|
||||
Name: "test",
|
||||
ParsedSettings: &dashAlertSettings{},
|
||||
}
|
||||
}
|
||||
|
||||
func createTestDashAlertCondition() condition {
|
||||
return condition{
|
||||
Condition: "A",
|
||||
}
|
||||
}
|
||||
512
pkg/services/sqlstore/migrations/ualert/channel.go
Normal file
512
pkg/services/sqlstore/migrations/ualert/channel.go
Normal file
@@ -0,0 +1,512 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/alertmanager/pkg/labels"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
ngModels "github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
)
|
||||
|
||||
const (
|
||||
// DisabledRepeatInterval is a large duration that will be used as a pseudo-disable in case a legacy channel doesn't have SendReminders enabled.
|
||||
DisabledRepeatInterval = model.Duration(time.Duration(8736) * time.Hour) // 1y
|
||||
)
|
||||
|
||||
type notificationChannel struct {
|
||||
ID int64 `xorm:"id"`
|
||||
OrgID int64 `xorm:"org_id"`
|
||||
Uid string `xorm:"uid"`
|
||||
Name string `xorm:"name"`
|
||||
Type string `xorm:"type"`
|
||||
DisableResolveMessage bool `xorm:"disable_resolve_message"`
|
||||
IsDefault bool `xorm:"is_default"`
|
||||
Settings *simplejson.Json `xorm:"settings"`
|
||||
SecureSettings SecureJsonData `xorm:"secure_settings"`
|
||||
SendReminder bool `xorm:"send_reminder"`
|
||||
Frequency model.Duration `xorm:"frequency"`
|
||||
}
|
||||
|
||||
// channelsPerOrg maps notification channels per organisation
|
||||
type channelsPerOrg map[int64][]*notificationChannel
|
||||
|
||||
// channelMap maps notification channels per organisation
|
||||
type defaultChannelsPerOrg map[int64][]*notificationChannel
|
||||
|
||||
// uidOrID for both uid and ID, primarily used for mapping legacy channel to migrated receiver.
|
||||
type uidOrID any
|
||||
|
||||
// channelReceiver is a convenience struct that contains a notificationChannel and its corresponding migrated PostableApiReceiver.
|
||||
type channelReceiver struct {
|
||||
channel *notificationChannel
|
||||
receiver *PostableApiReceiver
|
||||
}
|
||||
|
||||
// setupAlertmanagerConfigs creates Alertmanager configs with migrated receivers and routes.
|
||||
func (m *migration) setupAlertmanagerConfigs(rulesPerOrg map[int64]map[*alertRule][]uidOrID) (amConfigsPerOrg, error) {
|
||||
// allChannels: channelUID -> channelConfig
|
||||
allChannelsPerOrg, defaultChannelsPerOrg, err := m.getNotificationChannelMap()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load notification channels: %w", err)
|
||||
}
|
||||
|
||||
amConfigPerOrg := make(amConfigsPerOrg, len(allChannelsPerOrg))
|
||||
for orgID, channels := range allChannelsPerOrg {
|
||||
amConfig := &PostableUserConfig{
|
||||
AlertmanagerConfig: PostableApiAlertingConfig{
|
||||
Receivers: make([]*PostableApiReceiver, 0),
|
||||
},
|
||||
}
|
||||
amConfigPerOrg[orgID] = amConfig
|
||||
|
||||
// Create all newly migrated receivers from legacy notification channels.
|
||||
receiversMap, receivers, err := m.createReceivers(channels)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create receiver in orgId %d: %w", orgID, err)
|
||||
}
|
||||
|
||||
// No need to create an Alertmanager configuration if there are no receivers left that aren't obsolete.
|
||||
if len(receivers) == 0 {
|
||||
m.mg.Logger.Warn("No available receivers", "orgId", orgID)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, cr := range receivers {
|
||||
amConfig.AlertmanagerConfig.Receivers = append(amConfig.AlertmanagerConfig.Receivers, cr.receiver)
|
||||
}
|
||||
|
||||
defaultReceivers := make(map[string]struct{})
|
||||
defaultChannels, ok := defaultChannelsPerOrg[orgID]
|
||||
if ok {
|
||||
// If the organization has default channels build a map of default receivers, used to create alert-specific routes later.
|
||||
for _, c := range defaultChannels {
|
||||
defaultReceivers[c.Name] = struct{}{}
|
||||
}
|
||||
}
|
||||
defaultReceiver, defaultRoute, err := m.createDefaultRouteAndReceiver(defaultChannels)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create default route & receiver in orgId %d: %w", orgID, err)
|
||||
}
|
||||
amConfig.AlertmanagerConfig.Route = defaultRoute
|
||||
if defaultReceiver != nil {
|
||||
amConfig.AlertmanagerConfig.Receivers = append(amConfig.AlertmanagerConfig.Receivers, defaultReceiver)
|
||||
}
|
||||
|
||||
for _, cr := range receivers {
|
||||
route, err := createRoute(cr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create route for receiver %s in orgId %d: %w", cr.receiver.Name, orgID, err)
|
||||
}
|
||||
|
||||
amConfigPerOrg[orgID].AlertmanagerConfig.Route.Routes = append(amConfigPerOrg[orgID].AlertmanagerConfig.Route.Routes, route)
|
||||
}
|
||||
|
||||
for ar, channelUids := range rulesPerOrg[orgID] {
|
||||
filteredReceiverNames := m.filterReceiversForAlert(ar.Title, channelUids, receiversMap, defaultReceivers)
|
||||
|
||||
if len(filteredReceiverNames) != 0 {
|
||||
// Only create a contact label if there are specific receivers, otherwise it defaults to the root-level route.
|
||||
ar.Labels[ContactLabel] = contactListToString(filteredReceiverNames)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate the alertmanager configuration produced, this gives a chance to catch bad configuration at migration time.
|
||||
// Validation between legacy and unified alerting can be different (e.g. due to bug fixes) so this would fail the migration in that case.
|
||||
if err := m.validateAlertmanagerConfig(amConfig); err != nil {
|
||||
return nil, fmt.Errorf("failed to validate AlertmanagerConfig in orgId %d: %w", orgID, err)
|
||||
}
|
||||
}
|
||||
|
||||
return amConfigPerOrg, nil
|
||||
}
|
||||
|
||||
// contactListToString creates a sorted string representation of a given map (set) of receiver names. Each name will be comma-separated and double-quoted. Names should not contain double quotes.
|
||||
func contactListToString(m map[string]any) string {
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, quote(k))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
return strings.Join(keys, ",")
|
||||
}
|
||||
|
||||
// quote will surround the given string in double quotes.
|
||||
func quote(s string) string {
|
||||
return `"` + s + `"`
|
||||
}
|
||||
|
||||
// getNotificationChannelMap returns a map of all channelUIDs to channel config as well as a separate map for just those channels that are default.
|
||||
// For any given Organization, all channels in defaultChannelsPerOrg should also exist in channelsPerOrg.
|
||||
func (m *migration) getNotificationChannelMap() (channelsPerOrg, defaultChannelsPerOrg, error) {
|
||||
q := `
|
||||
SELECT id,
|
||||
org_id,
|
||||
uid,
|
||||
name,
|
||||
type,
|
||||
disable_resolve_message,
|
||||
is_default,
|
||||
settings,
|
||||
secure_settings,
|
||||
send_reminder,
|
||||
frequency
|
||||
FROM
|
||||
alert_notification
|
||||
`
|
||||
allChannels := []notificationChannel{}
|
||||
err := m.sess.SQL(q).Find(&allChannels)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if len(allChannels) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
allChannelsMap := make(channelsPerOrg)
|
||||
defaultChannelsMap := make(defaultChannelsPerOrg)
|
||||
for i, c := range allChannels {
|
||||
if c.Type == "hipchat" || c.Type == "sensu" {
|
||||
m.mg.Logger.Error("Alert migration error: discontinued notification channel found", "type", c.Type, "name", c.Name, "uid", c.Uid)
|
||||
continue
|
||||
}
|
||||
|
||||
allChannelsMap[c.OrgID] = append(allChannelsMap[c.OrgID], &allChannels[i])
|
||||
|
||||
if c.IsDefault {
|
||||
defaultChannelsMap[c.OrgID] = append(defaultChannelsMap[c.OrgID], &allChannels[i])
|
||||
}
|
||||
}
|
||||
|
||||
return allChannelsMap, defaultChannelsMap, nil
|
||||
}
|
||||
|
||||
// Create a notifier (PostableGrafanaReceiver) from a legacy notification channel
|
||||
func (m *migration) createNotifier(c *notificationChannel) (*PostableGrafanaReceiver, error) {
|
||||
uid, err := m.determineChannelUid(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
settings, secureSettings, err := migrateSettingsToSecureSettings(c.Type, c.Settings, c.SecureSettings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PostableGrafanaReceiver{
|
||||
UID: uid,
|
||||
Name: c.Name,
|
||||
Type: c.Type,
|
||||
DisableResolveMessage: c.DisableResolveMessage,
|
||||
Settings: settings,
|
||||
SecureSettings: secureSettings,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Create one receiver for every unique notification channel.
|
||||
func (m *migration) createReceivers(allChannels []*notificationChannel) (map[uidOrID]*PostableApiReceiver, []channelReceiver, error) {
|
||||
receivers := make([]channelReceiver, 0, len(allChannels))
|
||||
receiversMap := make(map[uidOrID]*PostableApiReceiver)
|
||||
|
||||
set := make(map[string]struct{}) // Used to deduplicate sanitized names.
|
||||
for _, c := range allChannels {
|
||||
notifier, err := m.createNotifier(c)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// We remove double quotes because this character will be used as the separator in the ContactLabel. To prevent partial matches in the Route Matcher we choose to sanitize them early on instead of complicating the Matcher regex.
|
||||
sanitizedName := strings.ReplaceAll(c.Name, `"`, `_`)
|
||||
// There can be name collisions after we sanitize. We check for this and attempt to make the name unique again using a short hash of the original name.
|
||||
if _, ok := set[sanitizedName]; ok {
|
||||
sanitizedName = sanitizedName + fmt.Sprintf("_%.3x", md5.Sum([]byte(c.Name)))
|
||||
m.mg.Logger.Warn("Alert contains duplicate contact name after sanitization, appending unique suffix", "type", c.Type, "name", c.Name, "new_name", sanitizedName, "uid", c.Uid)
|
||||
}
|
||||
notifier.Name = sanitizedName
|
||||
|
||||
set[sanitizedName] = struct{}{}
|
||||
|
||||
cr := channelReceiver{
|
||||
channel: c,
|
||||
receiver: &PostableApiReceiver{
|
||||
Name: sanitizedName, // Channel name is unique within an Org.
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{notifier},
|
||||
},
|
||||
}
|
||||
|
||||
receivers = append(receivers, cr)
|
||||
|
||||
// Store receivers for creating routes from alert rules later.
|
||||
if c.Uid != "" {
|
||||
receiversMap[c.Uid] = cr.receiver
|
||||
}
|
||||
if c.ID != 0 {
|
||||
// In certain circumstances, the alert rule uses ID instead of uid. So, we add this to be able to lookup by ID in case.
|
||||
receiversMap[c.ID] = cr.receiver
|
||||
}
|
||||
}
|
||||
|
||||
return receiversMap, receivers, nil
|
||||
}
|
||||
|
||||
// Create the root-level route with the default receiver. If no new receiver is created specifically for the root-level route, the returned receiver will be nil.
|
||||
func (m *migration) createDefaultRouteAndReceiver(defaultChannels []*notificationChannel) (*PostableApiReceiver, *Route, error) {
|
||||
defaultReceiverName := "autogen-contact-point-default"
|
||||
defaultRoute := &Route{
|
||||
Receiver: defaultReceiverName,
|
||||
Routes: make([]*Route, 0),
|
||||
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel}, // To keep parity with pre-migration notifications.
|
||||
RepeatInterval: nil,
|
||||
}
|
||||
newDefaultReceiver := &PostableApiReceiver{
|
||||
Name: defaultReceiverName,
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
}
|
||||
|
||||
// Return early if there are no default channels
|
||||
if len(defaultChannels) == 0 {
|
||||
return newDefaultReceiver, defaultRoute, nil
|
||||
}
|
||||
|
||||
repeatInterval := DisabledRepeatInterval // If no channels have SendReminders enabled, we will use this large value as a pseudo-disable.
|
||||
if len(defaultChannels) > 1 {
|
||||
// If there are more than one default channels we create a separate contact group that is used only in the root policy. This is to simplify the migrated notification policy structure.
|
||||
// If we ever allow more than one receiver per route this won't be necessary.
|
||||
for _, c := range defaultChannels {
|
||||
// Need to create a new notifier to prevent uid conflict.
|
||||
defaultNotifier, err := m.createNotifier(c)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
newDefaultReceiver.GrafanaManagedReceivers = append(newDefaultReceiver.GrafanaManagedReceivers, defaultNotifier)
|
||||
|
||||
// Choose the lowest send reminder duration from all the notifiers to use for default route.
|
||||
if c.SendReminder && c.Frequency < repeatInterval {
|
||||
repeatInterval = c.Frequency
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If there is only a single default channel, we don't need a separate receiver to hold it. We can reuse the existing receiver for that single notifier.
|
||||
defaultRoute.Receiver = defaultChannels[0].Name
|
||||
if defaultChannels[0].SendReminder {
|
||||
repeatInterval = defaultChannels[0].Frequency
|
||||
}
|
||||
|
||||
// No need to create a new receiver.
|
||||
newDefaultReceiver = nil
|
||||
}
|
||||
defaultRoute.RepeatInterval = &repeatInterval
|
||||
|
||||
return newDefaultReceiver, defaultRoute, nil
|
||||
}
|
||||
|
||||
// Create one route per contact point, matching based on ContactLabel.
|
||||
func createRoute(cr channelReceiver) (*Route, error) {
|
||||
// We create a regex matcher so that each alert rule need only have a single ContactLabel entry for all contact points it sends to.
|
||||
// For example, if an alert needs to send to contact1 and contact2 it will have ContactLabel=`"contact1","contact2"` and will match both routes looking
|
||||
// for `.*"contact1".*` and `.*"contact2".*`.
|
||||
|
||||
// We quote and escape here to ensure the regex will correctly match the ContactLabel on the alerts.
|
||||
name := fmt.Sprintf(`.*%s.*`, regexp.QuoteMeta(quote(cr.receiver.Name)))
|
||||
mat, err := labels.NewMatcher(labels.MatchRegexp, ContactLabel, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repeatInterval := DisabledRepeatInterval
|
||||
if cr.channel.SendReminder {
|
||||
repeatInterval = cr.channel.Frequency
|
||||
}
|
||||
|
||||
return &Route{
|
||||
Receiver: cr.receiver.Name,
|
||||
ObjectMatchers: ObjectMatchers{mat},
|
||||
Continue: true, // We continue so that each sibling contact point route can separately match.
|
||||
RepeatInterval: &repeatInterval,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Filter receivers to select those that were associated to the given rule as channels.
|
||||
func (m *migration) filterReceiversForAlert(name string, channelIDs []uidOrID, receivers map[uidOrID]*PostableApiReceiver, defaultReceivers map[string]struct{}) map[string]any {
|
||||
if len(channelIDs) == 0 {
|
||||
// If there are no channels associated, we use the default route.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filter receiver names.
|
||||
filteredReceiverNames := make(map[string]any)
|
||||
for _, uidOrId := range channelIDs {
|
||||
recv, ok := receivers[uidOrId]
|
||||
if ok {
|
||||
filteredReceiverNames[recv.Name] = struct{}{} // Deduplicate on contact point name.
|
||||
} else {
|
||||
m.mg.Logger.Warn("Alert linked to obsolete notification channel, ignoring", "alert", name, "uid", uidOrId)
|
||||
}
|
||||
}
|
||||
|
||||
coveredByDefault := func(names map[string]any) bool {
|
||||
// Check if all receivers are also default ones and if so, just use the default route.
|
||||
for n := range names {
|
||||
if _, ok := defaultReceivers[n]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if len(filteredReceiverNames) == 0 || coveredByDefault(filteredReceiverNames) {
|
||||
// Use the default route instead.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add default receivers alongside rule-specific ones.
|
||||
for n := range defaultReceivers {
|
||||
filteredReceiverNames[n] = struct{}{}
|
||||
}
|
||||
|
||||
return filteredReceiverNames
|
||||
}
|
||||
|
||||
func (m *migration) determineChannelUid(c *notificationChannel) (string, error) {
|
||||
legacyUid := c.Uid
|
||||
if legacyUid == "" {
|
||||
newUid, err := m.seenUIDs.generateUid()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
m.mg.Logger.Info("Legacy notification had an empty uid, generating a new one", "id", c.ID, "uid", newUid)
|
||||
return newUid, nil
|
||||
}
|
||||
|
||||
if m.seenUIDs.contains(legacyUid) {
|
||||
newUid, err := m.seenUIDs.generateUid()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
m.mg.Logger.Warn("Legacy notification had a UID that collides with a migrated record, generating a new one", "id", c.ID, "old", legacyUid, "new", newUid)
|
||||
return newUid, nil
|
||||
}
|
||||
|
||||
return legacyUid, nil
|
||||
}
|
||||
|
||||
// Some settings were migrated from settings to secure settings in between.
|
||||
// See https://grafana.com/docs/grafana/latest/installation/upgrading/#ensure-encryption-of-existing-alert-notification-channel-secrets.
|
||||
// migrateSettingsToSecureSettings takes care of that.
|
||||
func migrateSettingsToSecureSettings(chanType string, settings *simplejson.Json, secureSettings SecureJsonData) (*simplejson.Json, map[string]string, error) {
|
||||
keys := []string{}
|
||||
switch chanType {
|
||||
case "slack":
|
||||
keys = []string{"url", "token"}
|
||||
case "pagerduty":
|
||||
keys = []string{"integrationKey"}
|
||||
case "webhook":
|
||||
keys = []string{"password"}
|
||||
case "prometheus-alertmanager":
|
||||
keys = []string{"basicAuthPassword"}
|
||||
case "opsgenie":
|
||||
keys = []string{"apiKey"}
|
||||
case "telegram":
|
||||
keys = []string{"bottoken"}
|
||||
case "line":
|
||||
keys = []string{"token"}
|
||||
case "pushover":
|
||||
keys = []string{"apiToken", "userKey"}
|
||||
case "threema":
|
||||
keys = []string{"api_secret"}
|
||||
}
|
||||
|
||||
newSecureSettings := secureSettings.Decrypt()
|
||||
cloneSettings := simplejson.New()
|
||||
settingsMap, err := settings.Map()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for k, v := range settingsMap {
|
||||
cloneSettings.Set(k, v)
|
||||
}
|
||||
for _, k := range keys {
|
||||
if v, ok := newSecureSettings[k]; ok && v != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
sv := cloneSettings.Get(k).MustString()
|
||||
if sv != "" {
|
||||
newSecureSettings[k] = sv
|
||||
cloneSettings.Del(k)
|
||||
}
|
||||
}
|
||||
|
||||
encryptedData := GetEncryptedJsonData(newSecureSettings)
|
||||
for k, v := range encryptedData {
|
||||
newSecureSettings[k] = base64.StdEncoding.EncodeToString(v)
|
||||
}
|
||||
|
||||
return cloneSettings, newSecureSettings, nil
|
||||
}
|
||||
|
||||
// Below is a snapshot of all the config and supporting functions imported
|
||||
// to avoid vendoring those packages.
|
||||
|
||||
type PostableUserConfig struct {
|
||||
TemplateFiles map[string]string `yaml:"template_files" json:"template_files"`
|
||||
AlertmanagerConfig PostableApiAlertingConfig `yaml:"alertmanager_config" json:"alertmanager_config"`
|
||||
}
|
||||
|
||||
type amConfigsPerOrg = map[int64]*PostableUserConfig
|
||||
|
||||
type PostableApiAlertingConfig struct {
|
||||
Route *Route `yaml:"route,omitempty" json:"route,omitempty"`
|
||||
Templates []string `yaml:"templates" json:"templates"`
|
||||
Receivers []*PostableApiReceiver `yaml:"receivers,omitempty" json:"receivers,omitempty"`
|
||||
}
|
||||
|
||||
type Route struct {
|
||||
Receiver string `yaml:"receiver,omitempty" json:"receiver,omitempty"`
|
||||
ObjectMatchers ObjectMatchers `yaml:"object_matchers,omitempty" json:"object_matchers,omitempty"`
|
||||
Routes []*Route `yaml:"routes,omitempty" json:"routes,omitempty"`
|
||||
Continue bool `yaml:"continue,omitempty" json:"continue,omitempty"`
|
||||
GroupByStr []string `yaml:"group_by,omitempty" json:"group_by,omitempty"`
|
||||
RepeatInterval *model.Duration `yaml:"repeat_interval,omitempty" json:"repeat_interval,omitempty"`
|
||||
}
|
||||
|
||||
type ObjectMatchers labels.Matchers
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface for Matchers. Vendored from definitions.ObjectMatchers.
|
||||
func (m ObjectMatchers) MarshalJSON() ([]byte, error) {
|
||||
if len(m) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
result := make([][3]string, len(m))
|
||||
for i, matcher := range m {
|
||||
result[i] = [3]string{matcher.Name, matcher.Type.String(), matcher.Value}
|
||||
}
|
||||
return json.Marshal(result)
|
||||
}
|
||||
|
||||
type PostableApiReceiver struct {
|
||||
Name string `yaml:"name" json:"name"`
|
||||
GrafanaManagedReceivers []*PostableGrafanaReceiver `yaml:"grafana_managed_receiver_configs,omitempty" json:"grafana_managed_receiver_configs,omitempty"`
|
||||
}
|
||||
|
||||
type PostableGrafanaReceiver CreateAlertNotificationCommand
|
||||
|
||||
type CreateAlertNotificationCommand struct {
|
||||
UID string `json:"uid"`
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
DisableResolveMessage bool `json:"disableResolveMessage"`
|
||||
Settings *simplejson.Json `json:"settings"`
|
||||
SecureSettings map[string]string `json:"secureSettings"`
|
||||
}
|
||||
471
pkg/services/sqlstore/migrations/ualert/channel_test.go
Normal file
471
pkg/services/sqlstore/migrations/ualert/channel_test.go
Normal file
@@ -0,0 +1,471 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/prometheus/alertmanager/pkg/labels"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
ngModels "github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
)
|
||||
|
||||
func TestFilterReceiversForAlert(t *testing.T) {
|
||||
tc := []struct {
|
||||
name string
|
||||
channelIds []uidOrID
|
||||
receivers map[uidOrID]*PostableApiReceiver
|
||||
defaultReceivers map[string]struct{}
|
||||
expected map[string]any
|
||||
}{
|
||||
{
|
||||
name: "when an alert has multiple channels, each should filter for the correct receiver",
|
||||
channelIds: []uidOrID{"uid1", "uid2"},
|
||||
receivers: map[uidOrID]*PostableApiReceiver{
|
||||
"uid1": {
|
||||
Name: "recv1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
"uid2": {
|
||||
Name: "recv2",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
"uid3": {
|
||||
Name: "recv3",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
},
|
||||
defaultReceivers: map[string]struct{}{},
|
||||
expected: map[string]any{
|
||||
"recv1": struct{}{},
|
||||
"recv2": struct{}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when default receivers exist, they should be added to an alert's filtered receivers",
|
||||
channelIds: []uidOrID{"uid1"},
|
||||
receivers: map[uidOrID]*PostableApiReceiver{
|
||||
"uid1": {
|
||||
Name: "recv1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
"uid2": {
|
||||
Name: "recv2",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
"uid3": {
|
||||
Name: "recv3",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
},
|
||||
defaultReceivers: map[string]struct{}{
|
||||
"recv2": {},
|
||||
},
|
||||
expected: map[string]any{
|
||||
"recv1": struct{}{}, // From alert
|
||||
"recv2": struct{}{}, // From default
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when an alert has a channels associated by ID instead of UID, it should be included",
|
||||
channelIds: []uidOrID{int64(42)},
|
||||
receivers: map[uidOrID]*PostableApiReceiver{
|
||||
int64(42): {
|
||||
Name: "recv1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
},
|
||||
defaultReceivers: map[string]struct{}{},
|
||||
expected: map[string]any{
|
||||
"recv1": struct{}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when an alert's receivers are covered by the defaults, return nil to use default receiver downstream",
|
||||
channelIds: []uidOrID{"uid1"},
|
||||
receivers: map[uidOrID]*PostableApiReceiver{
|
||||
"uid1": {
|
||||
Name: "recv1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
"uid2": {
|
||||
Name: "recv2",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
"uid3": {
|
||||
Name: "recv3",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
},
|
||||
defaultReceivers: map[string]struct{}{
|
||||
"recv1": {},
|
||||
"recv2": {},
|
||||
},
|
||||
expected: nil, // recv1 is already a default
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
res := m.filterReceiversForAlert("", tt.channelIds, tt.receivers, tt.defaultReceivers)
|
||||
|
||||
require.Equal(t, tt.expected, res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateRoute(t *testing.T) {
|
||||
tc := []struct {
|
||||
name string
|
||||
channel *notificationChannel
|
||||
recv *PostableApiReceiver
|
||||
expected *Route
|
||||
}{
|
||||
{
|
||||
name: "when a receiver is passed in, the route should regex match based on quoted name with continue=true",
|
||||
channel: ¬ificationChannel{},
|
||||
recv: &PostableApiReceiver{
|
||||
Name: "recv1",
|
||||
},
|
||||
expected: &Route{
|
||||
Receiver: "recv1",
|
||||
ObjectMatchers: ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"recv1".*`}},
|
||||
Routes: nil,
|
||||
Continue: true,
|
||||
GroupByStr: nil,
|
||||
RepeatInterval: durationPointer(DisabledRepeatInterval),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "notification channel should be escaped for regex in the matcher",
|
||||
channel: ¬ificationChannel{},
|
||||
recv: &PostableApiReceiver{
|
||||
Name: `. ^ $ * + - ? ( ) [ ] { } \ |`,
|
||||
},
|
||||
expected: &Route{
|
||||
Receiver: `. ^ $ * + - ? ( ) [ ] { } \ |`,
|
||||
ObjectMatchers: ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"\. \^ \$ \* \+ - \? \( \) \[ \] \{ \} \\ \|".*`}},
|
||||
Routes: nil,
|
||||
Continue: true,
|
||||
GroupByStr: nil,
|
||||
RepeatInterval: durationPointer(DisabledRepeatInterval),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when a channel has sendReminder=true, the route should use the frequency in repeat interval",
|
||||
channel: ¬ificationChannel{SendReminder: true, Frequency: model.Duration(time.Duration(42) * time.Hour)},
|
||||
recv: &PostableApiReceiver{
|
||||
Name: "recv1",
|
||||
},
|
||||
expected: &Route{
|
||||
Receiver: "recv1",
|
||||
ObjectMatchers: ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"recv1".*`}},
|
||||
Routes: nil,
|
||||
Continue: true,
|
||||
GroupByStr: nil,
|
||||
RepeatInterval: durationPointer(model.Duration(time.Duration(42) * time.Hour)),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when a channel has sendReminder=false, the route should ignore the frequency in repeat interval and use DisabledRepeatInterval",
|
||||
channel: ¬ificationChannel{SendReminder: false, Frequency: model.Duration(time.Duration(42) * time.Hour)},
|
||||
recv: &PostableApiReceiver{
|
||||
Name: "recv1",
|
||||
},
|
||||
expected: &Route{
|
||||
Receiver: "recv1",
|
||||
ObjectMatchers: ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"recv1".*`}},
|
||||
Routes: nil,
|
||||
Continue: true,
|
||||
GroupByStr: nil,
|
||||
RepeatInterval: durationPointer(DisabledRepeatInterval),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
res, err := createRoute(channelReceiver{
|
||||
channel: tt.channel,
|
||||
receiver: tt.recv,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Order of nested routes is not guaranteed.
|
||||
cOpt := []cmp.Option{
|
||||
cmpopts.SortSlices(func(a, b *Route) bool {
|
||||
if a.Receiver != b.Receiver {
|
||||
return a.Receiver < b.Receiver
|
||||
}
|
||||
return a.ObjectMatchers[0].Value < b.ObjectMatchers[0].Value
|
||||
}),
|
||||
cmpopts.IgnoreUnexported(Route{}, labels.Matcher{}),
|
||||
}
|
||||
|
||||
if !cmp.Equal(tt.expected, res, cOpt...) {
|
||||
t.Errorf("Unexpected Route: %v", cmp.Diff(tt.expected, res, cOpt...))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func createNotChannel(t *testing.T, uid string, id int64, name string) *notificationChannel {
|
||||
t.Helper()
|
||||
return ¬ificationChannel{Uid: uid, ID: id, Name: name, Settings: simplejson.New()}
|
||||
}
|
||||
|
||||
func createNotChannelWithReminder(t *testing.T, uid string, id int64, name string, frequency model.Duration) *notificationChannel {
|
||||
t.Helper()
|
||||
return ¬ificationChannel{Uid: uid, ID: id, Name: name, SendReminder: true, Frequency: frequency, Settings: simplejson.New()}
|
||||
}
|
||||
|
||||
func TestCreateReceivers(t *testing.T) {
|
||||
tc := []struct {
|
||||
name string
|
||||
allChannels []*notificationChannel
|
||||
defaultChannels []*notificationChannel
|
||||
expRecvMap map[uidOrID]*PostableApiReceiver
|
||||
expRecv []channelReceiver
|
||||
expErr error
|
||||
}{
|
||||
{
|
||||
name: "when given notification channels migrate them to receivers",
|
||||
allChannels: []*notificationChannel{createNotChannel(t, "uid1", int64(1), "name1"), createNotChannel(t, "uid2", int64(2), "name2")},
|
||||
expRecvMap: map[uidOrID]*PostableApiReceiver{
|
||||
"uid1": {
|
||||
Name: "name1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name1"}},
|
||||
},
|
||||
"uid2": {
|
||||
Name: "name2",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name2"}},
|
||||
},
|
||||
int64(1): {
|
||||
Name: "name1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name1"}},
|
||||
},
|
||||
int64(2): {
|
||||
Name: "name2",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name2"}},
|
||||
},
|
||||
},
|
||||
expRecv: []channelReceiver{
|
||||
{
|
||||
channel: createNotChannel(t, "uid1", int64(1), "name1"),
|
||||
receiver: &PostableApiReceiver{
|
||||
Name: "name1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
channel: createNotChannel(t, "uid2", int64(2), "name2"),
|
||||
receiver: &PostableApiReceiver{
|
||||
Name: "name2",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when given notification channel contains double quote sanitize with underscore",
|
||||
allChannels: []*notificationChannel{createNotChannel(t, "uid1", int64(1), "name\"1")},
|
||||
expRecvMap: map[uidOrID]*PostableApiReceiver{
|
||||
"uid1": {
|
||||
Name: "name_1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1"}},
|
||||
},
|
||||
int64(1): {
|
||||
Name: "name_1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1"}},
|
||||
},
|
||||
},
|
||||
expRecv: []channelReceiver{
|
||||
{
|
||||
channel: createNotChannel(t, "uid1", int64(1), "name\"1"),
|
||||
receiver: &PostableApiReceiver{
|
||||
Name: "name_1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when given notification channels collide after sanitization add short hash to end",
|
||||
allChannels: []*notificationChannel{createNotChannel(t, "uid1", int64(1), "name\"1"), createNotChannel(t, "uid2", int64(2), "name_1")},
|
||||
expRecvMap: map[uidOrID]*PostableApiReceiver{
|
||||
"uid1": {
|
||||
Name: "name_1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1"}},
|
||||
},
|
||||
"uid2": {
|
||||
Name: "name_1_dba13d",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1_dba13d"}},
|
||||
},
|
||||
int64(1): {
|
||||
Name: "name_1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1"}},
|
||||
},
|
||||
int64(2): {
|
||||
Name: "name_1_dba13d",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1_dba13d"}},
|
||||
},
|
||||
},
|
||||
expRecv: []channelReceiver{
|
||||
{
|
||||
channel: createNotChannel(t, "uid1", int64(1), "name\"1"),
|
||||
receiver: &PostableApiReceiver{
|
||||
Name: "name_1",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
channel: createNotChannel(t, "uid2", int64(2), "name_1"),
|
||||
receiver: &PostableApiReceiver{
|
||||
Name: "name_1_dba13d",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name_1_dba13d"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
recvMap, recvs, err := m.createReceivers(tt.allChannels)
|
||||
if tt.expErr != nil {
|
||||
require.Error(t, err)
|
||||
require.EqualError(t, err, tt.expErr.Error())
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// We ignore certain fields for the purposes of this test
|
||||
for _, recv := range recvs {
|
||||
for _, not := range recv.receiver.GrafanaManagedReceivers {
|
||||
not.UID = ""
|
||||
not.Settings = nil
|
||||
not.SecureSettings = nil
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expRecvMap, recvMap)
|
||||
require.ElementsMatch(t, tt.expRecv, recvs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateDefaultRouteAndReceiver(t *testing.T) {
|
||||
tc := []struct {
|
||||
name string
|
||||
amConfig *PostableUserConfig
|
||||
defaultChannels []*notificationChannel
|
||||
expRecv *PostableApiReceiver
|
||||
expRoute *Route
|
||||
expErr error
|
||||
}{
|
||||
{
|
||||
name: "when given multiple default notification channels migrate them to a single receiver",
|
||||
defaultChannels: []*notificationChannel{createNotChannel(t, "uid1", int64(1), "name1"), createNotChannel(t, "uid2", int64(2), "name2")},
|
||||
expRecv: &PostableApiReceiver{
|
||||
Name: "autogen-contact-point-default",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name1"}, {Name: "name2"}},
|
||||
},
|
||||
expRoute: &Route{
|
||||
Receiver: "autogen-contact-point-default",
|
||||
Routes: make([]*Route, 0),
|
||||
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
|
||||
RepeatInterval: durationPointer(DisabledRepeatInterval),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when given multiple default notification channels migrate them to a single receiver with RepeatInterval set to be the minimum of all channel frequencies",
|
||||
defaultChannels: []*notificationChannel{
|
||||
createNotChannelWithReminder(t, "uid1", int64(1), "name1", model.Duration(42)),
|
||||
createNotChannelWithReminder(t, "uid2", int64(2), "name2", model.Duration(100000)),
|
||||
},
|
||||
expRecv: &PostableApiReceiver{
|
||||
Name: "autogen-contact-point-default",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{{Name: "name1"}, {Name: "name2"}},
|
||||
},
|
||||
expRoute: &Route{
|
||||
Receiver: "autogen-contact-point-default",
|
||||
Routes: make([]*Route, 0),
|
||||
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
|
||||
RepeatInterval: durationPointer(model.Duration(42)),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when given no default notification channels create a single empty receiver for default",
|
||||
defaultChannels: []*notificationChannel{},
|
||||
expRecv: &PostableApiReceiver{
|
||||
Name: "autogen-contact-point-default",
|
||||
GrafanaManagedReceivers: []*PostableGrafanaReceiver{},
|
||||
},
|
||||
expRoute: &Route{
|
||||
Receiver: "autogen-contact-point-default",
|
||||
Routes: make([]*Route, 0),
|
||||
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
|
||||
RepeatInterval: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when given a single default notification channels don't create a new default receiver",
|
||||
defaultChannels: []*notificationChannel{createNotChannel(t, "uid1", int64(1), "name1")},
|
||||
expRecv: nil,
|
||||
expRoute: &Route{
|
||||
Receiver: "name1",
|
||||
Routes: make([]*Route, 0),
|
||||
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
|
||||
RepeatInterval: durationPointer(DisabledRepeatInterval),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "when given a single default notification channel with SendReminder=true, use the channels Frequency as the RepeatInterval",
|
||||
defaultChannels: []*notificationChannel{createNotChannelWithReminder(t, "uid1", int64(1), "name1", model.Duration(42))},
|
||||
expRecv: nil,
|
||||
expRoute: &Route{
|
||||
Receiver: "name1",
|
||||
Routes: make([]*Route, 0),
|
||||
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
|
||||
RepeatInterval: durationPointer(model.Duration(42)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
m := newTestMigration(t)
|
||||
recv, route, err := m.createDefaultRouteAndReceiver(tt.defaultChannels)
|
||||
if tt.expErr != nil {
|
||||
require.Error(t, err)
|
||||
require.EqualError(t, err, tt.expErr.Error())
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// We ignore certain fields for the purposes of this test
|
||||
if recv != nil {
|
||||
for _, not := range recv.GrafanaManagedReceivers {
|
||||
not.UID = ""
|
||||
not.Settings = nil
|
||||
not.SecureSettings = nil
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expRecv, recv)
|
||||
require.Equal(t, tt.expRoute, route)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func durationPointer(d model.Duration) *model.Duration {
|
||||
return &d
|
||||
}
|
||||
339
pkg/services/sqlstore/migrations/ualert/cond_trans.go
Normal file
339
pkg/services/sqlstore/migrations/ualert/cond_trans.go
Normal file
@@ -0,0 +1,339 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/tsdb/legacydata"
|
||||
"github.com/grafana/grafana/pkg/tsdb/legacydata/interval"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
func transConditions(set dashAlertSettings, orgID int64, dsUIDMap dsUIDLookup) (*condition, error) {
|
||||
refIDtoCondIdx := make(map[string][]int) // a map of original refIds to their corresponding condition index
|
||||
for i, cond := range set.Conditions {
|
||||
if len(cond.Query.Params) != 3 {
|
||||
return nil, fmt.Errorf("unexpected number of query parameters in cond %v, want 3 got %v", i+1, len(cond.Query.Params))
|
||||
}
|
||||
refID := cond.Query.Params[0]
|
||||
refIDtoCondIdx[refID] = append(refIDtoCondIdx[refID], i)
|
||||
}
|
||||
|
||||
newRefIDstoCondIdx := make(map[string][]int) // a map of the new refIds to their coresponding condition index
|
||||
|
||||
refIDs := make([]string, 0, len(refIDtoCondIdx)) // a unique sorted list of the original refIDs
|
||||
for refID := range refIDtoCondIdx {
|
||||
refIDs = append(refIDs, refID)
|
||||
}
|
||||
sort.Strings(refIDs)
|
||||
|
||||
newRefIDsToTimeRanges := make(map[string][2]string) // a map of new RefIDs to their time range string tuple representation
|
||||
for _, refID := range refIDs {
|
||||
condIdxes := refIDtoCondIdx[refID]
|
||||
|
||||
if len(condIdxes) == 1 {
|
||||
// If the refID is used in only condition, keep the letter a new refID
|
||||
newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[0])
|
||||
newRefIDsToTimeRanges[refID] = [2]string{set.Conditions[condIdxes[0]].Query.Params[1], set.Conditions[condIdxes[0]].Query.Params[2]}
|
||||
continue
|
||||
}
|
||||
|
||||
// track unique time ranges within the same refID
|
||||
timeRangesToCondIdx := make(map[[2]string][]int) // a map of the time range tuple to the condition index
|
||||
for _, idx := range condIdxes {
|
||||
timeParamFrom := set.Conditions[idx].Query.Params[1]
|
||||
timeParamTo := set.Conditions[idx].Query.Params[2]
|
||||
key := [2]string{timeParamFrom, timeParamTo}
|
||||
timeRangesToCondIdx[key] = append(timeRangesToCondIdx[key], idx)
|
||||
}
|
||||
|
||||
if len(timeRangesToCondIdx) == 1 {
|
||||
// if all shared time range, no need to create a new query with a new RefID
|
||||
for i := range condIdxes {
|
||||
newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[i])
|
||||
newRefIDsToTimeRanges[refID] = [2]string{set.Conditions[condIdxes[i]].Query.Params[1], set.Conditions[condIdxes[i]].Query.Params[2]}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This referenced query/refID has different time ranges, so new queries are needed for each unique time range.
|
||||
timeRanges := make([][2]string, 0, len(timeRangesToCondIdx)) // a sorted list of unique time ranges for the query
|
||||
for tr := range timeRangesToCondIdx {
|
||||
timeRanges = append(timeRanges, tr)
|
||||
}
|
||||
|
||||
sort.Slice(timeRanges, func(i, j int) bool {
|
||||
switch {
|
||||
case timeRanges[i][0] < timeRanges[j][0]:
|
||||
return true
|
||||
case timeRanges[i][0] > timeRanges[j][0]:
|
||||
return false
|
||||
default:
|
||||
return timeRanges[i][1] < timeRanges[j][1]
|
||||
}
|
||||
})
|
||||
|
||||
for _, tr := range timeRanges {
|
||||
idxes := timeRangesToCondIdx[tr]
|
||||
for i := 0; i < len(idxes); i++ {
|
||||
newLetter, err := getNewRefID(newRefIDstoCondIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newRefIDstoCondIdx[newLetter] = append(newRefIDstoCondIdx[newLetter], idxes[i])
|
||||
newRefIDsToTimeRanges[newLetter] = [2]string{set.Conditions[idxes[i]].Query.Params[1], set.Conditions[idxes[i]].Query.Params[2]}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newRefIDs := make([]string, 0, len(newRefIDstoCondIdx)) // newRefIds is a sorted list of the unique refIds of new queries
|
||||
for refID := range newRefIDstoCondIdx {
|
||||
newRefIDs = append(newRefIDs, refID)
|
||||
}
|
||||
sort.Strings(newRefIDs)
|
||||
|
||||
newCond := &condition{}
|
||||
condIdxToNewRefID := make(map[int]string) // a map of condition indices to the RefIDs of new queries
|
||||
|
||||
// build the new data source queries
|
||||
for _, refID := range newRefIDs {
|
||||
condIdxes := newRefIDstoCondIdx[refID]
|
||||
for i, condIdx := range condIdxes {
|
||||
condIdxToNewRefID[condIdx] = refID
|
||||
if i > 0 {
|
||||
// only create each unique query once
|
||||
continue
|
||||
}
|
||||
|
||||
var queryObj map[string]any // copy the model
|
||||
err := json.Unmarshal(set.Conditions[condIdx].Query.Model, &queryObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var queryType string
|
||||
if v, ok := queryObj["queryType"]; ok {
|
||||
if s, ok := v.(string); ok {
|
||||
queryType = s
|
||||
}
|
||||
}
|
||||
|
||||
// one could have an alert saved but datasource deleted, so can not require match.
|
||||
dsUID := dsUIDMap.GetUID(orgID, set.Conditions[condIdx].Query.DatasourceID)
|
||||
queryObj["refId"] = refID
|
||||
|
||||
// See services/alerting/conditions/query.go's newQueryCondition
|
||||
queryObj["maxDataPoints"] = interval.DefaultRes
|
||||
|
||||
simpleJson, err := simplejson.NewJson(set.Conditions[condIdx].Query.Model)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawFrom := newRefIDsToTimeRanges[refID][0]
|
||||
rawTo := newRefIDsToTimeRanges[refID][1]
|
||||
calculatedInterval, err := calculateInterval(legacydata.NewDataTimeRange(rawFrom, rawTo), simpleJson, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
queryObj["intervalMs"] = calculatedInterval.Milliseconds()
|
||||
|
||||
encodedObj, err := json.Marshal(queryObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rTR, err := getRelativeDuration(rawFrom, rawTo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
alertQuery := alertQuery{
|
||||
RefID: refID,
|
||||
Model: encodedObj,
|
||||
RelativeTimeRange: *rTR,
|
||||
DatasourceUID: dsUID,
|
||||
QueryType: queryType,
|
||||
}
|
||||
newCond.Data = append(newCond.Data, alertQuery)
|
||||
}
|
||||
}
|
||||
|
||||
// build the new classic condition pointing our new equivalent queries
|
||||
conditions := make([]classicConditionJSON, len(set.Conditions))
|
||||
for i, cond := range set.Conditions {
|
||||
newCond := classicConditionJSON{}
|
||||
newCond.Evaluator = conditionEvalJSON{
|
||||
Type: cond.Evaluator.Type,
|
||||
Params: cond.Evaluator.Params,
|
||||
}
|
||||
newCond.Operator.Type = cond.Operator.Type
|
||||
newCond.Query.Params = append(newCond.Query.Params, condIdxToNewRefID[i])
|
||||
newCond.Reducer.Type = cond.Reducer.Type
|
||||
|
||||
conditions[i] = newCond
|
||||
}
|
||||
|
||||
ccRefID, err := getNewRefID(newRefIDstoCondIdx) // get refID for the classic condition
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newCond.Condition = ccRefID // set the alert condition to point to the classic condition
|
||||
newCond.OrgID = orgID
|
||||
|
||||
exprModel := struct {
|
||||
Type string `json:"type"`
|
||||
RefID string `json:"refId"`
|
||||
Conditions []classicConditionJSON `json:"conditions"`
|
||||
}{
|
||||
"classic_conditions",
|
||||
ccRefID,
|
||||
conditions,
|
||||
}
|
||||
|
||||
exprModelJSON, err := json.Marshal(&exprModel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ccAlertQuery := alertQuery{
|
||||
RefID: ccRefID,
|
||||
Model: exprModelJSON,
|
||||
DatasourceUID: expressionDatasourceUID,
|
||||
}
|
||||
|
||||
newCond.Data = append(newCond.Data, ccAlertQuery)
|
||||
|
||||
sort.Slice(newCond.Data, func(i, j int) bool {
|
||||
return newCond.Data[i].RefID < newCond.Data[j].RefID
|
||||
})
|
||||
|
||||
return newCond, nil
|
||||
}
|
||||
|
||||
type condition struct {
|
||||
// Condition is the RefID of the query or expression from
|
||||
// the Data property to get the results for.
|
||||
Condition string `json:"condition"`
|
||||
OrgID int64 `json:"-"`
|
||||
|
||||
// Data is an array of data source queries and/or server side expressions.
|
||||
Data []alertQuery `json:"data"`
|
||||
}
|
||||
|
||||
const alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
// getNewRefID finds first capital letter in the alphabet not in use
|
||||
// to use for a new RefID. It errors if it runs out of letters.
|
||||
func getNewRefID(refIDs map[string][]int) (string, error) {
|
||||
for _, r := range alpha {
|
||||
sR := string(r)
|
||||
if _, ok := refIDs[sR]; ok {
|
||||
continue
|
||||
}
|
||||
return sR, nil
|
||||
}
|
||||
for i := 0; i < 20; i++ {
|
||||
sR := util.GenerateShortUID()
|
||||
if _, ok := refIDs[sR]; ok {
|
||||
continue
|
||||
}
|
||||
return sR, nil
|
||||
}
|
||||
return "", fmt.Errorf("failed to generate unique RefID")
|
||||
}
|
||||
|
||||
// getRelativeDuration turns the alerting durations for dashboard conditions
|
||||
// into a relative time range.
|
||||
func getRelativeDuration(rawFrom, rawTo string) (*relativeTimeRange, error) {
|
||||
fromD, err := getFrom(rawFrom)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
toD, err := getTo(rawTo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &relativeTimeRange{
|
||||
From: duration(fromD),
|
||||
To: duration(toD),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getFrom(from string) (time.Duration, error) {
|
||||
fromRaw := strings.Replace(from, "now-", "", 1)
|
||||
|
||||
d, err := time.ParseDuration("-" + fromRaw)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return -d, err
|
||||
}
|
||||
|
||||
func getTo(to string) (time.Duration, error) {
|
||||
if to == "now" {
|
||||
return 0, nil
|
||||
} else if strings.HasPrefix(to, "now-") {
|
||||
withoutNow := strings.Replace(to, "now-", "", 1)
|
||||
|
||||
d, err := time.ParseDuration("-" + withoutNow)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return -d, nil
|
||||
}
|
||||
|
||||
d, err := time.ParseDuration(to)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return -d, nil
|
||||
}
|
||||
|
||||
type classicConditionJSON struct {
|
||||
Evaluator conditionEvalJSON `json:"evaluator"`
|
||||
|
||||
Operator struct {
|
||||
Type string `json:"type"`
|
||||
} `json:"operator"`
|
||||
|
||||
Query struct {
|
||||
Params []string `json:"params"`
|
||||
} `json:"query"`
|
||||
|
||||
Reducer struct {
|
||||
// Params []any `json:"params"` (Unused)
|
||||
Type string `json:"type"`
|
||||
} `json:"reducer"`
|
||||
}
|
||||
|
||||
// Copied from services/alerting/conditions/query.go's calculateInterval
|
||||
func calculateInterval(timeRange legacydata.DataTimeRange, model *simplejson.Json, dsInfo *datasources.DataSource) (time.Duration, error) {
|
||||
// if there is no min-interval specified in the datasource or in the dashboard-panel,
|
||||
// the value of 1ms is used (this is how it is done in the dashboard-interval-calculation too,
|
||||
// see https://github.com/grafana/grafana/blob/9a0040c0aeaae8357c650cec2ee644a571dddf3d/packages/grafana-data/src/datetime/rangeutil.ts#L264)
|
||||
defaultMinInterval := time.Millisecond * 1
|
||||
|
||||
// interval.GetIntervalFrom has two problems (but they do not affect us here):
|
||||
// - it returns the min-interval, so it should be called interval.GetMinIntervalFrom
|
||||
// - it falls back to model.intervalMs. it should not, because that one is the real final
|
||||
// interval-value calculated by the browser. but, in this specific case (old-alert),
|
||||
// that value is not set, so the fallback never happens.
|
||||
minInterval, err := interval.GetIntervalFrom(dsInfo, model, defaultMinInterval)
|
||||
|
||||
if err != nil {
|
||||
return time.Duration(0), err
|
||||
}
|
||||
|
||||
calc := interval.NewCalculator()
|
||||
|
||||
intvl := calc.Calculate(timeRange, minInterval)
|
||||
|
||||
return intvl.Value, nil
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"xorm.io/xorm"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
)
|
||||
|
||||
// stateKey is a vendored migrationStore.stateKey.
|
||||
var stateKey = "stateKey"
|
||||
|
||||
// CreatedFoldersMigration moves the record of created folders during legacy migration from Dashboard created_by=-8
|
||||
// to the kvstore. If there are no dashboards with created_by=-.8, then nothing needs to be done.
|
||||
func CreatedFoldersMigration(mg *migrator.Migrator) {
|
||||
mg.AddMigration("migrate record of created folders during legacy migration to kvstore", &createdFoldersToKVStore{})
|
||||
}
|
||||
|
||||
type createdFoldersToKVStore struct {
|
||||
migrator.MigrationBase
|
||||
}
|
||||
|
||||
func (c createdFoldersToKVStore) SQL(migrator.Dialect) string {
|
||||
return codeMigration
|
||||
}
|
||||
|
||||
func (c createdFoldersToKVStore) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
|
||||
var results []struct {
|
||||
UID string `xorm:"uid"`
|
||||
OrgID int64 `xorm:"org_id"`
|
||||
}
|
||||
folderCreatedBy := -8
|
||||
if err := sess.SQL("select * from dashboard where created_by = ?", folderCreatedBy).Find(&results); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
mg.Logger.Debug("no dashboards with created_by=-8, nothing to set in kvstore")
|
||||
return nil
|
||||
}
|
||||
|
||||
type orgMigrationState struct {
|
||||
OrgID int64 `json:"orgId"`
|
||||
CreatedFolders []string `json:"createdFolders"`
|
||||
}
|
||||
states := make(map[int64]*orgMigrationState)
|
||||
for _, r := range results {
|
||||
if _, ok := states[r.OrgID]; !ok {
|
||||
states[r.OrgID] = &orgMigrationState{
|
||||
OrgID: r.OrgID,
|
||||
CreatedFolders: []string{},
|
||||
}
|
||||
}
|
||||
states[r.OrgID].CreatedFolders = append(states[r.OrgID].CreatedFolders, r.UID)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
for _, state := range states {
|
||||
raw, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
orgId := state.OrgID
|
||||
entry := kvStoreV1Entry{
|
||||
OrgID: &orgId,
|
||||
Namespace: &KVNamespace,
|
||||
Key: &stateKey,
|
||||
Value: string(raw),
|
||||
Created: now,
|
||||
Updated: now,
|
||||
}
|
||||
if _, errCreate := sess.Table("kv_store").Insert(&entry); errCreate != nil {
|
||||
mg.Logger.Error("failed to insert record of created folders to kvstore", "err", errCreate)
|
||||
return fmt.Errorf("failed to insert record of created folders to kvstore: %w", errCreate)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
130
pkg/services/sqlstore/migrations/ualert/dash_alert.go
Normal file
130
pkg/services/sqlstore/migrations/ualert/dash_alert.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type dashAlert struct {
|
||||
Id int64
|
||||
OrgId int64
|
||||
DashboardId int64
|
||||
PanelId int64
|
||||
Name string
|
||||
Message string
|
||||
Frequency int64
|
||||
For time.Duration
|
||||
State string
|
||||
|
||||
Settings json.RawMessage
|
||||
ParsedSettings *dashAlertSettings
|
||||
DashboardUID string // Set from separate call
|
||||
}
|
||||
|
||||
var slurpDashSQL = `
|
||||
SELECT id,
|
||||
org_id,
|
||||
dashboard_id,
|
||||
panel_id,
|
||||
org_id,
|
||||
name,
|
||||
message,
|
||||
frequency,
|
||||
%s,
|
||||
state,
|
||||
settings
|
||||
FROM
|
||||
alert
|
||||
WHERE org_id IN (SELECT id from org)
|
||||
AND dashboard_id IN (SELECT id from dashboard)
|
||||
`
|
||||
|
||||
// slurpDashAlerts loads all alerts from the alert database table into
|
||||
// the dashAlert type. If there are alerts that belong to either organization or dashboard that does not exist, those alerts will not be returned/
|
||||
// Additionally it unmarshals the json settings for the alert into the
|
||||
// ParsedSettings property of the dash alert.
|
||||
func (m *migration) slurpDashAlerts() ([]dashAlert, error) {
|
||||
dashAlerts := []dashAlert{}
|
||||
err := m.sess.SQL(fmt.Sprintf(slurpDashSQL, m.mg.Dialect.Quote("for"))).Find(&dashAlerts)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range dashAlerts {
|
||||
err = json.Unmarshal(dashAlerts[i].Settings, &dashAlerts[i].ParsedSettings)
|
||||
if err != nil {
|
||||
da := dashAlerts[i]
|
||||
return nil, fmt.Errorf("failed to parse alert rule ID:%d, name:'%s', orgID:%d: %w", da.Id, da.Name, da.OrgId, err)
|
||||
}
|
||||
}
|
||||
|
||||
return dashAlerts, nil
|
||||
}
|
||||
|
||||
// dashAlertSettings is a type for the JSON that is in the settings field of
|
||||
// the alert table.
|
||||
type dashAlertSettings struct {
|
||||
NoDataState string `json:"noDataState"`
|
||||
ExecutionErrorState string `json:"executionErrorState"`
|
||||
Conditions []dashAlertCondition `json:"conditions"`
|
||||
AlertRuleTags any `json:"alertRuleTags"`
|
||||
Notifications []dashAlertNot `json:"notifications"`
|
||||
}
|
||||
|
||||
// dashAlertNot is the object that represents the Notifications array in
|
||||
// dashAlertSettings
|
||||
type dashAlertNot struct {
|
||||
UID string `json:"uid,omitempty"`
|
||||
ID int64 `json:"id,omitempty"`
|
||||
}
|
||||
|
||||
// dashAlertingConditionJSON is like classic.ClassicConditionJSON except that it
|
||||
// includes the model property with the query.
|
||||
type dashAlertCondition struct {
|
||||
Evaluator conditionEvalJSON `json:"evaluator"`
|
||||
|
||||
Operator struct {
|
||||
Type string `json:"type"`
|
||||
} `json:"operator"`
|
||||
|
||||
Query struct {
|
||||
Params []string `json:"params"`
|
||||
DatasourceID int64 `json:"datasourceId"`
|
||||
Model json.RawMessage
|
||||
} `json:"query"`
|
||||
|
||||
Reducer struct {
|
||||
// Params []any `json:"params"` (Unused)
|
||||
Type string `json:"type"`
|
||||
}
|
||||
}
|
||||
|
||||
type conditionEvalJSON struct {
|
||||
Params []float64 `json:"params"`
|
||||
Type string `json:"type"` // e.g. "gt"
|
||||
}
|
||||
|
||||
// slurpDashUIDs returns a map of [orgID, dashboardId] -> dashUID.
|
||||
func (m *migration) slurpDashUIDs() (map[[2]int64]string, error) {
|
||||
dashIDs := []struct {
|
||||
OrgID int64 `xorm:"org_id"`
|
||||
ID int64 `xorm:"id"`
|
||||
UID string `xorm:"uid"`
|
||||
}{}
|
||||
|
||||
err := m.sess.SQL(`SELECT org_id, id, uid FROM dashboard`).Find(&dashIDs)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
idToUID := make(map[[2]int64]string, len(dashIDs))
|
||||
|
||||
for _, ds := range dashIDs {
|
||||
idToUID[[2]int64{ds.OrgID, ds.ID}] = ds.UID
|
||||
}
|
||||
|
||||
return idToUID, nil
|
||||
}
|
||||
108
pkg/services/sqlstore/migrations/ualert/dashboard.go
Normal file
108
pkg/services/sqlstore/migrations/ualert/dashboard.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/infra/slugify"
|
||||
)
|
||||
|
||||
type dashboard struct {
|
||||
Id int64
|
||||
Uid string
|
||||
Slug string
|
||||
OrgId int64
|
||||
GnetId int64
|
||||
Version int
|
||||
PluginId string
|
||||
|
||||
Created time.Time
|
||||
Updated time.Time
|
||||
|
||||
UpdatedBy int64
|
||||
CreatedBy int64
|
||||
FolderId int64
|
||||
IsFolder bool
|
||||
HasACL bool `xorm:"has_acl"`
|
||||
|
||||
Title string
|
||||
Data *simplejson.Json
|
||||
}
|
||||
|
||||
func (d *dashboard) setUid(uid string) {
|
||||
d.Uid = uid
|
||||
d.Data.Set("uid", uid)
|
||||
}
|
||||
|
||||
func (d *dashboard) setVersion(version int) {
|
||||
d.Version = version
|
||||
d.Data.Set("version", version)
|
||||
}
|
||||
|
||||
// UpdateSlug updates the slug
|
||||
func (d *dashboard) updateSlug() {
|
||||
title := d.Data.Get("title").MustString()
|
||||
d.Slug = slugify.Slugify(title)
|
||||
}
|
||||
|
||||
func newDashboardFromJson(data *simplejson.Json) *dashboard {
|
||||
dash := &dashboard{}
|
||||
dash.Data = data
|
||||
dash.Title = dash.Data.Get("title").MustString()
|
||||
dash.updateSlug()
|
||||
update := false
|
||||
|
||||
if id, err := dash.Data.Get("id").Float64(); err == nil {
|
||||
dash.Id = int64(id)
|
||||
update = true
|
||||
}
|
||||
|
||||
if uid, err := dash.Data.Get("uid").String(); err == nil {
|
||||
dash.Uid = uid
|
||||
update = true
|
||||
}
|
||||
|
||||
if version, err := dash.Data.Get("version").Float64(); err == nil && update {
|
||||
dash.Version = int(version)
|
||||
dash.Updated = time.Now()
|
||||
} else {
|
||||
dash.Data.Set("version", 0)
|
||||
dash.Created = time.Now()
|
||||
dash.Updated = time.Now()
|
||||
}
|
||||
|
||||
if gnetId, err := dash.Data.Get("gnetId").Float64(); err == nil {
|
||||
dash.GnetId = int64(gnetId)
|
||||
}
|
||||
|
||||
return dash
|
||||
}
|
||||
|
||||
type saveFolderCommand struct {
|
||||
Dashboard *simplejson.Json `json:"dashboard" binding:"Required"`
|
||||
UserId int64 `json:"userId"`
|
||||
Message string `json:"message"`
|
||||
OrgId int64 `json:"-"`
|
||||
RestoredFrom int `json:"-"`
|
||||
PluginId string `json:"-"`
|
||||
FolderId int64 `json:"folderId"`
|
||||
IsFolder bool `json:"isFolder"`
|
||||
}
|
||||
|
||||
// GetDashboardModel turns the command into the saveable model
|
||||
func (cmd *saveFolderCommand) getDashboardModel() *dashboard {
|
||||
dash := newDashboardFromJson(cmd.Dashboard)
|
||||
userId := cmd.UserId
|
||||
|
||||
if userId == 0 {
|
||||
userId = -1
|
||||
}
|
||||
|
||||
dash.UpdatedBy = userId
|
||||
dash.OrgId = cmd.OrgId
|
||||
dash.PluginId = cmd.PluginId
|
||||
dash.IsFolder = cmd.IsFolder
|
||||
dash.FolderId = cmd.FolderId
|
||||
dash.updateSlug()
|
||||
return dash
|
||||
}
|
||||
31
pkg/services/sqlstore/migrations/ualert/datasource.go
Normal file
31
pkg/services/sqlstore/migrations/ualert/datasource.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package ualert
|
||||
|
||||
type dsUIDLookup map[[2]int64]string
|
||||
|
||||
// GetUID fetch thes datasource UID based on orgID+datasourceID
|
||||
func (d dsUIDLookup) GetUID(orgID, datasourceID int64) string {
|
||||
return d[[2]int64{orgID, datasourceID}]
|
||||
}
|
||||
|
||||
// slurpDSIDs returns a map of [orgID, dataSourceId] -> UID.
|
||||
func (m *migration) slurpDSIDs() (dsUIDLookup, error) {
|
||||
dsIDs := []struct {
|
||||
OrgID int64 `xorm:"org_id"`
|
||||
ID int64 `xorm:"id"`
|
||||
UID string `xorm:"uid"`
|
||||
}{}
|
||||
|
||||
err := m.sess.SQL(`SELECT org_id, id, uid FROM data_source`).Find(&dsIDs)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
idToUID := make(dsUIDLookup, len(dsIDs))
|
||||
|
||||
for _, ds := range dsIDs {
|
||||
idToUID[[2]int64{ds.OrgID, ds.ID}] = ds.UID
|
||||
}
|
||||
|
||||
return idToUID, nil
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"xorm.io/xorm"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
)
|
||||
|
||||
// KVNamespace is a vendored migration.KVNamespace.
|
||||
var KVNamespace = "ngalert.migration"
|
||||
|
||||
// migratedKey is a vendored migration.migratedKey.
|
||||
var migratedKey = "migrated"
|
||||
|
||||
// MigrationServiceMigration moves the legacy alert migration status from the migration log to kvstore.
|
||||
func MigrationServiceMigration(mg *migrator.Migrator) {
|
||||
mg.AddMigration("set legacy alert migration status in kvstore", &migrationLogToKVStore{})
|
||||
}
|
||||
|
||||
type migrationLogToKVStore struct {
|
||||
migrator.MigrationBase
|
||||
}
|
||||
|
||||
func (c migrationLogToKVStore) SQL(migrator.Dialect) string {
|
||||
return codeMigration
|
||||
}
|
||||
|
||||
func (c migrationLogToKVStore) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
|
||||
logs, err := mg.GetMigrationLog()
|
||||
if err != nil {
|
||||
mg.Logger.Error("alert migration failure: could not get migration log", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
_, migrationRun := logs[migTitle]
|
||||
|
||||
var anyOrg int64 = 0
|
||||
now := time.Now()
|
||||
entry := kvStoreV1Entry{
|
||||
OrgID: &anyOrg,
|
||||
Namespace: &KVNamespace,
|
||||
Key: &migratedKey,
|
||||
Value: strconv.FormatBool(migrationRun),
|
||||
Created: now,
|
||||
Updated: now,
|
||||
}
|
||||
if _, errCreate := sess.Table("kv_store").Insert(&entry); errCreate != nil {
|
||||
mg.Logger.Error("failed to insert migration status to kvstore", "err", errCreate)
|
||||
return fmt.Errorf("failed to insert migration status to kvstore: %w", errCreate)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// kvStoreV1Entry is a vendored kvstore.Item.
|
||||
type kvStoreV1Entry struct {
|
||||
ID int64 `xorm:"pk autoincr 'id'"`
|
||||
OrgID *int64 `xorm:"org_id"`
|
||||
Namespace *string `xorm:"namespace"`
|
||||
Key *string `xorm:"key"`
|
||||
Value string `xorm:"value"`
|
||||
|
||||
Created time.Time `xorm:"created"`
|
||||
Updated time.Time `xorm:"updated"`
|
||||
}
|
||||
847
pkg/services/sqlstore/migrations/ualert/migration_test.go
Normal file
847
pkg/services/sqlstore/migrations/ualert/migration_test.go
Normal file
@@ -0,0 +1,847 @@
|
||||
package ualert_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/prometheus/alertmanager/pkg/labels"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/ini.v1"
|
||||
"xorm.io/xorm"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/services/alerting/models"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
ngModels "github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
"github.com/grafana/grafana/pkg/services/org"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrations"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrations/ualert"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
// TestAddDashAlertMigration tests the AddDashAlertMigration wrapper method that decides when to run the migration based on migration status and settings.
|
||||
func TestAddDashAlertMigration(t *testing.T) {
|
||||
x := setupTestDB(t)
|
||||
|
||||
tc := []struct {
|
||||
name string
|
||||
config *setting.Cfg
|
||||
isMigrationRun bool
|
||||
shouldPanic bool
|
||||
expected []string // set of migration titles
|
||||
}{
|
||||
{
|
||||
name: "when unified alerting enabled and migration not already run, then add main migration and clear rmMigration log entry",
|
||||
config: &setting.Cfg{
|
||||
UnifiedAlerting: setting.UnifiedAlertingSettings{
|
||||
Enabled: boolPointer(true),
|
||||
},
|
||||
},
|
||||
isMigrationRun: false,
|
||||
expected: []string{fmt.Sprintf(ualert.ClearMigrationEntryTitle, ualert.RmMigTitle), ualert.MigTitle},
|
||||
},
|
||||
{
|
||||
name: "when unified alerting disabled and migration is already run, then add rmMigration and clear main migration log entry",
|
||||
config: &setting.Cfg{
|
||||
UnifiedAlerting: setting.UnifiedAlertingSettings{
|
||||
Enabled: boolPointer(false),
|
||||
},
|
||||
ForceMigration: true,
|
||||
},
|
||||
isMigrationRun: true,
|
||||
expected: []string{fmt.Sprintf(ualert.ClearMigrationEntryTitle, ualert.MigTitle), ualert.RmMigTitle},
|
||||
},
|
||||
{
|
||||
name: "when unified alerting disabled, migration is already run and force migration is disabled, then the migration should panic",
|
||||
config: &setting.Cfg{
|
||||
UnifiedAlerting: setting.UnifiedAlertingSettings{
|
||||
Enabled: boolPointer(false),
|
||||
},
|
||||
ForceMigration: false,
|
||||
},
|
||||
isMigrationRun: true,
|
||||
expected: []string{fmt.Sprintf(ualert.ClearMigrationEntryTitle, ualert.MigTitle), ualert.RmMigTitle},
|
||||
},
|
||||
{
|
||||
name: "when unified alerting enabled and migration is already run, then do nothing",
|
||||
config: &setting.Cfg{
|
||||
UnifiedAlerting: setting.UnifiedAlertingSettings{
|
||||
Enabled: boolPointer(true),
|
||||
},
|
||||
},
|
||||
isMigrationRun: true,
|
||||
expected: []string{},
|
||||
},
|
||||
{
|
||||
name: "when unified alerting disabled and migration is not already run, then do nothing",
|
||||
config: &setting.Cfg{
|
||||
UnifiedAlerting: setting.UnifiedAlertingSettings{
|
||||
Enabled: boolPointer(false),
|
||||
},
|
||||
},
|
||||
isMigrationRun: false,
|
||||
expected: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
defer func() {
|
||||
// if the code should panic, make sure it has
|
||||
if r := recover(); r == nil && tt.shouldPanic {
|
||||
t.Errorf("The code did not panic")
|
||||
}
|
||||
}()
|
||||
if tt.isMigrationRun {
|
||||
log := migrator.MigrationLog{
|
||||
MigrationID: ualert.MigTitle,
|
||||
SQL: "",
|
||||
Timestamp: time.Now(),
|
||||
Success: true,
|
||||
}
|
||||
_, err := x.Insert(log)
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
_, err := x.Exec("DELETE FROM migration_log WHERE migration_id = ?", ualert.MigTitle)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
mg := migrator.NewMigrator(x, tt.config)
|
||||
|
||||
ualert.AddDashAlertMigration(mg)
|
||||
require.Equal(t, tt.expected, mg.GetMigrationIDs(false))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAMConfigMigration tests the execution of the main DashAlertMigration specifically for migrations of channels and routes.
func TestAMConfigMigration(t *testing.T) {
	// Run initial migration to have a working DB.
	x := setupTestDB(t)

	tc := []struct {
		name           string
		legacyChannels []*models.AlertNotification // legacy notification channels inserted before the migration runs
		alerts         []*models.Alert             // legacy alert rules inserted before the migration runs

		expected map[int64]*ualert.PostableUserConfig // expected Alertmanager config per org after migration
		// NOTE(review): expErr is declared but never asserted in the run loop below — confirm whether it should be checked.
		expErr error
	}{
		{
			name: "general multi-org, multi-alert, multi-channel migration",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
				createAlertNotification(t, int64(1), "notifier2", "slack", slackSettings, false),
				createAlertNotification(t, int64(1), "notifier3", "opsgenie", opsgenieSettings, false),
				createAlertNotification(t, int64(2), "notifier4", "email", emailSettings, false),
				createAlertNotification(t, int64(2), "notifier5", "slack", slackSettings, false),
				createAlertNotification(t, int64(2), "notifier6", "opsgenie", opsgenieSettings, true), // default
			},
			alerts: []*models.Alert{
				createAlert(t, int64(1), int64(1), int64(1), "alert1", []string{"notifier1"}),
				createAlert(t, int64(1), int64(1), int64(2), "alert2", []string{"notifier2", "notifier3"}),
				createAlert(t, int64(1), int64(2), int64(3), "alert3", []string{"notifier3"}),
				createAlert(t, int64(2), int64(3), int64(1), "alert4", []string{"notifier4"}),
				createAlert(t, int64(2), int64(3), int64(2), "alert5", []string{"notifier4", "notifier5", "notifier6"}),
				createAlert(t, int64(2), int64(4), int64(3), "alert6", []string{}),
			},
			expected: map[int64]*ualert.PostableUserConfig{
				// Org 1 has no default channel, so the root receiver is the autogenerated default.
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier2", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier2".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier3", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier3".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
							RepeatInterval: nil,
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "notifier2", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier2", Type: "slack"}}},
							{Name: "notifier3", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier3", Type: "opsgenie"}}},
							{Name: "autogen-contact-point-default"}, // empty default
						},
					},
				},
				// Org 2 has a single default channel (notifier6), which becomes the root receiver.
				int64(2): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "notifier6",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier4", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier4".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier5", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier5".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier6", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier6".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
							RepeatInterval: durationPointer(ualert.DisabledRepeatInterval),
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier4", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier4", Type: "email"}}},
							{Name: "notifier5", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier5", Type: "slack"}}},
							{Name: "notifier6", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier6", Type: "opsgenie"}}},
						},
					},
				},
			},
		},
		{
			name: "when no default channel, create empty autogen-contact-point-default",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
							RepeatInterval: nil,
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "autogen-contact-point-default"},
						},
					},
				},
			},
		},
		{
			name: "when single default channel, don't create autogen-contact-point-default",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, true),
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "notifier1",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
							RepeatInterval: durationPointer(ualert.DisabledRepeatInterval),
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
						},
					},
				},
			},
		},
		{
			name: "when single default channel with SendReminder, use channel Frequency as RepeatInterval",
			legacyChannels: []*models.AlertNotification{
				createAlertNotificationWithReminder(t, int64(1), "notifier1", "email", emailSettings, true, true, time.Duration(1)*time.Hour),
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "notifier1",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(model.Duration(time.Duration(1) * time.Hour))},
							},
							RepeatInterval: durationPointer(model.Duration(time.Duration(1) * time.Hour)),
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
						},
					},
				},
			},
		},
		{
			name: "when multiple default channels, add them to autogen-contact-point-default as well",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, true),
				createAlertNotification(t, int64(1), "notifier2", "slack", slackSettings, true),
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier2", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier2".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
							RepeatInterval: durationPointer(ualert.DisabledRepeatInterval),
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "notifier2", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier2", Type: "slack"}}},
							{Name: "autogen-contact-point-default", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}, {Name: "notifier2", Type: "slack"}}},
						},
					},
				},
			},
		},
		{
			name: "when multiple default channels with SendReminder, use minimum channel frequency as RepeatInterval",
			legacyChannels: []*models.AlertNotification{
				createAlertNotificationWithReminder(t, int64(1), "notifier1", "email", emailSettings, true, true, time.Duration(1)*time.Hour),
				createAlertNotificationWithReminder(t, int64(1), "notifier2", "slack", slackSettings, true, true, time.Duration(30)*time.Minute),
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(model.Duration(time.Duration(1) * time.Hour))},
								{Receiver: "notifier2", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier2".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(model.Duration(time.Duration(30) * time.Minute))},
							},
							// The root repeat interval is the minimum of the default channels' frequencies.
							RepeatInterval: durationPointer(model.Duration(time.Duration(30) * time.Minute)),
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "notifier2", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier2", Type: "slack"}}},
							{Name: "autogen-contact-point-default", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}, {Name: "notifier2", Type: "slack"}}},
						},
					},
				},
			},
		},
		{
			name: "when default channels exist alongside non-default, add only defaults to autogen-contact-point-default",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, true), // default
				createAlertNotification(t, int64(1), "notifier2", "slack", slackSettings, false),
				createAlertNotification(t, int64(1), "notifier3", "opsgenie", opsgenieSettings, true), // default
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier2", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier2".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier3", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier3".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
							RepeatInterval: durationPointer(ualert.DisabledRepeatInterval),
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "notifier2", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier2", Type: "slack"}}},
							{Name: "notifier3", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier3", Type: "opsgenie"}}},
							{Name: "autogen-contact-point-default", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}, {Name: "notifier3", Type: "opsgenie"}}}},
					},
				},
			},
		},
		{
			name: "when alerts share channels, only create one receiver per legacy channel",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
				createAlertNotification(t, int64(1), "notifier2", "slack", slackSettings, false),
			},
			alerts: []*models.Alert{
				createAlert(t, int64(1), int64(1), int64(1), "alert1", []string{"notifier1"}),
				createAlert(t, int64(1), int64(1), int64(1), "alert2", []string{"notifier1", "notifier2"}),
			},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
								{Receiver: "notifier2", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier2".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "notifier2", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier2", Type: "slack"}}},
							{Name: "autogen-contact-point-default"},
						},
					},
				},
			},
		},
		{
			name: "when channel not linked to any alerts, still create a receiver for it",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "autogen-contact-point-default"},
						},
					},
				},
			},
		},
		{
			name: "when unsupported channels, do not migrate them",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
				createAlertNotification(t, int64(1), "notifier2", "hipchat", "", false),
				createAlertNotification(t, int64(1), "notifier3", "sensu", "", false),
			},
			alerts: []*models.Alert{},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "autogen-contact-point-default"},
						},
					},
				},
			},
		},
		{
			name: "when unsupported channel linked to alert, do not migrate only that channel",
			legacyChannels: []*models.AlertNotification{
				createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
				createAlertNotification(t, int64(1), "notifier2", "sensu", "", false),
			},
			alerts: []*models.Alert{
				createAlert(t, int64(1), int64(1), int64(1), "alert1", []string{"notifier1", "notifier2"}),
			},
			expected: map[int64]*ualert.PostableUserConfig{
				int64(1): {
					AlertmanagerConfig: ualert.PostableApiAlertingConfig{
						Route: &ualert.Route{
							Receiver:   "autogen-contact-point-default",
							GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
							Routes: []*ualert.Route{
								{Receiver: "notifier1", ObjectMatchers: ualert.ObjectMatchers{{Type: 2, Name: ualert.ContactLabel, Value: `.*"notifier1".*`}}, Routes: nil, Continue: true, RepeatInterval: durationPointer(ualert.DisabledRepeatInterval)},
							},
						},
						Receivers: []*ualert.PostableApiReceiver{
							{Name: "notifier1", GrafanaManagedReceivers: []*ualert.PostableGrafanaReceiver{{Name: "notifier1", Type: "email"}}},
							{Name: "autogen-contact-point-default"},
						},
					},
				},
			},
		},
	}

	for _, tt := range tc {
		t.Run(tt.name, func(t *testing.T) {
			defer teardown(t, x)
			setupLegacyAlertsTables(t, x, tt.legacyChannels, tt.alerts)
			runDashAlertMigrationTestRun(t, x)

			for orgId := range tt.expected {
				amConfig := getAlertmanagerConfig(t, x, orgId)

				// Order of nested GrafanaManagedReceivers is not guaranteed.
				cOpt := []cmp.Option{
					cmpopts.IgnoreUnexported(ualert.PostableApiReceiver{}),
					cmpopts.IgnoreFields(ualert.PostableGrafanaReceiver{}, "UID", "Settings", "SecureSettings"),
					cmpopts.SortSlices(func(a, b *ualert.PostableGrafanaReceiver) bool { return a.Name < b.Name }),
					cmpopts.SortSlices(func(a, b *ualert.PostableApiReceiver) bool { return a.Name < b.Name }),
				}
				if !cmp.Equal(tt.expected[orgId].AlertmanagerConfig.Receivers, amConfig.AlertmanagerConfig.Receivers, cOpt...) {
					t.Errorf("Unexpected Receivers: %v", cmp.Diff(tt.expected[orgId].AlertmanagerConfig.Receivers, amConfig.AlertmanagerConfig.Receivers, cOpt...))
				}

				// Order of routes is not guaranteed.
				cOpt = []cmp.Option{
					cmpopts.SortSlices(func(a, b *ualert.Route) bool {
						if a.Receiver != b.Receiver {
							return a.Receiver < b.Receiver
						}
						return a.ObjectMatchers[0].Value < b.ObjectMatchers[0].Value
					}),
					cmpopts.IgnoreUnexported(ualert.Route{}, labels.Matcher{}),
				}
				if !cmp.Equal(tt.expected[orgId].AlertmanagerConfig.Route, amConfig.AlertmanagerConfig.Route, cOpt...) {
					t.Errorf("Unexpected Route: %v", cmp.Diff(tt.expected[orgId].AlertmanagerConfig.Route, amConfig.AlertmanagerConfig.Route, cOpt...))
				}
			}
		})
	}
}
|
||||
|
||||
// TestDashAlertMigration tests the execution of the main DashAlertMigration specifically for migrations of models.
func TestDashAlertMigration(t *testing.T) {
	// Run initial migration to have a working DB.
	x := setupTestDB(t)

	t.Run("when DashAlertMigration create ContactLabel on migrated AlertRules", func(t *testing.T) {
		defer teardown(t, x)
		legacyChannels := []*models.AlertNotification{
			createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
			createAlertNotification(t, int64(1), "notifier2", "slack", slackSettings, false),
			createAlertNotification(t, int64(1), "notifier3", "opsgenie", opsgenieSettings, false),
			createAlertNotification(t, int64(2), "notifier4", "email", emailSettings, false),
			createAlertNotification(t, int64(2), "notifier5", "slack", slackSettings, false),
			createAlertNotification(t, int64(2), "notifier6", "opsgenie", opsgenieSettings, true), // default
		}
		alerts := []*models.Alert{
			createAlert(t, int64(1), int64(1), int64(1), "alert1", []string{"notifier1"}),
			createAlert(t, int64(1), int64(1), int64(2), "alert2", []string{"notifier2", "notifier3"}),
			createAlert(t, int64(1), int64(2), int64(3), "alert3", []string{"notifier3"}),
			createAlert(t, int64(2), int64(3), int64(1), "alert4", []string{"notifier4"}),
			createAlert(t, int64(2), int64(3), int64(2), "alert5", []string{"notifier4", "notifier5", "notifier6"}),
			createAlert(t, int64(2), int64(4), int64(3), "alert6", []string{}),
		}
		// Expected ContactLabel per rule title; the org-2 default channel
		// (notifier6) is appended to rules that did not reference it explicitly,
		// except alert6, which has no notifications at all.
		expected := map[int64]map[string]*ngModels.AlertRule{
			int64(1): {
				"alert1": {Labels: map[string]string{ualert.ContactLabel: `"notifier1"`}},
				"alert2": {Labels: map[string]string{ualert.ContactLabel: `"notifier2","notifier3"`}},
				"alert3": {Labels: map[string]string{ualert.ContactLabel: `"notifier3"`}},
			},
			int64(2): {
				"alert4": {Labels: map[string]string{ualert.ContactLabel: `"notifier4","notifier6"`}},
				"alert5": {Labels: map[string]string{ualert.ContactLabel: `"notifier4","notifier5","notifier6"`}},
				"alert6": {Labels: map[string]string{}},
			},
		}
		setupLegacyAlertsTables(t, x, legacyChannels, alerts)
		runDashAlertMigrationTestRun(t, x)

		for orgId := range expected {
			rules := getAlertRules(t, x, orgId)
			expectedRulesMap := expected[orgId]
			require.Len(t, rules, len(expectedRulesMap))
			for _, r := range rules {
				require.Equal(t, expectedRulesMap[r.Title].Labels[ualert.ContactLabel], r.Labels[ualert.ContactLabel])
			}
		}
	})

	t.Run("when DashAlertMigration create ContactLabel with sanitized name if name contains double quote", func(t *testing.T) {
		defer teardown(t, x)
		legacyChannels := []*models.AlertNotification{
			createAlertNotification(t, int64(1), "notif\"ier1", "email", emailSettings, false),
		}
		alerts := []*models.Alert{
			createAlert(t, int64(1), int64(1), int64(1), "alert1", []string{"notif\"ier1"}),
		}
		// The double quote in the channel name is replaced with an underscore.
		expected := map[int64]map[string]*ngModels.AlertRule{
			int64(1): {
				"alert1": {Labels: map[string]string{ualert.ContactLabel: `"notif_ier1"`}},
			},
		}
		setupLegacyAlertsTables(t, x, legacyChannels, alerts)
		runDashAlertMigrationTestRun(t, x)

		for orgId := range expected {
			rules := getAlertRules(t, x, orgId)
			expectedRulesMap := expected[orgId]
			require.Len(t, rules, len(expectedRulesMap))
			for _, r := range rules {
				require.Equal(t, expectedRulesMap[r.Title].Labels[ualert.ContactLabel], r.Labels[ualert.ContactLabel])
			}
		}
	})

	t.Run("when folder is missing put alert in General folder", func(t *testing.T) {
		// NOTE(review): this subtest inserts its fixtures directly (not via
		// setupLegacyAlertsTables) and does not defer teardown — confirm it is
		// intentionally the last subtest to run against x.
		o := createOrg(t, 1)
		folder1 := createDashboard(t, 1, o.ID, "folder-1")
		folder1.IsFolder = true
		dash1 := createDashboard(t, 3, o.ID, "dash1")
		dash1.FolderID = folder1.ID
		dash2 := createDashboard(t, 4, o.ID, "dash2")
		dash2.FolderID = 22 // missing folder

		a1 := createAlert(t, o.ID, dash1.ID, int64(1), "alert-1", []string{})
		a2 := createAlert(t, o.ID, dash2.ID, int64(1), "alert-2", []string{})

		_, err := x.Insert(o, folder1, dash1, dash2, a1, a2)
		require.NoError(t, err)

		runDashAlertMigrationTestRun(t, x)

		rules := getAlertRules(t, x, o.ID)
		require.Len(t, rules, 2)

		// The migration should have created the General folder for the orphaned alert.
		var generalFolder dashboards.Dashboard
		_, err = x.Table(&dashboards.Dashboard{}).Where("title = ? AND org_id = ?", ualert.GENERAL_FOLDER, o.ID).Get(&generalFolder)
		require.NoError(t, err)

		require.NotNil(t, generalFolder)

		for _, rule := range rules {
			var expectedFolder dashboards.Dashboard
			if rule.Title == a1.Name {
				expectedFolder = *folder1
			} else {
				expectedFolder = generalFolder
			}
			require.Equal(t, expectedFolder.UID, rule.NamespaceUID)
		}
	})
}
|
||||
|
||||
// Minimal valid settings payloads for the channel types exercised in these tests.
const (
	emailSettings    = `{"addresses": "test"}`
	slackSettings    = `{"recipient": "test", "token": "test"}`
	opsgenieSettings = `{"apiKey": "test"}`
)
|
||||
|
||||
// setupTestDB prepares the sqlite database and runs OSS migrations to initialize the schemas.
|
||||
func setupTestDB(t *testing.T) *xorm.Engine {
|
||||
t.Helper()
|
||||
testDB := sqlutil.SQLite3TestDB()
|
||||
|
||||
x, err := xorm.NewEngine(testDB.DriverName, testDB.ConnStr)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = migrator.NewDialect(x.DriverName()).CleanDB(x)
|
||||
require.NoError(t, err)
|
||||
|
||||
mg := migrator.NewMigrator(x, &setting.Cfg{Raw: ini.Empty()})
|
||||
migrations := &migrations.OSSMigrations{}
|
||||
migrations.AddMigration(mg)
|
||||
|
||||
err = mg.Start(false, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
var (
	// now is the shared timestamp used for all Created/Updated fixture fields,
	// so inserted rows are deterministic within a single test run.
	now = time.Now()
)
|
||||
|
||||
// createAlertNotificationWithReminder creates a legacy alert notification channel for inserting into the test database.
|
||||
func createAlertNotificationWithReminder(t *testing.T, orgId int64, uid string, channelType string, settings string, defaultChannel bool, sendReminder bool, frequency time.Duration) *models.AlertNotification {
|
||||
t.Helper()
|
||||
settingsJson := simplejson.New()
|
||||
if settings != "" {
|
||||
s, err := simplejson.NewJson([]byte(settings))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to unmarshal alert notification json: %v", err)
|
||||
}
|
||||
settingsJson = s
|
||||
}
|
||||
|
||||
return &models.AlertNotification{
|
||||
OrgID: orgId,
|
||||
UID: uid,
|
||||
Name: uid, // Same as uid to make testing easier.
|
||||
Type: channelType,
|
||||
DisableResolveMessage: false,
|
||||
IsDefault: defaultChannel,
|
||||
Settings: settingsJson,
|
||||
SecureSettings: make(map[string][]byte),
|
||||
Created: now,
|
||||
Updated: now,
|
||||
SendReminder: sendReminder,
|
||||
Frequency: frequency,
|
||||
}
|
||||
}
|
||||
|
||||
// createAlertNotification creates a legacy alert notification channel for inserting into the test database.
|
||||
func createAlertNotification(t *testing.T, orgId int64, uid string, channelType string, settings string, defaultChannel bool) *models.AlertNotification {
|
||||
return createAlertNotificationWithReminder(t, orgId, uid, channelType, settings, defaultChannel, false, time.Duration(0))
|
||||
}
|
||||
|
||||
// createAlert creates a legacy alert rule for inserting into the test database.
|
||||
func createAlert(t *testing.T, orgId int64, dashboardId int64, panelsId int64, name string, notifierUids []string) *models.Alert {
|
||||
t.Helper()
|
||||
|
||||
var settings = simplejson.New()
|
||||
if len(notifierUids) != 0 {
|
||||
notifiers := make([]interface{}, 0)
|
||||
for _, n := range notifierUids {
|
||||
notifiers = append(notifiers, struct {
|
||||
Uid string
|
||||
}{Uid: n})
|
||||
}
|
||||
|
||||
settings.Set("notifications", notifiers)
|
||||
}
|
||||
|
||||
return &models.Alert{
|
||||
OrgID: orgId,
|
||||
DashboardID: dashboardId,
|
||||
PanelID: panelsId,
|
||||
Name: name,
|
||||
Message: "message",
|
||||
Frequency: int64(60),
|
||||
For: time.Duration(time.Duration(60).Seconds()),
|
||||
State: models.AlertStateOK,
|
||||
Settings: settings,
|
||||
NewStateDate: now,
|
||||
Created: now,
|
||||
Updated: now,
|
||||
}
|
||||
}
|
||||
|
||||
// createDashboard creates a dashboard for inserting into the test database.
|
||||
func createDashboard(t *testing.T, id int64, orgId int64, uid string) *dashboards.Dashboard {
|
||||
t.Helper()
|
||||
return &dashboards.Dashboard{
|
||||
ID: id,
|
||||
OrgID: orgId,
|
||||
UID: uid,
|
||||
Created: now,
|
||||
Updated: now,
|
||||
Title: uid, // Not tested, needed to satisfy contraint.
|
||||
}
|
||||
}
|
||||
|
||||
// createDatasource creates a ddatasource for inserting into the test database.
|
||||
func createDatasource(t *testing.T, id int64, orgId int64, uid string) *datasources.DataSource {
|
||||
t.Helper()
|
||||
return &datasources.DataSource{
|
||||
ID: id,
|
||||
OrgID: orgId,
|
||||
UID: uid,
|
||||
Created: now,
|
||||
Updated: now,
|
||||
Name: uid, // Not tested, needed to satisfy contraint.
|
||||
}
|
||||
}
|
||||
|
||||
func createOrg(t *testing.T, id int64) *org.Org {
|
||||
t.Helper()
|
||||
return &org.Org{
|
||||
ID: id,
|
||||
Version: 1,
|
||||
Name: fmt.Sprintf("org_%d", id),
|
||||
Created: time.Now(),
|
||||
Updated: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// teardown cleans the input tables between test cases.
|
||||
func teardown(t *testing.T, x *xorm.Engine) {
|
||||
_, err := x.Exec("DELETE from org")
|
||||
require.NoError(t, err)
|
||||
_, err = x.Exec("DELETE from alert")
|
||||
require.NoError(t, err)
|
||||
_, err = x.Exec("DELETE from alert_notification")
|
||||
require.NoError(t, err)
|
||||
_, err = x.Exec("DELETE from dashboard")
|
||||
require.NoError(t, err)
|
||||
_, err = x.Exec("DELETE from data_source")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// setupDashAlertMigrationTestRun runs DashAlertMigration for a new test run.
|
||||
func runDashAlertMigrationTestRun(t *testing.T, x *xorm.Engine) {
|
||||
_, errDeleteMig := x.Exec("DELETE FROM migration_log WHERE migration_id = ?", ualert.MigTitle)
|
||||
require.NoError(t, errDeleteMig)
|
||||
|
||||
alertMigrator := migrator.NewMigrator(x, &setting.Cfg{})
|
||||
alertMigrator.AddMigration(ualert.RmMigTitle, &ualert.RmMigration{})
|
||||
ualert.AddDashAlertMigration(alertMigrator)
|
||||
|
||||
errRunningMig := alertMigrator.Start(false, 0)
|
||||
require.NoError(t, errRunningMig)
|
||||
}
|
||||
|
||||
// setupLegacyAlertsTables inserts data into the legacy alerting tables that is needed for testing the migration.
// It creates two orgs, two dashboards and two datasources per org, then
// inserts the provided notification channels and alerts (if any).
func setupLegacyAlertsTables(t *testing.T, x *xorm.Engine, legacyChannels []*models.AlertNotification, alerts []*models.Alert) {
	t.Helper()

	orgs := []org.Org{
		*createOrg(t, 1),
		*createOrg(t, 2),
	}

	// Setup dashboards.
	dashboards := []dashboards.Dashboard{
		*createDashboard(t, 1, 1, "dash1-1"),
		*createDashboard(t, 2, 1, "dash2-1"),
		*createDashboard(t, 3, 2, "dash3-2"),
		*createDashboard(t, 4, 2, "dash4-2"),
	}
	_, errDashboards := x.Insert(dashboards)
	require.NoError(t, errDashboards)

	// Setup data_sources.
	dataSources := []datasources.DataSource{
		*createDatasource(t, 1, 1, "ds1-1"),
		*createDatasource(t, 2, 1, "ds2-1"),
		*createDatasource(t, 3, 2, "ds3-2"),
		*createDatasource(t, 4, 2, "ds4-2"),
	}

	_, errOrgs := x.Insert(orgs)
	require.NoError(t, errOrgs)

	_, errDataSourcess := x.Insert(dataSources)
	require.NoError(t, errDataSourcess)

	// Channels and alerts are optional; xorm's Insert errors on empty slices,
	// hence the length guards.
	if len(legacyChannels) > 0 {
		_, channelErr := x.Insert(legacyChannels)
		require.NoError(t, channelErr)
	}

	if len(alerts) > 0 {
		_, alertErr := x.Insert(alerts)
		require.NoError(t, alertErr)
	}
}
|
||||
|
||||
// getAlertmanagerConfig retrieves the Alertmanager Config from the database for a given orgId.
func getAlertmanagerConfig(t *testing.T, x *xorm.Engine, orgId int64) *ualert.PostableUserConfig {
	amConfig := ""
	_, err := x.Table("alert_configuration").Where("org_id = ?", orgId).Cols("alertmanager_configuration").Get(&amConfig)
	require.NoError(t, err)

	config := ualert.PostableUserConfig{}
	err = json.Unmarshal([]byte(amConfig), &config)
	require.NoError(t, err)
	return &config
}

// getAlertRules retrieves all migrated alert rules from the database for a given orgId.
func getAlertRules(t *testing.T, x *xorm.Engine, orgId int64) []*ngModels.AlertRule {
	rules := make([]*ngModels.AlertRule, 0)
	err := x.Table("alert_rule").Where("org_id = ?", orgId).Find(&rules)
	require.NoError(t, err)

	return rules
}
|
||||
|
||||
// boolPointer returns a pointer to a copy of the given bool, for use in
// struct fields that take *bool.
func boolPointer(b bool) *bool {
	v := b
	return &v
}
|
||||
|
||||
func durationPointer(d model.Duration) *model.Duration {
|
||||
return &d
|
||||
}
|
||||
284
pkg/services/sqlstore/migrations/ualert/permissions.go
Normal file
284
pkg/services/sqlstore/migrations/ualert/permissions.go
Normal file
@@ -0,0 +1,284 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"xorm.io/xorm"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/infra/metrics"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
dashver "github.com/grafana/grafana/pkg/services/dashboardversion"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
// roleType mirrors the legacy org role names used by dashboard ACL entries.
type roleType string

const (
	RoleNone   roleType = "None"
	RoleViewer roleType = "Viewer"
	RoleEditor roleType = "Editor"
	RoleAdmin  roleType = "Admin"
)

// IsValid reports whether r is one of the four known role names.
func (r roleType) IsValid() bool {
	switch r {
	case RoleNone, RoleViewer, RoleEditor, RoleAdmin:
		return true
	default:
		return false
	}
}
|
||||
|
||||
// permissionType is the numeric permission level stored in dashboard_acl;
// higher values grant more access (see setACL's "higher permission wins").
type permissionType int

// dashboardACL is a row in the legacy dashboard_acl table. Exactly one of
// UserID, TeamID, or Role is expected to identify the grantee.
type dashboardACL struct {
	// nolint:stylecheck
	Id          int64
	OrgID       int64 `xorm:"org_id"`
	DashboardID int64 `xorm:"dashboard_id"`

	UserID     int64 `xorm:"user_id"`
	TeamID     int64 `xorm:"team_id"`
	Role       *roleType // pointer to be nullable
	Permission permissionType

	Created time.Time
	Updated time.Time
}

// TableName maps the struct to the dashboard_acl table for xorm.
func (p dashboardACL) TableName() string { return "dashboard_acl" }

// folderHelper bundles the xorm session and migrator used by the folder and
// ACL helpers below; all of its methods expect to run inside a transaction
// owned by the caller.
type folderHelper struct {
	sess *xorm.Session
	mg   *migrator.Migrator
}
|
||||
|
||||
// getOrCreateGeneralFolder returns the general folder under the specific organisation
// If the general folder does not exist it creates it.
func (m *folderHelper) getOrCreateGeneralFolder(orgID int64) (*dashboard, error) {
	// there is a unique constraint on org_id, folder_id, title
	// there are no nested folders so the parent folder id is always 0
	dashboard := dashboard{OrgId: orgID, FolderId: 0, Title: GENERAL_FOLDER}
	has, err := m.sess.Get(&dashboard)
	if err != nil {
		return nil, err
	} else if !has {
		// create folder
		return m.createGeneralFolder(orgID)
	}
	return &dashboard, nil
}

// createGeneralFolder creates the "General" folder for the given org.
func (m *folderHelper) createGeneralFolder(orgID int64) (*dashboard, error) {
	return m.createFolder(orgID, GENERAL_FOLDER)
}
|
||||
|
||||
// getFolder returns the folder of the given dashboard (if it has one).
// When the dashboard has no parent folder (FolderId <= 0) the zero-value
// dashboard is returned with a nil error; a FolderId that cannot be resolved
// to an existing folder row is an error.
func (m *folderHelper) getFolder(dash dashboard, da dashAlert) (dashboard, error) {
	// get folder if exists
	folder := dashboard{}
	if dash.FolderId > 0 {
		exists, err := m.sess.Where("id=?", dash.FolderId).Get(&folder)
		if err != nil {
			return folder, fmt.Errorf("failed to get folder %d: %w", dash.FolderId, err)
		}
		if !exists {
			return folder, fmt.Errorf("folder with id %v not found", dash.FolderId)
		}
		if !folder.IsFolder {
			return folder, fmt.Errorf("id %v is a dashboard not a folder", dash.FolderId)
		}
	}
	return folder, nil
}
|
||||
|
||||
// createFolder inserts a new folder row (and its initial dashboard_version
// row) with the given title under the given org, returning the created model.
// based on sqlstore.saveDashboard()
// it should be called from inside a transaction
func (m *folderHelper) createFolder(orgID int64, title string) (*dashboard, error) {
	cmd := saveFolderCommand{
		OrgId:    orgID,
		FolderId: 0,
		IsFolder: true,
		Dashboard: simplejson.NewFromAny(map[string]any{
			"title": title,
		}),
	}
	dash := cmd.getDashboardModel()
	dash.setUid(util.GenerateShortUID())

	parentVersion := dash.Version
	dash.setVersion(1)
	dash.Created = time.Now()
	dash.CreatedBy = FOLDER_CREATED_BY
	dash.Updated = time.Now()
	dash.UpdatedBy = FOLDER_CREATED_BY
	metrics.MApiDashboardInsert.Inc()

	if _, err := m.sess.Insert(dash); err != nil {
		return nil, err
	}

	dashVersion := &dashver.DashboardVersion{
		DashboardID:   dash.Id,
		ParentVersion: parentVersion,
		RestoredFrom:  cmd.RestoredFrom,
		Version:       dash.Version,
		Created:       time.Now(),
		CreatedBy:     dash.UpdatedBy,
		Message:       cmd.Message,
		Data:          dash.Data,
	}

	// insert version entry
	if _, err := m.sess.Insert(dashVersion); err != nil {
		return nil, err
	}
	return dash, nil
}
|
||||
|
||||
// setACL replaces the permissions of the given dashboard/folder with the
// provided items, deduplicating conflicting entries and keeping the highest
// permission per user/team, then marks the dashboard as having an ACL.
// based on SQLStore.UpdateDashboardACL()
// it should be called from inside a transaction
func (m *folderHelper) setACL(orgID int64, dashboardID int64, items []*dashboardACL) error {
	if dashboardID <= 0 {
		return fmt.Errorf("folder id must be greater than zero for a folder permission")
	}

	// userPermissionsMap is a map keeping the highest permission per user
	// for handling conflicting inherited (folder) and non-inherited (dashboard) user permissions
	userPermissionsMap := make(map[int64]*dashboardACL, len(items))
	// teamPermissionsMap is a map keeping the highest permission per team
	// for handling conflicting inherited (folder) and non-inherited (dashboard) team permissions
	teamPermissionsMap := make(map[int64]*dashboardACL, len(items))
	for _, item := range items {
		if item.UserID != 0 {
			acl, ok := userPermissionsMap[item.UserID]
			if !ok {
				userPermissionsMap[item.UserID] = item
			} else {
				if item.Permission > acl.Permission {
					// the higher permission wins
					userPermissionsMap[item.UserID] = item
				}
			}
		}

		if item.TeamID != 0 {
			acl, ok := teamPermissionsMap[item.TeamID]
			if !ok {
				teamPermissionsMap[item.TeamID] = item
			} else {
				if item.Permission > acl.Permission {
					// the higher permission wins
					teamPermissionsMap[item.TeamID] = item
				}
			}
		}
	}

	type keyType struct {
		UserID     int64 `xorm:"user_id"`
		TeamID     int64 `xorm:"team_id"`
		Role       roleType
		Permission permissionType
	}
	// seen keeps track of inserted permissions to avoid duplicates (due to inheritance)
	seen := make(map[keyType]struct{}, len(items))
	for _, item := range items {
		// every entry must identify a grantee: a user, a team, or a valid role
		if item.UserID == 0 && item.TeamID == 0 && (item.Role == nil || !item.Role.IsValid()) {
			return dashboards.ErrDashboardACLInfoMissing
		}

		// ignore duplicate user permissions
		if item.UserID != 0 {
			acl, ok := userPermissionsMap[item.UserID]
			if ok {
				if acl.Id != item.Id {
					continue
				}
			}
		}

		// ignore duplicate team permissions
		if item.TeamID != 0 {
			acl, ok := teamPermissionsMap[item.TeamID]
			if ok {
				if acl.Id != item.Id {
					continue
				}
			}
		}

		key := keyType{UserID: item.UserID, TeamID: item.TeamID, Role: "", Permission: item.Permission}
		if item.Role != nil {
			key.Role = *item.Role
		}
		if _, ok := seen[key]; ok {
			continue
		}

		// unset Id so that the new record will get a different one
		item.Id = 0
		item.OrgID = orgID
		item.DashboardID = dashboardID
		item.Created = time.Now()
		item.Updated = time.Now()

		// user_id/team_id columns must be NULL-able so an entry can target
		// only one grantee kind
		m.sess.Nullable("user_id", "team_id")
		if _, err := m.sess.Insert(item); err != nil {
			return err
		}
		seen[key] = struct{}{}
	}

	// Update dashboard HasACL flag
	dashboard := dashboards.Dashboard{HasACL: true}
	_, err := m.sess.Cols("has_acl").Where("id=?", dashboardID).Update(&dashboard)
	return err
}
|
||||
|
||||
// getACL returns the distinct permissions that apply to the given dashboard,
// including ones inherited from its parent folder and the org-wide defaults
// (org_id = -1) when neither the dashboard nor its folder has its own ACL.
// based on SQLStore.GetDashboardACLInfoList()
func (m *folderHelper) getACL(orgID, dashboardID int64) ([]*dashboardACL, error) {
	var err error

	// dialect-specific literal for boolean false in the raw SQL below
	falseStr := m.mg.Dialect.BooleanStr(false)

	result := make([]*dashboardACL, 0)
	rawSQL := `
	-- get distinct permissions for the dashboard and its parent folder
	SELECT DISTINCT
		da.id,
		da.user_id,
		da.team_id,
		da.permission,
		da.role
	FROM dashboard as d
		LEFT JOIN dashboard folder on folder.id = d.folder_id
		LEFT JOIN dashboard_acl AS da ON
		da.dashboard_id = d.id OR
		da.dashboard_id = d.folder_id OR
		(
			-- include default permissions --
			da.org_id = -1 AND (
			  (folder.id IS NOT NULL AND folder.has_acl = ` + falseStr + `) OR
			  (folder.id IS NULL AND d.has_acl = ` + falseStr + `)
			)
		)
	WHERE d.org_id = ? AND d.id = ? AND da.id IS NOT NULL
	ORDER BY da.id ASC
	`
	err = m.sess.SQL(rawSQL, orgID, dashboardID).Find(&result)
	return result, err
}
|
||||
|
||||
// getOrgsIDThatHaveFolders returns the set of organization IDs that have at
// least one folder.
func (m *folderHelper) getOrgsIDThatHaveFolders() (map[int64]struct{}, error) {
	// collect the distinct org_id values of all folder rows
	var rows []int64
	err := m.sess.Table(&dashboard{}).Where("is_folder=?", true).Distinct("org_id").Find(&rows)
	if err != nil {
		return nil, err
	}
	result := make(map[int64]struct{}, len(rows))
	for _, s := range rows {
		result[s] = struct{}{}
	}
	return result, nil
}
|
||||
@@ -1,209 +0,0 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"xorm.io/xorm"
|
||||
|
||||
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
)
|
||||
|
||||
// UpdateRuleGroupIndexMigration registers a migration that populates the
// rule_group_idx field for alert rules that belong to a group with more than 1 alert.
func UpdateRuleGroupIndexMigration(mg *migrator.Migrator) {
	mg.AddMigration("update group index for alert rules", &updateRulesOrderInGroup{})
}
|
||||
|
||||
// updateRulesOrderInGroup is a code migration that assigns a 1-based position
// within its group to every alert rule (see Exec below).
type updateRulesOrderInGroup struct {
	migrator.MigrationBase
}

// SQL returns the placeholder text recorded in the migration log for code
// migrations.
func (c updateRulesOrderInGroup) SQL(migrator.Dialect) string {
	return codeMigration
}
|
||||
|
||||
// Exec loads all alert rules ordered by id, groups them by
// (org, namespace, group), assigns each rule its 1-based position within the
// group, and persists the changed rules together with a new version row each.
func (c updateRulesOrderInGroup) Exec(sess *xorm.Session, migrator *migrator.Migrator) error {
	var rows []*alertRule
	if err := sess.Table(alertRule{}).Asc("id").Find(&rows); err != nil {
		return fmt.Errorf("failed to read the list of alert rules: %w", err)
	}

	if len(rows) == 0 {
		migrator.Logger.Debug("No rules to migrate.")
		return nil
	}

	groups := map[ngmodels.AlertRuleGroupKey][]*alertRule{}

	for _, row := range rows {
		groupKey := ngmodels.AlertRuleGroupKey{
			OrgID:        row.OrgID,
			NamespaceUID: row.NamespaceUID,
			RuleGroup:    row.RuleGroup,
		}
		groups[groupKey] = append(groups[groupKey], row)
	}

	toUpdate := make([]*alertRule, 0, len(rows))

	// assign positions; rules whose index already matches are left untouched
	for _, rules := range groups {
		for i, rule := range rules {
			if rule.RuleGroupIndex == i+1 {
				continue
			}
			rule.RuleGroupIndex = i + 1
			toUpdate = append(toUpdate, rule)
		}
	}

	if len(toUpdate) == 0 {
		migrator.Logger.Debug("No rules to upgrade group index")
		return nil
	}

	updated := time.Now()
	versions := make([]interface{}, 0, len(toUpdate))

	for _, rule := range toUpdate {
		rule.Updated = updated
		// snapshot the rule as a new version, bumping the version counter
		version := rule.makeVersion()
		version.Version = rule.Version + 1
		version.ParentVersion = rule.Version
		rule.Version++
		_, err := sess.ID(rule.ID).Cols("version", "updated", "rule_group_idx").Update(rule)
		if err != nil {
			migrator.Logger.Error("failed to update alert rule", "uid", rule.UID, "err", err)
			return fmt.Errorf("unable to update alert rules with group index: %w", err)
		}
		migrator.Logger.Debug("updated group index for alert rule", "rule_uid", rule.UID)
		versions = append(versions, version)
	}
	// bulk-insert all the new version rows at once
	_, err := sess.Insert(versions...)
	if err != nil {
		migrator.Logger.Error("failed to insert changes to alert_rule_version", "err", err)
		return fmt.Errorf("unable to update alert rules with group index: %w", err)
	}
	return nil
}
|
||||
|
||||
// alertRule is a local snapshot of the alert_rule table used by this
// migration, vendored so the migration stays stable as ngalert models evolve.
type alertRule struct {
	ID              int64 `xorm:"pk autoincr 'id'"`
	OrgID           int64 `xorm:"org_id"`
	Title           string
	Condition       string
	Data            []alertQuery
	IntervalSeconds int64
	Version         int64
	UID             string `xorm:"uid"`
	NamespaceUID    string `xorm:"namespace_uid"`
	RuleGroup       string
	RuleGroupIndex  int `xorm:"rule_group_idx"`
	NoDataState     string
	ExecErrState    string
	For             duration
	Updated         time.Time
	Annotations     map[string]string
	Labels          map[string]string
	IsPaused        bool
}

// alertRuleVersion is a local snapshot of the alert_rule_version table; one
// row is written per rule change (see makeVersion).
type alertRuleVersion struct {
	RuleOrgID        int64  `xorm:"rule_org_id"`
	RuleUID          string `xorm:"rule_uid"`
	RuleNamespaceUID string `xorm:"rule_namespace_uid"`
	RuleGroup        string
	RuleGroupIndex   int `xorm:"rule_group_idx"`
	ParentVersion    int64
	RestoredFrom     int64
	Version          int64

	Created         time.Time
	Title           string
	Condition       string
	Data            []alertQuery
	IntervalSeconds int64
	NoDataState     string
	ExecErrState    string
	// ideally this field should have been apimodels.ApiDuration
	// but this is currently not possible because of circular dependencies
	For         duration
	Annotations map[string]string
	Labels      map[string]string
	IsPaused    bool
}
|
||||
|
||||
// makeVersion creates an alertRuleVersion snapshot of the rule's current
// state. Version/ParentVersion/RestoredFrom are set to initial defaults; the
// caller adjusts them (see updateRulesOrderInGroup.Exec).
func (a *alertRule) makeVersion() *alertRuleVersion {
	return &alertRuleVersion{
		RuleOrgID:        a.OrgID,
		RuleUID:          a.UID,
		RuleNamespaceUID: a.NamespaceUID,
		RuleGroup:        a.RuleGroup,
		RuleGroupIndex:   a.RuleGroupIndex,
		ParentVersion:    0,
		RestoredFrom:     0,
		Version:          1,

		Created:         time.Now().UTC(),
		Title:           a.Title,
		Condition:       a.Condition,
		Data:            a.Data,
		IntervalSeconds: a.IntervalSeconds,
		NoDataState:     a.NoDataState,
		ExecErrState:    a.ExecErrState,
		For:             a.For,
		Annotations:     a.Annotations,
		// NOTE(review): labels are written as an empty map rather than copied
		// from a.Labels — appears deliberate in this vendored migration, but
		// confirm before relying on version rows for label history.
		Labels:   map[string]string{},
		IsPaused: a.IsPaused,
	}
}
|
||||
|
||||
// alertQuery is a vendored copy of the ngalert query model stored in a rule's
// Data field.
type alertQuery struct {
	// RefID is the unique identifier of the query, set by the frontend call.
	RefID string `json:"refId"`

	// QueryType is an optional identifier for the type of query.
	// It can be used to distinguish different types of queries.
	QueryType string `json:"queryType"`

	// RelativeTimeRange is the relative Start and End of the query as sent by the frontend.
	RelativeTimeRange relativeTimeRange `json:"relativeTimeRange"`

	DatasourceUID string `json:"datasourceUid"`

	// JSON is the raw JSON query and includes the above properties as well as custom properties.
	Model json.RawMessage `json:"model"`
}

// relativeTimeRange is the per query start and end time
// for requests, expressed as offsets back from "now".
type relativeTimeRange struct {
	From duration `json:"from"`
	To   duration `json:"to"`
}
|
||||
|
||||
// duration is a time.Duration alias that marshals to and from JSON as a
// number of seconds.
type duration time.Duration

// String renders the duration with the standard time.Duration formatting.
func (d duration) String() string {
	return time.Duration(d).String()
}

// MarshalJSON encodes the duration as its length in (fractional) seconds.
func (d duration) MarshalJSON() ([]byte, error) {
	secs := time.Duration(d).Seconds()
	return json.Marshal(secs)
}

// UnmarshalJSON decodes a JSON number of seconds into the duration; any
// non-numeric JSON value is rejected.
func (d *duration) UnmarshalJSON(b []byte) error {
	var raw interface{}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	secs, ok := raw.(float64)
	if !ok {
		return fmt.Errorf("invalid duration %v", raw)
	}
	*d = duration(time.Duration(secs) * time.Second)
	return nil
}
|
||||
60
pkg/services/sqlstore/migrations/ualert/securejsondata.go
Normal file
60
pkg/services/sqlstore/migrations/ualert/securejsondata.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
// SecureJsonData is used to store encrypted data (for example in data_source table). Only values are separately
// encrypted.
type SecureJsonData map[string][]byte

// seclogger is the logger used by the legacy secure-JSON helpers below.
var seclogger = log.New("securejsondata")

// DecryptedValue returns single decrypted value from SecureJsonData. Similar to normal map access second return value
// is true if the key exists and false if not.
// NOTE: like the legacy implementation this vendors, a decryption failure
// logs and terminates the process via os.Exit(1).
func (s SecureJsonData) DecryptedValue(key string) (string, bool) {
	if value, ok := s[key]; ok {
		decryptedData, err := util.Decrypt(value, setting.SecretKey)
		if err != nil {
			seclogger.Error(err.Error())
			os.Exit(1)
		}
		return string(decryptedData), true
	}
	return "", false
}

// Decrypt returns map of the same type but where the all the values are decrypted. Opposite of what
// GetEncryptedJsonData is doing.
// NOTE: a decryption failure logs and terminates the process via os.Exit(1).
func (s SecureJsonData) Decrypt() map[string]string {
	decrypted := make(map[string]string)
	for key, data := range s {
		decryptedData, err := util.Decrypt(data, setting.SecretKey)
		if err != nil {
			seclogger.Error(err.Error())
			os.Exit(1)
		}

		decrypted[key] = string(decryptedData)
	}
	return decrypted
}

// GetEncryptedJsonData returns map where all keys are encrypted.
// NOTE: an encryption failure logs and terminates the process via os.Exit(1).
func GetEncryptedJsonData(sjd map[string]string) SecureJsonData {
	encrypted := make(SecureJsonData)
	for key, data := range sjd {
		encryptedData, err := util.Encrypt([]byte(data), setting.SecretKey)
		if err != nil {
			seclogger.Error(err.Error())
			os.Exit(1)
		}

		encrypted[key] = encryptedData
	}
	return encrypted
}
|
||||
180
pkg/services/sqlstore/migrations/ualert/silences.go
Normal file
180
pkg/services/sqlstore/migrations/ualert/silences.go
Normal file
@@ -0,0 +1,180 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
pb "github.com/prometheus/alertmanager/silence/silencepb"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
)
|
||||
|
||||
const (
	// Should be the same as 'NoDataAlertName' in pkg/services/schedule/compat.go.
	NoDataAlertName = "DatasourceNoData"

	// ErrorAlertName is the alert name used for datasource error alerts.
	ErrorAlertName = "DatasourceError"
)
|
||||
|
||||
// addErrorSilence creates a one-year silence for the DatasourceError alert of
// the given rule when the legacy alert used 'Keep Last State' for its
// execution-error state, and queues it on m.silences keyed by org.
func (m *migration) addErrorSilence(da dashAlert, rule *alertRule) error {
	if da.ParsedSettings.ExecutionErrorState != "keep_state" {
		return nil
	}

	uid, err := uuid.NewRandom()
	if err != nil {
		return errors.New("failed to create uuid for silence")
	}

	s := &pb.MeshSilence{
		Silence: &pb.Silence{
			Id: uid.String(),
			// match both the DatasourceError alert name and this rule's UID
			Matchers: []*pb.Matcher{
				{
					Type:    pb.Matcher_EQUAL,
					Name:    model.AlertNameLabel,
					Pattern: ErrorAlertName,
				},
				{
					Type:    pb.Matcher_EQUAL,
					Name:    "rule_uid",
					Pattern: rule.UID,
				},
			},
			StartsAt:  time.Now(),
			EndsAt:    time.Now().AddDate(1, 0, 0), // 1 year
			CreatedBy: "Grafana Migration",
			Comment:   fmt.Sprintf("Created during migration to unified alerting to silence Error state for alert rule ID '%s' and Title '%s' because the option 'Keep Last State' was selected for Error state", rule.UID, rule.Title),
		},
		ExpiresAt: time.Now().AddDate(1, 0, 0), // 1 year
	}
	if _, ok := m.silences[da.OrgId]; !ok {
		m.silences[da.OrgId] = make([]*pb.MeshSilence, 0)
	}
	m.silences[da.OrgId] = append(m.silences[da.OrgId], s)
	return nil
}
|
||||
|
||||
// addNoDataSilence creates a one-year silence for the DatasourceNoData alert
// of the given rule when the legacy alert used 'Keep Last State' for its
// no-data state, and queues it on m.silences keyed by org.
func (m *migration) addNoDataSilence(da dashAlert, rule *alertRule) error {
	if da.ParsedSettings.NoDataState != "keep_state" {
		return nil
	}

	uid, err := uuid.NewRandom()
	if err != nil {
		return errors.New("failed to create uuid for silence")
	}

	s := &pb.MeshSilence{
		Silence: &pb.Silence{
			Id: uid.String(),
			// match both the DatasourceNoData alert name and this rule's UID
			Matchers: []*pb.Matcher{
				{
					Type:    pb.Matcher_EQUAL,
					Name:    model.AlertNameLabel,
					Pattern: NoDataAlertName,
				},
				{
					Type:    pb.Matcher_EQUAL,
					Name:    "rule_uid",
					Pattern: rule.UID,
				},
			},
			StartsAt:  time.Now(),
			EndsAt:    time.Now().AddDate(1, 0, 0), // 1 year.
			CreatedBy: "Grafana Migration",
			Comment:   fmt.Sprintf("Created during migration to unified alerting to silence NoData state for alert rule ID '%s' and Title '%s' because the option 'Keep Last State' was selected for NoData state", rule.UID, rule.Title),
		},
		ExpiresAt: time.Now().AddDate(1, 0, 0), // 1 year.
	}
	_, ok := m.silences[da.OrgId]
	if !ok {
		m.silences[da.OrgId] = make([]*pb.MeshSilence, 0)
	}
	m.silences[da.OrgId] = append(m.silences[da.OrgId], s)
	return nil
}
|
||||
|
||||
// writeSilencesFile serializes the queued silences for the given org in the
// Alertmanager length-delimited protobuf format and writes them to that org's
// silences file, replacing it atomically via openReplace. No-op when the org
// has no queued silences.
func (m *migration) writeSilencesFile(orgID int64) error {
	var buf bytes.Buffer
	orgSilences, ok := m.silences[orgID]
	if !ok {
		return nil
	}

	for _, e := range orgSilences {
		if _, err := pbutil.WriteDelimited(&buf, e); err != nil {
			return err
		}
	}

	f, err := openReplace(silencesFileNameForOrg(m.mg, orgID))
	if err != nil {
		return err
	}

	if _, err := io.Copy(f, bytes.NewReader(buf.Bytes())); err != nil {
		return err
	}

	// Close syncs the temp file and renames it into place.
	return f.Close()
}
|
||||
|
||||
// getSilenceFileNamesForAllOrgs globs for every per-org silences file under
// the alerting data directory.
func getSilenceFileNamesForAllOrgs(mg *migrator.Migrator) ([]string, error) {
	return filepath.Glob(filepath.Join(mg.Cfg.DataPath, "alerting", "*", "silences"))
}

// silencesFileNameForOrg returns the path of the silences file for one org.
func silencesFileNameForOrg(mg *migrator.Migrator, orgID int64) string {
	return filepath.Join(mg.Cfg.DataPath, "alerting", strconv.Itoa(int(orgID)), "silences")
}
|
||||
|
||||
// replaceFile wraps a file that is moved to another filename on closing.
|
||||
type replaceFile struct {
|
||||
*os.File
|
||||
filename string
|
||||
}
|
||||
|
||||
func (f *replaceFile) Close() error {
|
||||
if err := f.File.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.File.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(f.File.Name(), f.filename)
|
||||
}
|
||||
|
||||
// openReplace opens a new temporary file that is moved to filename on closing.
|
||||
func openReplace(filename string) (*replaceFile, error) {
|
||||
tmpFilename := fmt.Sprintf("%s.%x", filename, uint64(rand.Int63()))
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(tmpFilename), os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//nolint:gosec
|
||||
f, err := os.Create(tmpFilename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rf := &replaceFile{
|
||||
File: f,
|
||||
filename: filename,
|
||||
}
|
||||
return rf, nil
|
||||
}
|
||||
|
||||
// getLabelForSilenceMatching returns the label name/value pair that ties a
// migrated silence to the alert rule with the given UID.
func getLabelForSilenceMatching(ruleUID string) (string, string) {
	const labelName = "rule_uid"
	return labelName, ruleUID
}
|
||||
@@ -492,6 +492,9 @@ func addAlertImageMigrations(mg *migrator.Migrator) {
|
||||
}
|
||||
|
||||
func extractAlertmanagerConfigurationHistoryMigration(mg *migrator.Migrator) {
|
||||
if !mg.Cfg.UnifiedAlerting.IsEnabled() {
|
||||
return
|
||||
}
|
||||
// Since it's not always consistent as to what state the org ID indexes are in, just drop them all and rebuild from scratch.
|
||||
// This is not expensive since this table is guaranteed to have a small number of rows.
|
||||
mg.AddMigration("drop non-unique orgID index on alert_configuration", migrator.NewDropIndexMigration(migrator.Table{Name: "alert_configuration"}, &migrator.Index{Cols: []string{"org_id"}}))
|
||||
|
||||
208
pkg/services/sqlstore/migrations/ualert/template.go
Normal file
208
pkg/services/sqlstore/migrations/ualert/template.go
Normal file
@@ -0,0 +1,208 @@
|
||||
// This file contains code that parses templates from old alerting into a sequence
|
||||
// of tokens. Each token can be either a string literal or a variable.
|
||||
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/ngalert/state/template"
|
||||
)
|
||||
|
||||
// Token is one parsed element of an old-alerting template; exactly one of
// Literal or Variable is expected to be non-empty.
type Token struct {
	Literal  string
	Variable string
}

// IsLiteral reports whether the token holds literal text.
func (t Token) IsLiteral() bool {
	return len(t.Literal) > 0
}

// IsVariable reports whether the token holds a variable name.
func (t Token) IsVariable() bool {
	return len(t.Variable) > 0
}

// String returns the token's content; it panics when both fields are empty.
func (t Token) String() string {
	switch {
	case t.IsLiteral():
		return t.Literal
	case t.IsVariable():
		return t.Variable
	}
	panic("empty token")
}
|
||||
|
||||
// MigrateTmpl converts an old-alerting message template (with ${var}
// placeholders) into a Go template. Literals that look like template syntax
// are escaped, and when any variable is present a $mergedLabels map is
// declared up front and each variable becomes a lookup into it.
func MigrateTmpl(l log.Logger, oldTmpl string) string {
	var newTmpl string

	tokens := tokenizeTmpl(l, oldTmpl)
	tokens = escapeLiterals(tokens)

	if anyVariableToken(tokens) {
		tokens = variablesToMapLookups(tokens, "mergedLabels")
		newTmpl += fmt.Sprintf("{{- $mergedLabels := %s $values -}}\n", template.MergeLabelValuesFuncName)
	}

	newTmpl += tokensToTmpl(tokens)
	return newTmpl
}
|
||||
|
||||
// tokenizeTmpl splits tmpl into literal and ${variable} tokens using a
// two-cursor scan: l marks the start of the pending literal and r advances
// through the runes. Malformed variables are skipped, their errors collected
// and logged once at the end.
func tokenizeTmpl(logger log.Logger, tmpl string) []Token {
	var (
		tokens []Token
		l      int
		r      int
		err    error
	)

	in := []rune(tmpl)
	for r < len(in) {
		if !startVariable(in[r:]) {
			r++
			continue
		}

		token, offset, tokenErr := tokenizeVariable(in[r:])
		if tokenErr != nil {
			err = errors.Join(err, tokenErr)
			r += offset
			continue
		}

		// we've found a variable, so everything from l -> r is the literal before the variable
		// ex: "foo ${bar}" -> Literal: "foo ", Variable: "bar"
		if r > l {
			tokens = append(tokens, Token{Literal: string(in[l:r])})
		}
		tokens = append(tokens, token)

		// seek l and r past the variable
		r += offset
		l = r
	}

	// any remaining runes will be a final literal
	if r > l {
		tokens = append(tokens, Token{Literal: string(in[l:r])})
	}

	if err != nil {
		logger.Warn("Encountered malformed template", "template", tmpl, "err", err)
	}

	return tokens
}
|
||||
|
||||
// tokenizeVariable parses one ${...} variable from the start of in, returning
// the variable token and the number of runes consumed. The caller must ensure
// in begins with the "${" delimiter; on malformed input the consumed offset is
// returned with an error so the caller can resume scanning past it.
func tokenizeVariable(in []rune) (Token, int, error) {
	var (
		pos   int
		r     rune
		runes []rune
	)

	if !startVariable(in) {
		panic("tokenizeVariable called with input that doesn't start with delimiter")
	}
	pos += 2 // seek past opening delimiter

	// consume valid runes until we hit a closing brace
	// non-space whitespace and the opening delimiter are invalid
	for pos < len(in) {
		r = in[pos]

		if unicode.IsSpace(r) && r != ' ' {
			return Token{}, pos, fmt.Errorf("unexpected whitespace")
		}

		if startVariable(in[pos:]) {
			return Token{}, pos, fmt.Errorf("ambiguous delimiter")
		}

		if r == '}' {
			pos++
			break
		}

		runes = append(runes, r)
		pos++
	}

	// variable must end with '}' delimiter
	if r != '}' {
		return Token{}, pos, fmt.Errorf("expected '}', got '%c'", r)
	}

	return Token{Variable: string(runes)}, pos, nil
}
|
||||
|
||||
// startVariable reports whether the input begins with the "${" opening
// delimiter of a template variable.
func startVariable(in []rune) bool {
	if len(in) < 2 {
		return false
	}
	return in[0] == '$' && in[1] == '{'
}
|
||||
|
||||
func anyVariableToken(tokens []Token) bool {
|
||||
for _, token := range tokens {
|
||||
if token.IsVariable() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// tokensToTmpl returns the tokens as a Go template
|
||||
func tokensToTmpl(tokens []Token) string {
|
||||
buf := bytes.Buffer{}
|
||||
for _, token := range tokens {
|
||||
if token.IsVariable() {
|
||||
buf.WriteString("{{")
|
||||
buf.WriteString(token.String())
|
||||
buf.WriteString("}}")
|
||||
} else {
|
||||
buf.WriteString(token.String())
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// escapeLiterals escapes any token literals with substrings that would be interpreted as Go template syntax
|
||||
func escapeLiterals(tokens []Token) []Token {
|
||||
result := make([]Token, 0, len(tokens))
|
||||
for _, token := range tokens {
|
||||
if token.IsLiteral() && shouldEscape(token.Literal) {
|
||||
token.Literal = fmt.Sprintf("{{`%s`}}", token.Literal)
|
||||
}
|
||||
result = append(result, token)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// shouldEscape reports whether literal contains text the Go template engine
// could interpret as an action delimiter: a "{{" anywhere in the string, or a
// trailing '{' (which could pair with following output to form "{{").
func shouldEscape(literal string) bool {
	if literal == "" {
		// Guard: the index expression below would panic on an empty literal.
		return false
	}
	return strings.Contains(literal, "{{") || literal[len(literal)-1] == '{'
}
|
||||
|
||||
// variablesToMapLookups converts any variables in a slice of tokens to Go template map lookups
|
||||
func variablesToMapLookups(tokens []Token, mapName string) []Token {
|
||||
result := make([]Token, 0, len(tokens))
|
||||
for _, token := range tokens {
|
||||
if token.IsVariable() {
|
||||
token.Variable = mapLookupString(token.Variable, mapName)
|
||||
}
|
||||
result = append(result, token)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// mapLookupString renders variable name v as a Go template expression that
// reads it from the template map named mapName. Names made only of letters,
// digits, and underscores use dot access ($map.name); anything else falls
// back to an index call with the name quoted to escape special characters.
func mapLookupString(v string, mapName string) string {
	identSafe := func(r rune) bool {
		return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_'
	}
	for _, r := range v {
		if !identSafe(r) {
			return "index $" + mapName + " " + strconv.Quote(v)
		}
	}
	return "$" + mapName + "." + v
}
|
||||
321
pkg/services/sqlstore/migrations/ualert/template_test.go
Normal file
321
pkg/services/sqlstore/migrations/ualert/template_test.go
Normal file
@@ -0,0 +1,321 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestTokenString verifies Token.String returns the literal text for literal
// tokens and the variable name for variable tokens.
func TestTokenString(t *testing.T) {
	t1 := Token{Literal: "this is a literal"}
	assert.Equal(t, "this is a literal", t1.String())
	t2 := Token{Variable: "this is a variable"}
	assert.Equal(t, "this is a variable", t2.String())
}
|
||||
|
||||
// TestTokenizeVariable exercises tokenizeVariable: each case supplies raw
// template text beginning with "${" and expects the parsed token, the rune
// offset at which parsing stopped, and (for malformed input) an exact error
// message. A zero-value token is expected in the error cases.
func TestTokenizeVariable(t *testing.T) {
	tests := []struct {
		name   string
		text   string
		token  Token
		offset int
		err    string
	}{{
		name:   "variable with no trailing text",
		text:   "${instance}",
		token:  Token{Variable: "instance"},
		offset: 11,
	}, {
		name:   "variable with trailing text",
		text:   "${instance} is down",
		token:  Token{Variable: "instance"},
		offset: 11,
	}, {
		name:   "varaiable with numbers",
		text:   "${instance1} is down",
		token:  Token{Variable: "instance1"},
		offset: 12,
	}, {
		name:   "variable with underscores",
		text:   "${instance_with_underscores} is down",
		token:  Token{Variable: "instance_with_underscores"},
		offset: 28,
	}, {
		name:   "variable with spaces",
		text:   "${instance with spaces} is down",
		token:  Token{Variable: "instance with spaces"},
		offset: 23,
	}, {
		name:   "variable with non-reserved special character",
		text:   "${@instance1} is down",
		token:  Token{Variable: "@instance1"},
		offset: 13,
	}, {
		name:   "two variables without spaces",
		text:   "${variable1}${variable2}",
		token:  Token{Variable: "variable1"},
		offset: 12,
	}, {
		name:   "variable with two closing braces stops at first brace",
		text:   "${instance}} is down",
		token:  Token{Variable: "instance"},
		offset: 11,
	}, {
		name:   "variable with newline",
		text:   "${instance\n} is down",
		offset: 10,
		err:    "unexpected whitespace",
	}, {
		name:   "variable with ambiguous delimiter returns error",
		text:   "${${instance}",
		offset: 2,
		err:    "ambiguous delimiter",
	}, {
		name:   "variable without closing brace returns error",
		text:   "${instance is down",
		offset: 18,
		err:    "expected '}', got 'n'",
	}}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			token, offset, err := tokenizeVariable([]rune(test.text))
			if test.err != "" {
				assert.EqualError(t, err, test.err)
			}
			// Offset and token are asserted even in error cases.
			assert.Equal(t, test.offset, offset)
			assert.Equal(t, test.token, token)
		})
	}
}
|
||||
|
||||
// TestTokenizeTmpl exercises tokenizeTmpl end to end: each case supplies a
// whole legacy template and expects the exact sequence of literal/variable
// tokens, including the fallback-to-literal behavior for malformed variables.
func TestTokenizeTmpl(t *testing.T) {
	tests := []struct {
		name   string
		tmpl   string
		tokens []Token
	}{{
		name:   "simple template can be tokenized",
		tmpl:   "${instance} is down",
		tokens: []Token{{Variable: "instance"}, {Literal: " is down"}},
	}, {
		name: "complex template can be tokenized",
		tmpl: "More than ${value} ${status_code} in the last 5 minutes",
		tokens: []Token{
			{Literal: "More than "},
			{Variable: "value"},
			{Literal: " "},
			{Variable: "status_code"},
			{Literal: " in the last 5 minutes"},
		},
	}, {
		name:   "variables without spaces between can be tokenized",
		tmpl:   "${value}${status_code}",
		tokens: []Token{{Variable: "value"}, {Variable: "status_code"}},
	}, {
		name:   "variables without spaces between then literal can be tokenized",
		tmpl:   "${value}${status_code} in the last 5 minutes",
		tokens: []Token{{Variable: "value"}, {Variable: "status_code"}, {Literal: " in the last 5 minutes"}},
	}, {
		name: "variables with reserved characters can be tokenized",
		tmpl: "More than ${$value} ${{status_code} in the last 5 minutes",
		tokens: []Token{
			{Literal: "More than "},
			{Variable: "$value"},
			{Literal: " "},
			{Variable: "{status_code"},
			{Literal: " in the last 5 minutes"},
		},
	}, {
		name:   "ambiguous delimiters are tokenized as literals",
		tmpl:   "Instance ${instance and ${instance} is down",
		tokens: []Token{{Literal: "Instance ${instance and "}, {Variable: "instance"}, {Literal: " is down"}},
	}, {
		name:   "all '$' runes preceding a variable are included in literal",
		tmpl:   "Instance $${instance} is down",
		tokens: []Token{{Literal: "Instance $"}, {Variable: "instance"}, {Literal: " is down"}},
	}, {
		name:   "sole '$' rune is included in literal",
		tmpl:   "Instance $instance and ${instance} is down",
		tokens: []Token{{Literal: "Instance $instance and "}, {Variable: "instance"}, {Literal: " is down"}},
	}, {
		name:   "extra closing brace is included in literal",
		tmpl:   "Instance ${instance}} and ${instance} is down",
		tokens: []Token{{Literal: "Instance "}, {Variable: "instance"}, {Literal: "} and "}, {Variable: "instance"}, {Literal: " is down"}},
	}, {
		name:   "variable with newline tokenized as literal",
		tmpl:   "${value}${status_code\n}${value} in the last 5 minutes",
		tokens: []Token{{Variable: "value"}, {Literal: "${status_code\n}"}, {Variable: "value"}, {Literal: " in the last 5 minutes"}},
	}, {
		name:   "extra closing brace between variables is included in literal",
		tmpl:   "${value}${status_code}}${value} in the last 5 minutes",
		tokens: []Token{{Variable: "value"}, {Variable: "status_code"}, {Literal: "}"}, {Variable: "value"}, {Literal: " in the last 5 minutes"}},
	}}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			tokens := tokenizeTmpl(log.NewNopLogger(), test.tmpl)
			assert.Equal(t, test.tokens, tokens)
		})
	}
}
|
||||
|
||||
// TestTokensToTmpl verifies that variable tokens are wrapped in {{...}} and
// literal tokens are emitted verbatim.
func TestTokensToTmpl(t *testing.T) {
	tokens := []Token{{Variable: "instance"}, {Literal: " is down"}}
	assert.Equal(t, "{{instance}} is down", tokensToTmpl(tokens))
}
|
||||
|
||||
// TestTokensToTmplNewlines verifies that newlines inside literal tokens
// survive rendering unchanged.
func TestTokensToTmplNewlines(t *testing.T) {
	tokens := []Token{{Variable: "instance"}, {Literal: " is down\n"}, {Variable: "job"}, {Literal: " is down"}}
	assert.Equal(t, "{{instance}} is down\n{{job}} is down", tokensToTmpl(tokens))
}
|
||||
|
||||
// TestMapLookupString verifies the two rendering modes of mapLookupString:
// dot access for identifier-safe names, and a quoted index call for names
// containing spaces, quotes, backslashes, or legacy delimiter characters.
func TestMapLookupString(t *testing.T) {
	cases := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "when there are no spaces",
			input:    "instance",
			expected: "$labels.instance",
		},
		{
			name:     "when there are spaces",
			input:    "instance with spaces",
			expected: `index $labels "instance with spaces"`,
		},
		{
			name:     "when there are quotes",
			input:    `instance with "quotes"`,
			expected: `index $labels "instance with \"quotes\""`,
		},
		{
			name:     "when there are backslashes",
			input:    `instance with \backslashes\`,
			expected: `index $labels "instance with \\backslashes\\"`,
		},
		{
			name:     "when there are legacy delimiter characters",
			input:    `instance{ with $delim} characters`,
			expected: `index $labels "instance{ with $delim} characters"`,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, mapLookupString(tc.input, "labels"))
		})
	}
}
|
||||
|
||||
// TestVariablesToMapLookups verifies identifier-safe variables become dot
// lookups while literal tokens are left untouched.
func TestVariablesToMapLookups(t *testing.T) {
	tokens := []Token{{Variable: "instance"}, {Literal: " is down"}}
	expected := []Token{{Variable: "$labels.instance"}, {Literal: " is down"}}
	assert.Equal(t, expected, variablesToMapLookups(tokens, "labels"))
}
|
||||
|
||||
// TestVariablesToMapLookupsSpace verifies variables with spaces fall back to
// the quoted index form.
func TestVariablesToMapLookupsSpace(t *testing.T) {
	tokens := []Token{{Variable: "instance with spaces"}, {Literal: " is down"}}
	expected := []Token{{Variable: "index $labels \"instance with spaces\""}, {Literal: " is down"}}
	assert.Equal(t, expected, variablesToMapLookups(tokens, "labels"))
}
|
||||
|
||||
// TestEscapeLiterals verifies that literals containing template-delimiter
// text are wrapped in a raw-string action, and that variable tokens pass
// through unchanged.
func TestEscapeLiterals(t *testing.T) {
	cases := []struct {
		name     string
		input    []Token
		expected []Token
	}{
		{
			name:     "when there are no literals",
			input:    []Token{{Variable: "instance"}},
			expected: []Token{{Variable: "instance"}},
		},
		{
			name:     "literal with double braces: {{",
			input:    []Token{{Literal: "instance {{"}},
			expected: []Token{{Literal: "{{`instance {{`}}"}},
		},
		{
			name:     "literal that ends with closing brace: {",
			input:    []Token{{Literal: "instance {"}},
			expected: []Token{{Literal: "{{`instance {`}}"}},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, escapeLiterals(tc.input))
		})
	}
}
|
||||
|
||||
// TestMigrateTmpl is the end-to-end test for MigrateTmpl. When a template
// contains at least one variable (vars), the migrated output is prefixed with
// the $mergedLabels preamble (see withDeduplicateMap); templates that cannot
// be migrated at all pass through unchanged.
func TestMigrateTmpl(t *testing.T) {
	cases := []struct {
		name     string
		input    string
		expected string
		vars     bool
	}{
		{
			name:     "template does not contain variables",
			input:    "instance is down",
			expected: "instance is down",
			vars:     false,
		},
		{
			name:     "template contains variable",
			input:    "${instance} is down",
			expected: withDeduplicateMap("{{$mergedLabels.instance}} is down"),
			vars:     true,
		},
		{
			name:     "template contains double braces",
			input:    "{{CRITICAL}} instance is down",
			expected: "{{`{{CRITICAL}} instance is down`}}",
			vars:     false,
		},
		{
			name:     "template contains opening brace before variable",
			input:    `${${instance} is down`,
			expected: withDeduplicateMap("{{`${`}}{{$mergedLabels.instance}} is down"),
			vars:     true,
		},
		{
			name:     "template contains newline",
			input:    "CRITICAL\n${instance} is down",
			expected: withDeduplicateMap("CRITICAL\n{{$mergedLabels.instance}} is down"),
			vars:     true,
		},
		{
			name:     "partial migration, no variables",
			input:    "${instance is down",
			expected: "${instance is down",
		},
		{
			name:     "partial migration, with variables",
			input:    "${instance} is down ${${nestedVar}}",
			expected: withDeduplicateMap("{{$mergedLabels.instance}}{{` is down ${`}}{{$mergedLabels.nestedVar}}}"),
			vars:     true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tmpl := MigrateTmpl(log.NewNopLogger(), tc.input)

			assert.Equal(t, tc.expected, tmpl)
		})
	}
}
|
||||
|
||||
// withDeduplicateMap prefixes input with the template preamble that builds
// the $mergedLabels map, mirroring what MigrateTmpl emits for templates
// containing variables.
func withDeduplicateMap(input string) string {
	// hardcode function name to fail tests if it changes
	const funcName = "mergeLabelValues"

	return "{{- $mergedLabels := " + funcName + " $values -}}\n" + input
}
|
||||
25
pkg/services/sqlstore/migrations/ualert/testing.go
Normal file
25
pkg/services/sqlstore/migrations/ualert/testing.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
"github.com/prometheus/alertmanager/silence/silencepb"
|
||||
)
|
||||
|
||||
// newTestMigration generates an empty migration to use in tests.
// The returned migration has a logger-only Migrator, an empty case-sensitive
// UID set, and an empty silences map; no xorm session is attached, so tests
// that touch the database must set sess themselves.
func newTestMigration(t *testing.T) *migration {
	t.Helper()

	return &migration{
		mg: &migrator.Migrator{

			Logger: log.New("test"),
		},
		// caseInsensitive is left at its zero value (false) here.
		seenUIDs: uidSet{
			set: make(map[string]struct{}),
		},
		silences: make(map[int64][]*silencepb.MeshSilence),
	}
}
|
||||
@@ -1,21 +1,48 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
alertingNotify "github.com/grafana/alerting/notify"
|
||||
pb "github.com/prometheus/alertmanager/silence/silencepb"
|
||||
"xorm.io/xorm"
|
||||
|
||||
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
// GENERAL_FOLDER is the name of the fallback folder for migrated alerts.
const GENERAL_FOLDER = "General Alerting"

// DASHBOARD_FOLDER is the name format for folders created for dashboards
// with custom permissions.
const DASHBOARD_FOLDER = "%s Alerts - %s"

// MaxFolderName is the maximum length of the folder name generated using DASHBOARD_FOLDER format
const MaxFolderName = 255

// FOLDER_CREATED_BY is used to track folders created by this migration
// during alert migration cleanup.
const FOLDER_CREATED_BY = -8

// KV_NAMESPACE is the kv_store namespace whose entries are removed when
// unified alerting data is deleted (see rmMigration.Exec).
const KV_NAMESPACE = "alertmanager"

var migTitle = "move dashboard alerts to unified alerting"

var rmMigTitle = "remove unified alerting data"

const clearMigrationEntryTitle = "clear migration entry %q"
const codeMigration = "code migration"

// It is defined in pkg/expr/service.go as "DatasourceType"
const expressionDatasourceUID = "__expr__"
|
||||
|
||||
type MigrationError struct {
|
||||
AlertId int64
|
||||
Err error
|
||||
@@ -27,16 +54,99 @@ func (e MigrationError) Error() string {
|
||||
|
||||
func (e *MigrationError) Unwrap() error { return e.Err }
|
||||
|
||||
// FixEarlyMigration fixes UA configs created before 8.2 with org_id=0 and moves some files like __default__.tmpl.
|
||||
// The only use of this migration is when a user enabled ng-alerting before 8.2.
|
||||
func FixEarlyMigration(mg *migrator.Migrator) {
|
||||
func AddDashAlertMigration(mg *migrator.Migrator) {
|
||||
logs, err := mg.GetMigrationLog()
|
||||
if err != nil {
|
||||
mg.Logger.Error("Alert migration failure: could not get migration log", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
_, migrationRun := logs[migTitle]
|
||||
|
||||
switch {
|
||||
// If unified alerting is enabled and the upgrade migration has not been run
|
||||
case mg.Cfg.UnifiedAlerting.IsEnabled() && !migrationRun:
|
||||
// Remove the migration entry that removes all unified alerting data. This is so when the feature
|
||||
// flag is removed in future the "remove unified alerting data" migration will be run again.
|
||||
mg.AddMigration(fmt.Sprintf(clearMigrationEntryTitle, rmMigTitle), &clearMigrationEntry{
|
||||
migrationID: rmMigTitle,
|
||||
})
|
||||
if err != nil {
|
||||
mg.Logger.Error("Alert migration error: could not clear alert migration for removing data", "error", err)
|
||||
}
|
||||
mg.AddMigration(migTitle, &migration{
|
||||
// We deduplicate for case-insensitive matching in MySQL-compatible backend flavours because they use case-insensitive collation.
|
||||
seenUIDs: uidSet{set: make(map[string]struct{}), caseInsensitive: mg.Dialect.SupportEngine()},
|
||||
silences: make(map[int64][]*pb.MeshSilence),
|
||||
})
|
||||
// If unified alerting is disabled and upgrade migration has been run
|
||||
case !mg.Cfg.UnifiedAlerting.IsEnabled() && migrationRun:
|
||||
// If legacy alerting is also disabled, there is nothing to do
|
||||
if setting.AlertingEnabled != nil && !*setting.AlertingEnabled {
|
||||
return
|
||||
}
|
||||
|
||||
// Safeguard to prevent data loss when migrating from UA to LA
|
||||
if !mg.Cfg.ForceMigration {
|
||||
panic("Grafana has already been migrated to Unified Alerting.\nAny alert rules created while using Unified Alerting will be deleted by rolling back.\n\nSet force_migration=true in your grafana.ini and restart Grafana to roll back and delete Unified Alerting configuration data.")
|
||||
}
|
||||
// Remove the migration entry that creates unified alerting data. This is so when the feature
|
||||
// flag is enabled in the future the migration "move dashboard alerts to unified alerting" will be run again.
|
||||
mg.AddMigration(fmt.Sprintf(clearMigrationEntryTitle, migTitle), &clearMigrationEntry{
|
||||
migrationID: migTitle,
|
||||
})
|
||||
if err != nil {
|
||||
mg.Logger.Error("Alert migration error: could not clear dashboard alert migration", "error", err)
|
||||
}
|
||||
mg.AddMigration(rmMigTitle, &rmMigration{})
|
||||
}
|
||||
}
|
||||
|
||||
// RerunDashAlertMigration force the dashboard alert migration to run
|
||||
// to make sure that the Alertmanager configurations will be created for each organisation
|
||||
func RerunDashAlertMigration(mg *migrator.Migrator) {
|
||||
logs, err := mg.GetMigrationLog()
|
||||
if err != nil {
|
||||
mg.Logger.Error("Alert migration failure: could not get migration log", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
cloneMigTitle := fmt.Sprintf("clone %s", migTitle)
|
||||
mg.AddMigration(cloneMigTitle, &upgradeNgAlerting{})
|
||||
|
||||
_, migrationRun := logs[cloneMigTitle]
|
||||
ngEnabled := mg.Cfg.UnifiedAlerting.IsEnabled()
|
||||
|
||||
switch {
|
||||
case ngEnabled && !migrationRun:
|
||||
// The only use of this migration is when a user enabled ng-alerting before 8.2.
|
||||
mg.AddMigration(cloneMigTitle, &upgradeNgAlerting{})
|
||||
// if user disables the feature flag and enables it back.
|
||||
// This migration does not need to be run because the original migration AddDashAlertMigration does what's needed
|
||||
}
|
||||
}
|
||||
|
||||
func AddDashboardUIDPanelIDMigration(mg *migrator.Migrator) {
|
||||
logs, err := mg.GetMigrationLog()
|
||||
if err != nil {
|
||||
mg.Logger.Error("Alert migration failure: could not get migration log", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
migrationID := "update dashboard_uid and panel_id from existing annotations"
|
||||
mg.AddMigration(migrationID, &updateDashboardUIDPanelIDMigration{})
|
||||
_, migrationRun := logs[migrationID]
|
||||
ngEnabled := mg.Cfg.UnifiedAlerting.IsEnabled()
|
||||
undoMigrationID := "undo " + migrationID
|
||||
|
||||
if ngEnabled && !migrationRun {
|
||||
// If ngalert is enabled and the migration has not been run then run it.
|
||||
mg.AddMigration(migrationID, &updateDashboardUIDPanelIDMigration{})
|
||||
} else if !ngEnabled && migrationRun {
|
||||
// If ngalert is disabled and the migration has been run then remove it
|
||||
// from the migration log so it will run if ngalert is re-enabled.
|
||||
mg.AddMigration(undoMigrationID, &clearMigrationEntry{
|
||||
migrationID: migrationID,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// updateDashboardUIDPanelIDMigration sets the dashboard_uid and panel_id columns
|
||||
@@ -88,6 +198,327 @@ func (m *updateDashboardUIDPanelIDMigration) Exec(sess *xorm.Session, mg *migrat
|
||||
return nil
|
||||
}
|
||||
|
||||
// clearMigrationEntry removes an entry from the migration_log table.
// This migration is not recorded in the migration_log so that it can re-run several times.
type clearMigrationEntry struct {
	migrator.MigrationBase

	// migrationID is the migration_log entry to delete.
	migrationID string
}

// SQL returns a human-readable description; the real work happens in Exec.
func (m *clearMigrationEntry) SQL(dialect migrator.Dialect) string {
	return "clear migration entry code migration"
}

// Exec deletes the migration_log row whose migration_id equals m.migrationID.
func (m *clearMigrationEntry) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
	_, err := sess.SQL(`DELETE from migration_log where migration_id = ?`, m.migrationID).Query()
	if err != nil {
		return fmt.Errorf("failed to clear migration entry %v: %w", m.migrationID, err)
	}
	return nil
}

// SkipMigrationLog keeps this migration itself out of the migration_log so it
// can run again on a later startup.
func (m *clearMigrationEntry) SkipMigrationLog() bool {
	return true
}
|
||||
|
||||
// migration is the code migration that moves legacy dashboard alerts to
// unified alerting (see Exec for the full flow).
type migration struct {
	migrator.MigrationBase
	// session and mg are attached for convenience.
	sess *xorm.Session
	mg   *migrator.Migrator

	// seenUIDs tracks UIDs handed out so far to avoid duplicates
	// (case-insensitively on some backends — see where migration is constructed).
	seenUIDs uidSet
	// silences accumulates per-org silences that are written to disk at the
	// end of the migration (writeSilencesFile).
	silences map[int64][]*pb.MeshSilence
}

// SQL returns a placeholder description; this is a code migration and all
// work happens in Exec.
func (m *migration) SQL(dialect migrator.Dialect) string {
	return codeMigration
}
|
||||
|
||||
// Exec performs the dashboard-alert to unified-alerting migration: it loads
// every legacy dashboard alert, resolves a destination folder for each
// (custom-permission folder, existing folder, or the general folder),
// converts the alert into an alertRule, then writes per-org silence files,
// inserts the rules, and stores the generated Alertmanager configurations.
//nolint:gocyclo
func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
	m.sess = sess
	m.mg = mg

	dashAlerts, err := m.slurpDashAlerts()
	if err != nil {
		return err
	}
	mg.Logger.Info("Alerts found to migrate", "alerts", len(dashAlerts))

	// [orgID, dataSourceId] -> UID
	dsIDMap, err := m.slurpDSIDs()
	if err != nil {
		return err
	}

	// [orgID, dashboardId] -> dashUID
	dashIDMap, err := m.slurpDashUIDs()
	if err != nil {
		return err
	}

	// cache for folders created for dashboards that have custom permissions
	folderCache := make(map[string]*dashboard)
	// cache for the general folders
	generalFolderCache := make(map[int64]*dashboard)

	folderHelper := folderHelper{
		sess: sess,
		mg:   mg,
	}

	// gf returns (creating if necessary) the general folder for the alert's
	// org, caching the result per org. NOTE: it assigns to the outer err.
	gf := func(dash dashboard, da dashAlert) (*dashboard, error) {
		f, ok := generalFolderCache[dash.OrgId]
		if !ok {
			// get or create general folder
			f, err = folderHelper.getOrCreateGeneralFolder(dash.OrgId)
			if err != nil {
				return nil, MigrationError{
					Err:     fmt.Errorf("failed to get or create general folder under organisation %d: %w", dash.OrgId, err),
					AlertId: da.Id,
				}
			}
			generalFolderCache[dash.OrgId] = f
		}
		// No need to assign default permissions to general folder
		// because they are included to the query result if it's a folder with no permissions
		// https://github.com/grafana/grafana/blob/076e2ce06a6ecf15804423fcc8dca1b620a321e5/pkg/services/sqlstore/dashboard_acl.go#L109
		return f, nil
	}

	// Per org map of newly created rules to which notification channels it should send to.
	rulesPerOrg := make(map[int64]map[*alertRule][]uidOrID)

	for _, da := range dashAlerts {
		l := mg.Logger.New("ruleID", da.Id, "ruleName", da.Name, "dashboardUID", da.DashboardUID, "orgID", da.OrgId)
		l.Debug("Migrating alert rule to Unified Alerting")
		newCond, err := transConditions(*da.ParsedSettings, da.OrgId, dsIDMap)
		if err != nil {
			return err
		}

		da.DashboardUID = dashIDMap[[2]int64{da.OrgId, da.DashboardId}]

		// get dashboard
		dash := dashboard{}
		exists, err := m.sess.Where("org_id=? AND uid=?", da.OrgId, da.DashboardUID).Get(&dash)
		if err != nil {
			return MigrationError{
				Err:     fmt.Errorf("failed to get dashboard %s under organisation %d: %w", da.DashboardUID, da.OrgId, err),
				AlertId: da.Id,
			}
		}
		if !exists {
			return MigrationError{
				Err:     fmt.Errorf("dashboard with UID %v under organisation %d not found: %w", da.DashboardUID, da.OrgId, err),
				AlertId: da.Id,
			}
		}

		// Resolve the destination folder for the migrated rule.
		var folder *dashboard
		switch {
		case dash.HasACL:
			folderName := getAlertFolderNameFromDashboard(&dash)
			f, ok := folderCache[folderName]
			if !ok {
				l.Info("Create a new folder for alerts that belongs to dashboard because it has custom permissions", "folder", folderName)
				// create folder and assign the permissions of the dashboard (included default and inherited)
				f, err = folderHelper.createFolder(dash.OrgId, folderName)
				if err != nil {
					return MigrationError{
						Err:     fmt.Errorf("failed to create folder: %w", err),
						AlertId: da.Id,
					}
				}
				permissions, err := folderHelper.getACL(dash.OrgId, dash.Id)
				if err != nil {
					return MigrationError{
						Err:     fmt.Errorf("failed to get dashboard %d under organisation %d permissions: %w", dash.Id, dash.OrgId, err),
						AlertId: da.Id,
					}
				}
				err = folderHelper.setACL(f.OrgId, f.Id, permissions)
				if err != nil {
					return MigrationError{
						Err:     fmt.Errorf("failed to set folder %d under organisation %d permissions: %w", f.Id, f.OrgId, err),
						AlertId: da.Id,
					}
				}
				folderCache[folderName] = f
			}
			folder = f
		case dash.FolderId > 0:
			// get folder if exists
			f, err := folderHelper.getFolder(dash, da)
			if err != nil {
				// If folder does not exist then the dashboard is an orphan and we migrate the alert to the general folder.
				l.Warn("Failed to find folder for dashboard. Migrate rule to the default folder", "rule_name", da.Name, "dashboard_uid", da.DashboardUID, "missing_folder_id", dash.FolderId)
				folder, err = gf(dash, da)
				if err != nil {
					return err
				}
			} else {
				folder = &f
			}
		default:
			folder, err = gf(dash, da)
			if err != nil {
				return err
			}
		}

		if folder.Uid == "" {
			return MigrationError{
				Err:     fmt.Errorf("empty folder identifier"),
				AlertId: da.Id,
			}
		}
		rule, err := m.makeAlertRule(l, *newCond, da, folder.Uid)
		if err != nil {
			return fmt.Errorf("failed to migrate alert rule '%s' [ID:%d, DashboardUID:%s, orgID:%d]: %w", da.Name, da.Id, da.DashboardUID, da.OrgId, err)
		}

		// Record the rule together with its legacy notification channel IDs.
		if _, ok := rulesPerOrg[rule.OrgID]; !ok {
			rulesPerOrg[rule.OrgID] = make(map[*alertRule][]uidOrID)
		}
		if _, ok := rulesPerOrg[rule.OrgID][rule]; !ok {
			rulesPerOrg[rule.OrgID][rule] = extractChannelIDs(da)
		} else {
			return MigrationError{
				Err:     fmt.Errorf("duplicate generated rule UID"),
				AlertId: da.Id,
			}
		}
	}

	// Silence-file write failures are logged but do not abort the migration.
	for orgID := range rulesPerOrg {
		if err := m.writeSilencesFile(orgID); err != nil {
			m.mg.Logger.Error("Alert migration error: failed to write silence file", "err", err)
		}
	}

	amConfigPerOrg, err := m.setupAlertmanagerConfigs(rulesPerOrg)
	if err != nil {
		return err
	}

	err = m.insertRules(mg, rulesPerOrg)
	if err != nil {
		return err
	}

	for orgID, amConfig := range amConfigPerOrg {
		if err := m.writeAlertmanagerConfig(orgID, amConfig); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// insertRules writes every migrated rule to alert_rule, plus a matching
// alert_rule_version entry. On Postgres each insert runs in its own
// transaction (presumably so a failed statement does not poison the outer
// migration transaction — TODO confirm); other backends use the shared
// session. A failed insert is retried once after appending the rule UID to
// the title and rule group to dodge uniqueness constraints.
func (m *migration) insertRules(mg *migrator.Migrator, rulesPerOrg map[int64]map[*alertRule][]uidOrID) error {
	for _, rules := range rulesPerOrg {
		for rule := range rules {
			var err error
			if strings.HasPrefix(mg.Dialect.DriverName(), migrator.Postgres) {
				err = mg.InTransaction(func(sess *xorm.Session) error {
					_, err := sess.Insert(rule)
					return err
				})
			} else {
				_, err = m.sess.Insert(rule)
			}
			if err != nil {
				// TODO better error handling, if constraint
				rule.Title += fmt.Sprintf(" %v", rule.UID)
				rule.RuleGroup += fmt.Sprintf(" %v", rule.UID)

				_, err = m.sess.Insert(rule)
				if err != nil {
					return err
				}
			}

			// create entry in alert_rule_version
			_, err = m.sess.Insert(rule.makeVersion())
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
|
||||
// writeAlertmanagerConfig stores amConfig as the org's Alertmanager
// configuration row, first removing any configuration left over from a
// previous switch back to legacy alerting.
func (m *migration) writeAlertmanagerConfig(orgID int64, amConfig *PostableUserConfig) error {
	rawAmConfig, err := json.Marshal(amConfig)
	if err != nil {
		return err
	}

	// remove an existing configuration, which could have been left during switching back to legacy alerting
	_, _ = m.sess.Delete(AlertConfiguration{OrgID: orgID})

	// We don't need to apply the configuration, given the multi org alertmanager will do an initial sync before the server is ready.
	_, err = m.sess.Insert(AlertConfiguration{
		AlertmanagerConfiguration: string(rawAmConfig),
		// Since we are migrating from a snapshot of the code, it is always going to migrate to
		// the v1 config.
		ConfigurationVersion: "v1",
		OrgID:                orgID,
	})
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// validateAlertmanagerConfig validates the alertmanager configuration produced by the migration against the receivers.
|
||||
func (m *migration) validateAlertmanagerConfig(config *PostableUserConfig) error {
|
||||
for _, r := range config.AlertmanagerConfig.Receivers {
|
||||
for _, gr := range r.GrafanaManagedReceivers {
|
||||
data, err := gr.Settings.MarshalJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var (
|
||||
cfg = &alertingNotify.GrafanaIntegrationConfig{
|
||||
UID: gr.UID,
|
||||
Name: gr.Name,
|
||||
Type: gr.Type,
|
||||
DisableResolveMessage: gr.DisableResolveMessage,
|
||||
Settings: data,
|
||||
SecureSettings: gr.SecureSettings,
|
||||
}
|
||||
)
|
||||
|
||||
// decryptFunc represents the legacy way of decrypting data. Before the migration, we don't need any new way,
|
||||
// given that the previous alerting will never support it.
|
||||
decryptFunc := func(_ context.Context, sjd map[string][]byte, key string, fallback string) string {
|
||||
if value, ok := sjd[key]; ok {
|
||||
decryptedData, err := util.Decrypt(value, setting.SecretKey)
|
||||
if err != nil {
|
||||
m.mg.Logger.Warn("Unable to decrypt key '%s' for %s receiver with uid %s, returning fallback.", key, gr.Type, gr.UID)
|
||||
return fallback
|
||||
}
|
||||
return string(decryptedData)
|
||||
}
|
||||
return fallback
|
||||
}
|
||||
_, err = alertingNotify.BuildReceiverConfiguration(context.Background(), &alertingNotify.APIReceiver{
|
||||
GrafanaIntegrations: alertingNotify.GrafanaIntegrations{Integrations: []*alertingNotify.GrafanaIntegrationConfig{cfg}},
|
||||
}, decryptFunc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type AlertConfiguration struct {
|
||||
ID int64 `xorm:"pk autoincr 'id'"`
|
||||
OrgID int64 `xorm:"org_id"`
|
||||
@@ -97,6 +528,84 @@ type AlertConfiguration struct {
|
||||
CreatedAt int64 `xorm:"created"`
|
||||
}
|
||||
|
||||
// rmMigration removes Grafana 8 alert data
type rmMigration struct {
	migrator.MigrationBase
}

// SQL returns a placeholder description; this is a code migration and all
// work happens in Exec.
func (m *rmMigration) SQL(dialect migrator.Dialect) string {
	return codeMigration
}
|
||||
|
||||
func (m *rmMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
|
||||
_, err := sess.Exec("delete from alert_rule")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = sess.Exec("delete from alert_rule_version")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = sess.Exec("delete from dashboard_acl where dashboard_id IN (select id from dashboard where created_by = ?)", FOLDER_CREATED_BY)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = sess.Exec("delete from dashboard where created_by = ?", FOLDER_CREATED_BY)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = sess.Exec("delete from alert_configuration")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = sess.Exec("delete from ngalert_configuration")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = sess.Exec("delete from alert_instance")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
exists, err := sess.IsTableExist("kv_store")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
_, err = sess.Exec("delete from kv_store where namespace = ?", KV_NAMESPACE)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
files, err := getSilenceFileNamesForAllOrgs(mg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range files {
|
||||
if err := os.Remove(f); err != nil {
|
||||
mg.Logger.Error("Alert migration error: failed to remove silence file", "file", f, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// rmMigrationWithoutLogging is similar migration to rmMigration
// but is not recorded in the migration_log table so that it can rerun in the future.
//
// NOTE(review): this is a type alias, not a new type, so SkipMigrationLog
// below is attached to rmMigration itself as well — confirm that plain
// rmMigration runs are also meant to be unrecorded.
type rmMigrationWithoutLogging = rmMigration

// SkipMigrationLog tells the migrator not to record this migration in
// migration_log, allowing it to run again later.
func (m *rmMigrationWithoutLogging) SkipMigrationLog() bool {
	return true
}
|
||||
|
||||
// upgradeNgAlerting is a code migration that upgrades legacy alerting data to
// unified (ng) alerting. Only part of its implementation is visible here; see
// its Exec/updateAlertmanagerFiles methods elsewhere in this file.
type upgradeNgAlerting struct {
	migrator.MigrationBase
}
|
||||
@@ -226,3 +735,191 @@ func (u *upgradeNgAlerting) updateAlertmanagerFiles(orgId int64, migrator *migra
|
||||
func (u *upgradeNgAlerting) SQL(migrator.Dialect) string {
|
||||
return codeMigration
|
||||
}
|
||||
|
||||
// getAlertFolderNameFromDashboard generates a folder name for alerts that belong to a dashboard. Formats the string according to DASHBOARD_FOLDER format.
|
||||
// If the resulting string exceeds the migrations.MaxTitleLength, the dashboard title is stripped to be at the maximum length
|
||||
func getAlertFolderNameFromDashboard(dash *dashboard) string {
|
||||
maxLen := MaxFolderName - len(fmt.Sprintf(DASHBOARD_FOLDER, "", dash.Uid))
|
||||
title := dash.Title
|
||||
if len(title) > maxLen {
|
||||
title = title[:maxLen]
|
||||
}
|
||||
return fmt.Sprintf(DASHBOARD_FOLDER, title, dash.Uid) // include UID to the name to avoid collision
|
||||
}
|
||||
|
||||
// CreateDefaultFoldersForAlertingMigration creates a folder dedicated for alerting if no folders exist
|
||||
func CreateDefaultFoldersForAlertingMigration(mg *migrator.Migrator) {
|
||||
if !mg.Cfg.UnifiedAlerting.IsEnabled() {
|
||||
return
|
||||
}
|
||||
mg.AddMigration("create default alerting folders", &createDefaultFoldersForAlertingMigration{})
|
||||
}
|
||||
|
||||
// createDefaultFoldersForAlertingMigration is a code migration that creates a
// default (general) alerting folder for every organization that has no
// folders yet and for which alerting is not disabled.
type createDefaultFoldersForAlertingMigration struct {
	migrator.MigrationBase
}
|
||||
|
||||
func (c createDefaultFoldersForAlertingMigration) Exec(sess *xorm.Session, migrator *migrator.Migrator) error {
|
||||
helper := folderHelper{
|
||||
sess: sess,
|
||||
mg: migrator,
|
||||
}
|
||||
|
||||
var rows []struct {
|
||||
Id int64
|
||||
Name string
|
||||
}
|
||||
|
||||
if err := sess.Table("org").Cols("id", "name").Find(&rows); err != nil {
|
||||
return fmt.Errorf("failed to read the list of organizations: %w", err)
|
||||
}
|
||||
|
||||
orgsWithFolders, err := helper.getOrgsIDThatHaveFolders()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list organizations that have at least one folder: %w", err)
|
||||
}
|
||||
|
||||
for _, row := range rows {
|
||||
// if there's at least one folder in the org or if alerting is disabled for that org, skip adding the default folder
|
||||
if _, ok := orgsWithFolders[row.Id]; ok {
|
||||
migrator.Logger.Debug("Skip adding default alerting folder because organization already has at least one folder", "org_id", row.Id)
|
||||
continue
|
||||
}
|
||||
if _, ok := migrator.Cfg.UnifiedAlerting.DisabledOrgs[row.Id]; ok {
|
||||
migrator.Logger.Debug("Skip adding default alerting folder because alerting is disabled for the organization ", "org_id", row.Id)
|
||||
continue
|
||||
}
|
||||
folder, err := helper.createGeneralFolder(row.Id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create the default alerting folder for organization %s (ID: %d): %w", row.Name, row.Id, err)
|
||||
}
|
||||
migrator.Logger.Info("Created the default folder for alerting", "org_id", row.Id, "folder_name", folder.Title, "folder_uid", folder.Uid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c createDefaultFoldersForAlertingMigration) SQL(migrator.Dialect) string {
|
||||
return codeMigration
|
||||
}
|
||||
|
||||
// UpdateRuleGroupIndexMigration updates a new field rule_group_index for alert rules that belong to a group with more than 1 alert.
|
||||
func UpdateRuleGroupIndexMigration(mg *migrator.Migrator) {
|
||||
if !mg.Cfg.UnifiedAlerting.IsEnabled() {
|
||||
return
|
||||
}
|
||||
mg.AddMigration("update group index for alert rules", &updateRulesOrderInGroup{})
|
||||
}
|
||||
|
||||
// updateRulesOrderInGroup is a code migration that assigns each alert rule a
// 1-based position (rule_group_index) within its group, writing a new rule
// version row for every rule it changes.
type updateRulesOrderInGroup struct {
	migrator.MigrationBase
}
|
||||
|
||||
func (c updateRulesOrderInGroup) SQL(migrator.Dialect) string {
|
||||
return codeMigration
|
||||
}
|
||||
|
||||
// Exec assigns every alert rule a 1-based index within its
// (org, namespace, group) group, ordered by row id, and records a new
// alert_rule_version row for each rule whose index changed.
func (c updateRulesOrderInGroup) Exec(sess *xorm.Session, migrator *migrator.Migrator) error {
	var rows []*alertRule
	// Ascending id order determines the group index each rule receives.
	if err := sess.Table(alertRule{}).Asc("id").Find(&rows); err != nil {
		return fmt.Errorf("failed to read the list of alert rules: %w", err)
	}

	if len(rows) == 0 {
		migrator.Logger.Debug("No rules to migrate.")
		return nil
	}

	// Bucket the rules by their group key; slice order preserves the id order
	// established by the query above.
	groups := map[ngmodels.AlertRuleGroupKey][]*alertRule{}

	for _, row := range rows {
		groupKey := ngmodels.AlertRuleGroupKey{
			OrgID:        row.OrgID,
			NamespaceUID: row.NamespaceUID,
			RuleGroup:    row.RuleGroup,
		}
		groups[groupKey] = append(groups[groupKey], row)
	}

	toUpdate := make([]*alertRule, 0, len(rows))

	// Only rules whose stored index differs from their position need updating.
	for _, rules := range groups {
		for i, rule := range rules {
			if rule.RuleGroupIndex == i+1 {
				continue
			}
			rule.RuleGroupIndex = i + 1
			toUpdate = append(toUpdate, rule)
		}
	}

	if len(toUpdate) == 0 {
		migrator.Logger.Debug("No rules to upgrade group index")
		return nil
	}

	updated := time.Now()
	versions := make([]any, 0, len(toUpdate))

	for _, rule := range toUpdate {
		rule.Updated = updated
		// Snapshot the rule as a new version row: the version is bumped and
		// the old version recorded as parent BEFORE the rule row is updated.
		version := rule.makeVersion()
		version.Version = rule.Version + 1
		version.ParentVersion = rule.Version
		rule.Version++
		_, err := sess.ID(rule.ID).Cols("version", "updated", "rule_group_idx").Update(rule)
		if err != nil {
			migrator.Logger.Error("Failed to update alert rule", "uid", rule.UID, "err", err)
			return fmt.Errorf("unable to update alert rules with group index: %w", err)
		}
		migrator.Logger.Debug("Updated group index for alert rule", "rule_uid", rule.UID)
		versions = append(versions, version)
	}
	// Bulk-insert all version rows after the rule rows have been updated.
	_, err := sess.Insert(versions...)
	if err != nil {
		migrator.Logger.Error("Failed to insert changes to alert_rule_version", "err", err)
		return fmt.Errorf("unable to update alert rules with group index: %w", err)
	}
	return nil
}
|
||||
|
||||
// uidSet is a wrapper around map[string]struct{} and util.GenerateShortUID() which aims help generate uids in quick
// succession while taking into consideration case sensitivity requirements. if caseInsensitive is true, all generated
// uids must also be unique when compared in a case-insensitive manner.
type uidSet struct {
	// set holds every uid generated so far; when caseInsensitive is true the
	// keys are stored lowercased.
	set map[string]struct{}
	// caseInsensitive selects case-insensitive uniqueness.
	caseInsensitive bool
}
|
||||
|
||||
// contains checks whether the given uid has already been generated in this uidSet.
|
||||
func (s *uidSet) contains(uid string) bool {
|
||||
dedup := uid
|
||||
if s.caseInsensitive {
|
||||
dedup = strings.ToLower(dedup)
|
||||
}
|
||||
_, seen := s.set[dedup]
|
||||
return seen
|
||||
}
|
||||
|
||||
// add adds the given uid to the uidSet.
|
||||
func (s *uidSet) add(uid string) {
|
||||
dedup := uid
|
||||
if s.caseInsensitive {
|
||||
dedup = strings.ToLower(dedup)
|
||||
}
|
||||
s.set[dedup] = struct{}{}
|
||||
}
|
||||
|
||||
// generateUid will generate a new unique uid that is not already contained in the uidSet.
|
||||
// If it fails to create one that has not already been generated it will make multiple, but not unlimited, attempts.
|
||||
// If all attempts are exhausted an error will be returned.
|
||||
func (s *uidSet) generateUid() (string, error) {
|
||||
for i := 0; i < 5; i++ {
|
||||
gen := util.GenerateShortUID()
|
||||
if !s.contains(gen) {
|
||||
s.add(gen)
|
||||
return gen, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", errors.New("failed to generate UID")
|
||||
}
|
||||
|
||||
194
pkg/services/sqlstore/migrations/ualert/ualert_test.go
Normal file
194
pkg/services/sqlstore/migrations/ualert/ualert_test.go
Normal file
@@ -0,0 +1,194 @@
|
||||
package ualert
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/alertmanager/pkg/labels"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
)
|
||||
|
||||
// Exported aliases of unexported identifiers, presumably so tests elsewhere
// can reference the migration titles and the rmMigration type — TODO confirm
// against callers.
var MigTitle = migTitle
var RmMigTitle = rmMigTitle
var ClearMigrationEntryTitle = clearMigrationEntryTitle

// RmMigration re-exports rmMigration.
type RmMigration = rmMigration
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface for Matchers. Vendored from definitions.ObjectMatchers.
|
||||
func (m *ObjectMatchers) UnmarshalJSON(data []byte) error {
|
||||
var rawMatchers [][3]string
|
||||
if err := json.Unmarshal(data, &rawMatchers); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, rawMatcher := range rawMatchers {
|
||||
var matchType labels.MatchType
|
||||
switch rawMatcher[1] {
|
||||
case "=":
|
||||
matchType = labels.MatchEqual
|
||||
case "!=":
|
||||
matchType = labels.MatchNotEqual
|
||||
case "=~":
|
||||
matchType = labels.MatchRegexp
|
||||
case "!~":
|
||||
matchType = labels.MatchNotRegexp
|
||||
default:
|
||||
return fmt.Errorf("unsupported match type %q in matcher", rawMatcher[1])
|
||||
}
|
||||
|
||||
rawMatcher[2] = strings.TrimPrefix(rawMatcher[2], "\"")
|
||||
rawMatcher[2] = strings.TrimSuffix(rawMatcher[2], "\"")
|
||||
|
||||
matcher, err := labels.NewMatcher(matchType, rawMatcher[0], rawMatcher[2])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*m = append(*m, matcher)
|
||||
}
|
||||
sort.Sort(labels.Matchers(*m))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Test_validateAlertmanagerConfig exercises migration.validateAlertmanagerConfig
// with slack receivers: an invalid secure URL must fail validation, while an
// odd-but-accepted recipient and a fully valid config must not.
func Test_validateAlertmanagerConfig(t *testing.T) {
	tc := []struct {
		name      string
		receivers []*PostableGrafanaReceiver
		err       error // expected error; nil means validation must succeed
	}{
		{
			name: "when a slack receiver does not have a valid URL - it should error",
			receivers: []*PostableGrafanaReceiver{
				{
					UID:            "test-uid",
					Name:           "SlackWithBadURL",
					Type:           "slack",
					Settings:       simplejson.NewFromAny(map[string]interface{}{}),
					SecureSettings: map[string]string{"url": invalidUri},
				},
			},
			err: fmt.Errorf("failed to validate integration \"SlackWithBadURL\" (UID test-uid) of type \"slack\": invalid URL %q", invalidUri),
		},
		{
			name: "when a slack receiver has an invalid recipient - it should not error",
			receivers: []*PostableGrafanaReceiver{
				{
					UID:            util.GenerateShortUID(),
					Name:           "SlackWithBadRecipient",
					Type:           "slack",
					Settings:       simplejson.NewFromAny(map[string]interface{}{"recipient": "this passes"}),
					SecureSettings: map[string]string{"url": "http://webhook.slack.com/myuser"},
				},
			},
		},
		{
			name: "when the configuration is valid - it should not error",
			receivers: []*PostableGrafanaReceiver{
				{
					UID:            util.GenerateShortUID(),
					Name:           "SlackWithBadURL",
					Type:           "slack",
					Settings:       simplejson.NewFromAny(map[string]interface{}{"recipient": "#a-good-channel"}),
					SecureSettings: map[string]string{"url": "http://webhook.slack.com/myuser"},
				},
			},
		},
	}

	for _, tt := range tc {
		t.Run(tt.name, func(t *testing.T) {
			mg := newTestMigration(t)

			config := configFromReceivers(t, tt.receivers)
			require.NoError(t, config.EncryptSecureSettings()) // make sure we encrypt the settings
			err := mg.validateAlertmanagerConfig(config)
			if tt.err != nil {
				require.Error(t, err)
				require.EqualError(t, err, tt.err.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
|
||||
|
||||
func configFromReceivers(t *testing.T, receivers []*PostableGrafanaReceiver) *PostableUserConfig {
|
||||
t.Helper()
|
||||
|
||||
return &PostableUserConfig{
|
||||
AlertmanagerConfig: PostableApiAlertingConfig{
|
||||
Receivers: []*PostableApiReceiver{
|
||||
{GrafanaManagedReceivers: receivers},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *PostableUserConfig) EncryptSecureSettings() error {
|
||||
for _, r := range c.AlertmanagerConfig.Receivers {
|
||||
for _, gr := range r.GrafanaManagedReceivers {
|
||||
encryptedData := GetEncryptedJsonData(gr.SecureSettings)
|
||||
for k, v := range encryptedData {
|
||||
gr.SecureSettings[k] = base64.StdEncoding.EncodeToString(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const invalidUri = "<22>6<EFBFBD>M<EFBFBD><4D>)uk譹1(<28>h`$<24>o<EFBFBD>N>mĕ<6D><C495><EFBFBD><EFBFBD>cS2<53>dh![ę<> <09><><EFBFBD>`csB<73>!<21><>OSxP<78>{<7B>"
|
||||
|
||||
func Test_getAlertFolderNameFromDashboard(t *testing.T) {
|
||||
t.Run("should include full title", func(t *testing.T) {
|
||||
dash := &dashboard{
|
||||
Uid: util.GenerateShortUID(),
|
||||
Title: "TEST",
|
||||
}
|
||||
folder := getAlertFolderNameFromDashboard(dash)
|
||||
require.Contains(t, folder, dash.Title)
|
||||
require.Contains(t, folder, dash.Uid)
|
||||
})
|
||||
t.Run("should cut title to the length", func(t *testing.T) {
|
||||
title := ""
|
||||
for {
|
||||
title += util.GenerateShortUID()
|
||||
if len(title) > MaxFolderName {
|
||||
title = title[:MaxFolderName]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
dash := &dashboard{
|
||||
Uid: util.GenerateShortUID(),
|
||||
Title: title,
|
||||
}
|
||||
folder := getAlertFolderNameFromDashboard(dash)
|
||||
require.Len(t, folder, MaxFolderName)
|
||||
require.Contains(t, folder, dash.Uid)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_shortUIDCaseInsensitiveConflicts(t *testing.T) {
|
||||
s := uidSet{
|
||||
set: make(map[string]struct{}),
|
||||
caseInsensitive: true,
|
||||
}
|
||||
|
||||
// 10000 uids seems to be enough to cause a collision in almost every run if using util.GenerateShortUID directly.
|
||||
for i := 0; i < 10000; i++ {
|
||||
_, _ = s.generateUid()
|
||||
}
|
||||
|
||||
// check if any are case-insensitive duplicates.
|
||||
deduped := make(map[string]struct{})
|
||||
for k := range s.set {
|
||||
deduped[strings.ToLower(k)] = struct{}{}
|
||||
}
|
||||
|
||||
require.Equal(t, len(s.set), len(deduped))
|
||||
}
|
||||
Reference in New Issue
Block a user