Alerting: Remove vendored models in migration service (#74503)

This PR replaces the vendored models in the migration with their equivalent ngalert models. It also replaces the raw SQL selects and inserts with service calls.

It also fills in some gaps in the testing suite around:

    - Migration of alert rules: verifying that the actual data model (queries, conditions) is correct (9a7cfa9)
    - Secure settings migration: verifying that secure fields remain encrypted for all available notifiers and that certain fields migrate correctly from plain text to encrypted secure settings (e7d3993)

The checks for custom dashboard ACLs will be replaced in a separate, targeted PR, as that change is complex enough on its own.
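
For a sense of the shape of the change, a minimal before/after sketch (names are hypothetical, not the exact code in this diff):

```go
// Before: vendored model + raw SQL select (hypothetical sketch).
var alerts []dashAlert // local, vendored copy of the legacy alert model
err := sess.SQL("SELECT * FROM alert WHERE org_id = ?", orgID).Find(&alerts)

// After: the equivalent shared ngalert model via a store/service call (hypothetical sketch).
alerts, err := migrationStore.GetOrgDashboardAlerts(ctx, orgID)
```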
Matthew Jacobson
2023-10-11 17:22:09 +01:00
committed by GitHub
parent 046e9b7672
commit 6a8649d544
49 changed files with 4564 additions and 3647 deletions

View File

@@ -24,6 +24,7 @@ import (
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/notifier"
"github.com/grafana/grafana/pkg/services/ngalert/provisioning"
ngfakes "github.com/grafana/grafana/pkg/services/ngalert/tests/fakes"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/secrets/fakes"
secretsManager "github.com/grafana/grafana/pkg/services/secrets/manager"
@@ -663,7 +664,7 @@ func createMultiOrgAlertmanager(t *testing.T) *notifier.MultiOrgAlertmanager {
orgStore := notifier.NewFakeOrgStore(t, []int64{1, 2, 3})
provStore := provisioning.NewFakeProvisioningStore()
tmpDir := t.TempDir()
-kvStore := notifier.NewFakeKVStore(t)
+kvStore := ngfakes.NewFakeKVStore(t)
secretsService := secretsManager.SetupTestService(t, fakes.NewFakeSecretsStore())
reg := prometheus.NewPedanticRegistry()
m := metrics.NewNGAlert(reg)

View File

@@ -0,0 +1,290 @@
package migration
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/log"
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/tsdb/graphite"
"github.com/grafana/grafana/pkg/util"
)
const (
// ContactLabel is a private label created during migration and used in notification policies.
// It stores a string array of all contact point names an alert rule should send to.
// It was created as a means to simplify post-migration notification policies.
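// For example, an alert routed to contact points "slack" and "email" carries the label value "email","slack"
// (each name double-quoted, sorted, and comma-separated; see contactListToString later in this PR).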
ContactLabel = "__contacts__"
)
func addMigrationInfo(da *migrationStore.DashAlert, dashboardUID string) (map[string]string, map[string]string) {
tagsMap := simplejson.NewFromAny(da.ParsedSettings.AlertRuleTags).MustMap()
lbls := make(map[string]string, len(tagsMap))
for k, v := range tagsMap {
lbls[k] = simplejson.NewFromAny(v).MustString()
}
annotations := make(map[string]string, 3)
annotations[ngmodels.DashboardUIDAnnotation] = dashboardUID
annotations[ngmodels.PanelIDAnnotation] = fmt.Sprintf("%v", da.PanelID)
annotations["__alertId__"] = fmt.Sprintf("%v", da.ID)
return lbls, annotations
}
// MigrateAlert migrates a single dashboard alert from legacy alerting to unified alerting.
func (om *OrgMigration) migrateAlert(ctx context.Context, l log.Logger, da *migrationStore.DashAlert, dashboardUID string, folderUID string) (*ngmodels.AlertRule, error) {
l.Debug("Migrating alert rule to Unified Alerting")
cond, err := transConditions(ctx, da, om.migrationStore)
if err != nil {
return nil, fmt.Errorf("transform conditions: %w", err)
}
lbls, annotations := addMigrationInfo(da, dashboardUID)
message := MigrateTmpl(l.New("field", "message"), da.Message)
annotations["message"] = message
data, err := migrateAlertRuleQueries(l, cond.Data)
if err != nil {
return nil, fmt.Errorf("failed to migrate alert rule queries: %w", err)
}
isPaused := false
if da.State == "paused" {
isPaused = true
}
// Here we ensure that the alert rule title is unique within the folder.
dedupSet := om.AlertTitleDeduplicator(folderUID)
name := truncateRuleName(da.Name)
if dedupSet.contains(name) {
dedupedName := dedupSet.deduplicate(name)
l.Debug("Duplicate alert rule name detected, renaming", "old_name", name, "new_name", dedupedName)
name = dedupedName
}
dedupSet.add(name)
ar := &ngmodels.AlertRule{
OrgID: da.OrgID,
Title: name,
UID: util.GenerateShortUID(),
Condition: cond.Condition,
Data: data,
IntervalSeconds: ruleAdjustInterval(da.Frequency),
Version: 1,
NamespaceUID: folderUID, // Folder was already created upstream; its UID is passed in.
DashboardUID: &dashboardUID,
PanelID: &da.PanelID,
RuleGroup: name,
For: da.For,
Updated: time.Now().UTC(),
Annotations: annotations,
Labels: lbls,
RuleGroupIndex: 1, // Every rule is in its own group.
IsPaused: isPaused,
NoDataState: transNoData(l, da.ParsedSettings.NoDataState),
ExecErrState: transExecErr(l, da.ParsedSettings.ExecutionErrorState),
}
// Label for routing and silences.
n, v := getLabelForSilenceMatching(ar.UID)
ar.Labels[n] = v
if da.ParsedSettings.ExecutionErrorState == string(legacymodels.ExecutionErrorKeepState) {
if err := om.addErrorSilence(ar); err != nil {
om.log.Error("Alert migration error: failed to create silence for Error", "rule_name", ar.Title, "err", err)
}
}
if da.ParsedSettings.NoDataState == string(legacymodels.NoDataKeepState) {
if err := om.addNoDataSilence(ar); err != nil {
om.log.Error("Alert migration error: failed to create silence for NoData", "rule_name", ar.Title, "err", err)
}
}
return ar, nil
}
// migrateAlertRuleQueries attempts to fix alert rule queries so they can work in unified alerting. Queries of some data sources are not compatible with unified alerting.
func migrateAlertRuleQueries(l log.Logger, data []ngmodels.AlertQuery) ([]ngmodels.AlertQuery, error) {
result := make([]ngmodels.AlertQuery, 0, len(data))
for _, d := range data {
// queries that are expressions are not relevant, skip them.
if d.DatasourceUID == expressionDatasourceUID {
result = append(result, d)
continue
}
var fixedData map[string]json.RawMessage
err := json.Unmarshal(d.Model, &fixedData)
if err != nil {
return nil, err
}
// remove the "hide" flag from the query (if it exists)
delete(fixedData, "hide")
fixedData = fixGraphiteReferencedSubQueries(fixedData)
fixedData = fixPrometheusBothTypeQuery(l, fixedData)
updatedModel, err := json.Marshal(fixedData)
if err != nil {
return nil, err
}
d.Model = updatedModel
result = append(result, d)
}
return result, nil
}
// fixGraphiteReferencedSubQueries attempts to fix Graphite referenced sub-queries, since unified alerting does not support them.
// The targetFull field of a Graphite data source contains the expanded version of the 'target' field, so we copy that.
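// For example (from the tests below): {"target": "ahalfquery", "targetFull": "thisisafullquery"}
// becomes {"target": "thisisafullquery"}.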
func fixGraphiteReferencedSubQueries(queryData map[string]json.RawMessage) map[string]json.RawMessage {
fullQuery, ok := queryData[graphite.TargetFullModelField]
if ok {
delete(queryData, graphite.TargetFullModelField)
queryData[graphite.TargetModelField] = fullQuery
}
return queryData
}
// fixPrometheusBothTypeQuery converts Prometheus 'Both' type queries to range queries.
func fixPrometheusBothTypeQuery(l log.Logger, queryData map[string]json.RawMessage) map[string]json.RawMessage {
// There is the possibility to support this functionality by:
// - Splitting the query into two: one for instant and one for range.
// - Splitting the condition into two: one for each query, separated by OR.
// However, relying on a 'Both' query instead of multiple conditions to do this in legacy is likely
// to be unintentional. In addition, this would require more robust operator precedence in classic conditions.
// Given these reasons, we opt to convert them to range queries and log a warning.
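// For example (from the tests below): {"datasource":{"type":"prometheus"},"instant":true,"range":true}
// becomes {"datasource":{"type":"prometheus"},"instant":false,"range":true}.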
var instant bool
if instantRaw, ok := queryData["instant"]; ok {
if err := json.Unmarshal(instantRaw, &instant); err != nil {
// Nothing to do here, we can't parse the instant field.
if isPrometheus, _ := isPrometheusQuery(queryData); isPrometheus {
l.Info("Failed to parse instant field on Prometheus query", "instant", string(instantRaw), "err", err)
}
return queryData
}
}
var rng bool
if rangeRaw, ok := queryData["range"]; ok {
if err := json.Unmarshal(rangeRaw, &rng); err != nil {
// Nothing to do here, we can't parse the range field.
if isPrometheus, _ := isPrometheusQuery(queryData); isPrometheus {
l.Info("Failed to parse range field on Prometheus query", "range", string(rangeRaw), "err", err)
}
return queryData
}
}
if !instant || !rng {
// Only apply this fix to 'Both' type queries.
return queryData
}
isPrometheus, err := isPrometheusQuery(queryData)
if err != nil {
l.Info("Unable to convert alert rule that resembles a Prometheus 'Both' type query to 'Range'", "err", err)
return queryData
}
if !isPrometheus {
// Only apply this fix to Prometheus.
return queryData
}
// Convert 'Both' type queries to `Range` queries by disabling the `Instant` portion.
l.Warn("Prometheus 'Both' type queries are not supported in unified alerting. Converting to range query.")
queryData["instant"] = []byte("false")
return queryData
}
// isPrometheusQuery checks if the query is for Prometheus.
func isPrometheusQuery(queryData map[string]json.RawMessage) (bool, error) {
ds, ok := queryData["datasource"]
if !ok {
return false, fmt.Errorf("missing datasource field")
}
var datasource struct {
Type string `json:"type"`
}
if err := json.Unmarshal(ds, &datasource); err != nil {
return false, fmt.Errorf("failed to parse datasource '%s': %w", string(ds), err)
}
if datasource.Type == "" {
return false, fmt.Errorf("missing type field '%s'", string(ds))
}
return datasource.Type == "prometheus", nil
}
func ruleAdjustInterval(freq int64) int64 {
// baseFreq of 10 seconds corresponds to the scheduler's base interval; TODO: fetch it from SchedulerCfg instead of hardcoding.
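// For example: freq=7 -> 10, freq=35 -> 30, freq=120 -> 120.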
var baseFreq int64 = 10
if freq <= baseFreq {
return 10
}
return freq - (freq % baseFreq)
}
func transNoData(l log.Logger, s string) ngmodels.NoDataState {
switch legacymodels.NoDataOption(s) {
case legacymodels.NoDataSetOK:
return ngmodels.OK // values from ngalert/models/rule
case "", legacymodels.NoDataSetNoData:
return ngmodels.NoData
case legacymodels.NoDataSetAlerting:
return ngmodels.Alerting
case legacymodels.NoDataKeepState:
return ngmodels.NoData // "keep last state" translates to no data because we now emit a special alert when the state is "noData". The result is that the evaluation will not return firing and instead we'll raise the special alert.
default:
l.Warn("Unable to translate execution of NoData state. Using default execution", "old", s, "new", ngmodels.NoData)
return ngmodels.NoData
}
}
func transExecErr(l log.Logger, s string) ngmodels.ExecutionErrorState {
switch legacymodels.ExecutionErrorOption(s) {
case "", legacymodels.ExecutionErrorSetAlerting:
return ngmodels.AlertingErrState
case legacymodels.ExecutionErrorKeepState:
// Keep last state is translated to error as we now emit a
// DatasourceError alert when the state is error
return ngmodels.ErrorErrState
case legacymodels.ExecutionErrorSetOk:
return ngmodels.OkErrState
default:
l.Warn("Unable to translate execution of Error state. Using default execution", "old", s, "new", ngmodels.ErrorErrState)
return ngmodels.ErrorErrState
}
}
// truncateRuleName truncates the rule name to the maximum allowed length.
func truncateRuleName(daName string) string {
if len(daName) > store.AlertDefinitionMaxTitleLength {
return daName[:store.AlertDefinitionMaxTitleLength]
}
return daName
}
func extractChannelIDs(d *migrationStore.DashAlert) (channelUids []migrationStore.UidOrID) {
// Extracting channel UID/ID.
for _, ui := range d.ParsedSettings.Notifications {
if ui.UID != "" {
channelUids = append(channelUids, ui.UID)
continue
}
// In certain circumstances, id is used instead of uid.
// We add this if there was no uid.
if ui.ID > 0 {
channelUids = append(channelUids, ui.ID)
}
}
return channelUids
}

View File

@@ -0,0 +1,247 @@
package migration
import (
"context"
"strings"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/log/logtest"
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/store"
)
func TestMigrateAlertRuleQueries(t *testing.T) {
tc := []struct {
name string
input *simplejson.Json
expected string
err error
}{
{
name: "when a query has a sub query - it is extracted",
input: simplejson.NewFromAny(map[string]any{"targetFull": "thisisafullquery", "target": "ahalfquery"}),
expected: `{"target":"thisisafullquery"}`,
},
{
name: "when a query does not have a sub query - it no-ops",
input: simplejson.NewFromAny(map[string]any{"target": "ahalfquery"}),
expected: `{"target":"ahalfquery"}`,
},
{
name: "when query was hidden, it removes the flag",
input: simplejson.NewFromAny(map[string]any{"hide": true}),
expected: `{}`,
},
{
name: "when prometheus both type query, convert to range",
input: simplejson.NewFromAny(map[string]any{
"datasource": map[string]string{
"type": "prometheus",
},
"instant": true,
"range": true,
}),
expected: `{"datasource":{"type":"prometheus"},"instant":false,"range":true}`,
},
{
name: "when prometheus instant type query, do nothing",
input: simplejson.NewFromAny(map[string]any{
"datasource": map[string]string{
"type": "prometheus",
},
"instant": true,
}),
expected: `{"datasource":{"type":"prometheus"},"instant":true}`,
},
{
name: "when non-prometheus with instant and range, do nothing",
input: simplejson.NewFromAny(map[string]any{
"datasource": map[string]string{
"type": "something",
},
"instant": true,
"range": true,
}),
expected: `{"datasource":{"type":"something"},"instant":true,"range":true}`,
},
}
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
model, err := tt.input.Encode()
require.NoError(t, err)
queries, err := migrateAlertRuleQueries(&logtest.Fake{}, []models.AlertQuery{{Model: model}})
if tt.err != nil {
require.Error(t, err)
require.EqualError(t, err, tt.err.Error())
return
}
require.NoError(t, err)
r, err := queries[0].Model.MarshalJSON()
require.NoError(t, err)
require.JSONEq(t, tt.expected, string(r))
})
}
}
func TestAddMigrationInfo(t *testing.T) {
tt := []struct {
name string
alert *migrationStore.DashAlert
dashboard string
expectedLabels map[string]string
expectedAnnotations map[string]string
}{
{
name: "when alert rule tags are a JSON array, they're ignored.",
alert: &migrationStore.DashAlert{Alert: &legacymodels.Alert{ID: 43, PanelID: 42}, ParsedSettings: &migrationStore.DashAlertSettings{AlertRuleTags: []string{"one", "two", "three", "four"}}},
dashboard: "dashboard",
expectedLabels: map[string]string{},
expectedAnnotations: map[string]string{"__alertId__": "43", "__dashboardUid__": "dashboard", "__panelId__": "42"},
},
{
name: "when alert rule tags are a JSON object",
alert: &migrationStore.DashAlert{Alert: &legacymodels.Alert{ID: 43, PanelID: 42}, ParsedSettings: &migrationStore.DashAlertSettings{AlertRuleTags: map[string]any{"key": "value", "key2": "value2"}}},
dashboard: "dashboard",
expectedLabels: map[string]string{"key": "value", "key2": "value2"},
expectedAnnotations: map[string]string{"__alertId__": "43", "__dashboardUid__": "dashboard", "__panelId__": "42"},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
labels, annotations := addMigrationInfo(tc.alert, tc.dashboard)
require.Equal(t, tc.expectedLabels, labels)
require.Equal(t, tc.expectedAnnotations, annotations)
})
}
}
func TestMakeAlertRule(t *testing.T) {
sqlStore := db.InitTestDB(t)
t.Run("when mapping rule names", func(t *testing.T) {
t.Run("leaves basic names untouched", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.NoError(t, err)
require.Equal(t, da.Name, ar.Title)
require.Equal(t, ar.Title, ar.RuleGroup)
})
t.Run("truncates very long names to max length", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
da.Name = strings.Repeat("a", store.AlertDefinitionMaxTitleLength+1)
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.NoError(t, err)
require.Len(t, ar.Title, store.AlertDefinitionMaxTitleLength)
})
t.Run("deduplicate names in same org and folder", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
da.Name = strings.Repeat("a", store.AlertDefinitionMaxTitleLength+1)
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.NoError(t, err)
require.Len(t, ar.Title, store.AlertDefinitionMaxTitleLength)
da = createTestDashAlert()
da.Name = strings.Repeat("a", store.AlertDefinitionMaxTitleLength+1)
ar, err = m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.NoError(t, err)
require.Len(t, ar.Title, store.AlertDefinitionMaxTitleLength)
parts := strings.SplitN(ar.Title, "_", 2)
require.Len(t, parts, 2)
require.Greater(t, len(parts[1]), 8, "unique identifier should be at least 9 characters")
require.Equal(t, store.AlertDefinitionMaxTitleLength-1, len(parts[0])+len(parts[1]), "truncated name + underscore + unique identifier should together be AlertDefinitionMaxTitleLength")
require.Equal(t, ar.Title, ar.RuleGroup)
})
})
t.Run("alert is not paused", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.NoError(t, err)
require.False(t, ar.IsPaused)
})
t.Run("paused dash alert is paused", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
da.State = "paused"
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.NoError(t, err)
require.True(t, ar.IsPaused)
})
t.Run("use default if execution of NoData is not known", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
da.ParsedSettings.NoDataState = uuid.NewString()
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.Nil(t, err)
require.Equal(t, models.NoData, ar.NoDataState)
})
t.Run("use default if execution of Error is not known", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
da.ParsedSettings.ExecutionErrorState = uuid.NewString()
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.Nil(t, err)
require.Equal(t, models.ErrorErrState, ar.ExecErrState)
})
t.Run("migrate message template", func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
da := createTestDashAlert()
da.Message = "Instance ${instance} is down"
ar, err := m.migrateAlert(context.Background(), &logtest.Fake{}, &da, "dashboard", "folder")
require.Nil(t, err)
expected :=
"{{- $mergedLabels := mergeLabelValues $values -}}\n" +
"Instance {{$mergedLabels.instance}} is down"
require.Equal(t, expected, ar.Annotations["message"])
})
}
func createTestDashAlert() migrationStore.DashAlert {
return migrationStore.DashAlert{
Alert: &legacymodels.Alert{
ID: 1,
Name: "test",
},
ParsedSettings: &migrationStore.DashAlertSettings{},
}
}

View File

@@ -0,0 +1,440 @@
package migration
import (
"context"
"crypto/md5"
"encoding/base64"
"fmt"
"regexp"
"sort"
"strings"
"time"
alertingNotify "github.com/grafana/alerting/notify"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/alertmanager/pkg/labels"
"github.com/prometheus/common/model"
"github.com/grafana/grafana/pkg/components/simplejson"
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/secrets"
"github.com/grafana/grafana/pkg/util"
)
const (
// DisabledRepeatInterval is a large duration that will be used as a pseudo-disable in case a legacy channel doesn't have SendReminders enabled.
DisabledRepeatInterval = model.Duration(time.Duration(8736) * time.Hour) // ~1y (52 weeks)
)
// channelReceiver is a convenience struct that contains a notificationChannel and its corresponding migrated PostableApiReceiver.
type channelReceiver struct {
channel *legacymodels.AlertNotification
receiver *apimodels.PostableApiReceiver
}
// migrateChannels creates an Alertmanager config with migrated receivers and routes.
func (om *OrgMigration) migrateChannels(allChannels []*legacymodels.AlertNotification, pairs []*AlertPair) (*apimodels.PostableUserConfig, error) {
var defaultChannels []*legacymodels.AlertNotification
var channels []*legacymodels.AlertNotification
for _, c := range allChannels {
if c.Type == "hipchat" || c.Type == "sensu" {
om.log.Error("Alert migration error: discontinued notification channel found", "type", c.Type, "name", c.Name, "uid", c.UID)
continue
}
if c.IsDefault {
defaultChannels = append(defaultChannels, c)
}
channels = append(channels, c)
}
amConfig := &apimodels.PostableUserConfig{
AlertmanagerConfig: apimodels.PostableApiAlertingConfig{
Receivers: make([]*apimodels.PostableApiReceiver, 0),
},
}
// Create all newly migrated receivers from legacy notification channels.
receiversMap, receivers, err := om.createReceivers(channels)
if err != nil {
return nil, fmt.Errorf("create receiver: %w", err)
}
// No need to create an Alertmanager configuration if no receivers remain after dropping obsolete channels.
if len(receivers) == 0 {
om.log.Warn("No available receivers")
return nil, nil
}
for _, cr := range receivers {
amConfig.AlertmanagerConfig.Receivers = append(amConfig.AlertmanagerConfig.Receivers, cr.receiver)
}
defaultReceivers := make(map[string]struct{})
// If the organization has default channels, build a map of default receivers; it is used to create alert-specific routes later.
for _, c := range defaultChannels {
defaultReceivers[c.Name] = struct{}{}
}
defaultReceiver, defaultRoute, err := om.createDefaultRouteAndReceiver(defaultChannels)
if err != nil {
return nil, fmt.Errorf("failed to create default route & receiver in orgId %d: %w", om.orgID, err)
}
amConfig.AlertmanagerConfig.Route = defaultRoute
if defaultReceiver != nil {
amConfig.AlertmanagerConfig.Receivers = append(amConfig.AlertmanagerConfig.Receivers, defaultReceiver)
}
for _, cr := range receivers {
route, err := createRoute(cr)
if err != nil {
return nil, fmt.Errorf("failed to create route for receiver %s in orgId %d: %w", cr.receiver.Name, om.orgID, err)
}
amConfig.AlertmanagerConfig.Route.Routes = append(amConfig.AlertmanagerConfig.Route.Routes, route)
}
for _, pair := range pairs {
channelUids := extractChannelIDs(pair.DashAlert)
filteredReceiverNames := om.filterReceiversForAlert(pair.AlertRule.Title, channelUids, receiversMap, defaultReceivers)
if len(filteredReceiverNames) != 0 {
// Only create a contact label if there are specific receivers, otherwise it defaults to the root-level route.
pair.AlertRule.Labels[ContactLabel] = contactListToString(filteredReceiverNames)
}
}
// Validate the produced Alertmanager configuration; this gives us a chance to catch bad configuration at migration time.
// Validation can differ between legacy and unified alerting (e.g. due to bug fixes), in which case this fails the migration.
if err := om.validateAlertmanagerConfig(amConfig); err != nil {
return nil, fmt.Errorf("failed to validate AlertmanagerConfig in orgId %d: %w", om.orgID, err)
}
return amConfig, nil
}
// validateAlertmanagerConfig validates the alertmanager configuration produced by the migration against the receivers.
func (om *OrgMigration) validateAlertmanagerConfig(config *apimodels.PostableUserConfig) error {
for _, r := range config.AlertmanagerConfig.Receivers {
for _, gr := range r.GrafanaManagedReceivers {
data, err := gr.Settings.MarshalJSON()
if err != nil {
return err
}
var (
cfg = &alertingNotify.GrafanaIntegrationConfig{
UID: gr.UID,
Name: gr.Name,
Type: gr.Type,
DisableResolveMessage: gr.DisableResolveMessage,
Settings: data,
SecureSettings: gr.SecureSettings,
}
)
_, err = alertingNotify.BuildReceiverConfiguration(context.Background(), &alertingNotify.APIReceiver{
GrafanaIntegrations: alertingNotify.GrafanaIntegrations{Integrations: []*alertingNotify.GrafanaIntegrationConfig{cfg}},
}, om.encryptionService.GetDecryptedValue)
if err != nil {
return err
}
}
}
return nil
}
// contactListToString creates a sorted string representation of a given map (set) of receiver names. Each name will be comma-separated and double-quoted. Names should not contain double quotes.
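// For example, the set {"slack", "email"} yields "email","slack".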
func contactListToString(m map[string]any) string {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, quote(k))
}
sort.Strings(keys)
return strings.Join(keys, ",")
}
// quote will surround the given string in double quotes.
func quote(s string) string {
return `"` + s + `"`
}
// Create a notifier (PostableGrafanaReceiver) from a legacy notification channel
func (om *OrgMigration) createNotifier(c *legacymodels.AlertNotification) (*apimodels.PostableGrafanaReceiver, error) {
uid, err := om.determineChannelUid(c)
if err != nil {
return nil, err
}
settings, secureSettings, err := om.migrateSettingsToSecureSettings(c.Type, c.Settings, c.SecureSettings)
if err != nil {
return nil, err
}
data, err := settings.MarshalJSON()
if err != nil {
return nil, err
}
return &apimodels.PostableGrafanaReceiver{
UID: uid,
Name: c.Name,
Type: c.Type,
DisableResolveMessage: c.DisableResolveMessage,
Settings: data,
SecureSettings: secureSettings,
}, nil
}
// Create one receiver for every unique notification channel.
func (om *OrgMigration) createReceivers(allChannels []*legacymodels.AlertNotification) (map[migrationStore.UidOrID]*apimodels.PostableApiReceiver, []channelReceiver, error) {
receivers := make([]channelReceiver, 0, len(allChannels))
receiversMap := make(map[migrationStore.UidOrID]*apimodels.PostableApiReceiver)
set := make(map[string]struct{}) // Used to deduplicate sanitized names.
for _, c := range allChannels {
notifier, err := om.createNotifier(c)
if err != nil {
return nil, nil, err
}
// We remove double quotes because this character will be used as the separator in the ContactLabel. To prevent partial matches in the Route Matcher we choose to sanitize them early on instead of complicating the Matcher regex.
sanitizedName := strings.ReplaceAll(c.Name, `"`, `_`)
// There can be name collisions after we sanitize. We check for this and attempt to make the name unique again using a short hash of the original name.
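// For example (from the tests): name"1 -> name_1; a second channel already named name_1 becomes name_1_dba13d.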
if _, ok := set[sanitizedName]; ok {
sanitizedName = sanitizedName + fmt.Sprintf("_%.3x", md5.Sum([]byte(c.Name)))
om.log.Warn("Alert contains duplicate contact name after sanitization, appending unique suffix", "type", c.Type, "name", c.Name, "new_name", sanitizedName, "uid", c.UID)
}
notifier.Name = sanitizedName
set[sanitizedName] = struct{}{}
cr := channelReceiver{
channel: c,
receiver: &apimodels.PostableApiReceiver{
Receiver: config.Receiver{
Name: sanitizedName, // Channel name is unique within an Org.
},
PostableGrafanaReceivers: apimodels.PostableGrafanaReceivers{
GrafanaManagedReceivers: []*apimodels.PostableGrafanaReceiver{notifier},
},
},
}
receivers = append(receivers, cr)
// Store receivers for creating routes from alert rules later.
if c.UID != "" {
receiversMap[c.UID] = cr.receiver
}
if c.ID != 0 {
// In certain circumstances, the alert rule uses ID instead of UID, so we also index by ID to support lookups in that case.
receiversMap[c.ID] = cr.receiver
}
}
return receiversMap, receivers, nil
}
// Create the root-level route with the default receiver. If no new receiver is created specifically for the root-level route, the returned receiver will be nil.
func (om *OrgMigration) createDefaultRouteAndReceiver(defaultChannels []*legacymodels.AlertNotification) (*apimodels.PostableApiReceiver, *apimodels.Route, error) {
defaultReceiverName := "autogen-contact-point-default"
defaultRoute := &apimodels.Route{
Receiver: defaultReceiverName,
Routes: make([]*apimodels.Route, 0),
GroupByStr: []string{ngmodels.FolderTitleLabel, model.AlertNameLabel}, // To keep parity with pre-migration notifications.
RepeatInterval: nil,
}
newDefaultReceiver := &apimodels.PostableApiReceiver{
Receiver: config.Receiver{
Name: defaultReceiverName,
},
PostableGrafanaReceivers: apimodels.PostableGrafanaReceivers{
GrafanaManagedReceivers: []*apimodels.PostableGrafanaReceiver{},
},
}
// Return early if there are no default channels
if len(defaultChannels) == 0 {
return newDefaultReceiver, defaultRoute, nil
}
repeatInterval := DisabledRepeatInterval // If no channels have SendReminders enabled, we will use this large value as a pseudo-disable.
if len(defaultChannels) > 1 {
// If there is more than one default channel, we create a separate contact group that is used only in the root policy. This is to simplify the migrated notification policy structure.
// If we ever allow more than one receiver per route this won't be necessary.
for _, c := range defaultChannels {
// Need to create a new notifier to prevent uid conflict.
defaultNotifier, err := om.createNotifier(c)
if err != nil {
return nil, nil, err
}
newDefaultReceiver.GrafanaManagedReceivers = append(newDefaultReceiver.GrafanaManagedReceivers, defaultNotifier)
// Choose the lowest send-reminder duration from all the notifiers to use for the default route.
if c.SendReminder && c.Frequency < time.Duration(repeatInterval) {
repeatInterval = model.Duration(c.Frequency)
}
}
} else {
// If there is only a single default channel, we don't need a separate receiver to hold it. We can reuse the existing receiver for that single notifier.
defaultRoute.Receiver = defaultChannels[0].Name
if defaultChannels[0].SendReminder {
repeatInterval = model.Duration(defaultChannels[0].Frequency)
}
// No need to create a new receiver.
newDefaultReceiver = nil
}
defaultRoute.RepeatInterval = &repeatInterval
return newDefaultReceiver, defaultRoute, nil
}
// Create one route per contact point, matching based on ContactLabel.
func createRoute(cr channelReceiver) (*apimodels.Route, error) {
// We create a regex matcher so that each alert rule need only have a single ContactLabel entry for all contact points it sends to.
// For example, if an alert needs to send to contact1 and contact2 it will have ContactLabel=`"contact1","contact2"` and will match both routes looking
// for `.*"contact1".*` and `.*"contact2".*`.
// We quote and escape here to ensure the regex will correctly match the ContactLabel on the alerts.
name := fmt.Sprintf(`.*%s.*`, regexp.QuoteMeta(quote(cr.receiver.Name)))
mat, err := labels.NewMatcher(labels.MatchRegexp, ContactLabel, name)
if err != nil {
return nil, err
}
repeatInterval := DisabledRepeatInterval
if cr.channel.SendReminder {
repeatInterval = model.Duration(cr.channel.Frequency)
}
return &apimodels.Route{
Receiver: cr.receiver.Name,
ObjectMatchers: apimodels.ObjectMatchers{mat},
Continue: true, // We continue so that each sibling contact point route can separately match.
RepeatInterval: &repeatInterval,
}, nil
}
// Filter receivers to select those that were associated to the given rule as channels.
func (om *OrgMigration) filterReceiversForAlert(name string, channelIDs []migrationStore.UidOrID, receivers map[migrationStore.UidOrID]*apimodels.PostableApiReceiver, defaultReceivers map[string]struct{}) map[string]any {
if len(channelIDs) == 0 {
// If there are no channels associated, we use the default route.
return nil
}
// Filter receiver names.
filteredReceiverNames := make(map[string]any)
for _, uidOrId := range channelIDs {
recv, ok := receivers[uidOrId]
if ok {
filteredReceiverNames[recv.Name] = struct{}{} // Deduplicate on contact point name.
} else {
om.log.Warn("Alert linked to obsolete notification channel, ignoring", "alert", name, "uid", uidOrId)
}
}
coveredByDefault := func(names map[string]any) bool {
// Check if all receivers are also default ones and if so, just use the default route.
for n := range names {
if _, ok := defaultReceivers[n]; !ok {
return false
}
}
return true
}
if len(filteredReceiverNames) == 0 || coveredByDefault(filteredReceiverNames) {
// Use the default route instead.
return nil
}
// Add default receivers alongside rule-specific ones.
for n := range defaultReceivers {
filteredReceiverNames[n] = struct{}{}
}
return filteredReceiverNames
}
func (om *OrgMigration) determineChannelUid(c *legacymodels.AlertNotification) (string, error) {
legacyUid := c.UID
if legacyUid == "" {
newUid := util.GenerateShortUID()
om.seenUIDs.add(newUid)
om.log.Info("Legacy notification had an empty uid, generating a new one", "id", c.ID, "uid", newUid)
return newUid, nil
}
if om.seenUIDs.contains(legacyUid) {
newUid := util.GenerateShortUID()
om.seenUIDs.add(newUid)
om.log.Warn("Legacy notification had a UID that collides with a migrated record, generating a new one", "id", c.ID, "old", legacyUid, "new", newUid)
return newUid, nil
}
om.seenUIDs.add(legacyUid)
return legacyUid, nil
}
var secureKeysToMigrate = map[string][]string{
"slack": {"url", "token"},
"pagerduty": {"integrationKey"},
"webhook": {"password"},
"prometheus-alertmanager": {"basicAuthPassword"},
"opsgenie": {"apiKey"},
"telegram": {"bottoken"},
"line": {"token"},
"pushover": {"apiToken", "userKey"},
"threema": {"api_secret"},
}
// Some settings were moved from plain settings to secure settings between Grafana versions.
// See https://grafana.com/docs/grafana/latest/installation/upgrading/#ensure-encryption-of-existing-alert-notification-channel-secrets.
// migrateSettingsToSecureSettings takes care of that.
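// For example, a legacy slack channel that stored "token" in plain settings has it removed from settings,
// encrypted, and placed under secureSettings["token"] (see secureKeysToMigrate above and the tests in this PR).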
func (om *OrgMigration) migrateSettingsToSecureSettings(chanType string, settings *simplejson.Json, secureSettings SecureJsonData) (*simplejson.Json, map[string]string, error) {
keys := secureKeysToMigrate[chanType]
newSecureSettings := secureSettings.Decrypt()
cloneSettings := simplejson.New()
settingsMap, err := settings.Map()
if err != nil {
return nil, nil, err
}
for k, v := range settingsMap {
cloneSettings.Set(k, v)
}
for _, k := range keys {
if v, ok := newSecureSettings[k]; ok && v != "" {
continue
}
sv := cloneSettings.Get(k).MustString()
if sv != "" {
newSecureSettings[k] = sv
cloneSettings.Del(k)
}
}
err = om.encryptSecureSettings(newSecureSettings)
if err != nil {
return nil, nil, err
}
return cloneSettings, newSecureSettings, nil
}
func (om *OrgMigration) encryptSecureSettings(secureSettings map[string]string) error {
for key, value := range secureSettings {
encryptedData, err := om.encryptionService.Encrypt(context.Background(), []byte(value), secrets.WithoutScope())
if err != nil {
return fmt.Errorf("failed to encrypt secure settings: %w", err)
}
secureSettings[key] = base64.StdEncoding.EncodeToString(encryptedData)
}
return nil
}

View File

@@ -0,0 +1,591 @@
package migration
import (
"context"
"encoding/base64"
"encoding/json"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/alertmanager/pkg/labels"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/db"
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
ngModels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/notifier/channels_config"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
func TestFilterReceiversForAlert(t *testing.T) {
tc := []struct {
name string
channelIds []migrationStore.UidOrID
receivers map[migrationStore.UidOrID]*apimodels.PostableApiReceiver
defaultReceivers map[string]struct{}
expected map[string]any
}{
{
name: "when an alert has multiple channels, each should filter for the correct receiver",
channelIds: []migrationStore.UidOrID{"uid1", "uid2"},
receivers: map[migrationStore.UidOrID]*apimodels.PostableApiReceiver{
"uid1": createPostableApiReceiver("recv1", nil),
"uid2": createPostableApiReceiver("recv2", nil),
"uid3": createPostableApiReceiver("recv3", nil),
},
defaultReceivers: map[string]struct{}{},
expected: map[string]any{
"recv1": struct{}{},
"recv2": struct{}{},
},
},
{
name: "when default receivers exist, they should be added to an alert's filtered receivers",
channelIds: []migrationStore.UidOrID{"uid1"},
receivers: map[migrationStore.UidOrID]*apimodels.PostableApiReceiver{
"uid1": createPostableApiReceiver("recv1", nil),
"uid2": createPostableApiReceiver("recv2", nil),
"uid3": createPostableApiReceiver("recv3", nil),
},
defaultReceivers: map[string]struct{}{
"recv2": {},
},
expected: map[string]any{
"recv1": struct{}{}, // From alert
"recv2": struct{}{}, // From default
},
},
{
name: "when an alert has a channels associated by ID instead of UID, it should be included",
channelIds: []migrationStore.UidOrID{int64(42)},
receivers: map[migrationStore.UidOrID]*apimodels.PostableApiReceiver{
int64(42): createPostableApiReceiver("recv1", nil),
},
defaultReceivers: map[string]struct{}{},
expected: map[string]any{
"recv1": struct{}{},
},
},
{
name: "when an alert's receivers are covered by the defaults, return nil to use default receiver downstream",
channelIds: []migrationStore.UidOrID{"uid1"},
receivers: map[migrationStore.UidOrID]*apimodels.PostableApiReceiver{
"uid1": createPostableApiReceiver("recv1", nil),
"uid2": createPostableApiReceiver("recv2", nil),
"uid3": createPostableApiReceiver("recv3", nil),
},
defaultReceivers: map[string]struct{}{
"recv1": {},
"recv2": {},
},
expected: nil, // recv1 is already a default
},
}
sqlStore := db.InitTestDB(t)
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
res := m.filterReceiversForAlert("", tt.channelIds, tt.receivers, tt.defaultReceivers)
require.Equal(t, tt.expected, res)
})
}
}
func TestCreateRoute(t *testing.T) {
tc := []struct {
name string
channel *legacymodels.AlertNotification
recv *apimodels.PostableApiReceiver
expected *apimodels.Route
}{
{
name: "when a receiver is passed in, the route should regex match based on quoted name with continue=true",
channel: &legacymodels.AlertNotification{},
recv: createPostableApiReceiver("recv1", nil),
expected: &apimodels.Route{
Receiver: "recv1",
ObjectMatchers: apimodels.ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"recv1".*`}},
Routes: nil,
Continue: true,
GroupByStr: nil,
RepeatInterval: durationPointer(DisabledRepeatInterval),
},
},
{
name: "notification channel should be escaped for regex in the matcher",
channel: &legacymodels.AlertNotification{},
recv: createPostableApiReceiver(`. ^ $ * + - ? ( ) [ ] { } \ |`, nil),
expected: &apimodels.Route{
Receiver: `. ^ $ * + - ? ( ) [ ] { } \ |`,
ObjectMatchers: apimodels.ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"\. \^ \$ \* \+ - \? \( \) \[ \] \{ \} \\ \|".*`}},
Routes: nil,
Continue: true,
GroupByStr: nil,
RepeatInterval: durationPointer(DisabledRepeatInterval),
},
},
{
name: "when a channel has sendReminder=true, the route should use the frequency in repeat interval",
channel: &legacymodels.AlertNotification{SendReminder: true, Frequency: time.Duration(42) * time.Hour},
recv: createPostableApiReceiver("recv1", nil),
expected: &apimodels.Route{
Receiver: "recv1",
ObjectMatchers: apimodels.ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"recv1".*`}},
Routes: nil,
Continue: true,
GroupByStr: nil,
RepeatInterval: durationPointer(model.Duration(time.Duration(42) * time.Hour)),
},
},
{
name: "when a channel has sendReminder=false, the route should ignore the frequency in repeat interval and use DisabledRepeatInterval",
channel: &legacymodels.AlertNotification{SendReminder: false, Frequency: time.Duration(42) * time.Hour},
recv: createPostableApiReceiver("recv1", nil),
expected: &apimodels.Route{
Receiver: "recv1",
ObjectMatchers: apimodels.ObjectMatchers{{Type: 2, Name: ContactLabel, Value: `.*"recv1".*`}},
Routes: nil,
Continue: true,
GroupByStr: nil,
RepeatInterval: durationPointer(DisabledRepeatInterval),
},
},
}
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
res, err := createRoute(channelReceiver{
channel: tt.channel,
receiver: tt.recv,
})
require.NoError(t, err)
// Order of nested routes is not guaranteed.
cOpt := []cmp.Option{
cmpopts.SortSlices(func(a, b *apimodels.Route) bool {
if a.Receiver != b.Receiver {
return a.Receiver < b.Receiver
}
return a.ObjectMatchers[0].Value < b.ObjectMatchers[0].Value
}),
cmpopts.IgnoreUnexported(apimodels.Route{}, labels.Matcher{}),
}
if !cmp.Equal(tt.expected, res, cOpt...) {
t.Errorf("Unexpected Route: %v", cmp.Diff(tt.expected, res, cOpt...))
}
})
}
}
func createNotChannel(t *testing.T, uid string, id int64, name string) *legacymodels.AlertNotification {
t.Helper()
return &legacymodels.AlertNotification{UID: uid, ID: id, Name: name, Settings: simplejson.New()}
}
func createNotChannelWithReminder(t *testing.T, uid string, id int64, name string, frequency time.Duration) *legacymodels.AlertNotification {
t.Helper()
return &legacymodels.AlertNotification{UID: uid, ID: id, Name: name, SendReminder: true, Frequency: frequency, Settings: simplejson.New()}
}
func TestCreateReceivers(t *testing.T) {
tc := []struct {
name string
allChannels []*legacymodels.AlertNotification
defaultChannels []*legacymodels.AlertNotification
expRecvMap map[migrationStore.UidOrID]*apimodels.PostableApiReceiver
expRecv []channelReceiver
expErr error
}{
{
name: "when given notification channels migrate them to receivers",
allChannels: []*legacymodels.AlertNotification{createNotChannel(t, "uid1", int64(1), "name1"), createNotChannel(t, "uid2", int64(2), "name2")},
expRecvMap: map[migrationStore.UidOrID]*apimodels.PostableApiReceiver{
"uid1": createPostableApiReceiver("name1", []string{"name1"}),
"uid2": createPostableApiReceiver("name2", []string{"name2"}),
int64(1): createPostableApiReceiver("name1", []string{"name1"}),
int64(2): createPostableApiReceiver("name2", []string{"name2"}),
},
expRecv: []channelReceiver{
{
channel: createNotChannel(t, "uid1", int64(1), "name1"),
receiver: createPostableApiReceiver("name1", []string{"name1"}),
},
{
channel: createNotChannel(t, "uid2", int64(2), "name2"),
receiver: createPostableApiReceiver("name2", []string{"name2"}),
},
},
},
{
name: "when given notification channel contains double quote sanitize with underscore",
allChannels: []*legacymodels.AlertNotification{createNotChannel(t, "uid1", int64(1), "name\"1")},
expRecvMap: map[migrationStore.UidOrID]*apimodels.PostableApiReceiver{
"uid1": createPostableApiReceiver("name_1", []string{"name_1"}),
int64(1): createPostableApiReceiver("name_1", []string{"name_1"}),
},
expRecv: []channelReceiver{
{
channel: createNotChannel(t, "uid1", int64(1), "name\"1"),
receiver: createPostableApiReceiver("name_1", []string{"name_1"}),
},
},
},
{
name: "when given notification channels collide after sanitization add short hash to end",
allChannels: []*legacymodels.AlertNotification{createNotChannel(t, "uid1", int64(1), "name\"1"), createNotChannel(t, "uid2", int64(2), "name_1")},
expRecvMap: map[migrationStore.UidOrID]*apimodels.PostableApiReceiver{
"uid1": createPostableApiReceiver("name_1", []string{"name_1"}),
"uid2": createPostableApiReceiver("name_1_dba13d", []string{"name_1_dba13d"}),
int64(1): createPostableApiReceiver("name_1", []string{"name_1"}),
int64(2): createPostableApiReceiver("name_1_dba13d", []string{"name_1_dba13d"}),
},
expRecv: []channelReceiver{
{
channel: createNotChannel(t, "uid1", int64(1), "name\"1"),
receiver: createPostableApiReceiver("name_1", []string{"name_1"}),
},
{
channel: createNotChannel(t, "uid2", int64(2), "name_1"),
receiver: createPostableApiReceiver("name_1_dba13d", []string{"name_1_dba13d"}),
},
},
},
}
sqlStore := db.InitTestDB(t)
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
recvMap, recvs, err := m.createReceivers(tt.allChannels)
if tt.expErr != nil {
require.Error(t, err)
require.EqualError(t, err, tt.expErr.Error())
return
}
require.NoError(t, err)
// We ignore certain fields for the purposes of this test
for _, recv := range recvs {
for _, not := range recv.receiver.GrafanaManagedReceivers {
not.UID = ""
not.Settings = nil
not.SecureSettings = nil
}
}
require.Equal(t, tt.expRecvMap, recvMap)
require.ElementsMatch(t, tt.expRecv, recvs)
})
}
}
func TestMigrateNotificationChannelSecureSettings(t *testing.T) {
legacyEncryptFn := func(data string) string {
raw, err := util.Encrypt([]byte(data), setting.SecretKey)
require.NoError(t, err)
return string(raw)
}
decryptFn := func(data string, m *OrgMigration) string {
decoded, err := base64.StdEncoding.DecodeString(data)
require.NoError(t, err)
raw, err := m.encryptionService.Decrypt(context.Background(), decoded)
require.NoError(t, err)
return string(raw)
}
gen := func(nType string, fn func(channel *legacymodels.AlertNotification)) *legacymodels.AlertNotification {
not := &legacymodels.AlertNotification{
UID: "uid",
ID: 1,
Name: "channel name",
Type: nType,
Settings: simplejson.NewFromAny(map[string]any{
"something": "some value",
}),
SecureSettings: map[string][]byte{},
}
if fn != nil {
fn(not)
}
return not
}
genExpSlack := func(fn func(channel *apimodels.PostableGrafanaReceiver)) *apimodels.PostableGrafanaReceiver {
rawSettings, err := json.Marshal(map[string]string{
"something": "some value",
})
require.NoError(t, err)
recv := &apimodels.PostableGrafanaReceiver{
UID: "uid",
Name: "channel name",
Type: "slack",
Settings: rawSettings,
SecureSettings: map[string]string{
"token": "secure token",
"url": "secure url",
},
}
if fn != nil {
fn(recv)
}
return recv
}
tc := []struct {
name string
channel *legacymodels.AlertNotification
expRecv *apimodels.PostableGrafanaReceiver
expErr error
}{
{
name: "when secure settings exist, migrate them to receiver secure settings",
channel: gen("slack", func(channel *legacymodels.AlertNotification) {
channel.SecureSettings = map[string][]byte{
"token": []byte(legacyEncryptFn("secure token")),
"url": []byte(legacyEncryptFn("secure url")),
}
}),
expRecv: genExpSlack(nil),
},
{
name: "when no secure settings are encrypted, do nothing",
channel: gen("slack", nil),
expRecv: genExpSlack(func(recv *apimodels.PostableGrafanaReceiver) {
delete(recv.SecureSettings, "token")
delete(recv.SecureSettings, "url")
}),
},
{
name: "when some secure settings are available unencrypted in settings, migrate them to secureSettings and encrypt",
channel: gen("slack", func(channel *legacymodels.AlertNotification) {
channel.SecureSettings = map[string][]byte{
"url": []byte(legacyEncryptFn("secure url")),
}
channel.Settings.Set("token", "secure token")
}),
expRecv: genExpSlack(nil),
},
}
sqlStore := db.InitTestDB(t)
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
recv, err := m.createNotifier(tt.channel)
if tt.expErr != nil {
require.Error(t, err)
require.EqualError(t, err, tt.expErr.Error())
return
}
require.NoError(t, err)
if len(tt.expRecv.SecureSettings) > 0 {
require.NotEqual(t, tt.expRecv, recv) // Make sure they were actually encrypted at first.
}
for k, v := range recv.SecureSettings {
recv.SecureSettings[k] = decryptFn(v, m)
}
require.Equal(t, tt.expRecv, recv)
})
}
// Generate tests for each notification channel type.
t.Run("secure settings migrations for each notifier type", func(t *testing.T) {
notifiers := channels_config.GetAvailableNotifiers()
t.Run("migrate notification channel secure settings to receiver secure settings", func(t *testing.T) {
for _, notifier := range notifiers {
nType := notifier.Type
secureSettings, err := channels_config.GetSecretKeysForContactPointType(nType)
require.NoError(t, err)
t.Run(nType, func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
channel := gen(nType, func(channel *legacymodels.AlertNotification) {
for _, key := range secureSettings {
channel.SecureSettings[key] = []byte(legacyEncryptFn("secure " + key))
}
})
recv, err := m.createNotifier(channel)
require.NoError(t, err)
require.Equal(t, nType, recv.Type)
if len(secureSettings) > 0 {
for _, key := range secureSettings {
require.NotEqual(t, "secure "+key, recv.SecureSettings[key]) // Make sure they were actually encrypted at first.
}
}
require.Len(t, recv.SecureSettings, len(secureSettings))
for _, key := range secureSettings {
require.Equal(t, "secure "+key, decryptFn(recv.SecureSettings[key], m))
}
})
}
})
t.Run("for certain legacy channel types, migrate secure fields stored in settings to secure settings", func(t *testing.T) {
for _, notifier := range notifiers {
nType := notifier.Type
secureSettings, ok := secureKeysToMigrate[nType]
if !ok {
continue
}
t.Run(nType, func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
channel := gen(nType, func(channel *legacymodels.AlertNotification) {
for _, key := range secureSettings {
// The key difference from the test above: we store the secure settings in the plain settings field and expect
// them to be migrated to secureSettings.
channel.Settings.Set(key, "secure "+key)
}
})
recv, err := m.createNotifier(channel)
require.NoError(t, err)
require.Equal(t, nType, recv.Type)
if len(secureSettings) > 0 {
for _, key := range secureSettings {
require.NotEqual(t, "secure "+key, recv.SecureSettings[key]) // Make sure they were actually encrypted at first.
}
}
require.Len(t, recv.SecureSettings, len(secureSettings))
for _, key := range secureSettings {
require.Equal(t, "secure "+key, decryptFn(recv.SecureSettings[key], m))
}
})
}
})
})
}
func TestCreateDefaultRouteAndReceiver(t *testing.T) {
tc := []struct {
name string
amConfig *apimodels.PostableUserConfig
defaultChannels []*legacymodels.AlertNotification
expRecv *apimodels.PostableApiReceiver
expRoute *apimodels.Route
expErr error
}{
{
name: "when given multiple default notification channels migrate them to a single receiver",
defaultChannels: []*legacymodels.AlertNotification{createNotChannel(t, "uid1", int64(1), "name1"), createNotChannel(t, "uid2", int64(2), "name2")},
expRecv: createPostableApiReceiver("autogen-contact-point-default", []string{"name1", "name2"}),
expRoute: &apimodels.Route{
Receiver: "autogen-contact-point-default",
Routes: make([]*apimodels.Route, 0),
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
RepeatInterval: durationPointer(DisabledRepeatInterval),
},
},
{
name: "when given multiple default notification channels migrate them to a single receiver with RepeatInterval set to be the minimum of all channel frequencies",
defaultChannels: []*legacymodels.AlertNotification{
createNotChannelWithReminder(t, "uid1", int64(1), "name1", time.Duration(42)),
createNotChannelWithReminder(t, "uid2", int64(2), "name2", time.Duration(100000)),
},
expRecv: createPostableApiReceiver("autogen-contact-point-default", []string{"name1", "name2"}),
expRoute: &apimodels.Route{
Receiver: "autogen-contact-point-default",
Routes: make([]*apimodels.Route, 0),
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
RepeatInterval: durationPointer(model.Duration(42)),
},
},
{
name: "when given no default notification channels create a single empty receiver for default",
defaultChannels: []*legacymodels.AlertNotification{},
expRecv: createPostableApiReceiver("autogen-contact-point-default", nil),
expRoute: &apimodels.Route{
Receiver: "autogen-contact-point-default",
Routes: make([]*apimodels.Route, 0),
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
RepeatInterval: nil,
},
},
{
name: "when given a single default notification channels don't create a new default receiver",
defaultChannels: []*legacymodels.AlertNotification{createNotChannel(t, "uid1", int64(1), "name1")},
expRecv: nil,
expRoute: &apimodels.Route{
Receiver: "name1",
Routes: make([]*apimodels.Route, 0),
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
RepeatInterval: durationPointer(DisabledRepeatInterval),
},
},
{
name: "when given a single default notification channel with SendReminder=true, use the channels Frequency as the RepeatInterval",
defaultChannels: []*legacymodels.AlertNotification{createNotChannelWithReminder(t, "uid1", int64(1), "name1", time.Duration(42))},
expRecv: nil,
expRoute: &apimodels.Route{
Receiver: "name1",
Routes: make([]*apimodels.Route, 0),
GroupByStr: []string{ngModels.FolderTitleLabel, model.AlertNameLabel},
RepeatInterval: durationPointer(model.Duration(42)),
},
},
}
sqlStore := db.InitTestDB(t)
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
m := service.newOrgMigration(1)
recv, route, err := m.createDefaultRouteAndReceiver(tt.defaultChannels)
if tt.expErr != nil {
require.Error(t, err)
require.EqualError(t, err, tt.expErr.Error())
return
}
require.NoError(t, err)
// We ignore certain fields for the purposes of this test
if recv != nil {
for _, not := range recv.GrafanaManagedReceivers {
not.UID = ""
not.Settings = nil
not.SecureSettings = nil
}
}
require.Equal(t, tt.expRecv, recv)
require.Equal(t, tt.expRoute, route)
})
}
}
func createPostableApiReceiver(name string, integrationNames []string) *apimodels.PostableApiReceiver {
integrations := make([]*apimodels.PostableGrafanaReceiver, 0, len(integrationNames))
for _, integrationName := range integrationNames {
integrations = append(integrations, &apimodels.PostableGrafanaReceiver{Name: integrationName})
}
return &apimodels.PostableApiReceiver{
Receiver: config.Receiver{
Name: name,
},
PostableGrafanaReceivers: apimodels.PostableGrafanaReceivers{
GrafanaManagedReceivers: integrations,
},
}
}
func durationPointer(d model.Duration) *model.Duration {
return &d
}

View File

@@ -0,0 +1,359 @@
package migration
import (
"context"
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/services/datasources"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/tsdb/legacydata"
"github.com/grafana/grafana/pkg/tsdb/legacydata/interval"
"github.com/grafana/grafana/pkg/util"
)
// It is defined in pkg/expr/service.go as "DatasourceType"
const expressionDatasourceUID = "__expr__"
//nolint:gocyclo
func transConditions(ctx context.Context, alert *migrationStore.DashAlert, store migrationStore.Store) (*condition, error) {
// TODO: needs a significant refactor to reduce complexity.
usr := getMigrationUser(alert.OrgID)
set := alert.ParsedSettings
refIDtoCondIdx := make(map[string][]int) // a map of original refIds to their corresponding condition index
for i, cond := range set.Conditions {
if len(cond.Query.Params) != 3 {
return nil, fmt.Errorf("unexpected number of query parameters in cond %v, want 3 got %v", i+1, len(cond.Query.Params))
}
refID := cond.Query.Params[0]
refIDtoCondIdx[refID] = append(refIDtoCondIdx[refID], i)
}
newRefIDstoCondIdx := make(map[string][]int) // a map of the new refIds to their corresponding condition index
refIDs := make([]string, 0, len(refIDtoCondIdx)) // a unique sorted list of the original refIDs
for refID := range refIDtoCondIdx {
refIDs = append(refIDs, refID)
}
sort.Strings(refIDs)
newRefIDsToTimeRanges := make(map[string][2]string) // a map of new RefIDs to their time range string tuple representation
for _, refID := range refIDs {
condIdxes := refIDtoCondIdx[refID]
if len(condIdxes) == 1 {
// If the refID is used in only one condition, keep the letter as the new refID
newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[0])
newRefIDsToTimeRanges[refID] = [2]string{set.Conditions[condIdxes[0]].Query.Params[1], set.Conditions[condIdxes[0]].Query.Params[2]}
continue
}
// track unique time ranges within the same refID
timeRangesToCondIdx := make(map[[2]string][]int) // a map of the time range tuple to the condition index
for _, idx := range condIdxes {
timeParamFrom := set.Conditions[idx].Query.Params[1]
timeParamTo := set.Conditions[idx].Query.Params[2]
key := [2]string{timeParamFrom, timeParamTo}
timeRangesToCondIdx[key] = append(timeRangesToCondIdx[key], idx)
}
if len(timeRangesToCondIdx) == 1 {
// if all conditions share the same time range, there is no need to create a new query with a new RefID
for i := range condIdxes {
newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[i])
newRefIDsToTimeRanges[refID] = [2]string{set.Conditions[condIdxes[i]].Query.Params[1], set.Conditions[condIdxes[i]].Query.Params[2]}
}
continue
}
// This referenced query/refID has different time ranges, so new queries are needed for each unique time range.
timeRanges := make([][2]string, 0, len(timeRangesToCondIdx)) // a sorted list of unique time ranges for the query
for tr := range timeRangesToCondIdx {
timeRanges = append(timeRanges, tr)
}
sort.Slice(timeRanges, func(i, j int) bool {
switch {
case timeRanges[i][0] < timeRanges[j][0]:
return true
case timeRanges[i][0] > timeRanges[j][0]:
return false
default:
return timeRanges[i][1] < timeRanges[j][1]
}
})
for _, tr := range timeRanges {
idxes := timeRangesToCondIdx[tr]
for i := 0; i < len(idxes); i++ {
newLetter, err := getNewRefID(newRefIDstoCondIdx)
if err != nil {
return nil, err
}
newRefIDstoCondIdx[newLetter] = append(newRefIDstoCondIdx[newLetter], idxes[i])
newRefIDsToTimeRanges[newLetter] = [2]string{set.Conditions[idxes[i]].Query.Params[1], set.Conditions[idxes[i]].Query.Params[2]}
}
}
}
newRefIDs := make([]string, 0, len(newRefIDstoCondIdx)) // newRefIDs is a sorted list of the unique refIDs of new queries
for refID := range newRefIDstoCondIdx {
newRefIDs = append(newRefIDs, refID)
}
sort.Strings(newRefIDs)
newCond := &condition{}
condIdxToNewRefID := make(map[int]string) // a map of condition indices to the RefIDs of new queries
// build the new data source queries
for _, refID := range newRefIDs {
condIdxes := newRefIDstoCondIdx[refID]
for i, condIdx := range condIdxes {
condIdxToNewRefID[condIdx] = refID
if i > 0 {
// only create each unique query once
continue
}
var queryObj map[string]any // copy the model
err := json.Unmarshal(set.Conditions[condIdx].Query.Model, &queryObj)
if err != nil {
return nil, err
}
var queryType string
if v, ok := queryObj["queryType"]; ok {
if s, ok := v.(string); ok {
queryType = s
}
}
// The alert may reference a datasource that has since been deleted, so we cannot require a match.
dsUid := ""
if ds, err := store.GetDatasource(ctx, set.Conditions[condIdx].Query.DatasourceID, usr); err == nil {
dsUid = ds.UID
} else {
if !errors.Is(err, datasources.ErrDataSourceNotFound) {
return nil, err
}
}
queryObj["refId"] = refID
// See services/alerting/conditions/query.go's newQueryCondition
queryObj["maxDataPoints"] = interval.DefaultRes
simpleJson, err := simplejson.NewJson(set.Conditions[condIdx].Query.Model)
if err != nil {
return nil, err
}
rawFrom := newRefIDsToTimeRanges[refID][0]
rawTo := newRefIDsToTimeRanges[refID][1]
calculatedInterval, err := calculateInterval(legacydata.NewDataTimeRange(rawFrom, rawTo), simpleJson, nil)
if err != nil {
return nil, err
}
queryObj["intervalMs"] = calculatedInterval.Milliseconds()
encodedObj, err := json.Marshal(queryObj)
if err != nil {
return nil, err
}
rTR, err := getRelativeDuration(rawFrom, rawTo)
if err != nil {
return nil, err
}
alertQuery := ngmodels.AlertQuery{
RefID: refID,
Model: encodedObj,
RelativeTimeRange: *rTR,
DatasourceUID: dsUid,
QueryType: queryType,
}
newCond.Data = append(newCond.Data, alertQuery)
}
}
// build the new classic condition pointing at our new equivalent queries
conditions := make([]classicConditionJSON, len(set.Conditions))
for i, cond := range set.Conditions {
newCond := classicConditionJSON{}
newCond.Evaluator = migrationStore.ConditionEvalJSON{
Type: cond.Evaluator.Type,
Params: cond.Evaluator.Params,
}
newCond.Operator.Type = cond.Operator.Type
newCond.Query.Params = append(newCond.Query.Params, condIdxToNewRefID[i])
newCond.Reducer.Type = cond.Reducer.Type
conditions[i] = newCond
}
ccRefID, err := getNewRefID(newRefIDstoCondIdx) // get refID for the classic condition
if err != nil {
return nil, err
}
newCond.Condition = ccRefID // set the alert condition to point to the classic condition
newCond.OrgID = alert.OrgID
exprModel := struct {
Type string `json:"type"`
RefID string `json:"refId"`
Conditions []classicConditionJSON `json:"conditions"`
}{
"classic_conditions",
ccRefID,
conditions,
}
exprModelJSON, err := json.Marshal(&exprModel)
if err != nil {
return nil, err
}
ccAlertQuery := ngmodels.AlertQuery{
RefID: ccRefID,
Model: exprModelJSON,
DatasourceUID: expressionDatasourceUID,
}
newCond.Data = append(newCond.Data, ccAlertQuery)
sort.Slice(newCond.Data, func(i, j int) bool {
return newCond.Data[i].RefID < newCond.Data[j].RefID
})
return newCond, nil
}
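// As an illustration (a sketch, not taken from real dashboard data): a legacy
// alert with two conditions on the same refID "A" but different time ranges
// forces a split, because each unique (refID, time range) pair must become
// its own data source query:
//
//	conditions: [
//	  {query: {params: ["A", "5m", "now"]}},
//	  {query: {params: ["A", "15m", "now"]}},
//	]
//	// => two data source queries, one per time range (letters assigned by
//	// getNewRefID), plus one classic condition query that references them
//	// and becomes the rule's Condition.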
type condition struct {
// Condition is the RefID of the query or expression from
// the Data property to get the results for.
Condition string `json:"condition"`
OrgID int64 `json:"-"`
// Data is an array of data source queries and/or server side expressions.
Data []ngmodels.AlertQuery `json:"data"`
}
const alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
// getNewRefID returns the first capital letter of the alphabet that is not already
// in use as a RefID, falling back to generated short UIDs if all letters are taken.
// It errors only if it cannot find a unique value after that.
func getNewRefID(refIDs map[string][]int) (string, error) {
for _, r := range alpha {
sR := string(r)
if _, ok := refIDs[sR]; ok {
continue
}
return sR, nil
}
for i := 0; i < 20; i++ {
sR := util.GenerateShortUID()
if _, ok := refIDs[sR]; ok {
continue
}
return sR, nil
}
return "", fmt.Errorf("failed to generate unique RefID")
}
// getRelativeDuration turns the alerting durations for dashboard conditions
// into a relative time range.
func getRelativeDuration(rawFrom, rawTo string) (*ngmodels.RelativeTimeRange, error) {
fromD, err := getFrom(rawFrom)
if err != nil {
return nil, err
}
toD, err := getTo(rawTo)
if err != nil {
return nil, err
}
return &ngmodels.RelativeTimeRange{
From: ngmodels.Duration(fromD),
To: ngmodels.Duration(toD),
}, nil
}
func getFrom(from string) (time.Duration, error) {
fromRaw := strings.Replace(from, "now-", "", 1)
d, err := time.ParseDuration("-" + fromRaw)
if err != nil {
return 0, err
}
return -d, nil
}
func getTo(to string) (time.Duration, error) {
if to == "now" {
return 0, nil
} else if strings.HasPrefix(to, "now-") {
withoutNow := strings.Replace(to, "now-", "", 1)
d, err := time.ParseDuration("-" + withoutNow)
if err != nil {
return 0, err
}
return -d, nil
}
d, err := time.ParseDuration(to)
if err != nil {
return 0, err
}
return -d, nil
}
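// For example (a minimal sketch assuming the legacy "now-" syntax): a legacy
// alert evaluated over "now-5m" to "now" maps to a range counted backwards
// from the evaluation time:
//
//	rtr, _ := getRelativeDuration("now-5m", "now")
//	// rtr.From == ngmodels.Duration(5 * time.Minute)
//	// rtr.To   == ngmodels.Duration(0)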
type classicConditionJSON struct {
Evaluator migrationStore.ConditionEvalJSON `json:"evaluator"`
Operator struct {
Type string `json:"type"`
} `json:"operator"`
Query struct {
Params []string `json:"params"`
} `json:"query"`
Reducer struct {
// Params []any `json:"params"` (Unused)
Type string `json:"type"`
} `json:"reducer"`
}
// Copied from services/alerting/conditions/query.go's calculateInterval
func calculateInterval(timeRange legacydata.DataTimeRange, model *simplejson.Json, dsInfo *datasources.DataSource) (time.Duration, error) {
// if there is no min-interval specified in the datasource or in the dashboard-panel,
// the value of 1ms is used (this is how it is done in the dashboard-interval-calculation too,
// see https://github.com/grafana/grafana/blob/9a0040c0aeaae8357c650cec2ee644a571dddf3d/packages/grafana-data/src/datetime/rangeutil.ts#L264)
defaultMinInterval := time.Millisecond * 1
// interval.GetIntervalFrom has two problems (but they do not affect us here):
// - it returns the min-interval, so it should be called interval.GetMinIntervalFrom
// - it falls back to model.intervalMs. it should not, because that one is the real final
// interval-value calculated by the browser. but, in this specific case (old-alert),
// that value is not set, so the fallback never happens.
minInterval, err := interval.GetIntervalFrom(dsInfo, model, defaultMinInterval)
if err != nil {
return time.Duration(0), err
}
calc := interval.NewCalculator()
intvl := calc.Calculate(timeRange, minInterval)
return intvl.Value, nil
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,121 @@
package migration
import (
"strings"
pb "github.com/prometheus/alertmanager/silence/silencepb"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/folder"
migmodels "github.com/grafana/grafana/pkg/services/ngalert/migration/models"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/secrets"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
// OrgMigration is a helper struct for migrating alerts for a single org. It contains state, services, and caches.
type OrgMigration struct {
cfg *setting.Cfg
log log.Logger
migrationStore migrationStore.Store
encryptionService secrets.Service
orgID int64
seenUIDs Deduplicator
silences []*pb.MeshSilence
alertRuleTitleDedup map[string]Deduplicator // Folder -> Deduplicator (Title).
// cache for folders created for dashboards that have custom permissions
folderCache map[string]*folder.Folder
generalAlertingFolder *folder.Folder
state *migmodels.OrgMigrationState
}
// newOrgMigration creates a new OrgMigration for the given orgID.
func (ms *MigrationService) newOrgMigration(orgID int64) *OrgMigration {
return &OrgMigration{
cfg: ms.cfg,
log: ms.log.New("orgID", orgID),
migrationStore: ms.migrationStore,
encryptionService: ms.encryptionService,
orgID: orgID,
// We deduplicate for case-insensitive matching in MySQL-compatible backend flavours because they use case-insensitive collation.
seenUIDs: Deduplicator{set: make(map[string]struct{}), caseInsensitive: ms.migrationStore.CaseInsensitive()},
silences: make([]*pb.MeshSilence, 0),
alertRuleTitleDedup: make(map[string]Deduplicator),
folderCache: make(map[string]*folder.Folder),
state: &migmodels.OrgMigrationState{
OrgID: orgID,
CreatedFolders: make([]string, 0),
},
}
}
func (om *OrgMigration) AlertTitleDeduplicator(folderUID string) Deduplicator {
if _, ok := om.alertRuleTitleDedup[folderUID]; !ok {
om.alertRuleTitleDedup[folderUID] = Deduplicator{
set: make(map[string]struct{}),
caseInsensitive: om.migrationStore.CaseInsensitive(),
maxLen: store.AlertDefinitionMaxTitleLength,
}
}
return om.alertRuleTitleDedup[folderUID]
}
type AlertPair struct {
AlertRule *models.AlertRule
DashAlert *migrationStore.DashAlert
}
// Deduplicator is a wrapper around map[string]struct{} and util.GenerateShortUID() which helps maintain and generate
// unique strings (such as UIDs or titles). If caseInsensitive is true, all uniqueness is determined in a
// case-insensitive manner. If maxLen is greater than 0, all strings will be truncated to maxLen before being checked
// in contains, and deduplicate will always return a string of length maxLen or less.
type Deduplicator struct {
set map[string]struct{}
caseInsensitive bool
maxLen int
}
// contains checks whether the given string has already been seen by this Deduplicator.
func (s *Deduplicator) contains(u string) bool {
dedup := u
if s.caseInsensitive {
dedup = strings.ToLower(dedup)
}
if s.maxLen > 0 && len(dedup) > s.maxLen {
dedup = dedup[:s.maxLen]
}
_, seen := s.set[dedup]
return seen
}
// deduplicate returns a unique string based on the given string by appending a uuid to it. Will truncate the given string if
// the resulting string would be longer than maxLen.
func (s *Deduplicator) deduplicate(dedup string) string {
uid := util.GenerateShortUID()
if s.maxLen > 0 && len(dedup)+1+len(uid) > s.maxLen {
trunc := s.maxLen - 1 - len(uid)
dedup = dedup[:trunc]
}
return dedup + "_" + uid
}
// add adds the given string to the Deduplicator.
func (s *Deduplicator) add(uid string) {
dedup := uid
if s.caseInsensitive {
dedup = strings.ToLower(dedup)
}
s.set[dedup] = struct{}{}
}
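// Example usage (a minimal sketch, not part of the migration flow): lookups
// are case-insensitive when the backing database collation is, and a
// collision gets a short UID suffix:
//
//	d := Deduplicator{set: make(map[string]struct{}), caseInsensitive: true}
//	d.add("CPU Alerts")
//	d.contains("cpu alerts")    // true
//	d.deduplicate("CPU Alerts") // e.g. "CPU Alerts_<short-uid>"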

View File

@@ -0,0 +1,7 @@
package models
// OrgMigrationState contains information about the state of an org migration.
type OrgMigrationState struct {
OrgID int64 `json:"orgId"`
CreatedFolders []string `json:"createdFolders"`
}

View File

@@ -0,0 +1,153 @@
package migration
import (
"context"
"errors"
"fmt"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/auth/identity"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/datasources"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/user"
)
const DASHBOARD_FOLDER = "%s Alerts - %s"
// MaxFolderName is the maximum length of the folder name generated using DASHBOARD_FOLDER format
const MaxFolderName = 255
var (
migratorPermissions = []accesscontrol.Permission{
{Action: dashboards.ActionFoldersRead, Scope: dashboards.ScopeFoldersAll},
{Action: dashboards.ActionDashboardsRead, Scope: dashboards.ScopeDashboardsAll},
{Action: dashboards.ActionFoldersPermissionsRead, Scope: dashboards.ScopeFoldersAll},
{Action: dashboards.ActionDashboardsPermissionsRead, Scope: dashboards.ScopeDashboardsAll},
{Action: dashboards.ActionFoldersCreate},
{Action: dashboards.ActionDashboardsCreate, Scope: dashboards.ScopeFoldersAll},
{Action: datasources.ActionRead, Scope: datasources.ScopeAll},
}
generalAlertingFolderTitle = "General Alerting"
)
// getMigrationUser returns a background user for the given orgID with permissions to execute migration-related tasks.
func getMigrationUser(orgID int64) identity.Requester {
return accesscontrol.BackgroundUser("ngalert_migration", orgID, org.RoleAdmin, migratorPermissions)
}
// getAlertFolderNameFromDashboard generates a folder name for alerts that belong to a dashboard. Formats the string according to DASHBOARD_FOLDER format.
// If the resulting string would exceed MaxFolderName, the dashboard title is truncated to fit.
func getAlertFolderNameFromDashboard(dash *dashboards.Dashboard) string {
maxLen := MaxFolderName - len(fmt.Sprintf(DASHBOARD_FOLDER, "", dash.UID))
title := dash.Title
if len(title) > maxLen {
title = title[:maxLen]
}
return fmt.Sprintf(DASHBOARD_FOLDER, title, dash.UID) // include the UID in the name to avoid collisions
}
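// For instance (UID and title are made up for illustration):
//
//	dash := &dashboards.Dashboard{UID: "abc123", Title: "Service Health"}
//	getAlertFolderNameFromDashboard(dash) // "Service Health Alerts - abc123"
//
// A title long enough to push the result past MaxFolderName is truncated
// before formatting, while the UID suffix is always kept intact.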
func (om *OrgMigration) getOrCreateMigratedFolder(ctx context.Context, log log.Logger, dashID int64) (*dashboards.Dashboard, *folder.Folder, error) {
dash, err := om.migrationStore.GetDashboard(ctx, om.orgID, dashID)
if err != nil {
if errors.Is(err, dashboards.ErrFolderNotFound) {
return nil, nil, fmt.Errorf("dashboard with ID %v under organisation %d not found: %w", dashID, om.orgID, err)
}
return nil, nil, fmt.Errorf("failed to get dashboard with ID %v under organisation %d: %w", dashID, om.orgID, err)
}
l := log.New(
"dashboardTitle", dash.Title,
"dashboardUID", dash.UID,
)
var migratedFolder *folder.Folder
switch {
case dash.HasACL:
folderName := getAlertFolderNameFromDashboard(dash)
f, ok := om.folderCache[folderName]
if !ok {
l.Info("create a new folder for alerts that belongs to dashboard because it has custom permissions", "folder", folderName)
// create folder and assign the permissions of the dashboard (included default and inherited)
f, err = om.createFolder(ctx, om.orgID, folderName)
if err != nil {
return nil, nil, fmt.Errorf("create new folder: %w", err)
}
permissions, err := om.migrationStore.GetACL(ctx, dash.OrgID, dash.ID)
if err != nil {
return nil, nil, fmt.Errorf("failed to get dashboard %d under organisation %d permissions: %w", dash.ID, dash.OrgID, err)
}
err = om.migrationStore.SetACL(ctx, f.OrgID, f.ID, permissions)
if err != nil {
return nil, nil, fmt.Errorf("failed to set folder %d under organisation %d permissions: %w", f.ID, f.OrgID, err)
}
om.folderCache[folderName] = f
}
migratedFolder = f
case dash.FolderID > 0:
// get folder if exists
f, err := om.migrationStore.GetFolder(ctx, &folder.GetFolderQuery{ID: &dash.FolderID, OrgID: dash.OrgID, SignedInUser: getMigrationUser(dash.OrgID)})
if err != nil {
// If folder does not exist then the dashboard is an orphan and we migrate the alert to the general folder.
l.Warn("Failed to find folder for dashboard. Migrate rule to the default folder", "missing_folder_id", dash.FolderID, "error", err)
migratedFolder, err = om.getOrCreateGeneralFolder(ctx, dash.OrgID)
if err != nil {
return nil, nil, err
}
} else {
migratedFolder = f
}
default:
migratedFolder, err = om.getOrCreateGeneralFolder(ctx, dash.OrgID)
if err != nil {
return nil, nil, err
}
}
if migratedFolder.UID == "" {
return nil, nil, fmt.Errorf("empty folder identifier")
}
return dash, migratedFolder, nil
}
// getOrCreateGeneralFolder returns the general alerting folder under the given organisation,
// creating it if it does not yet exist.
func (om *OrgMigration) getOrCreateGeneralFolder(ctx context.Context, orgID int64) (*folder.Folder, error) {
if om.generalAlertingFolder != nil {
return om.generalAlertingFolder, nil
}
f, err := om.migrationStore.GetFolder(ctx, &folder.GetFolderQuery{OrgID: orgID, Title: &generalAlertingFolderTitle, SignedInUser: getMigrationUser(orgID)})
if err != nil {
if errors.Is(err, dashboards.ErrFolderNotFound) {
// create folder
generalAlertingFolder, err := om.createFolder(ctx, orgID, generalAlertingFolderTitle)
if err != nil {
return nil, fmt.Errorf("create general alerting folder '%s': %w", generalAlertingFolderTitle, err)
}
om.generalAlertingFolder = generalAlertingFolder
return om.generalAlertingFolder, nil
}
return nil, fmt.Errorf("get general alerting folder '%s': %w", generalAlertingFolderTitle, err)
}
om.generalAlertingFolder = f
return om.generalAlertingFolder, nil
}
// createFolder creates a new folder with given permissions.
func (om *OrgMigration) createFolder(ctx context.Context, orgID int64, title string) (*folder.Folder, error) {
f, err := om.migrationStore.CreateFolder(ctx, &folder.CreateFolderCommand{
OrgID: orgID,
Title: title,
SignedInUser: getMigrationUser(orgID).(*user.SignedInUser),
})
if err != nil {
return nil, err
}
om.state.CreatedFolders = append(om.state.CreatedFolders, f.UID)
return f, nil
}

View File

@@ -0,0 +1,31 @@
package migration
import (
"os"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
// SecureJsonData is used to store encrypted data (for example in the data_source table). Each value is
// encrypted separately; keys remain plain text.
type SecureJsonData map[string][]byte
var seclogger = log.New("securejsondata")
// Decrypt returns a map of the same shape in which all values are decrypted. It is the
// inverse of GetEncryptedJsonData.
func (s SecureJsonData) Decrypt() map[string]string {
decrypted := make(map[string]string)
for key, data := range s {
decryptedData, err := util.Decrypt(data, setting.SecretKey)
if err != nil {
seclogger.Error(err.Error())
os.Exit(1)
}
decrypted[key] = string(decryptedData)
}
return decrypted
}

View File

@@ -0,0 +1,137 @@
package migration
import (
"context"
"fmt"
"time"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/serverlock"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
"github.com/grafana/grafana/pkg/services/secrets"
"github.com/grafana/grafana/pkg/setting"
)
// actionName is the unique row-level lock name for serverlock.ServerLockService.
const actionName = "alerting migration"
//nolint:stylecheck
var ForceMigrationError = fmt.Errorf("Grafana has already been migrated to Unified Alerting. Any alert rules created while using Unified Alerting will be deleted by rolling back. Set force_migration=true in your grafana.ini and restart Grafana to roll back and delete Unified Alerting configuration data.")
type MigrationService struct {
lock *serverlock.ServerLockService
cfg *setting.Cfg
log log.Logger
store db.DB
migrationStore migrationStore.Store
encryptionService secrets.Service
}
func ProvideService(
lock *serverlock.ServerLockService,
cfg *setting.Cfg,
store db.DB,
migrationStore migrationStore.Store,
encryptionService secrets.Service,
) (*MigrationService, error) {
return &MigrationService{
lock: lock,
log: log.New("ngalert.migration"),
cfg: cfg,
store: store,
migrationStore: migrationStore,
encryptionService: encryptionService,
}, nil
}
// Run starts the migration. This will either migrate from legacy alerting to unified alerting or revert the migration.
// If the migration status in the kvstore is not set and unified alerting is enabled, the migration will be executed.
// If the migration status in the kvstore is set, unified alerting is disabled, and ForceMigration is true, the migration will be reverted.
func (ms *MigrationService) Run(ctx context.Context) error {
var errMigration error
errLock := ms.lock.LockExecuteAndRelease(ctx, actionName, time.Minute*10, func(context.Context) {
ms.log.Info("Starting")
errMigration = ms.store.InTransaction(ctx, func(ctx context.Context) error {
migrated, err := ms.migrationStore.IsMigrated(ctx)
if err != nil {
return fmt.Errorf("getting migration status: %w", err)
}
if migrated == ms.cfg.UnifiedAlerting.IsEnabled() {
// Nothing to do.
ms.log.Info("No migrations to run")
return nil
}
if migrated {
// If legacy alerting is also disabled, there is nothing to do
if setting.AlertingEnabled != nil && !*setting.AlertingEnabled {
return nil
}
// Safeguard to prevent data loss when reverting from UA to LA.
if !ms.cfg.ForceMigration {
return ForceMigrationError
}
// Revert migration
ms.log.Info("Reverting legacy migration")
err := ms.migrationStore.RevertAllOrgs(ctx)
if err != nil {
return fmt.Errorf("reverting migration: %w", err)
}
ms.log.Info("Legacy migration reverted")
return nil
}
ms.log.Info("Starting legacy migration")
err = ms.migrateAllOrgs(ctx)
if err != nil {
return fmt.Errorf("executing migration: %w", err)
}
err = ms.migrationStore.SetMigrated(ctx, true)
if err != nil {
return fmt.Errorf("setting migration status: %w", err)
}
ms.log.Info("Completed legacy migration")
return nil
})
})
if errLock != nil {
ms.log.Warn("Server lock for alerting migration already exists")
return nil
}
if errMigration != nil {
return fmt.Errorf("migration failed: %w", errMigration)
}
return nil
}
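// The branch logic in Run reduces to a small decision matrix (ignoring the
// edge case where legacy alerting is also disabled). A hypothetical helper,
// shown as a sketch only:
//
//	func decideAction(migrated, uaEnabled, force bool) string {
//		switch {
//		case migrated == uaEnabled:
//			return "nothing to do"
//		case migrated && !force:
//			return "error: set force_migration=true to revert"
//		case migrated:
//			return "revert to legacy alerting"
//		default:
//			return "migrate to unified alerting"
//		}
//	}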
// IsDisabled returns true if the cfg is nil.
func (ms *MigrationService) IsDisabled() bool {
return ms.cfg == nil
}
// migrateAllOrgs executes the migration for all orgs.
func (ms *MigrationService) migrateAllOrgs(ctx context.Context) error {
orgs, err := ms.migrationStore.GetAllOrgs(ctx)
if err != nil {
return fmt.Errorf("get orgs: %w", err)
}
for _, o := range orgs {
om := ms.newOrgMigration(o.ID)
if err := om.migrateOrg(ctx); err != nil {
return fmt.Errorf("migrate org %d: %w", o.ID, err)
}
err = om.migrationStore.SetOrgMigrationState(ctx, o.ID, om.state)
if err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,190 @@
package migration
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/db"
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/setting"
)
// TestServiceRevert tests migration revert.
func TestServiceRevert(t *testing.T) {
alerts := []*legacymodels.Alert{
createAlert(t, 1, 1, 1, "alert1", []string{"notifier1"}),
}
channels := []*legacymodels.AlertNotification{
createAlertNotification(t, int64(1), "notifier1", "email", emailSettings, false),
}
dashes := []*dashboards.Dashboard{
createDashboard(t, 1, 1, "dash1-1", 5, nil),
createDashboard(t, 2, 1, "dash2-1", 5, nil),
createDashboard(t, 8, 1, "dash-in-general-1", 0, nil),
}
folders := []*dashboards.Dashboard{
createFolder(t, 5, 1, "folder5-1"),
}
t.Run("revert deletes UA resources", func(t *testing.T) {
sqlStore := db.InitTestDB(t)
x := sqlStore.GetEngine()
setupLegacyAlertsTables(t, x, channels, alerts, folders, dashes)
dashCount, err := x.Table("dashboard").Count(&dashboards.Dashboard{})
require.NoError(t, err)
require.Equal(t, int64(4), dashCount)
// Run migration.
ctx := context.Background()
cfg := &setting.Cfg{
ForceMigration: true,
UnifiedAlerting: setting.UnifiedAlertingSettings{
Enabled: pointer(true),
},
}
service := NewTestMigrationService(t, sqlStore, cfg)
err = service.migrationStore.SetMigrated(ctx, false)
require.NoError(t, err)
err = service.Run(ctx)
require.NoError(t, err)
// Verify migration was run.
migrated, err := service.migrationStore.IsMigrated(ctx)
require.NoError(t, err)
require.Equal(t, true, migrated)
// Currently, we fill in some random data for tables that aren't populated during migration.
_, err = x.Table("ngalert_configuration").Insert(models.AdminConfiguration{})
require.NoError(t, err)
_, err = x.Table("alert_instance").Insert(models.AlertInstance{
AlertInstanceKey: models.AlertInstanceKey{
RuleOrgID: 1,
RuleUID: "alert1",
LabelsHash: "",
},
CurrentState: models.InstanceStateNormal,
CurrentStateSince: time.Now(),
CurrentStateEnd: time.Now(),
LastEvalTime: time.Now(),
})
require.NoError(t, err)
// Verify various UA resources exist
tables := []string{
"alert_rule",
"alert_rule_version",
"alert_configuration",
"ngalert_configuration",
"alert_instance",
}
for _, table := range tables {
count, err := x.Table(table).Count()
require.NoError(t, err)
require.True(t, count > 0, "table %s should have at least one row", table)
}
// Revert migration.
service.cfg.UnifiedAlerting.Enabled = pointer(false)
err = service.Run(context.Background())
require.NoError(t, err)
// Verify revert was run.
migrated, err = service.migrationStore.IsMigrated(ctx)
require.NoError(t, err)
require.Equal(t, false, migrated)
// Verify various UA resources are gone
for _, table := range tables {
count, err := x.Table(table).Count()
require.NoError(t, err)
require.Equal(t, int64(0), count, "table %s should have no rows", table)
}
})
t.Run("revert deletes folders created during migration", func(t *testing.T) {
sqlStore := db.InitTestDB(t)
x := sqlStore.GetEngine()
alerts = []*legacymodels.Alert{
createAlert(t, 1, 8, 1, "alert1", []string{"notifier1"}),
}
setupLegacyAlertsTables(t, x, channels, alerts, folders, dashes)
dashCount, err := x.Table("dashboard").Count(&dashboards.Dashboard{})
require.NoError(t, err)
require.Equal(t, int64(4), dashCount)
// Run migration.
ctx := context.Background()
cfg := &setting.Cfg{
ForceMigration: true,
UnifiedAlerting: setting.UnifiedAlertingSettings{
Enabled: pointer(true),
},
}
service := NewTestMigrationService(t, sqlStore, cfg)
err = service.migrationStore.SetMigrated(ctx, false)
require.NoError(t, err)
err = service.Run(ctx)
require.NoError(t, err)
// Verify migration was run.
migrated, err := service.migrationStore.IsMigrated(ctx)
require.NoError(t, err)
require.Equal(t, true, migrated)
// Verify we created some folders.
newDashCount, err := x.Table("dashboard").Count(&dashboards.Dashboard{})
require.NoError(t, err)
require.Truef(t, newDashCount > dashCount, "newDashCount: %d should be greater than dashCount: %d", newDashCount, dashCount)
// Check that dashboards and folders from before migration still exist.
require.NotNil(t, getDashboard(t, x, 1, "dash1-1"))
require.NotNil(t, getDashboard(t, x, 1, "dash2-1"))
require.NotNil(t, getDashboard(t, x, 1, "dash-in-general-1"))
state, err := service.migrationStore.GetOrgMigrationState(ctx, 1)
require.NoError(t, err)
// Verify list of created folders.
require.NotEmpty(t, state.CreatedFolders)
for _, uid := range state.CreatedFolders {
require.NotNil(t, getDashboard(t, x, 1, uid))
}
// Revert migration.
service.cfg.UnifiedAlerting.Enabled = pointer(false)
err = service.Run(context.Background())
require.NoError(t, err)
// Verify revert was run.
migrated, err = service.migrationStore.IsMigrated(ctx)
require.NoError(t, err)
require.Equal(t, false, migrated)
// Verify we are back to the original count.
newDashCount, err = x.Table("dashboard").Count(&dashboards.Dashboard{})
require.NoError(t, err)
require.Equalf(t, dashCount, newDashCount, "newDashCount: %d should be equal to dashCount: %d after revert", newDashCount, dashCount)
// Check that dashboards and folders from before migration still exist.
require.NotNil(t, getDashboard(t, x, 1, "dash1-1"))
require.NotNil(t, getDashboard(t, x, 1, "dash2-1"))
require.NotNil(t, getDashboard(t, x, 1, "dash-in-general-1"))
// Check that folders created during migration are gone.
for _, uid := range state.CreatedFolders {
require.Nil(t, getDashboard(t, x, 1, uid))
}
})
}

View File

@@ -0,0 +1,157 @@
package migration
import (
"bytes"
"errors"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"strconv"
"time"
"github.com/google/uuid"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
pb "github.com/prometheus/alertmanager/silence/silencepb"
"github.com/prometheus/common/model"
"github.com/grafana/grafana/pkg/services/ngalert/models"
)
const (
// Should be the same as 'NoDataAlertName' in pkg/services/schedule/compat.go.
NoDataAlertName = "DatasourceNoData"
ErrorAlertName = "DatasourceError"
)
func (om *OrgMigration) addErrorSilence(rule *models.AlertRule) error {
uid, err := uuid.NewRandom()
if err != nil {
return errors.New("create uuid for silence")
}
s := &pb.MeshSilence{
Silence: &pb.Silence{
Id: uid.String(),
Matchers: []*pb.Matcher{
{
Type: pb.Matcher_EQUAL,
Name: model.AlertNameLabel,
Pattern: ErrorAlertName,
},
{
Type: pb.Matcher_EQUAL,
Name: "rule_uid",
Pattern: rule.UID,
},
},
StartsAt: time.Now(),
EndsAt: time.Now().AddDate(1, 0, 0), // 1 year
CreatedBy: "Grafana Migration",
Comment: fmt.Sprintf("Created during migration to unified alerting to silence Error state for alert rule ID '%s' and Title '%s' because the option 'Keep Last State' was selected for Error state", rule.UID, rule.Title),
},
ExpiresAt: time.Now().AddDate(1, 0, 0), // 1 year
}
om.silences = append(om.silences, s)
return nil
}
func (om *OrgMigration) addNoDataSilence(rule *models.AlertRule) error {
uid, err := uuid.NewRandom()
if err != nil {
return errors.New("create uuid for silence")
}
s := &pb.MeshSilence{
Silence: &pb.Silence{
Id: uid.String(),
Matchers: []*pb.Matcher{
{
Type: pb.Matcher_EQUAL,
Name: model.AlertNameLabel,
Pattern: NoDataAlertName,
},
{
Type: pb.Matcher_EQUAL,
Name: "rule_uid",
Pattern: rule.UID,
},
},
StartsAt: time.Now(),
EndsAt: time.Now().AddDate(1, 0, 0), // 1 year.
CreatedBy: "Grafana Migration",
Comment: fmt.Sprintf("Created during migration to unified alerting to silence NoData state for alert rule ID '%s' and Title '%s' because the option 'Keep Last State' was selected for NoData state", rule.UID, rule.Title),
},
ExpiresAt: time.Now().AddDate(1, 0, 0), // 1 year.
}
om.silences = append(om.silences, s)
return nil
}
func (om *OrgMigration) writeSilencesFile() error {
var buf bytes.Buffer
om.log.Debug("Writing silences file", "silences", len(om.silences))
for _, e := range om.silences {
if _, err := pbutil.WriteDelimited(&buf, e); err != nil {
return err
}
}
f, err := openReplace(silencesFileNameForOrg(om.cfg.DataPath, om.orgID))
if err != nil {
return err
}
if _, err := io.Copy(f, bytes.NewReader(buf.Bytes())); err != nil {
return err
}
return f.Close()
}
func silencesFileNameForOrg(dataPath string, orgID int64) string {
return filepath.Join(dataPath, "alerting", strconv.Itoa(int(orgID)), "silences")
}
// replaceFile wraps a file that is moved to another filename on closing.
type replaceFile struct {
*os.File
filename string
}
func (f *replaceFile) Close() error {
if err := f.File.Sync(); err != nil {
return err
}
if err := f.File.Close(); err != nil {
return err
}
return os.Rename(f.File.Name(), f.filename)
}
// openReplace opens a new temporary file that is moved to filename on closing.
func openReplace(filename string) (*replaceFile, error) {
tmpFilename := fmt.Sprintf("%s.%x", filename, uint64(rand.Int63()))
if err := os.MkdirAll(filepath.Dir(tmpFilename), os.ModePerm); err != nil {
return nil, err
}
//nolint:gosec
f, err := os.Create(tmpFilename)
if err != nil {
return nil, err
}
rf := &replaceFile{
File: f,
filename: filename,
}
return rf, nil
}
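// Together, replaceFile and openReplace implement a write-then-rename pattern:
// the silences are written to a temporary file that only replaces the real one
// on a successful Close, so readers never observe a partially written file.
// A usage sketch (error handling elided):
//
//	f, _ := openReplace("/var/lib/grafana/alerting/1/silences")
//	_, _ = f.Write(data)
//	_ = f.Close() // syncs, closes, then renames the temp file into place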
func getLabelForSilenceMatching(ruleUID string) (string, string) {
return "rule_uid", ruleUID
}

View File

@@ -0,0 +1,511 @@
package store
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/kvstore"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/accesscontrol"
legacyalerting "github.com/grafana/grafana/pkg/services/alerting"
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
"github.com/grafana/grafana/pkg/services/auth/identity"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/datasources"
"github.com/grafana/grafana/pkg/services/folder"
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
migmodels "github.com/grafana/grafana/pkg/services/ngalert/migration/models"
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/notifier"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
)
// Store is the database abstraction for migration persistence.
type Store interface {
InsertAlertRules(ctx context.Context, rules ...models.AlertRule) error
SaveAlertmanagerConfiguration(ctx context.Context, orgID int64, amConfig *apimodels.PostableUserConfig) error
GetAllOrgs(ctx context.Context) ([]*org.OrgDTO, error)
GetDatasource(ctx context.Context, datasourceID int64, user identity.Requester) (*datasources.DataSource, error)
GetNotificationChannels(ctx context.Context, orgID int64) ([]*legacymodels.AlertNotification, error)
GetOrgDashboardAlerts(ctx context.Context, orgID int64) (map[int64][]*DashAlert, int, error)
GetACL(ctx context.Context, orgID int64, dashID int64) ([]*DashboardACL, error)
SetACL(ctx context.Context, orgID int64, dashboardID int64, items []*DashboardACL) error
GetDashboard(ctx context.Context, orgID int64, id int64) (*dashboards.Dashboard, error)
GetFolder(ctx context.Context, cmd *folder.GetFolderQuery) (*folder.Folder, error)
CreateFolder(ctx context.Context, cmd *folder.CreateFolderCommand) (*folder.Folder, error)
IsMigrated(ctx context.Context) (bool, error)
SetMigrated(ctx context.Context, migrated bool) error
GetOrgMigrationState(ctx context.Context, orgID int64) (*migmodels.OrgMigrationState, error)
SetOrgMigrationState(ctx context.Context, orgID int64, summary *migmodels.OrgMigrationState) error
RevertAllOrgs(ctx context.Context) error
CaseInsensitive() bool
}
type migrationStore struct {
store db.DB
cfg *setting.Cfg
log log.Logger
kv kvstore.KVStore
alertingStore *store.DBstore
dashboardService dashboards.DashboardService
folderService folder.Service
dataSourceCache datasources.CacheService
orgService org.Service
legacyAlertNotificationService *legacyalerting.AlertNotificationService
}
// migrationStore implements the Store interface.
var _ Store = (*migrationStore)(nil)
func ProvideMigrationStore(
cfg *setting.Cfg,
sqlStore db.DB,
kv kvstore.KVStore,
alertingStore *store.DBstore,
dashboardService dashboards.DashboardService,
folderService folder.Service,
dataSourceCache datasources.CacheService,
orgService org.Service,
legacyAlertNotificationService *legacyalerting.AlertNotificationService,
) (Store, error) {
return &migrationStore{
log: log.New("ngalert.migration-store"),
cfg: cfg,
store: sqlStore,
kv: kv,
alertingStore: alertingStore,
dashboardService: dashboardService,
folderService: folderService,
dataSourceCache: dataSourceCache,
orgService: orgService,
legacyAlertNotificationService: legacyAlertNotificationService,
}, nil
}
// KVNamespace is the kvstore namespace used for the migration status.
const KVNamespace = "ngalert.migration"
// migratedKey is the kvstore key used for the migration status.
const migratedKey = "migrated"
// stateKey is the kvstore key used for the OrgMigrationState.
const stateKey = "stateKey"
const anyOrg = 0
// IsMigrated returns the migration status from the kvstore.
func (ms *migrationStore) IsMigrated(ctx context.Context) (bool, error) {
kv := kvstore.WithNamespace(ms.kv, anyOrg, KVNamespace)
content, exists, err := kv.Get(ctx, migratedKey)
if err != nil {
return false, err
}
if !exists {
return false, nil
}
return strconv.ParseBool(content)
}
// SetMigrated sets the migration status in the kvstore.
func (ms *migrationStore) SetMigrated(ctx context.Context, migrated bool) error {
kv := kvstore.WithNamespace(ms.kv, anyOrg, KVNamespace)
return kv.Set(ctx, migratedKey, strconv.FormatBool(migrated))
}
// GetOrgMigrationState returns a summary of a previous migration.
func (ms *migrationStore) GetOrgMigrationState(ctx context.Context, orgID int64) (*migmodels.OrgMigrationState, error) {
kv := kvstore.WithNamespace(ms.kv, orgID, KVNamespace)
content, exists, err := kv.Get(ctx, stateKey)
if err != nil {
return nil, err
}
if !exists {
return &migmodels.OrgMigrationState{OrgID: orgID}, nil
}
var summary migmodels.OrgMigrationState
err = json.Unmarshal([]byte(content), &summary)
if err != nil {
return nil, err
}
return &summary, nil
}
// SetOrgMigrationState sets the summary of a previous migration.
func (ms *migrationStore) SetOrgMigrationState(ctx context.Context, orgID int64, summary *migmodels.OrgMigrationState) error {
kv := kvstore.WithNamespace(ms.kv, orgID, KVNamespace)
raw, err := json.Marshal(summary)
if err != nil {
return err
}
return kv.Set(ctx, stateKey, string(raw))
}
func (ms *migrationStore) InsertAlertRules(ctx context.Context, rules ...models.AlertRule) error {
if ms.store.GetDialect().DriverName() == migrator.Postgres {
// Postgres automatically rolls back the whole transaction on a constraint violation.
// So, for Postgres, insertions are executed in a subtransaction.
err := ms.store.InTransaction(ctx, func(subCtx context.Context) error {
_, err := ms.alertingStore.InsertAlertRules(subCtx, rules)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
} else {
_, err := ms.alertingStore.InsertAlertRules(ctx, rules)
if err != nil {
return err
}
}
return nil
}
func (ms *migrationStore) SaveAlertmanagerConfiguration(ctx context.Context, orgID int64, amConfig *apimodels.PostableUserConfig) error {
rawAmConfig, err := json.Marshal(amConfig)
if err != nil {
return err
}
cmd := models.SaveAlertmanagerConfigurationCmd{
AlertmanagerConfiguration: string(rawAmConfig),
ConfigurationVersion: fmt.Sprintf("v%d", models.AlertConfigurationVersion),
Default: false,
OrgID: orgID,
LastApplied: 0,
}
return ms.alertingStore.SaveAlertmanagerConfiguration(ctx, &cmd)
}
// revertPermissions are the permissions required for the background user to revert the migration.
var revertPermissions = []accesscontrol.Permission{
{Action: dashboards.ActionFoldersDelete, Scope: dashboards.ScopeFoldersAll},
{Action: dashboards.ActionFoldersRead, Scope: dashboards.ScopeFoldersAll},
}
// RevertAllOrgs reverts the migration, deleting all unified alerting resources such as alert rules, alertmanager configurations, and silence files.
// In addition, it deletes all folders and permissions originally created by this migration; the UIDs of those folders are tracked in the kvstore.
func (ms *migrationStore) RevertAllOrgs(ctx context.Context) error {
return ms.store.WithTransactionalDbSession(ctx, func(sess *db.Session) error {
if _, err := sess.Exec("DELETE FROM alert_rule"); err != nil {
return err
}
if _, err := sess.Exec("DELETE FROM alert_rule_version"); err != nil {
return err
}
orgs, err := ms.GetAllOrgs(ctx)
if err != nil {
return fmt.Errorf("get orgs: %w", err)
}
for _, o := range orgs {
if err := ms.DeleteMigratedFolders(ctx, o.ID); err != nil {
return err
}
}
if _, err := sess.Exec("DELETE FROM alert_configuration"); err != nil {
return err
}
if _, err := sess.Exec("DELETE FROM ngalert_configuration"); err != nil {
return err
}
if _, err := sess.Exec("DELETE FROM alert_instance"); err != nil {
return err
}
if _, err := sess.Exec("DELETE FROM kv_store WHERE namespace = ?", notifier.KVNamespace); err != nil {
return err
}
if _, err := sess.Exec("DELETE FROM kv_store WHERE namespace = ?", KVNamespace); err != nil {
return err
}
files, err := filepath.Glob(filepath.Join(ms.cfg.DataPath, "alerting", "*", "silences"))
if err != nil {
return err
}
for _, f := range files {
if err := os.Remove(f); err != nil {
ms.log.Error("Failed to remove silence file", "file", f, "err", err)
}
}
err = ms.SetMigrated(ctx, false)
if err != nil {
return fmt.Errorf("setting migration status: %w", err)
}
return nil
})
}
// DeleteMigratedFolders deletes all folders created by the previous migration run for the given org, including all folder permissions.
// If a folder still contains any descendants, the operation fails and returns an error.
func (ms *migrationStore) DeleteMigratedFolders(ctx context.Context, orgID int64) error {
summary, err := ms.GetOrgMigrationState(ctx, orgID)
if err != nil {
return err
}
return ms.DeleteFolders(ctx, orgID, summary.CreatedFolders...)
}
// DeleteFolders deletes the folders with the given UIDs from the given org, including all folder permissions.
// If a folder still contains any descendants, the operation fails and returns an error.
func (ms *migrationStore) DeleteFolders(ctx context.Context, orgID int64, uids ...string) error {
if len(uids) == 0 {
return nil
}
usr := accesscontrol.BackgroundUser("ngalert_migration_revert", orgID, org.RoleAdmin, revertPermissions)
for _, folderUID := range uids {
cmd := folder.DeleteFolderCommand{
UID: folderUID,
OrgID: orgID,
SignedInUser: usr.(*user.SignedInUser),
}
err := ms.folderService.Delete(ctx, &cmd) // Also handles permissions and other related entities.
if err != nil {
return err
}
}
return nil
}
func (ms *migrationStore) GetDashboard(ctx context.Context, orgID int64, id int64) (*dashboards.Dashboard, error) {
return ms.dashboardService.GetDashboard(ctx, &dashboards.GetDashboardQuery{ID: id, OrgID: orgID})
}
func (ms *migrationStore) GetAllOrgs(ctx context.Context) ([]*org.OrgDTO, error) {
orgQuery := &org.SearchOrgsQuery{}
return ms.orgService.Search(ctx, orgQuery)
}
func (ms *migrationStore) GetDatasource(ctx context.Context, datasourceID int64, user identity.Requester) (*datasources.DataSource, error) {
return ms.dataSourceCache.GetDatasource(ctx, datasourceID, user, false)
}
// GetNotificationChannels returns all channels for this org.
func (ms *migrationStore) GetNotificationChannels(ctx context.Context, orgID int64) ([]*legacymodels.AlertNotification, error) {
return ms.legacyAlertNotificationService.GetAllAlertNotifications(ctx, &legacymodels.GetAllAlertNotificationsQuery{
OrgID: orgID,
})
}
func (ms *migrationStore) GetFolder(ctx context.Context, cmd *folder.GetFolderQuery) (*folder.Folder, error) {
return ms.folderService.Get(ctx, cmd)
}
func (ms *migrationStore) CreateFolder(ctx context.Context, cmd *folder.CreateFolderCommand) (*folder.Folder, error) {
return ms.folderService.Create(ctx, cmd)
}
// based on SQLStore.GetDashboardACLInfoList()
func (ms *migrationStore) GetACL(ctx context.Context, orgID, dashboardID int64) ([]*DashboardACL, error) {
var err error
falseStr := ms.store.GetDialect().BooleanStr(false)
result := make([]*DashboardACL, 0)
rawSQL := `
-- get distinct permissions for the dashboard and its parent folder
SELECT DISTINCT
da.id,
da.user_id,
da.team_id,
da.permission,
da.role
FROM dashboard as d
LEFT JOIN dashboard folder on folder.id = d.folder_id
LEFT JOIN dashboard_acl AS da ON
da.dashboard_id = d.id OR
da.dashboard_id = d.folder_id OR
(
-- include default permissions --
da.org_id = -1 AND (
(folder.id IS NOT NULL AND folder.has_acl = ` + falseStr + `) OR
(folder.id IS NULL AND d.has_acl = ` + falseStr + `)
)
)
WHERE d.org_id = ? AND d.id = ? AND da.id IS NOT NULL
ORDER BY da.id ASC
`
err = ms.store.WithDbSession(ctx, func(sess *db.Session) error {
return sess.SQL(rawSQL, orgID, dashboardID).Find(&result)
})
if err != nil {
return nil, err
}
return result, err
}
// based on SQLStore.UpdateDashboardACL().
// It should be called from inside a transaction.
func (ms *migrationStore) SetACL(ctx context.Context, orgID int64, dashboardID int64, items []*DashboardACL) error {
if dashboardID <= 0 {
return fmt.Errorf("folder id must be greater than zero for a folder permission")
}
return ms.store.WithDbSession(ctx, func(sess *db.Session) error {
// userPermissionsMap is a map keeping the highest permission per user
// for handling conflicting inherited (folder) and non-inherited (dashboard) user permissions
userPermissionsMap := make(map[int64]*DashboardACL, len(items))
// teamPermissionsMap is a map keeping the highest permission per team
// for handling conflicting inherited (folder) and non-inherited (dashboard) team permissions
teamPermissionsMap := make(map[int64]*DashboardACL, len(items))
for _, item := range items {
if item.UserID != 0 {
acl, ok := userPermissionsMap[item.UserID]
if !ok {
userPermissionsMap[item.UserID] = item
} else {
if item.Permission > acl.Permission {
// the higher permission wins
userPermissionsMap[item.UserID] = item
}
}
}
if item.TeamID != 0 {
acl, ok := teamPermissionsMap[item.TeamID]
if !ok {
teamPermissionsMap[item.TeamID] = item
} else {
if item.Permission > acl.Permission {
// the higher permission wins
teamPermissionsMap[item.TeamID] = item
}
}
}
}
type keyType struct {
UserID int64 `xorm:"user_id"`
TeamID int64 `xorm:"team_id"`
Role RoleType
Permission permissionType
}
// seen keeps track of inserted permissions to avoid duplicates (due to inheritance)
seen := make(map[keyType]struct{}, len(items))
for _, item := range items {
if item.UserID == 0 && item.TeamID == 0 && (item.Role == nil || !item.Role.IsValid()) {
return dashboards.ErrDashboardACLInfoMissing
}
// ignore duplicate user permissions
if item.UserID != 0 {
acl, ok := userPermissionsMap[item.UserID]
if ok {
if acl.Id != item.Id {
continue
}
}
}
// ignore duplicate team permissions
if item.TeamID != 0 {
acl, ok := teamPermissionsMap[item.TeamID]
if ok {
if acl.Id != item.Id {
continue
}
}
}
key := keyType{UserID: item.UserID, TeamID: item.TeamID, Role: "", Permission: item.Permission}
if item.Role != nil {
key.Role = *item.Role
}
if _, ok := seen[key]; ok {
continue
}
// unset Id so that the new record will get a different one
item.Id = 0
item.OrgID = orgID
item.DashboardID = dashboardID
item.Created = time.Now()
item.Updated = time.Now()
sess.Nullable("user_id", "team_id")
if _, err := sess.Insert(item); err != nil {
return err
}
seen[key] = struct{}{}
}
// Update dashboard HasACL flag
dashboard := dashboards.Dashboard{HasACL: true}
_, err := sess.Cols("has_acl").Where("id=?", dashboardID).Update(&dashboard)
return err
})
}
// GetOrgDashboardAlerts loads all legacy dashboard alerts for the given org mapped by dashboard id.
func (ms *migrationStore) GetOrgDashboardAlerts(ctx context.Context, orgID int64) (map[int64][]*DashAlert, int, error) {
var alerts []legacymodels.Alert
err := ms.store.WithDbSession(ctx, func(sess *db.Session) error {
return sess.SQL("select * from alert WHERE org_id = ? AND dashboard_id IN (SELECT id from dashboard)", orgID).Find(&alerts)
})
if err != nil {
return nil, 0, err
}
mappedAlerts := make(map[int64][]*DashAlert)
for i := range alerts {
alert := alerts[i]
rawSettings, err := json.Marshal(alert.Settings)
if err != nil {
return nil, 0, fmt.Errorf("get settings for alert rule ID:%d, name:'%s', orgID:%d: %w", alert.ID, alert.Name, alert.OrgID, err)
}
var parsedSettings DashAlertSettings
err = json.Unmarshal(rawSettings, &parsedSettings)
if err != nil {
return nil, 0, fmt.Errorf("parse settings for alert rule ID:%d, name:'%s', orgID:%d: %w", alert.ID, alert.Name, alert.OrgID, err)
}
mappedAlerts[alert.DashboardID] = append(mappedAlerts[alert.DashboardID], &DashAlert{
Alert: &alerts[i],
ParsedSettings: &parsedSettings,
})
}
return mappedAlerts, len(alerts), nil
}
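// The Marshal/Unmarshal round-trip above is how the loosely typed legacy
// settings (simplejson) are coerced into the typed DashAlertSettings; a
// minimal sketch of the same technique:
//
//	raw, _ := json.Marshal(alert.Settings) // simplejson value -> raw JSON
//	var parsed DashAlertSettings
//	_ = json.Unmarshal(raw, &parsed)

// CaseInsensitive returns true when the backing database compares strings
// case-insensitively, as MySQL-compatible flavours do by default.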
func (ms *migrationStore) CaseInsensitive() bool {
return ms.store.GetDialect().SupportEngine()
}

View File

@@ -0,0 +1,91 @@
package store
import (
"encoding/json"
"time"
legacymodels "github.com/grafana/grafana/pkg/services/alerting/models"
)
type RoleType string
const (
RoleNone RoleType = "None"
RoleViewer RoleType = "Viewer"
RoleEditor RoleType = "Editor"
RoleAdmin RoleType = "Admin"
)
func (r RoleType) IsValid() bool {
return r == RoleViewer || r == RoleAdmin || r == RoleEditor || r == RoleNone
}
type permissionType int
type DashboardACL struct {
// nolint:stylecheck
Id int64
OrgID int64 `xorm:"org_id"`
DashboardID int64 `xorm:"dashboard_id"`
UserID int64 `xorm:"user_id"`
TeamID int64 `xorm:"team_id"`
Role *RoleType // pointer to be nullable
Permission permissionType
Created time.Time
Updated time.Time
}
func (p DashboardACL) TableName() string { return "dashboard_acl" }
// UidOrID holds either a UID or an ID; it is primarily used for mapping a legacy channel to its migrated receiver.
type UidOrID any
type DashAlert struct {
*legacymodels.Alert
ParsedSettings *DashAlertSettings
}
// DashAlertSettings models the JSON stored in the settings field of
// the alert table.
type DashAlertSettings struct {
NoDataState string `json:"noDataState"`
ExecutionErrorState string `json:"executionErrorState"`
Conditions []DashAlertCondition `json:"conditions"`
AlertRuleTags any `json:"alertRuleTags"`
Notifications []DashAlertNot `json:"notifications"`
}
// DashAlertNot represents an entry of the Notifications array in
// DashAlertSettings.
type DashAlertNot struct {
UID string `json:"uid,omitempty"`
ID int64 `json:"id,omitempty"`
}
// DashAlertCondition is like classic.ClassicConditionJSON except that it
// includes the model property with the query.
type DashAlertCondition struct {
Evaluator ConditionEvalJSON `json:"evaluator"`
Operator struct {
Type string `json:"type"`
} `json:"operator"`
Query struct {
Params []string `json:"params"`
DatasourceID int64 `json:"datasourceId"`
Model json.RawMessage
} `json:"query"`
Reducer struct {
// Params []any `json:"params"` (Unused)
Type string `json:"type"`
}
}
type ConditionEvalJSON struct {
Params []float64 `json:"params"`
Type string `json:"type"` // e.g. "gt"
}
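// A hedged example of the legacy settings JSON these types decode (all
// values are illustrative, not taken from a real alert):
//
//	{
//	  "noDataState": "no_data",
//	  "executionErrorState": "alerting",
//	  "alertRuleTags": {"team": "backend"},
//	  "notifications": [{"uid": "email-1"}],
//	  "conditions": [{
//	    "evaluator": {"type": "gt", "params": [90]},
//	    "operator": {"type": "and"},
//	    "query": {"params": ["A", "5m", "now"], "datasourceId": 2, "model": {}},
//	    "reducer": {"type": "avg"}
//	  }]
//	}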

View File

@@ -0,0 +1,57 @@
package store
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/infra/localcache"
"github.com/grafana/grafana/pkg/infra/log/logtest"
"github.com/grafana/grafana/pkg/infra/tracing"
legacyalerting "github.com/grafana/grafana/pkg/services/alerting"
"github.com/grafana/grafana/pkg/services/datasources/guardian"
datasourceService "github.com/grafana/grafana/pkg/services/datasources/service"
encryptionservice "github.com/grafana/grafana/pkg/services/encryption/service"
"github.com/grafana/grafana/pkg/services/folder/folderimpl"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/ngalert/tests/fakes"
"github.com/grafana/grafana/pkg/services/ngalert/testutil"
"github.com/grafana/grafana/pkg/services/org/orgimpl"
"github.com/grafana/grafana/pkg/services/quota/quotatest"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"
)
func NewTestMigrationStore(t *testing.T, sqlStore *sqlstore.SQLStore, cfg *setting.Cfg) *migrationStore {
if cfg.UnifiedAlerting.BaseInterval == 0 {
cfg.UnifiedAlerting.BaseInterval = time.Second * 10
}
alertingStore := store.DBstore{
SQLStore: sqlStore,
Cfg: cfg.UnifiedAlerting,
}
bus := bus.ProvideBus(tracing.InitializeTracerForTest())
folderStore := folderimpl.ProvideDashboardFolderStore(sqlStore)
dashboardService, dashboardStore := testutil.SetupDashboardService(t, sqlStore, folderStore, cfg)
folderService := testutil.SetupFolderService(t, cfg, sqlStore, dashboardStore, folderStore, bus)
quotaService := &quotatest.FakeQuotaService{}
orgService, err := orgimpl.ProvideService(sqlStore, cfg, quotaService)
require.NoError(t, err)
cache := localcache.ProvideService()
return &migrationStore{
log: &logtest.Fake{},
cfg: cfg,
store: sqlStore,
kv: fakes.NewFakeKVStore(t),
alertingStore: &alertingStore,
dashboardService: dashboardService,
folderService: folderService,
dataSourceCache: datasourceService.ProvideCacheService(cache, sqlStore, guardian.ProvideGuardian()),
orgService: orgService,
legacyAlertNotificationService: legacyalerting.ProvideService(sqlStore, encryptionservice.SetupTestService(t), nil),
}
}

View File

@@ -0,0 +1,208 @@
// This file contains code that parses templates from old alerting into a sequence
// of tokens. Each token can be either a string literal or a variable.
package migration
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/ngalert/state/template"
)
// Token contains either a string literal or a variable.
type Token struct {
Literal string
Variable string
}
func (t Token) IsLiteral() bool {
return t.Literal != ""
}
func (t Token) IsVariable() bool {
return t.Variable != ""
}
func (t Token) String() string {
if t.IsLiteral() {
return t.Literal
} else if t.IsVariable() {
return t.Variable
} else {
panic("empty token")
}
}
func MigrateTmpl(l log.Logger, oldTmpl string) string {
var newTmpl string
tokens := tokenizeTmpl(l, oldTmpl)
tokens = escapeLiterals(tokens)
if anyVariableToken(tokens) {
tokens = variablesToMapLookups(tokens, "mergedLabels")
newTmpl += fmt.Sprintf("{{- $mergedLabels := %s $values -}}\n", template.MergeLabelValuesFuncName)
}
newTmpl += tokensToTmpl(tokens)
return newTmpl
}
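// A worked example (assuming template.MergeLabelValuesFuncName renders as
// "mergeLabelValues"; the real function name is whatever that constant holds):
//
//	MigrateTmpl(l, "${instance} is down")
//	// => "{{- $mergedLabels := mergeLabelValues $values -}}\n" +
//	//    "{{$mergedLabels.instance}} is down"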
func tokenizeTmpl(logger log.Logger, tmpl string) []Token {
var (
tokens []Token
l int
r int
err error
)
in := []rune(tmpl)
for r < len(in) {
if !startVariable(in[r:]) {
r++
continue
}
token, offset, tokenErr := tokenizeVariable(in[r:])
if tokenErr != nil {
err = errors.Join(err, tokenErr)
r += offset
continue
}
// we've found a variable, so everything from l -> r is the literal before the variable
// ex: "foo ${bar}" -> Literal: "foo ", Variable: "bar"
if r > l {
tokens = append(tokens, Token{Literal: string(in[l:r])})
}
tokens = append(tokens, token)
// seek l and r past the variable
r += offset
l = r
}
// any remaining runes will be a final literal
if r > l {
tokens = append(tokens, Token{Literal: string(in[l:r])})
}
if err != nil {
logger.Warn("Encountered malformed template", "template", tmpl, "err", err)
}
return tokens
}
func tokenizeVariable(in []rune) (Token, int, error) {
var (
pos int
r rune
runes []rune
)
if !startVariable(in) {
panic("tokenizeVariable called with input that doesn't start with delimiter")
}
pos += 2 // seek past opening delimiter
// consume valid runes until we hit a closing brace
// non-space whitespace and the opening delimiter are invalid
for pos < len(in) {
r = in[pos]
if unicode.IsSpace(r) && r != ' ' {
return Token{}, pos, fmt.Errorf("unexpected whitespace")
}
if startVariable(in[pos:]) {
return Token{}, pos, fmt.Errorf("ambiguous delimiter")
}
if r == '}' {
pos++
break
}
runes = append(runes, r)
pos++
}
// variable must end with '}' delimiter
if r != '}' {
return Token{}, pos, fmt.Errorf("expected '}', got '%c'", r)
}
return Token{Variable: string(runes)}, pos, nil
}
func startVariable(in []rune) bool {
return len(in) >= 2 && in[0] == '$' && in[1] == '{'
}
func anyVariableToken(tokens []Token) bool {
for _, token := range tokens {
if token.IsVariable() {
return true
}
}
return false
}
// tokensToTmpl returns the tokens as a Go template
func tokensToTmpl(tokens []Token) string {
buf := bytes.Buffer{}
for _, token := range tokens {
if token.IsVariable() {
buf.WriteString("{{")
buf.WriteString(token.String())
buf.WriteString("}}")
} else {
buf.WriteString(token.String())
}
}
return buf.String()
}
// escapeLiterals escapes any token literals with substrings that would be interpreted as Go template syntax
func escapeLiterals(tokens []Token) []Token {
result := make([]Token, 0, len(tokens))
for _, token := range tokens {
if token.IsLiteral() && shouldEscape(token.Literal) {
token.Literal = fmt.Sprintf("{{`%s`}}", token.Literal)
}
result = append(result, token)
}
return result
}
func shouldEscape(literal string) bool {
return strings.Contains(literal, "{{") || literal[len(literal)-1] == '{'
}
// variablesToMapLookups converts any variables in a slice of tokens to Go template map lookups
func variablesToMapLookups(tokens []Token, mapName string) []Token {
result := make([]Token, 0, len(tokens))
for _, token := range tokens {
if token.IsVariable() {
token.Variable = mapLookupString(token.Variable, mapName)
}
result = append(result, token)
}
return result
}
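// mapLookupString renders a variable name as a lookup on the named template
// map: dot syntax when the name consists only of letters, digits, and
// underscores, and an index expression otherwise. For example,
// mapLookupString("instance", "labels") returns "$labels.instance", while
// mapLookupString("instance with spaces", "labels") returns
// `index $labels "instance with spaces"`.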
func mapLookupString(v string, mapName string) string {
for _, r := range v {
if !(unicode.IsDigit(r) || unicode.IsLetter(r) || r == '_') {
return fmt.Sprintf(`index $%s %s`, mapName, strconv.Quote(v)) // quote v to escape any special characters
}
}
return fmt.Sprintf(`$%s.%s`, mapName, v)
}

View File

@@ -0,0 +1,322 @@
package migration
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/grafana/grafana/pkg/infra/log"
)
func TestTokenString(t *testing.T) {
t1 := Token{Literal: "this is a literal"}
assert.Equal(t, "this is a literal", t1.String())
t2 := Token{Variable: "this is a variable"}
assert.Equal(t, "this is a variable", t2.String())
}
func TestTokenizeVariable(t *testing.T) {
tests := []struct {
name string
text string
token Token
offset int
err string
}{{
name: "variable with no trailing text",
text: "${instance}",
token: Token{Variable: "instance"},
offset: 11,
}, {
name: "variable with trailing text",
text: "${instance} is down",
token: Token{Variable: "instance"},
offset: 11,
}, {
name: "varaiable with numbers",
text: "${instance1} is down",
token: Token{Variable: "instance1"},
offset: 12,
}, {
name: "variable with underscores",
text: "${instance_with_underscores} is down",
token: Token{Variable: "instance_with_underscores"},
offset: 28,
}, {
name: "variable with spaces",
text: "${instance with spaces} is down",
token: Token{Variable: "instance with spaces"},
offset: 23,
}, {
name: "variable with non-reserved special character",
text: "${@instance1} is down",
token: Token{Variable: "@instance1"},
offset: 13,
}, {
name: "two variables without spaces",
text: "${variable1}${variable2}",
token: Token{Variable: "variable1"},
offset: 12,
}, {
name: "variable with two closing braces stops at first brace",
text: "${instance}} is down",
token: Token{Variable: "instance"},
offset: 11,
}, {
name: "variable with newline",
text: "${instance\n} is down",
offset: 10,
err: "unexpected whitespace",
}, {
name: "variable with ambiguous delimiter returns error",
text: "${${instance}",
offset: 2,
err: "ambiguous delimiter",
}, {
name: "variable without closing brace returns error",
text: "${instance is down",
offset: 18,
err: "expected '}', got 'n'",
}}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
token, offset, err := tokenizeVariable([]rune(test.text))
if test.err != "" {
assert.EqualError(t, err, test.err)
}
assert.Equal(t, test.offset, offset)
assert.Equal(t, test.token, token)
})
}
}
func TestTokenizeTmpl(t *testing.T) {
tests := []struct {
name string
tmpl string
tokens []Token
}{{
name: "simple template can be tokenized",
tmpl: "${instance} is down",
tokens: []Token{{Variable: "instance"}, {Literal: " is down"}},
}, {
name: "complex template can be tokenized",
tmpl: "More than ${value} ${status_code} in the last 5 minutes",
tokens: []Token{
{Literal: "More than "},
{Variable: "value"},
{Literal: " "},
{Variable: "status_code"},
{Literal: " in the last 5 minutes"},
},
}, {
name: "variables without spaces between can be tokenized",
tmpl: "${value}${status_code}",
tokens: []Token{{Variable: "value"}, {Variable: "status_code"}},
}, {
name: "variables without spaces between then literal can be tokenized",
tmpl: "${value}${status_code} in the last 5 minutes",
tokens: []Token{{Variable: "value"}, {Variable: "status_code"}, {Literal: " in the last 5 minutes"}},
}, {
name: "variables with reserved characters can be tokenized",
tmpl: "More than ${$value} ${{status_code} in the last 5 minutes",
tokens: []Token{
{Literal: "More than "},
{Variable: "$value"},
{Literal: " "},
{Variable: "{status_code"},
{Literal: " in the last 5 minutes"},
},
}, {
name: "ambiguous delimiters are tokenized as literals",
tmpl: "Instance ${instance and ${instance} is down",
tokens: []Token{{Literal: "Instance ${instance and "}, {Variable: "instance"}, {Literal: " is down"}},
}, {
name: "all '$' runes preceding a variable are included in literal",
tmpl: "Instance $${instance} is down",
tokens: []Token{{Literal: "Instance $"}, {Variable: "instance"}, {Literal: " is down"}},
}, {
name: "sole '$' rune is included in literal",
tmpl: "Instance $instance and ${instance} is down",
tokens: []Token{{Literal: "Instance $instance and "}, {Variable: "instance"}, {Literal: " is down"}},
}, {
name: "extra closing brace is included in literal",
tmpl: "Instance ${instance}} and ${instance} is down",
tokens: []Token{{Literal: "Instance "}, {Variable: "instance"}, {Literal: "} and "}, {Variable: "instance"}, {Literal: " is down"}},
}, {
name: "variable with newline tokenized as literal",
tmpl: "${value}${status_code\n}${value} in the last 5 minutes",
tokens: []Token{{Variable: "value"}, {Literal: "${status_code\n}"}, {Variable: "value"}, {Literal: " in the last 5 minutes"}},
}, {
name: "extra closing brace between variables is included in literal",
tmpl: "${value}${status_code}}${value} in the last 5 minutes",
tokens: []Token{{Variable: "value"}, {Variable: "status_code"}, {Literal: "}"}, {Variable: "value"}, {Literal: " in the last 5 minutes"}},
}}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
tokens := tokenizeTmpl(log.NewNopLogger(), test.tmpl)
assert.Equal(t, test.tokens, tokens)
})
}
}
func TestTokensToTmpl(t *testing.T) {
tokens := []Token{{Variable: "instance"}, {Literal: " is down"}}
assert.Equal(t, "{{instance}} is down", tokensToTmpl(tokens))
}
func TestTokensToTmplNewlines(t *testing.T) {
tokens := []Token{{Variable: "instance"}, {Literal: " is down\n"}, {Variable: "job"}, {Literal: " is down"}}
assert.Equal(t, "{{instance}} is down\n{{job}} is down", tokensToTmpl(tokens))
}
func TestMapLookupString(t *testing.T) {
cases := []struct {
name string
input string
expected string
}{
{
name: "when there are no spaces",
input: "instance",
expected: "$labels.instance",
},
{
name: "when there are spaces",
input: "instance with spaces",
expected: `index $labels "instance with spaces"`,
},
{
name: "when there are quotes",
input: `instance with "quotes"`,
expected: `index $labels "instance with \"quotes\""`,
},
{
name: "when there are backslashes",
input: `instance with \backslashes\`,
expected: `index $labels "instance with \\backslashes\\"`,
},
{
name: "when there are legacy delimiter characters",
input: `instance{ with $delim} characters`,
expected: `index $labels "instance{ with $delim} characters"`,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
assert.Equal(t, tc.expected, mapLookupString(tc.input, "labels"))
})
}
}
func TestVariablesToMapLookups(t *testing.T) {
tokens := []Token{{Variable: "instance"}, {Literal: " is down"}}
expected := []Token{{Variable: "$labels.instance"}, {Literal: " is down"}}
assert.Equal(t, expected, variablesToMapLookups(tokens, "labels"))
}
func TestVariablesToMapLookupsSpace(t *testing.T) {
tokens := []Token{{Variable: "instance with spaces"}, {Literal: " is down"}}
expected := []Token{{Variable: "index $labels \"instance with spaces\""}, {Literal: " is down"}}
assert.Equal(t, expected, variablesToMapLookups(tokens, "labels"))
}
func TestEscapeLiterals(t *testing.T) {
cases := []struct {
name string
input []Token
expected []Token
}{
{
name: "when there are no literals",
input: []Token{{Variable: "instance"}},
expected: []Token{{Variable: "instance"}},
},
{
name: "literal with double braces: {{",
input: []Token{{Literal: "instance {{"}},
expected: []Token{{Literal: "{{`instance {{`}}"}},
},
{
name: "literal that ends with closing brace: {",
input: []Token{{Literal: "instance {"}},
expected: []Token{{Literal: "{{`instance {`}}"}},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
assert.Equal(t, tc.expected, escapeLiterals(tc.input))
})
}
}
func TestMigrateTmpl(t *testing.T) {
cases := []struct {
name string
input string
expected string
vars bool
}{
{
name: "template does not contain variables",
input: "instance is down",
expected: "instance is down",
vars: false,
},
{
name: "template contains variable",
input: "${instance} is down",
expected: withDeduplicateMap("{{$mergedLabels.instance}} is down"),
vars: true,
},
{
name: "template contains double braces",
input: "{{CRITICAL}} instance is down",
expected: "{{`{{CRITICAL}} instance is down`}}",
vars: false,
},
{
name: "template contains opening brace before variable",
input: `${${instance} is down`,
expected: withDeduplicateMap("{{`${`}}{{$mergedLabels.instance}} is down"),
vars: true,
},
{
name: "template contains newline",
input: "CRITICAL\n${instance} is down",
expected: withDeduplicateMap("CRITICAL\n{{$mergedLabels.instance}} is down"),
vars: true,
},
{
name: "partial migration, no variables",
input: "${instance is down",
expected: "${instance is down",
},
{
name: "partial migration, with variables",
input: "${instance} is down ${${nestedVar}}",
expected: withDeduplicateMap("{{$mergedLabels.instance}}{{` is down ${`}}{{$mergedLabels.nestedVar}}}"),
vars: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
tmpl := MigrateTmpl(log.NewNopLogger(), tc.input)
assert.Equal(t, tc.expected, tmpl)
})
}
}
func withDeduplicateMap(input string) string {
// hardcode the function name so that tests fail if it changes
funcName := "mergeLabelValues"
return fmt.Sprintf("{{- $mergedLabels := %s $values -}}\n", funcName) + input
}

View File

@@ -0,0 +1,29 @@
package migration
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/serverlock"
"github.com/grafana/grafana/pkg/infra/tracing"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
fake_secrets "github.com/grafana/grafana/pkg/services/secrets/fakes"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"
)
func NewTestMigrationService(t *testing.T, sqlStore *sqlstore.SQLStore, cfg *setting.Cfg) *MigrationService {
if cfg == nil {
cfg = setting.NewCfg()
}
ms, err := ProvideService(
serverlock.ProvideService(sqlStore, tracing.InitializeTracerForTest()),
cfg,
sqlStore,
migrationStore.NewTestMigrationStore(t, sqlStore, cfg),
fake_secrets.NewFakeSecretsService(),
)
require.NoError(t, err)
return ms
}
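// A typical call site in the migration tests below looks like:
//
//	sqlStore := db.InitTestDB(t)
//	service := NewTestMigrationService(t, sqlStore, nil)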

View File

@@ -0,0 +1,115 @@
package migration
import (
"context"
"fmt"
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
migrationStore "github.com/grafana/grafana/pkg/services/ngalert/migration/store"
"github.com/grafana/grafana/pkg/services/ngalert/models"
)
func (om *OrgMigration) migrateAlerts(ctx context.Context, alerts []*migrationStore.DashAlert, dashboardUID string, folderUID string) ([]*AlertPair, error) {
log := om.log.New(
"dashboardUID", dashboardUID,
"newFolderUID", folderUID,
)
pairs := make([]*AlertPair, 0, len(alerts))
for _, da := range alerts {
al := log.New("ruleID", da.ID, "ruleName", da.Name)
alertRule, err := om.migrateAlert(ctx, al, da, dashboardUID, folderUID)
if err != nil {
return nil, fmt.Errorf("migrate alert: %w", err)
}
pairs = append(pairs, &AlertPair{AlertRule: alertRule, DashAlert: da})
}
return pairs, nil
}
func (om *OrgMigration) migrateDashboard(ctx context.Context, dashID int64, alerts []*migrationStore.DashAlert) ([]*AlertPair, error) {
dash, newFolder, err := om.getOrCreateMigratedFolder(ctx, om.log, dashID)
if err != nil {
return nil, fmt.Errorf("get or create migrated folder: %w", err)
}
pairs, err := om.migrateAlerts(ctx, alerts, dash.UID, newFolder.UID)
if err != nil {
return nil, fmt.Errorf("migrate and save alerts: %w", err)
}
return pairs, nil
}
func (om *OrgMigration) migrateOrgAlerts(ctx context.Context) ([]*AlertPair, error) {
mappedAlerts, cnt, err := om.migrationStore.GetOrgDashboardAlerts(ctx, om.orgID)
if err != nil {
return nil, fmt.Errorf("load alerts: %w", err)
}
om.log.Info("Alerts found to migrate", "alerts", cnt)
pairs := make([]*AlertPair, 0, cnt)
for dashID, alerts := range mappedAlerts {
dashPairs, err := om.migrateDashboard(ctx, dashID, alerts)
if err != nil {
return nil, fmt.Errorf("migrate and save dashboard '%d': %w", dashID, err)
}
pairs = append(pairs, dashPairs...)
}
return pairs, nil
}
func (om *OrgMigration) migrateOrgChannels(ctx context.Context, pairs []*AlertPair) (*apimodels.PostableUserConfig, error) {
channels, err := om.migrationStore.GetNotificationChannels(ctx, om.orgID)
if err != nil {
return nil, fmt.Errorf("load notification channels: %w", err)
}
amConfig, err := om.migrateChannels(channels, pairs)
if err != nil {
return nil, err
}
return amConfig, nil
}
func (om *OrgMigration) migrateOrg(ctx context.Context) error {
om.log.Info("Migrating alerts for organisation")
pairs, err := om.migrateOrgAlerts(ctx)
if err != nil {
return fmt.Errorf("migrate alerts: %w", err)
}
// This must happen before we insert the rules into the database because it modifies the alert labels. This will
// be changed in the future when we improve how notification policies are created.
amConfig, err := om.migrateOrgChannels(ctx, pairs)
if err != nil {
return fmt.Errorf("migrate channels: %w", err)
}
if err := om.writeSilencesFile(); err != nil {
return fmt.Errorf("write silence file for org %d: %w", om.orgID, err)
}
if len(pairs) > 0 {
om.log.Debug("Inserting migrated alert rules", "count", len(pairs))
rules := make([]models.AlertRule, 0, len(pairs))
for _, p := range pairs {
rules = append(rules, *p.AlertRule)
}
err := om.migrationStore.InsertAlertRules(ctx, rules...)
if err != nil {
return fmt.Errorf("insert alert rules: %w", err)
}
}
if amConfig != nil {
if err := om.migrationStore.SaveAlertmanagerConfiguration(ctx, om.orgID, amConfig); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,160 @@
package migration
import (
"encoding/json"
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/dashboards"
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
"github.com/grafana/grafana/pkg/util"
)
func Test_validateAlertmanagerConfig(t *testing.T) {
tc := []struct {
name string
receivers []*apimodels.PostableGrafanaReceiver
err error
}{
{
name: "when a slack receiver does not have a valid URL - it should error",
receivers: []*apimodels.PostableGrafanaReceiver{
{
UID: "test-uid",
Name: "SlackWithBadURL",
Type: "slack",
Settings: mustRawMessage(map[string]any{}),
SecureSettings: map[string]string{"url": invalidUri},
},
},
err: fmt.Errorf("failed to validate integration \"SlackWithBadURL\" (UID test-uid) of type \"slack\": invalid URL %q", invalidUri),
},
{
name: "when a slack receiver has an invalid recipient - it should not error",
receivers: []*apimodels.PostableGrafanaReceiver{
{
UID: util.GenerateShortUID(),
Name: "SlackWithBadRecipient",
Type: "slack",
Settings: mustRawMessage(map[string]any{"recipient": "this passes"}),
SecureSettings: map[string]string{"url": "http://webhook.slack.com/myuser"},
},
},
},
{
name: "when the configuration is valid - it should not error",
receivers: []*apimodels.PostableGrafanaReceiver{
{
UID: util.GenerateShortUID(),
Name: "SlackWithBadURL",
Type: "slack",
Settings: mustRawMessage(map[string]any{"recipient": "#a-good-channel"}),
SecureSettings: map[string]string{"url": "http://webhook.slack.com/myuser"},
},
},
},
}
sqlStore := db.InitTestDB(t)
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
service := NewTestMigrationService(t, sqlStore, nil)
mg := service.newOrgMigration(1)
config := configFromReceivers(t, tt.receivers)
require.NoError(t, encryptSecureSettings(config, mg)) // make sure we encrypt the settings
err := mg.validateAlertmanagerConfig(config)
if tt.err != nil {
require.Error(t, err)
require.EqualError(t, err, tt.err.Error())
} else {
require.NoError(t, err)
}
})
}
}
func configFromReceivers(t *testing.T, receivers []*apimodels.PostableGrafanaReceiver) *apimodels.PostableUserConfig {
t.Helper()
return &apimodels.PostableUserConfig{
AlertmanagerConfig: apimodels.PostableApiAlertingConfig{
Receivers: []*apimodels.PostableApiReceiver{
{PostableGrafanaReceivers: apimodels.PostableGrafanaReceivers{GrafanaManagedReceivers: receivers}},
},
},
}
}
func encryptSecureSettings(c *apimodels.PostableUserConfig, m *OrgMigration) error {
for _, r := range c.AlertmanagerConfig.Receivers {
for _, gr := range r.GrafanaManagedReceivers {
err := m.encryptSecureSettings(gr.SecureSettings)
if err != nil {
return err
}
}
}
return nil
}
const invalidUri = "<22>6<EFBFBD>M<EFBFBD><4D>)uk譹1(<28>h`$<24>o<EFBFBD>N>mĕ<6D><C495><EFBFBD><EFBFBD>cS2<53>dh![ę<> <09><><EFBFBD>`csB<73>!<21><>OSxP<78>{<7B>"
func Test_getAlertFolderNameFromDashboard(t *testing.T) {
t.Run("should include full title", func(t *testing.T) {
dash := &dashboards.Dashboard{
UID: util.GenerateShortUID(),
Title: "TEST",
}
folder := getAlertFolderNameFromDashboard(dash)
require.Contains(t, folder, dash.Title)
require.Contains(t, folder, dash.UID)
})
t.Run("should cut title to the length", func(t *testing.T) {
title := ""
for {
title += util.GenerateShortUID()
if len(title) > MaxFolderName {
title = title[:MaxFolderName]
break
}
}
dash := &dashboards.Dashboard{
UID: util.GenerateShortUID(),
Title: title,
}
folder := getAlertFolderNameFromDashboard(dash)
require.Len(t, folder, MaxFolderName)
require.Contains(t, folder, dash.UID)
})
}
func Test_shortUIDCaseInsensitiveConflicts(t *testing.T) {
s := Deduplicator{
set: make(map[string]struct{}),
caseInsensitive: true,
}
// 10000 UIDs seem to be enough to cause a case-insensitive collision in almost every run when using util.GenerateShortUID directly.
for i := 0; i < 10000; i++ {
s.add(util.GenerateShortUID())
}
// check if any are case-insensitive duplicates.
deduped := make(map[string]struct{})
for k := range s.set {
deduped[strings.ToLower(k)] = struct{}{}
}
require.Equal(t, len(s.set), len(deduped))
}
func mustRawMessage[T any](s T) apimodels.RawMessage {
js, _ := json.Marshal(s)
return js
}

View File

@@ -319,6 +319,7 @@ func subscribeToFolderChanges(logger log.Logger, bus bus.Bus, dbStore api.RuleSt
// Run starts the scheduler and Alertmanager.
func (ng *AlertNG) Run(ctx context.Context) error {
ng.Log.Debug("Starting")
ng.stateManager.Warm(ctx, ng.store)
children, subCtx := errgroup.WithContext(ctx)

View File

@@ -13,6 +13,7 @@ import (
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/ngalert/metrics"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/ngalert/tests/fakes"
"github.com/grafana/grafana/pkg/services/secrets/database"
secretsManager "github.com/grafana/grafana/pkg/services/secrets/manager"
"github.com/grafana/grafana/pkg/setting"
@@ -37,7 +38,7 @@ func setupAMTest(t *testing.T) *alertmanager {
DashboardService: dashboards.NewFakeDashboardService(t),
}
kvStore := NewFakeKVStore(t)
kvStore := fakes.NewFakeKVStore(t)
secretsService := secretsManager.SetupTestService(t, database.ProvideSecretsStore(sqlStore))
decryptFn := secretsService.GetDecryptedValue
am, err := newAlertmanager(context.Background(), 1, cfg, s, kvStore, &NilPeer{}, decryptFn, nil, m)

View File

@@ -1,6 +1,7 @@
package channels_config
import (
"fmt"
"os"
alertingOpsgenie "github.com/grafana/alerting/receivers/opsgenie"
@@ -1340,3 +1341,20 @@ func GetAvailableNotifiers() []*NotifierPlugin {
},
}
}
// GetSecretKeysForContactPointType returns the settings keys of the given contact point type that are expected to be secrets. It returns an error if the contact point type is not known.
func GetSecretKeysForContactPointType(contactPointType string) ([]string, error) {
notifiers := GetAvailableNotifiers()
for _, n := range notifiers {
if n.Type == contactPointType {
var secureFields []string
for _, field := range n.Options {
if field.Secure {
secureFields = append(secureFields, field.PropertyName)
}
}
return secureFields, nil
}
}
return nil, fmt.Errorf("no secrets configured for type '%s'", contactPointType)
}
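// A minimal usage sketch; the secure field names per notifier type come from
// GetAvailableNotifiers, and "url"/"token" for Slack are assumptions here:
//
//	keys, err := GetSecretKeysForContactPointType("slack")
//	if err != nil {
//		// unknown contact point type
//	}
//	// keys would contain Slack's secure settings, e.g. "url" and "token"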

View File

@@ -7,10 +7,12 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/services/ngalert/tests/fakes"
)
func TestFileStore_FilepathFor_DirectoryNotExist(t *testing.T) {
store := NewFakeKVStore(t)
store := fakes.NewFakeKVStore(t)
workingDir := filepath.Join(t.TempDir(), "notexistdir")
fs := NewFileStore(1, store, workingDir)
filekey := "silences"
@@ -31,7 +33,7 @@ func TestFileStore_FilepathFor_DirectoryNotExist(t *testing.T) {
}
}
func TestFileStore_FilepathFor(t *testing.T) {
store := NewFakeKVStore(t)
store := fakes.NewFakeKVStore(t)
workingDir := t.TempDir()
fs := NewFileStore(1, store, workingDir)
filekey := "silences"
@@ -73,7 +75,7 @@ func TestFileStore_FilepathFor(t *testing.T) {
}
func TestFileStore_Persist(t *testing.T) {
store := NewFakeKVStore(t)
store := fakes.NewFakeKVStore(t)
state := &fakeState{data: "something to marshal"}
workingDir := t.TempDir()
fs := NewFileStore(1, store, workingDir)
@@ -82,9 +84,9 @@ func TestFileStore_Persist(t *testing.T) {
size, err := fs.Persist(context.Background(), filekey, state)
require.NoError(t, err)
require.Equal(t, int64(20), size)
store.mtx.Lock()
require.Len(t, store.store, 1)
store.mtx.Unlock()
store.Mtx.Lock()
require.Len(t, store.Store, 1)
store.Mtx.Unlock()
v, ok, err := store.Get(context.Background(), 1, KVNamespace, filekey)
require.NoError(t, err)
require.True(t, ok)

View File

@@ -19,6 +19,7 @@ import (
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/provisioning"
"github.com/grafana/grafana/pkg/services/ngalert/store"
ngfakes "github.com/grafana/grafana/pkg/services/ngalert/tests/fakes"
"github.com/grafana/grafana/pkg/services/secrets/fakes"
secretsManager "github.com/grafana/grafana/pkg/services/secrets/manager"
"github.com/grafana/grafana/pkg/setting"
@@ -31,7 +32,7 @@ func TestMultiOrgAlertmanager_SyncAlertmanagersForOrgs(t *testing.T) {
}
tmpDir := t.TempDir()
kvStore := NewFakeKVStore(t)
kvStore := ngfakes.NewFakeKVStore(t)
provStore := provisioning.NewFakeProvisioningStore()
secretsService := secretsManager.SetupTestService(t, fakes.NewFakeSecretsStore())
decryptFn := secretsService.GetDecryptedValue
@@ -165,7 +166,7 @@ func TestMultiOrgAlertmanager_SyncAlertmanagersForOrgsWithFailures(t *testing.T)
}
tmpDir := t.TempDir()
kvStore := NewFakeKVStore(t)
kvStore := ngfakes.NewFakeKVStore(t)
provStore := provisioning.NewFakeProvisioningStore()
secretsService := secretsManager.SetupTestService(t, fakes.NewFakeSecretsStore())
decryptFn := secretsService.GetDecryptedValue
@@ -259,7 +260,7 @@ func TestMultiOrgAlertmanager_AlertmanagerFor(t *testing.T) {
DataPath: tmpDir,
UnifiedAlerting: setting.UnifiedAlertingSettings{AlertmanagerConfigPollInterval: 3 * time.Minute, DefaultConfiguration: setting.GetAlertmanagerDefaultConfiguration()}, // do not poll in tests.
}
kvStore := NewFakeKVStore(t)
kvStore := ngfakes.NewFakeKVStore(t)
provStore := provisioning.NewFakeProvisioningStore()
secretsService := secretsManager.SetupTestService(t, fakes.NewFakeSecretsStore())
decryptFn := secretsService.GetDecryptedValue
@@ -310,7 +311,7 @@ func TestMultiOrgAlertmanager_ActivateHistoricalConfiguration(t *testing.T) {
DataPath: tmpDir,
UnifiedAlerting: setting.UnifiedAlertingSettings{AlertmanagerConfigPollInterval: 3 * time.Minute, DefaultConfiguration: defaultConfig}, // do not poll in tests.
}
kvStore := NewFakeKVStore(t)
kvStore := ngfakes.NewFakeKVStore(t)
provStore := provisioning.NewFakeProvisioningStore()
secretsService := secretsManager.SetupTestService(t, fakes.NewFakeSecretsStore())
decryptFn := secretsService.GetDecryptedValue

View File

@@ -5,12 +5,9 @@ import (
"crypto/md5"
"errors"
"fmt"
"strings"
"sync"
"testing"
"time"
"github.com/grafana/grafana/pkg/infra/kvstore"
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/store"
@@ -195,98 +192,6 @@ func (f *FakeOrgStore) GetOrgs(_ context.Context) ([]int64, error) {
return f.orgs, nil
}
type FakeKVStore struct {
mtx sync.Mutex
store map[int64]map[string]map[string]string
}
func NewFakeKVStore(t *testing.T) *FakeKVStore {
t.Helper()
return &FakeKVStore{
store: map[int64]map[string]map[string]string{},
}
}
func (fkv *FakeKVStore) Get(_ context.Context, orgId int64, namespace string, key string) (string, bool, error) {
fkv.mtx.Lock()
defer fkv.mtx.Unlock()
org, ok := fkv.store[orgId]
if !ok {
return "", false, nil
}
k, ok := org[namespace]
if !ok {
return "", false, nil
}
v, ok := k[key]
if !ok {
return "", false, nil
}
return v, true, nil
}
func (fkv *FakeKVStore) Set(_ context.Context, orgId int64, namespace string, key string, value string) error {
fkv.mtx.Lock()
defer fkv.mtx.Unlock()
org, ok := fkv.store[orgId]
if !ok {
fkv.store[orgId] = map[string]map[string]string{}
}
_, ok = org[namespace]
if !ok {
fkv.store[orgId][namespace] = map[string]string{}
}
fkv.store[orgId][namespace][key] = value
return nil
}
func (fkv *FakeKVStore) Del(_ context.Context, orgId int64, namespace string, key string) error {
fkv.mtx.Lock()
defer fkv.mtx.Unlock()
org, ok := fkv.store[orgId]
if !ok {
return nil
}
_, ok = org[namespace]
if !ok {
return nil
}
delete(fkv.store[orgId][namespace], key)
return nil
}
func (fkv *FakeKVStore) Keys(ctx context.Context, orgID int64, namespace string, keyPrefix string) ([]kvstore.Key, error) {
fkv.mtx.Lock()
defer fkv.mtx.Unlock()
var keys []kvstore.Key
for orgIDFromStore, namespaceMap := range fkv.store {
if orgID != kvstore.AllOrganizations && orgID != orgIDFromStore {
continue
}
if keyMap, exists := namespaceMap[namespace]; exists {
for k := range keyMap {
if strings.HasPrefix(k, keyPrefix) {
keys = append(keys, kvstore.Key{
OrgId: orgIDFromStore,
Namespace: namespace,
Key: keyPrefix,
})
}
}
}
}
return keys, nil
}
func (fkv *FakeKVStore) GetAll(ctx context.Context, orgId int64, namespace string) (map[int64]map[string]string, error) {
return nil, nil
}
type fakeState struct {
data string
}

View File

@@ -242,7 +242,7 @@ func (ecp *ContactPointService) UpdateContactPoint(ctx context.Context, orgID in
if err != nil {
return err
}
secretKeys, err := GetSecretKeysForContactPointType(contactPoint.Type)
secretKeys, err := channels_config.GetSecretKeysForContactPointType(contactPoint.Type)
if err != nil {
return fmt.Errorf("%w: %s", ErrValidation, err.Error())
}
@@ -531,27 +531,10 @@ func ValidateContactPoint(ctx context.Context, e apimodels.EmbeddedContactPoint,
return nil
}
// GetSecretKeysForContactPointType returns the settings keys of the given contact point type that are expected to be secrets. It returns an error if the contact point type is not known.
func GetSecretKeysForContactPointType(contactPointType string) ([]string, error) {
notifiers := channels_config.GetAvailableNotifiers()
for _, n := range notifiers {
if n.Type == contactPointType {
var secureFields []string
for _, field := range n.Options {
if field.Secure {
secureFields = append(secureFields, field.PropertyName)
}
}
return secureFields, nil
}
}
return nil, fmt.Errorf("no secrets configured for type '%s'", contactPointType)
}
// RemoveSecretsForContactPoint removes all secrets from the contact point's settings and returns them as a map. Returns error if contact point type is not known.
func RemoveSecretsForContactPoint(e *apimodels.EmbeddedContactPoint) (map[string]string, error) {
s := map[string]string{}
secretKeys, err := GetSecretKeysForContactPointType(e.Type)
secretKeys, err := channels_config.GetSecretKeysForContactPointType(e.Type)
if err != nil {
return nil, err
}

View File

@@ -25,6 +25,7 @@ import (
"github.com/grafana/grafana/pkg/services/ngalert/notifier"
"github.com/grafana/grafana/pkg/services/ngalert/provisioning"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/ngalert/tests/fakes"
fake_secrets "github.com/grafana/grafana/pkg/services/secrets/fakes"
secretsManager "github.com/grafana/grafana/pkg/services/secrets/manager"
"github.com/grafana/grafana/pkg/setting"
@@ -405,7 +406,7 @@ func createMultiOrgAlertmanager(t *testing.T, orgs []int64) *notifier.MultiOrgAl
}
cfgStore := notifier.NewFakeConfigStore(t, make(map[int64]*models.AlertConfiguration))
kvStore := notifier.NewFakeKVStore(t)
kvStore := fakes.NewFakeKVStore(t)
registry := prometheus.NewPedanticRegistry()
m := metrics.NewNGAlert(registry)
secretsService := secretsManager.SetupTestService(t, fake_secrets.NewFakeSecretsStore())

View File

@@ -0,0 +1,102 @@
package fakes
import (
"context"
"strings"
"sync"
"testing"
"github.com/grafana/grafana/pkg/infra/kvstore"
)
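// FakeKVStore is a simple in-memory key-value store for tests, keyed by org
// ID, namespace, and key. It is safe for concurrent use via Mtx.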
type FakeKVStore struct {
Mtx sync.Mutex
Store map[int64]map[string]map[string]string
}
func NewFakeKVStore(t *testing.T) *FakeKVStore {
t.Helper()
return &FakeKVStore{
Store: map[int64]map[string]map[string]string{},
}
}
func (fkv *FakeKVStore) Get(_ context.Context, orgId int64, namespace string, key string) (string, bool, error) {
fkv.Mtx.Lock()
defer fkv.Mtx.Unlock()
org, ok := fkv.Store[orgId]
if !ok {
return "", false, nil
}
k, ok := org[namespace]
if !ok {
return "", false, nil
}
v, ok := k[key]
if !ok {
return "", false, nil
}
return v, true, nil
}
func (fkv *FakeKVStore) Set(_ context.Context, orgId int64, namespace string, key string, value string) error {
fkv.Mtx.Lock()
defer fkv.Mtx.Unlock()
org, ok := fkv.Store[orgId]
if !ok {
fkv.Store[orgId] = map[string]map[string]string{}
}
_, ok = org[namespace]
if !ok {
fkv.Store[orgId][namespace] = map[string]string{}
}
fkv.Store[orgId][namespace][key] = value
return nil
}
func (fkv *FakeKVStore) Del(_ context.Context, orgId int64, namespace string, key string) error {
fkv.Mtx.Lock()
defer fkv.Mtx.Unlock()
org, ok := fkv.Store[orgId]
if !ok {
return nil
}
_, ok = org[namespace]
if !ok {
return nil
}
delete(fkv.Store[orgId][namespace], key)
return nil
}
func (fkv *FakeKVStore) Keys(ctx context.Context, orgID int64, namespace string, keyPrefix string) ([]kvstore.Key, error) {
fkv.Mtx.Lock()
defer fkv.Mtx.Unlock()
var keys []kvstore.Key
for orgIDFromStore, namespaceMap := range fkv.Store {
if orgID != kvstore.AllOrganizations && orgID != orgIDFromStore {
continue
}
if keyMap, exists := namespaceMap[namespace]; exists {
for k := range keyMap {
if strings.HasPrefix(k, keyPrefix) {
keys = append(keys, kvstore.Key{
OrgId: orgIDFromStore,
Namespace: namespace,
Key: k,
})
}
}
}
}
return keys, nil
}
func (fkv *FakeKVStore) GetAll(ctx context.Context, orgId int64, namespace string) (map[int64]map[string]string, error) {
return nil, nil
}
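// A minimal round-trip sketch of how tests can use this fake (hypothetical
// test function, assuming the testify require package):
//
//	func TestFakeKVStore_RoundTrip(t *testing.T) {
//		kv := NewFakeKVStore(t)
//		ctx := context.Background()
//		require.NoError(t, kv.Set(ctx, 1, "alertmanager", "silences", "{}"))
//		v, ok, err := kv.Get(ctx, 1, "alertmanager", "silences")
//		require.NoError(t, err)
//		require.True(t, ok)
//		require.Equal(t, "{}", v)
//	}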