Mirror of https://github.com/grafana/grafana.git (synced 2025-02-25 18:55:37 -06:00)
Alerting: modify DB table, accessors and migration to restrict org access (#37414)
* Alerting: modify table and accessors to limit org access appropriately

* Update migration to create multiple Alertmanager configs

* Apply suggestions from code review

Co-authored-by: gotjosh <josue@grafana.com>

* replace mg.ClearMigrationEntry()

mg.ClearMigrationEntry() would create a new session. This commit introduces a new migration for clearing an entry from the migration log, replacing mg.ClearMigrationEntry() so that all dashboard alert migration operations run inside the same transaction. It also adds `SkipMigrationLog()` to the Migration interface so that a migration can skip recording an entry in the migration_log.

Co-authored-by: gotjosh <josue@grafana.com>
Committed by: GitHub
Parent: 4a9fdb8b76
Commit: 04d5dcb7c8
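The mechanism described in the commit message -- a migration that can opt out of the migration log so it can run again on a later start-up -- can be sketched roughly as below. This is a minimal, hypothetical stand-in (the names Migration, MigrationBase, clearEntryMigration and run are simplified for illustration), not Grafana's actual migrator types; the real interface change and the clearMigrationEntry / SkipMigrationLog implementations appear in the diff that follows.

package main

import "fmt"

// Migration is a trimmed-down stand-in for the migrator's Migration interface;
// only the part relevant to this change is shown.
type Migration interface {
	ID() string
	// SkipMigrationLog reports whether the migrator should avoid recording this
	// migration in migration_log, so it can run again on a later start-up.
	SkipMigrationLog() bool
}

// MigrationBase keeps the default behaviour: every migration is recorded.
type MigrationBase struct{ id string }

func (m MigrationBase) ID() string             { return m.id }
func (m MigrationBase) SkipMigrationLog() bool { return false }

// clearEntryMigration mirrors the idea behind clearMigrationEntry in this commit:
// it deletes a row from migration_log and opts out of being logged itself.
type clearEntryMigration struct {
	MigrationBase
	target string // migration_log entry to delete (illustrative only)
}

func (m clearEntryMigration) SkipMigrationLog() bool { return true }

// run sketches the migrator loop: execute each migration, then record it only
// if it does not ask to be skipped.
func run(migs []Migration, log map[string]bool) {
	for _, m := range migs {
		// ... execute the migration inside the shared transaction ...
		if !m.SkipMigrationLog() {
			log[m.ID()] = true
		}
	}
}

func main() {
	log := map[string]bool{}
	run([]Migration{
		MigrationBase{id: "add column org_id in alert_configuration"},
		clearEntryMigration{MigrationBase: MigrationBase{id: "clear migration entry"}, target: "remove unified alerting data"},
	}, log)
	fmt.Println(log) // only the org_id migration is recorded
}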
@@ -22,8 +22,10 @@ var timeNow = time.Now

type Alertmanager interface {
// Configuration
SaveAndApplyConfig(config *apimodels.PostableUserConfig) error
SaveAndApplyDefaultConfig() error
// temporary add orgID parameter; this will move to the Alertmanager wrapper when it will be available
SaveAndApplyConfig(orgID int64, config *apimodels.PostableUserConfig) error
// temporary add orgID parameter; this will move to the Alertmanager wrapper when it will be available
SaveAndApplyDefaultConfig(orgID int64) error
GetStatus() apimodels.GettableStatus

// Silences
@@ -48,7 +48,7 @@ func (srv AlertmanagerSrv) RouteDeleteAlertingConfig(c *models.ReqContext) respo
if !c.HasUserRole(models.ROLE_EDITOR) {
return ErrResp(http.StatusForbidden, errors.New("permission denied"), "")
}
if err := srv.am.SaveAndApplyDefaultConfig(); err != nil {
if err := srv.am.SaveAndApplyDefaultConfig(c.OrgId); err != nil {
srv.log.Error("unable to save and apply default alertmanager configuration", "err", err)
return ErrResp(http.StatusInternalServerError, err, "failed to save and apply default Alertmanager configuration")
}
@@ -74,7 +74,8 @@ func (srv AlertmanagerSrv) RouteGetAlertingConfig(c *models.ReqContext) response
if !c.HasUserRole(models.ROLE_EDITOR) {
return ErrResp(http.StatusForbidden, errors.New("permission denied"), "")
}
query := ngmodels.GetLatestAlertmanagerConfigurationQuery{}

query := ngmodels.GetLatestAlertmanagerConfigurationQuery{OrgID: c.OrgId}
if err := srv.store.GetLatestAlertmanagerConfiguration(&query); err != nil {
if errors.Is(err, store.ErrNoAlertmanagerConfiguration) {
return ErrResp(http.StatusNotFound, err, "")
@@ -201,7 +202,7 @@ func (srv AlertmanagerSrv) RoutePostAlertingConfig(c *models.ReqContext, body ap
}

// Get the last known working configuration
query := ngmodels.GetLatestAlertmanagerConfigurationQuery{}
query := ngmodels.GetLatestAlertmanagerConfigurationQuery{OrgID: c.OrgId}
if err := srv.store.GetLatestAlertmanagerConfiguration(&query); err != nil {
// If we don't have a configuration there's nothing for us to know and we should just continue saving the new one
if !errors.Is(err, store.ErrNoAlertmanagerConfiguration) {
@@ -255,7 +256,7 @@ func (srv AlertmanagerSrv) RoutePostAlertingConfig(c *models.ReqContext, body ap
return ErrResp(http.StatusInternalServerError, err, "failed to post process Alertmanager configuration")
}

if err := srv.am.SaveAndApplyConfig(&body); err != nil {
if err := srv.am.SaveAndApplyConfig(c.OrgId, &body); err != nil {
srv.log.Error("unable to save and apply alertmanager configuration", "err", err)
return ErrResp(http.StatusBadRequest, err, "failed to save and apply Alertmanager configuration")
}
@@ -10,10 +10,12 @@ type AlertConfiguration struct {
ConfigurationVersion string
CreatedAt int64 `xorm:"created"`
Default bool
OrgID int64 `xorm:"org_id"`
}

// GetLatestAlertmanagerConfigurationQuery is the query to get the latest alertmanager configuration.
type GetLatestAlertmanagerConfigurationQuery struct {
OrgID int64
Result *AlertConfiguration
}

@@ -22,8 +24,5 @@ type SaveAlertmanagerConfigurationCmd struct {
AlertmanagerConfiguration string
ConfigurationVersion string
Default bool
}

type DeleteAlertmanagerConfigurationCmd struct {
ID int64
OrgID int64
}
@@ -9,6 +9,7 @@ import (
"fmt"
"net/url"
"path/filepath"
"strconv"
"sync"
"time"

@@ -72,6 +73,8 @@ const (
}
}
`
//TODO: temporary until fix org isolation
mainOrgID = 1
)

type Alertmanager struct {
@@ -168,7 +171,7 @@ func (am *Alertmanager) Ready() bool {

func (am *Alertmanager) Run(ctx context.Context) error {
// Make sure dispatcher starts. We can tolerate future reload failures.
if err := am.SyncAndApplyConfigFromDatabase(); err != nil {
if err := am.SyncAndApplyConfigFromDatabase(mainOrgID); err != nil {
am.logger.Error("unable to sync configuration", "err", err)
}

@@ -177,7 +180,7 @@ func (am *Alertmanager) Run(ctx context.Context) error {
case <-ctx.Done():
return am.StopAndWait()
case <-time.After(pollInterval):
if err := am.SyncAndApplyConfigFromDatabase(); err != nil {
if err := am.SyncAndApplyConfigFromDatabase(mainOrgID); err != nil {
am.logger.Error("unable to sync configuration", "err", err)
}
}
@@ -203,7 +206,7 @@ func (am *Alertmanager) StopAndWait() error {

// SaveAndApplyDefaultConfig saves the default configuration the database and applies the configuration to the Alertmanager.
// It rollbacks the save if we fail to apply the configuration.
func (am *Alertmanager) SaveAndApplyDefaultConfig() error {
func (am *Alertmanager) SaveAndApplyDefaultConfig(orgID int64) error {
am.reloadConfigMtx.Lock()
defer am.reloadConfigMtx.Unlock()

@@ -211,6 +214,7 @@ func (am *Alertmanager) SaveAndApplyDefaultConfig() error {
AlertmanagerConfiguration: alertmanagerDefaultConfiguration,
Default: true,
ConfigurationVersion: fmt.Sprintf("v%d", ngmodels.AlertConfigurationVersion),
OrgID: orgID,
}

cfg, err := Load([]byte(alertmanagerDefaultConfiguration))
@@ -234,7 +238,7 @@ func (am *Alertmanager) SaveAndApplyDefaultConfig() error {

// SaveAndApplyConfig saves the configuration the database and applies the configuration to the Alertmanager.
// It rollbacks the save if we fail to apply the configuration.
func (am *Alertmanager) SaveAndApplyConfig(cfg *apimodels.PostableUserConfig) error {
func (am *Alertmanager) SaveAndApplyConfig(orgID int64, cfg *apimodels.PostableUserConfig) error {
rawConfig, err := json.Marshal(&cfg)
if err != nil {
return fmt.Errorf("failed to serialize to the Alertmanager configuration: %w", err)
@@ -246,6 +250,7 @@ func (am *Alertmanager) SaveAndApplyConfig(cfg *apimodels.PostableUserConfig) er
cmd := &ngmodels.SaveAlertmanagerConfigurationCmd{
AlertmanagerConfiguration: string(rawConfig),
ConfigurationVersion: fmt.Sprintf("v%d", ngmodels.AlertConfigurationVersion),
OrgID: orgID,
}

err = am.Store.SaveAlertmanagerConfigurationWithCallback(cmd, func() error {
@@ -264,12 +269,12 @@ func (am *Alertmanager) SaveAndApplyConfig(cfg *apimodels.PostableUserConfig) er

// SyncAndApplyConfigFromDatabase picks the latest config from database and restarts
// the components with the new config.
func (am *Alertmanager) SyncAndApplyConfigFromDatabase() error {
func (am *Alertmanager) SyncAndApplyConfigFromDatabase(orgID int64) error {
am.reloadConfigMtx.Lock()
defer am.reloadConfigMtx.Unlock()

// First, let's get the configuration we need from the database.
q := &ngmodels.GetLatestAlertmanagerConfigurationQuery{}
q := &ngmodels.GetLatestAlertmanagerConfigurationQuery{OrgID: mainOrgID}
if err := am.Store.GetLatestAlertmanagerConfiguration(q); err != nil {
// If there's no configuration in the database, let's use the default configuration.
if errors.Is(err, store.ErrNoAlertmanagerConfiguration) {
@@ -279,6 +284,7 @@ func (am *Alertmanager) SyncAndApplyConfigFromDatabase() error {
AlertmanagerConfiguration: alertmanagerDefaultConfiguration,
Default: true,
ConfigurationVersion: fmt.Sprintf("v%d", ngmodels.AlertConfigurationVersion),
OrgID: orgID,
}
if err := am.Store.SaveAlertmanagerConfiguration(savecmd); err != nil {
return err
@@ -399,7 +405,7 @@ func (am *Alertmanager) applyConfig(cfg *apimodels.PostableUserConfig, rawConfig
}

func (am *Alertmanager) WorkingDirPath() string {
return filepath.Join(am.Settings.DataPath, workingDir)
return filepath.Join(am.Settings.DataPath, workingDir, strconv.Itoa(mainOrgID))
}

// buildIntegrationsMap builds a map of name to the list of Grafana integration notifiers off of a list of receiver config.
@@ -54,7 +54,7 @@ func setupAMTest(t *testing.T) *Alertmanager {

func TestAlertmanager_ShouldUseDefaultConfigurationWhenNoConfiguration(t *testing.T) {
am := setupAMTest(t)
require.NoError(t, am.SyncAndApplyConfigFromDatabase())
require.NoError(t, am.SyncAndApplyConfigFromDatabase(mainOrgID))
require.NotNil(t, am.config)
}
@@ -19,7 +19,7 @@ func (st *DBstore) GetLatestAlertmanagerConfiguration(query *models.GetLatestAle
return st.SQLStore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
c := &models.AlertConfiguration{}
// The ID is already an auto incremental column, using the ID as an order should guarantee the latest.
ok, err := sess.Desc("id").Limit(1).Get(c)
ok, err := sess.Desc("id").Where("org_id = ?", query.OrgID).Limit(1).Get(c)
if err != nil {
return err
}
@@ -48,6 +48,7 @@ func (st DBstore) SaveAlertmanagerConfigurationWithCallback(cmd *models.SaveAler
AlertmanagerConfiguration: cmd.AlertmanagerConfiguration,
ConfigurationVersion: cmd.ConfigurationVersion,
Default: cmd.Default,
OrgID: cmd.OrgID,
}
if _, err := sess.Insert(config); err != nil {
return err
@@ -41,6 +41,7 @@ func AddMigrations(mg *Migrator) {
ualert.AddTablesMigrations(mg)
ualert.AddDashAlertMigration(mg)
addLibraryElementsMigrations(mg)
ualert.RerunDashAlertMigration(mg)
}

func addMigrationLogMigrations(mg *Migrator) {
@@ -9,14 +9,14 @@ import (
)

type alertRule struct {
OrgId int64
OrgID int64 `xorm:"org_id"`
Title string
Condition string
Data []alertQuery
IntervalSeconds int64
Version int64
Uid string
NamespaceUid string
UID string `xorm:"uid"`
NamespaceUID string `xorm:"namespace_uid"`
RuleGroup string
NoDataState string
ExecErrState string
@@ -51,9 +51,9 @@ type alertRuleVersion struct {

func (a *alertRule) makeVersion() *alertRuleVersion {
return &alertRuleVersion{
RuleOrgID: a.OrgId,
RuleUID: a.Uid,
RuleNamespaceUID: a.NamespaceUid,
RuleOrgID: a.OrgID,
RuleUID: a.UID,
RuleNamespaceUID: a.NamespaceUID,
RuleGroup: a.RuleGroup,
ParentVersion: 0,
RestoredFrom: 0,
@@ -96,14 +96,14 @@ func (m *migration) makeAlertRule(cond condition, da dashAlert, folderUID string
annotations["message"] = da.Message

ar := &alertRule{
OrgId: da.OrgId,
OrgID: da.OrgId,
Title: da.Name, // TODO: Make sure all names are unique, make new name on constraint insert error.
Uid: util.GenerateShortUID(),
UID: util.GenerateShortUID(),
Condition: cond.Condition,
Data: cond.Data,
IntervalSeconds: ruleAdjustInterval(da.Frequency),
Version: 1,
NamespaceUid: folderUID, // Folder already created, comes from env var.
NamespaceUID: folderUID, // Folder already created, comes from env var.
RuleGroup: da.Name,
For: duration(da.For),
Updated: time.Now().UTC(),
@@ -123,7 +123,7 @@ func (m *migration) makeAlertRule(cond condition, da dashAlert, folderUID string
}

// Label for routing and silences.
n, v := getLabelForRouteMatching(ar.Uid)
n, v := getLabelForRouteMatching(ar.UID)
ar.Labels[n] = v

if err := m.addSilence(da, ar); err != nil {
@@ -17,7 +17,8 @@ import (
)

type notificationChannel struct {
ID int `xorm:"id"`
ID int64 `xorm:"id"`
OrgID int64 `xorm:"org_id"`
Uid string `xorm:"uid"`
Name string `xorm:"name"`
Type string `xorm:"type"`
@@ -27,9 +28,16 @@ type notificationChannel struct {
SecureSettings securejsondata.SecureJsonData `xorm:"secure_settings"`
}

func (m *migration) getNotificationChannelMap() (map[interface{}]*notificationChannel, []*notificationChannel, error) {
// channelsPerOrg maps notification channels per organisation
type channelsPerOrg map[int64]map[interface{}]*notificationChannel

// channelMap maps notification channels per organisation
type defaultChannelsPerOrg map[int64][]*notificationChannel

func (m *migration) getNotificationChannelMap() (channelsPerOrg, defaultChannelsPerOrg, error) {
q := `
SELECT id,
org_id,
uid,
name,
type,
@@ -50,25 +58,27 @@ func (m *migration) getNotificationChannelMap() (map[interface{}]*notificationCh
return nil, nil, nil
}

allChannelsMap := make(map[interface{}]*notificationChannel)
var defaultChannels []*notificationChannel
allChannelsMap := make(channelsPerOrg)
defaultChannelsMap := make(defaultChannelsPerOrg)
for i, c := range allChannels {
if _, ok := allChannelsMap[c.OrgID]; !ok { // new seen org
allChannelsMap[c.OrgID] = make(map[interface{}]*notificationChannel)
}
if c.Uid != "" {
allChannelsMap[c.Uid] = &allChannels[i]
allChannelsMap[c.OrgID][c.Uid] = &allChannels[i]
}
if c.ID != 0 {
allChannelsMap[c.ID] = &allChannels[i]
allChannelsMap[c.OrgID][c.ID] = &allChannels[i]
}
if c.IsDefault {
// TODO: verify that there will be only 1 default channel.
defaultChannels = append(defaultChannels, &allChannels[i])
defaultChannelsMap[c.OrgID] = append(defaultChannelsMap[c.OrgID], &allChannels[i])
}
}

return allChannelsMap, defaultChannels, nil
return allChannelsMap, defaultChannelsMap, nil
}

func (m *migration) updateReceiverAndRoute(allChannels map[interface{}]*notificationChannel, defaultChannels []*notificationChannel, da dashAlert, rule *alertRule, amConfig *PostableUserConfig) error {
func (m *migration) updateReceiverAndRoute(allChannels channelsPerOrg, defaultChannels defaultChannelsPerOrg, da dashAlert, rule *alertRule, amConfig *PostableUserConfig) error {
// Create receiver and route for this rule.
if allChannels == nil {
return nil
@@ -82,7 +92,7 @@ func (m *migration) updateReceiverAndRoute(allChannels map[interface{}]*notifica
return nil
}

recv, route, err := m.makeReceiverAndRoute(rule.Uid, channelIDs, defaultChannels, allChannels)
recv, route, err := m.makeReceiverAndRoute(rule.UID, rule.OrgID, channelIDs, defaultChannels[rule.OrgID], allChannels[rule.OrgID])
if err != nil {
return err
}
@@ -97,7 +107,7 @@ func (m *migration) updateReceiverAndRoute(allChannels map[interface{}]*notifica
return nil
}

func (m *migration) makeReceiverAndRoute(ruleUid string, channelUids []interface{}, defaultChannels []*notificationChannel, allChannels map[interface{}]*notificationChannel) (*PostableApiReceiver, *Route, error) {
func (m *migration) makeReceiverAndRoute(ruleUid string, orgID int64, channelUids []interface{}, defaultChannels []*notificationChannel, allChannels map[interface{}]*notificationChannel) (*PostableApiReceiver, *Route, error) {
portedChannels := []*PostableGrafanaReceiver{}
var receiver *PostableApiReceiver

@@ -112,7 +122,10 @@ func (m *migration) makeReceiverAndRoute(ruleUid string, channelUids []interface
return errors.New("failed to generate UID for notification channel")
}

m.migratedChannels[c] = struct{}{}
if _, ok := m.migratedChannelsPerOrg[orgID]; !ok {
m.migratedChannelsPerOrg[orgID] = make(map[*notificationChannel]struct{})
}
m.migratedChannelsPerOrg[orgID][c] = struct{}{}
settings, secureSettings := migrateSettingsToSecureSettings(c.Type, c.Settings, c.SecureSettings)
portedChannels = append(portedChannels, &PostableGrafanaReceiver{
UID: uid,
@@ -129,9 +142,10 @@ func (m *migration) makeReceiverAndRoute(ruleUid string, channelUids []interface
// Remove obsolete notification channels.
filteredChannelUids := make(map[interface{}]struct{})
for _, uid := range channelUids {
_, ok := allChannels[uid]
c, ok := allChannels[uid]
if ok {
filteredChannelUids[uid] = struct{}{}
// always store the channel UID to prevent duplicates
filteredChannelUids[c.Uid] = struct{}{}
} else {
m.mg.Logger.Warn("ignoring obsolete notification channel", "uid", uid)
}
@@ -142,9 +156,10 @@ func (m *migration) makeReceiverAndRoute(ruleUid string, channelUids []interface
if c.Uid == "" {
id = c.ID
}
_, ok := allChannels[id]
c, ok := allChannels[id]
if ok {
filteredChannelUids[id] = struct{}{}
// always store the channel UID to prevent duplicates
filteredChannelUids[c.Uid] = struct{}{}
}
}

@@ -159,7 +174,11 @@ func (m *migration) makeReceiverAndRoute(ruleUid string, channelUids []interface
}

var receiverName string
if rn, ok := m.portedChannelGroups[chanKey]; ok {

if _, ok := m.portedChannelGroupsPerOrg[orgID]; !ok {
m.portedChannelGroupsPerOrg[orgID] = make(map[string]string)
}
if rn, ok := m.portedChannelGroupsPerOrg[orgID][chanKey]; ok {
// We have ported these exact set of channels already. Re-use it.
receiverName = rn
if receiverName == "autogen-contact-point-default" {
@@ -180,7 +199,7 @@ func (m *migration) makeReceiverAndRoute(ruleUid string, channelUids []interface
receiverName = fmt.Sprintf("autogen-contact-point-%d", m.lastReceiverID)
}

m.portedChannelGroups[chanKey] = receiverName
m.portedChannelGroupsPerOrg[orgID][chanKey] = receiverName
receiver = &PostableApiReceiver{
Name: receiverName,
GrafanaManagedReceivers: portedChannels,
@@ -220,32 +239,47 @@ func makeKeyForChannelGroup(channelUids map[interface{}]struct{}) (string, error
}

// addDefaultChannels should be called before adding any other routes.
func (m *migration) addDefaultChannels(amConfig *PostableUserConfig, allChannels map[interface{}]*notificationChannel, defaultChannels []*notificationChannel) error {
// Default route and receiver.
recv, route, err := m.makeReceiverAndRoute("default_route", nil, defaultChannels, allChannels)
if err != nil {
return err
}
func (m *migration) addDefaultChannels(amConfigsPerOrg amConfigsPerOrg, allChannels channelsPerOrg, defaultChannels defaultChannelsPerOrg) error {
for orgID := range allChannels {
if _, ok := amConfigsPerOrg[orgID]; !ok {
amConfigsPerOrg[orgID] = &PostableUserConfig{
AlertmanagerConfig: PostableApiAlertingConfig{
Receivers: make([]*PostableApiReceiver, 0),
Route: &Route{
Routes: make([]*Route, 0),
},
},
}
}
// Default route and receiver.
recv, route, err := m.makeReceiverAndRoute("default_route", orgID, nil, defaultChannels[orgID], allChannels[orgID])
if err != nil {
// if one fails it will fail the migration
return err
}

if recv != nil {
amConfig.AlertmanagerConfig.Receivers = append(amConfig.AlertmanagerConfig.Receivers, recv)
if recv != nil {
amConfigsPerOrg[orgID].AlertmanagerConfig.Receivers = append(amConfigsPerOrg[orgID].AlertmanagerConfig.Receivers, recv)
}
if route != nil {
route.Matchers = nil // Don't need matchers for root route.
amConfigsPerOrg[orgID].AlertmanagerConfig.Route = route
}
}
if route != nil {
route.Matchers = nil // Don't need matchers for root route.
amConfig.AlertmanagerConfig.Route = route
}

return nil
}

func (m *migration) addUnmigratedChannels(amConfig *PostableUserConfig, allChannels map[interface{}]*notificationChannel, defaultChannels []*notificationChannel) error {
func (m *migration) addUnmigratedChannels(orgID int64, amConfigs *PostableUserConfig, allChannels map[interface{}]*notificationChannel, defaultChannels []*notificationChannel) error {
// Unmigrated channels.
portedChannels := []*PostableGrafanaReceiver{}
receiver := &PostableApiReceiver{
Name: "autogen-unlinked-channel-recv",
}
for _, c := range allChannels {
_, ok := m.migratedChannels[c]
if _, ok := m.migratedChannelsPerOrg[orgID]; !ok {
m.migratedChannelsPerOrg[orgID] = make(map[*notificationChannel]struct{})
}
_, ok := m.migratedChannelsPerOrg[orgID][c]
if ok {
continue
}
@@ -259,7 +293,7 @@ func (m *migration) addUnmigratedChannels(amConfig *PostableUserConfig, allChann
return errors.New("failed to generate UID for notification channel")
}

m.migratedChannels[c] = struct{}{}
m.migratedChannelsPerOrg[orgID][c] = struct{}{}
settings, secureSettings := migrateSettingsToSecureSettings(c.Type, c.Settings, c.SecureSettings)
portedChannels = append(portedChannels, &PostableGrafanaReceiver{
UID: uid,
@@ -272,7 +306,7 @@ func (m *migration) addUnmigratedChannels(amConfig *PostableUserConfig, allChann
}
receiver.GrafanaManagedReceivers = portedChannels
if len(portedChannels) > 0 {
amConfig.AlertmanagerConfig.Receivers = append(amConfig.AlertmanagerConfig.Receivers, receiver)
amConfigs.AlertmanagerConfig.Receivers = append(amConfigs.AlertmanagerConfig.Receivers, receiver)
}

return nil
@@ -361,6 +395,8 @@ type PostableUserConfig struct {
AlertmanagerConfig PostableApiAlertingConfig `yaml:"alertmanager_config" json:"alertmanager_config"`
}

type amConfigsPerOrg = map[int64]*PostableUserConfig

func (c *PostableUserConfig) EncryptSecureSettings() error {
for _, r := range c.AlertmanagerConfig.Receivers {
for _, gr := range r.GrafanaManagedReceivers {
@@ -61,6 +61,25 @@ func (m *migration) getOrCreateGeneralFolder(orgID int64) (*dashboard, error) {
return &dashboard, nil
}

// returns the folder of the given dashboard (if exists)
func (m *migration) getFolder(dash dashboard, da dashAlert) (dashboard, error) {
// get folder if exists
folder := dashboard{}
if dash.FolderId > 0 {
exists, err := m.sess.Where("id=?", dash.FolderId).Get(&folder)
if err != nil {
return folder, fmt.Errorf("failed to get folder %d: %w", dash.FolderId, err)
}
if !exists {
return folder, fmt.Errorf("folder with id %v not found", dash.FolderId)
}
if !folder.IsFolder {
return folder, fmt.Errorf("id %v is a dashboard not a folder", dash.FolderId)
}
}
return folder, nil
}

// based on sqlstore.saveDashboard()
// it should be called from inside a transaction
func (m *migration) createFolder(orgID int64, title string) (*dashboard, error) {
@@ -8,6 +8,7 @@ import (
"math/rand"
"os"
"path/filepath"
"strconv"
"time"

"github.com/gofrs/uuid"
@@ -27,7 +28,7 @@ func (m *migration) addSilence(da dashAlert, rule *alertRule) error {
return errors.New("failed to create uuid for silence")
}

n, v := getLabelForRouteMatching(rule.Uid)
n, v := getLabelForRouteMatching(rule.UID)
s := &pb.MeshSilence{
Silence: &pb.Silence{
Id: uid.String(),
@@ -50,7 +51,7 @@ func (m *migration) addSilence(da dashAlert, rule *alertRule) error {
return nil
}

func (m *migration) writeSilencesFile() error {
func (m *migration) writeSilencesFile(orgID int64) error {
var buf bytes.Buffer
for _, e := range m.silences {
if _, err := pbutil.WriteDelimited(&buf, e); err != nil {
@@ -58,7 +59,7 @@ func (m *migration) writeSilencesFile() error {
}
}

f, err := openReplace(silencesFileName(m.mg))
f, err := openReplace(silencesFileNameForOrg(m.mg, orgID))
if err != nil {
return err
}
@@ -70,8 +71,12 @@ func (m *migration) writeSilencesFile() error {
return f.Close()
}

func silencesFileName(mg *migrator.Migrator) string {
return filepath.Join(mg.Cfg.DataPath, "alerting", "silences")
func getSilenceFileNamesForAllOrgs(mg *migrator.Migrator) ([]string, error) {
return filepath.Glob(filepath.Join(mg.Cfg.DataPath, "alerting", "*", "silences"))
}

func silencesFileNameForOrg(mg *migrator.Migrator, orgID int64) string {
return filepath.Join(mg.Cfg.DataPath, "alerting", strconv.Itoa(int(orgID)), "silences")
}

// replaceFile wraps a file that is moved to another filename on closing.
@@ -94,6 +99,10 @@ func (f *replaceFile) Close() error {
func openReplace(filename string) (*replaceFile, error) {
tmpFilename := fmt.Sprintf("%s.%x", filename, uint64(rand.Int63()))

if err := os.MkdirAll(filepath.Dir(tmpFilename), os.ModePerm); err != nil {
return nil, err
}

f, err := os.Create(tmpFilename)
if err != nil {
return nil, err
@@ -267,6 +267,15 @@ func AddAlertmanagerConfigMigrations(mg *migrator.Migrator) {

mg.AddMigration("alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql", migrator.NewRawSQLMigration("").
Mysql("ALTER TABLE alert_configuration MODIFY alertmanager_configuration MEDIUMTEXT;"))

mg.AddMigration("add column org_id in alert_configuration", migrator.NewAddColumnMigration(alertConfiguration, &migrator.Column{
Name: "org_id", Type: migrator.DB_BigInt, Nullable: false, Default: "0",
}))

// add index on org_id
mg.AddMigration("add index in alert_configuration table on org_id column", migrator.NewAddIndexMigration(alertConfiguration, &migrator.Index{
Cols: []string{"org_id"},
}))
}

func AddAlertAdminConfigMigrations(mg *migrator.Migrator) {
@@ -23,6 +23,8 @@ var migTitle = "move dashboard alerts to unified alerting"

var rmMigTitle = "remove unified alerting data"

const clearMigrationEntryTitle = "clear migration entry %q"

type MigrationError struct {
AlertId int64
Err error
@@ -49,19 +51,23 @@ func AddDashAlertMigration(mg *migrator.Migrator) {
case ngEnabled && !migrationRun:
// Remove the migration entry that removes all unified alerting data. This is so when the feature
// flag is removed in future the "remove unified alerting data" migration will be run again.
err = mg.ClearMigrationEntry(rmMigTitle)
mg.AddMigration(fmt.Sprintf(clearMigrationEntryTitle, rmMigTitle), &clearMigrationEntry{
migrationID: rmMigTitle,
})
if err != nil {
mg.Logger.Error("alert migration error: could not clear alert migration for removing data", "error", err)
}
mg.AddMigration(migTitle, &migration{
seenChannelUIDs: make(map[string]struct{}),
migratedChannels: make(map[*notificationChannel]struct{}),
portedChannelGroups: make(map[string]string),
seenChannelUIDs: make(map[string]struct{}),
migratedChannelsPerOrg: make(map[int64]map[*notificationChannel]struct{}),
portedChannelGroupsPerOrg: make(map[int64]map[string]string),
})
case !ngEnabled && migrationRun:
// Remove the migration entry that creates unified alerting data. This is so when the feature
// flag is enabled in the future the migration "move dashboard alerts to unified alerting" will be run again.
err = mg.ClearMigrationEntry(migTitle)
mg.AddMigration(fmt.Sprintf(clearMigrationEntryTitle, migTitle), &clearMigrationEntry{
migrationID: migTitle,
})
if err != nil {
mg.Logger.Error("alert migration error: could not clear dashboard alert migration", "error", err)
}
@@ -69,17 +75,84 @@ func AddDashAlertMigration(mg *migrator.Migrator) {
}
}

// RerunDashAlertMigration force the dashboard alert migration to run
// to make sure that the Alertmanager configurations will be created for each organisation
func RerunDashAlertMigration(mg *migrator.Migrator) {
logs, err := mg.GetMigrationLog()
if err != nil {
mg.Logger.Crit("alert migration failure: could not get migration log", "error", err)
os.Exit(1)
}

cloneMigTitle := fmt.Sprintf("clone %s", migTitle)
cloneRmMigTitle := fmt.Sprintf("clone %s", rmMigTitle)

_, migrationRun := logs[cloneMigTitle]

ngEnabled := mg.Cfg.IsNgAlertEnabled()

switch {
case ngEnabled && !migrationRun:
// Removes all unified alerting data. It is not recorded so when the feature
// flag is removed in future the "clone remove unified alerting data" migration will be run again.
mg.AddMigration(cloneRmMigTitle, &rmMigrationWithoutLogging{})

mg.AddMigration(cloneMigTitle, &migration{
seenChannelUIDs: make(map[string]struct{}),
migratedChannelsPerOrg: make(map[int64]map[*notificationChannel]struct{}),
portedChannelGroupsPerOrg: make(map[int64]map[string]string),
})

case !ngEnabled && migrationRun:
// Remove the migration entry that creates unified alerting data. This is so when the feature
// flag is enabled in the future the migration "move dashboard alerts to unified alerting" will be run again.
mg.AddMigration(fmt.Sprintf(clearMigrationEntryTitle, cloneMigTitle), &clearMigrationEntry{
migrationID: cloneMigTitle,
})
if err != nil {
mg.Logger.Error("alert migration error: could not clear clone dashboard alert migration", "error", err)
}
// Removes all unified alerting data. It is not recorded so when the feature
// flag is enabled in future the "clone remove unified alerting data" migration will be run again.
mg.AddMigration(cloneRmMigTitle, &rmMigrationWithoutLogging{})
}
}

// clearMigrationEntry removes an entry fromt the migration_log table.
// This migration is not recorded in the migration_log so that it can re-run several times.
type clearMigrationEntry struct {
migrator.MigrationBase

migrationID string
}

func (m *clearMigrationEntry) SQL(dialect migrator.Dialect) string {
return "clear migration entry code migration"
}

func (m *clearMigrationEntry) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
_, err := sess.SQL(`DELETE from migration_log where migration_id = ?`, m.migrationID).Query()
if err != nil {
return fmt.Errorf("failed to clear migration entry %v: %w", m.migrationID, err)
}
return nil
}

func (m *clearMigrationEntry) SkipMigrationLog() bool {
return true
}

type migration struct {
migrator.MigrationBase
// session and mg are attached for convenience.
sess *xorm.Session
mg *migrator.Migrator

seenChannelUIDs map[string]struct{}
migratedChannels map[*notificationChannel]struct{}
silences []*pb.MeshSilence
portedChannelGroups map[string]string // Channel group key -> receiver name.
lastReceiverID int // For the auto generated receivers.
seenChannelUIDs map[string]struct{}
migratedChannelsPerOrg map[int64]map[*notificationChannel]struct{}
silences []*pb.MeshSilence
portedChannelGroupsPerOrg map[int64]map[string]string // Org -> Channel group key -> receiver name.
lastReceiverID int // For the auto generated receivers.
}

func (m *migration) SQL(dialect migrator.Dialect) string {
@@ -108,13 +181,13 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
}

// allChannels: channelUID -> channelConfig
allChannels, defaultChannels, err := m.getNotificationChannelMap()
allChannelsPerOrg, defaultChannelsPerOrg, err := m.getNotificationChannelMap()
if err != nil {
return err
}

amConfig := PostableUserConfig{}
err = m.addDefaultChannels(&amConfig, allChannels, defaultChannels)
amConfigPerOrg := make(amConfigsPerOrg, len(allChannelsPerOrg))
err = m.addDefaultChannels(amConfigPerOrg, allChannelsPerOrg, defaultChannelsPerOrg)
if err != nil {
return err
}
@@ -144,26 +217,11 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
}

// get folder if exists
folder := dashboard{}
if dash.FolderId > 0 {
exists, err := m.sess.Where("id=?", dash.FolderId).Get(&folder)
if err != nil {
return MigrationError{
Err: fmt.Errorf("failed to get folder %d: %w", dash.FolderId, err),
AlertId: da.Id,
}
}
if !exists {
return MigrationError{
Err: fmt.Errorf("folder with id %v not found", dash.FolderId),
AlertId: da.Id,
}
}
if !folder.IsFolder {
return MigrationError{
Err: fmt.Errorf("id %v is a dashboard not a folder", dash.FolderId),
AlertId: da.Id,
}
folder, err := m.getFolder(dash, da)
if err != nil {
return MigrationError{
Err: err,
AlertId: da.Id,
}
}

@@ -220,8 +278,12 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
return err
}

if err := m.updateReceiverAndRoute(allChannels, defaultChannels, da, rule, &amConfig); err != nil {
return err
if _, ok := amConfigPerOrg[rule.OrgID]; !ok {
m.mg.Logger.Info("no configuration found", "org", rule.OrgID)
} else {
if err := m.updateReceiverAndRoute(allChannelsPerOrg, defaultChannelsPerOrg, da, rule, amConfigPerOrg[rule.OrgID]); err != nil {
return err
}
}

if strings.HasPrefix(mg.Dialect.DriverName(), migrator.Postgres) {
@@ -234,8 +296,8 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
}
if err != nil {
// TODO better error handling, if constraint
rule.Title += fmt.Sprintf(" %v", rule.Uid)
rule.RuleGroup += fmt.Sprintf(" %v", rule.Uid)
rule.Title += fmt.Sprintf(" %v", rule.UID)
rule.RuleGroup += fmt.Sprintf(" %v", rule.UID)

_, err = m.sess.Insert(rule)
if err != nil {
@@ -250,24 +312,26 @@ func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
}
}

// Create a separate receiver for all the unmigrated channels.
err = m.addUnmigratedChannels(&amConfig, allChannels, defaultChannels)
if err != nil {
return err
}
for orgID, amConfig := range amConfigPerOrg {
// Create a separate receiver for all the unmigrated channels.
err = m.addUnmigratedChannels(orgID, amConfig, allChannelsPerOrg[orgID], defaultChannelsPerOrg[orgID])
if err != nil {
return err
}

if err := m.writeAlertmanagerConfig(&amConfig, allChannels); err != nil {
return err
}
if err := m.writeAlertmanagerConfig(orgID, amConfig, allChannelsPerOrg[orgID]); err != nil {
return err
}

if err := m.writeSilencesFile(); err != nil {
m.mg.Logger.Error("alert migration error: failed to write silence file", "err", err)
if err := m.writeSilencesFile(orgID); err != nil {
m.mg.Logger.Error("alert migration error: failed to write silence file", "err", err)
}
}

return nil
}

func (m *migration) writeAlertmanagerConfig(amConfig *PostableUserConfig, allChannels map[interface{}]*notificationChannel) error {
func (m *migration) writeAlertmanagerConfig(orgID int64, amConfig *PostableUserConfig, allChannels map[interface{}]*notificationChannel) error {
if len(allChannels) == 0 {
// No channels, hence don't require Alertmanager config.
m.mg.Logger.Info("alert migration: no notification channel found, skipping Alertmanager config")
@@ -288,6 +352,7 @@ func (m *migration) writeAlertmanagerConfig(amConfig *PostableUserConfig, allCha
// Since we are migration for a snapshot of the code, it is always going to migrate to
// the v1 config.
ConfigurationVersion: "v1",
OrgID: orgID,
})
if err != nil {
return err
@@ -297,13 +362,15 @@ func (m *migration) writeAlertmanagerConfig(amConfig *PostableUserConfig, allCha
}

type AlertConfiguration struct {
ID int64 `xorm:"pk autoincr 'id'"`
ID int64 `xorm:"pk autoincr 'id'"`
OrgID int64 `xorm:"org_id"`

AlertmanagerConfiguration string
ConfigurationVersion string
CreatedAt int64 `xorm:"created"`
}

// rmMigration removes Grafana 8 alert data
type rmMigration struct {
migrator.MigrationBase
}
@@ -343,9 +410,23 @@ func (m *rmMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
return err
}

if err := os.RemoveAll(silencesFileName(mg)); err != nil {
mg.Logger.Error("alert migration error: failed to remove silence file", "err", err)
files, err := getSilenceFileNamesForAllOrgs(mg)
if err != nil {
return err
}
for _, f := range files {
if err := os.Remove(f); err != nil {
mg.Logger.Error("alert migration error: failed to remove silence file", "file", f, "err", err)
}
}

return nil
}

// rmMigrationWithoutLogging is similar migration to rmMigration
// but is not recorded in the migration_log table so that it can rerun in the future
type rmMigrationWithoutLogging = rmMigration

func (m *rmMigrationWithoutLogging) SkipMigrationLog() bool {
return true
}
@@ -21,6 +21,10 @@ func (m *MigrationBase) GetCondition() MigrationCondition {
return m.Condition
}

func (m *MigrationBase) SkipMigrationLog() bool {
return false
}

type RawSQLMigration struct {
MigrationBase

@@ -108,13 +108,17 @@ func (mg *Migrator) Start() error {
if err != nil {
mg.Logger.Error("Exec failed", "error", err, "sql", sql)
record.Error = err.Error()
if _, err := sess.Insert(&record); err != nil {
return err
if !m.SkipMigrationLog() {
if _, err := sess.Insert(&record); err != nil {
return err
}
}
return err
}
record.Success = true
_, err = sess.Insert(&record)
if !m.SkipMigrationLog() {
_, err = sess.Insert(&record)
}
if err == nil {
migrationsPerformed++
}
@@ -171,16 +175,6 @@ func (mg *Migrator) exec(m Migration, sess *xorm.Session) error {
return nil
}

func (mg *Migrator) ClearMigrationEntry(id string) error {
sess := mg.x.NewSession()
defer sess.Close()
_, err := sess.SQL(`DELETE from migration_log where migration_id = ?`, id).Query()
if err != nil {
return fmt.Errorf("failed to clear migration entry %v: %w", id, err)
}
return nil
}

type dbTransactionFunc func(sess *xorm.Session) error

func (mg *Migrator) InTransaction(callback dbTransactionFunc) error {
@@ -19,6 +19,10 @@ type Migration interface {
Id() string
SetId(string)
GetCondition() MigrationCondition
// SkipMigrationLog is used by dashboard alert migration to Grafana 8 Alerts
// for skipping recording it in the migration_log so that it can run several times.
// For all the other migrations it should be false.
SkipMigrationLog() bool
}

type CodeMigration interface {
@@ -4,8 +4,10 @@ import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"testing"

"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
"github.com/grafana/grafana/pkg/tests/testinfra"
@@ -16,12 +18,34 @@ import (
func TestAlertmanagerConfigurationIsTransactional(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
AnonymousUserRole: models.ROLE_EDITOR,
DisableAnonymous: true,
})

store := testinfra.SetUpDatabase(t, dir)
// override bus to get the GetSignedInUserQuery handler
store.Bus = bus.GetBus()
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)
alertConfigURL := fmt.Sprintf("http://%s/api/alertmanager/grafana/config/api/v1/alerts", grafanaListedAddr)

// create user under main organisation
userID := createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "editor",
Login: "editor",
})

// create another organisation
orgID := createOrg(t, store, "another org", userID)

// create user under different organisation
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "editor-42",
Login: "editor-42",
OrgId: orgID,
})

// editor from main organisation requests configuration
alertConfigURL := fmt.Sprintf("http://editor:editor@%s/api/alertmanager/grafana/config/api/v1/alerts", grafanaListedAddr)

// On a blank start with no configuration, it saves and delivers the default configuration.
{
@@ -66,17 +90,48 @@ func TestAlertmanagerConfigurationIsTransactional(t *testing.T) {
resp = getRequest(t, alertConfigURL, http.StatusOK) // nolint
require.JSONEq(t, defaultAlertmanagerConfigJSON, getBody(t, resp.Body))
}

// editor42 from organisation 42 posts configuration
alertConfigURL = fmt.Sprintf("http://editor-42:editor-42@%s/api/alertmanager/grafana/config/api/v1/alerts", grafanaListedAddr)

// Post the alertmanager config.
{
mockChannel := newMockNotificationChannel(t, grafanaListedAddr)
amConfig := getAlertmanagerConfig(mockChannel.server.Addr)
postRequest(t, alertConfigURL, amConfig, http.StatusAccepted) // nolint

// Verifying that the new configuration is returned
resp := getRequest(t, alertConfigURL, http.StatusOK) // nolint
b := getBody(t, resp.Body)
re := regexp.MustCompile(`"uid":"([\w|-]*)"`)
e := getExpAlertmanagerConfigFromAPI(mockChannel.server.Addr)
require.JSONEq(t, e, string(re.ReplaceAll([]byte(b), []byte(`"uid":""`))))
}

// verify that main organisation still gets the default configuration
alertConfigURL = fmt.Sprintf("http://editor:editor@%s/api/alertmanager/grafana/config/api/v1/alerts", grafanaListedAddr)
{
resp := getRequest(t, alertConfigURL, http.StatusOK) // nolint
require.JSONEq(t, defaultAlertmanagerConfigJSON, getBody(t, resp.Body))
}
}

func TestAlertmanagerConfigurationPersistSecrets(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
AnonymousUserRole: models.ROLE_EDITOR,
DisableAnonymous: true,
})

store := testinfra.SetUpDatabase(t, dir)
// override bus to get the GetSignedInUserQuery handler
store.Bus = bus.GetBus()
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)
alertConfigURL := fmt.Sprintf("http://%s/api/alertmanager/grafana/config/api/v1/alerts", grafanaListedAddr)
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "editor",
Login: "editor",
})
alertConfigURL := fmt.Sprintf("http://editor:editor@%s/api/alertmanager/grafana/config/api/v1/alerts", grafanaListedAddr)
generatedUID := ""

// create a new configuration that has a secret
@@ -39,9 +39,21 @@ func TestAMConfigAccess(t *testing.T) {
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)

// Create a users to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_VIEWER, "viewer", "viewer"))
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "editor", "editor"))
require.NoError(t, createUser(t, store, models.ROLE_ADMIN, "admin", "admin"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_VIEWER),
Password: "viewer",
Login: "viewer",
})
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "editor",
Login: "editor",
})
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_ADMIN),
Password: "admin",
Login: "admin",
})

type testCase struct {
desc string
@@ -402,7 +414,11 @@ func TestAlertAndGroupsQuery(t *testing.T) {
}

// Create a user to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

// invalid credentials request to get the alerts should fail
{
@@ -554,9 +570,21 @@ func TestRulerAccess(t *testing.T) {
require.NoError(t, err)

// Create a users to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_VIEWER, "viewer", "viewer"))
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "editor", "editor"))
require.NoError(t, createUser(t, store, models.ROLE_ADMIN, "admin", "admin"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_VIEWER),
Password: "viewer",
Login: "viewer",
})
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "editor",
Login: "editor",
})
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_ADMIN),
Password: "admin",
Login: "admin",
})

// Now, let's test the access policies.
testCases := []struct {
@@ -668,8 +696,16 @@ func TestDeleteFolderWithRules(t *testing.T) {
namespaceUID, err := createFolder(t, store, 0, "default")
require.NoError(t, err)

require.NoError(t, createUser(t, store, models.ROLE_VIEWER, "viewer", "viewer"))
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "editor", "editor"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_VIEWER),
Password: "viewer",
Login: "viewer",
})
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "editor",
Login: "editor",
})

createRule(t, grafanaListedAddr, "default", "editor", "editor")

@@ -815,12 +851,14 @@ func TestAlertRuleCRUD(t *testing.T) {
store.Bus = bus.GetBus()
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)

err := createUser(t, store, models.ROLE_EDITOR, "grafana", "password")

require.NoError(t, err)
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

// Create the namespace we'll save our alerts to.
_, err = createFolder(t, store, 0, "default")
_, err := createFolder(t, store, 0, "default")
require.NoError(t, err)

interval, err := model.ParseDuration("1m")
@@ -1827,7 +1865,11 @@ func TestQuota(t *testing.T) {
require.NoError(t, err)

// Create a user to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

interval, err := model.ParseDuration("1m")
require.NoError(t, err)
@@ -1921,7 +1963,11 @@ func TestEval(t *testing.T) {
store.Bus = bus.GetBus()
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)

require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

// Create the namespace we'll save our alerts to.
_, err := createFolder(t, store, 0, "default")
@@ -2338,16 +2384,18 @@ func rulesNamespaceWithoutVariableValues(t *testing.T, b []byte) (string, map[st
return string(json), m
}

func createUser(t *testing.T, store *sqlstore.SQLStore, role models.RoleType, username, password string) error {
func createUser(t *testing.T, store *sqlstore.SQLStore, cmd models.CreateUserCommand) int64 {
t.Helper()

cmd := models.CreateUserCommand{
Login: username,
Password: password,
DefaultOrgRole: string(role),
}
_, err := store.CreateUser(context.Background(), cmd)
return err
u, err := store.CreateUser(context.Background(), cmd)
require.NoError(t, err)
return u.Id
}

func createOrg(t *testing.T, store *sqlstore.SQLStore, name string, userID int64) int64 {
org, err := store.CreateOrgWithMember(name, userID)
require.NoError(t, err)
return org.Id
}

func getLongString(t *testing.T, n int) string {
@@ -24,7 +24,11 @@ func TestAvailableChannels(t *testing.T) {
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)

// Create a user to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

alertsURL := fmt.Sprintf("http://grafana:password@%s/api/alert-notifiers", grafanaListedAddr)
// nolint:gosec
@@ -68,7 +68,11 @@ func TestNotificationChannels(t *testing.T) {
bus.AddHandlerCtx("", mockEmail.sendEmailCommandHandlerSync)

// Create a user to make authenticated requests
require.NoError(t, createUser(t, s, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, s, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

{
// There are no notification channel config initially - so it returns the default configuration.
@@ -34,7 +34,11 @@ func TestPrometheusRules(t *testing.T) {
require.NoError(t, err)

// Create a user to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

interval, err := model.ParseDuration("10s")
require.NoError(t, err)
@@ -270,7 +274,11 @@ func TestPrometheusRulesPermissions(t *testing.T) {
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)

// Create a user to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

// Create a namespace under default organisation (orgID = 1) where we'll save some alerts.
_, err := createFolder(t, store, 0, "folder1")
@@ -31,7 +31,11 @@ func TestAlertRulePermissions(t *testing.T) {
grafanaListedAddr := testinfra.StartGrafana(t, dir, path, store)

// Create a user to make authenticated requests
require.NoError(t, createUser(t, store, models.ROLE_EDITOR, "grafana", "password"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_EDITOR),
Password: "password",
Login: "grafana",
})

// Create the namespace we'll save our alerts to.
_, err := createFolder(t, store, 0, "folder1")
@@ -320,7 +324,11 @@ func TestAlertRuleConflictingTitle(t *testing.T) {
require.NoError(t, err)

// Create user
require.NoError(t, createUser(t, store, models.ROLE_ADMIN, "admin", "admin"))
createUser(t, store, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_ADMIN),
Password: "admin",
Login: "admin",
})

interval, err := model.ParseDuration("1m")
require.NoError(t, err)