Alerting: Alert Rule migration (#33000)

* Not complete, put migration behind env flag for now:
UALERT_MIG=iDidBackup
* It is important to back up first; do not expect the same DB to keep working until the env trigger is removed.
* Alerting: Migrate dashboard alert permissions
* Do not use imported models
* Change folder titles

Co-authored-by: Sofia Papagiannaki <papagian@users.noreply.github.com>
This commit is contained in:
Kyle Brandt 2021-04-29 13:24:37 -04:00 committed by GitHub
parent cf958e0b4f
commit 6c8ef2a9c2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 1093 additions and 3 deletions

View File

@ -7,6 +7,7 @@ import (
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
// AddAlertDefinitionMigrations should not be modified.
func AddAlertDefinitionMigrations(mg *migrator.Migrator, defaultIntervalSeconds int64) {
mg.AddMigration("delete alert_definition table", migrator.NewDropTableMigration("alert_definition"))
@ -53,6 +54,7 @@ func AddAlertDefinitionMigrations(mg *migrator.Migrator, defaultIntervalSeconds
}))
}
// AddAlertDefinitionVersionMigrations should not be modified.
func AddAlertDefinitionVersionMigrations(mg *migrator.Migrator) {
mg.AddMigration("delete alert_definition_version table", migrator.NewDropTableMigration("alert_definition_version"))

View File

@ -1,6 +1,9 @@
package migrations
import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
import (
"github.com/grafana/grafana/pkg/services/sqlstore/migrations/ualert"
. "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
// --- Migration Guide line ---
// 1. Never change a migration that is committed and pushed to master
@ -13,7 +16,7 @@ func AddMigrations(mg *Migrator) {
addTempUserMigrations(mg)
addStarMigrations(mg)
addOrgMigrations(mg)
addDashboardMigration(mg)
addDashboardMigration(mg) // Do NOT add more migrations to this function.
addDataSourceMigration(mg)
addApiKeyMigrations(mg)
addDashboardSnapshotMigrations(mg)
@ -27,7 +30,7 @@ func AddMigrations(mg *Migrator) {
addTestDataMigrations(mg)
addDashboardVersionMigration(mg)
addTeamMigrations(mg)
addDashboardAclMigrations(mg)
addDashboardAclMigrations(mg) // Do NOT add more migrations to this function.
addTagMigration(mg)
addLoginAttemptMigrations(mg)
addUserAuthMigrations(mg)
@ -35,6 +38,7 @@ func AddMigrations(mg *Migrator) {
addUserAuthTokenMigrations(mg)
addCacheMigration(mg)
addShortURLMigrations(mg)
ualert.AddMigration(mg)
}
func addMigrationLogMigrations(mg *Migrator) {

View File

@ -0,0 +1,171 @@
package ualert
import (
"encoding/json"
"fmt"
"time"
"github.com/grafana/grafana/pkg/util"
)
// alertRule is a local copy of the unified-alerting rule model, kept here so
// the migration does not depend on imported models that may change.
type alertRule struct {
	OrgId     int64
	Title     string
	Condition string // RefID within Data whose result is the rule's condition
	Data      []alertQuery
	IntervalSeconds int64
	Uid             string
	NamespaceUid    string // UID of the folder the rule is placed in
	RuleGroup       string
	NoDataState     string // see transNoData for the accepted values
	ExecErrState    string // see transExecErr for the accepted values
	For             duration
	Updated         time.Time
	Annotations     map[string]string
	// Labels map[string]string (Labels are not Created in the migration)
}
// getMigrationInfo renders a small JSON blob identifying the dashboard alert
// a migrated rule originated from; it is stored in the rule's annotations.
func getMigrationInfo(da dashAlert) string {
	const tmpl = `{"dashboardUid": "%v", "panelId": %v, "alertId": %v}`
	return fmt.Sprintf(tmpl, da.DashboardUID, da.PanelId, da.Id)
}
// makeAlertRule builds an alertRule from a dashboard alert and its already
// translated condition, placing the rule in the folder given by folderUID.
func (m *migration) makeAlertRule(cond condition, da dashAlert, folderUID string) (*alertRule, error) {
	migAnnotation := getMigrationInfo(da)

	annotations := da.ParsedSettings.AlertRuleTags
	if annotations == nil {
		annotations = make(map[string]string, 1)
	}
	// Record which dashboard alert this rule was migrated from.
	annotations["__migration__info__"] = migAnnotation

	ar := &alertRule{
		OrgId:           da.OrgId,
		Title:           da.Name, // TODO: Make sure all names are unique, make new name on constraint insert error.
		Condition:       cond.Condition,
		Data:            cond.Data,
		IntervalSeconds: ruleAdjustInterval(da.Frequency),
		NamespaceUid:    folderUID, // Folder already created, comes from env var.
		RuleGroup:       da.Name,
		For:             duration(da.For),
		Updated:         time.Now().UTC(),
		Annotations:     annotations,
	}

	var err error
	ar.Uid, err = m.generateAlertRuleUID(ar.OrgId)
	if err != nil {
		return nil, err
	}

	// Translate the legacy no-data / execution-error settings into the
	// unified-alerting state names.
	ar.NoDataState, err = transNoData(da.ParsedSettings.NoDataState)
	if err != nil {
		return nil, err
	}
	ar.ExecErrState, err = transExecErr(da.ParsedSettings.ExecutionErrorState)
	if err != nil {
		return nil, err
	}

	return ar, nil
}
// generateAlertRuleUID returns a short UID that no alert rule in the given
// organisation uses yet. It gives up after 20 random attempts.
func (m *migration) generateAlertRuleUID(orgId int64) (string, error) {
	for attempt := 0; attempt < 20; attempt++ {
		candidate := util.GenerateShortUID()
		taken, err := m.sess.Where("org_id=? AND uid=?", orgId, candidate).Get(&alertRule{})
		if err != nil {
			return "", err
		}
		if taken {
			continue
		}
		return candidate, nil
	}
	return "", fmt.Errorf("could not generate unique uid for alert rule")
}
// TODO: Do I need to create an initial alertRuleVersion as well?

// alertQuery is the query payload stored in alertRule.Data.
type alertQuery struct {
	// RefID is the unique identifier of the query, set by the frontend call.
	RefID string `json:"refId"`

	// QueryType is an optional identifier for the type of query.
	// It can be used to distinguish different types of queries.
	QueryType string `json:"queryType"`

	// RelativeTimeRange is the relative Start and End of the query as sent by the frontend.
	RelativeTimeRange relativeTimeRange `json:"relativeTimeRange"`

	// DatasourceUID identifies the data source the query runs against.
	DatasourceUID string `json:"datasourceUid"`

	// JSON is the raw JSON query and includes the above properties as well as custom properties.
	Model json.RawMessage `json:"model"`
}
// relativeTimeRange is the per query start and end time
// for requests. Both values are offsets back from "now";
// see getRelativeDuration for how they are computed.
type relativeTimeRange struct {
	From duration `json:"from"`
	To   duration `json:"to"`
}
// duration is a time.Duration that marshals to/from JSON as a number of
// seconds rather than nanoseconds.
type duration time.Duration

// String renders the duration using the underlying time.Duration formatting.
func (d duration) String() string {
	return time.Duration(d).String()
}

// MarshalJSON encodes the duration as seconds.
func (d duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(time.Duration(d).Seconds())
}

// UnmarshalJSON decodes a JSON number of seconds into the duration; any
// non-numeric JSON value is rejected.
func (d *duration) UnmarshalJSON(b []byte) error {
	var raw interface{}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	seconds, ok := raw.(float64)
	if !ok {
		return fmt.Errorf("invalid duration %v", raw)
	}
	*d = duration(time.Duration(seconds) * time.Second)
	return nil
}
// ruleAdjustInterval snaps a legacy alert frequency (seconds) onto the
// scheduler's base interval: anything at or below the base becomes the base,
// and larger values are rounded down to a multiple of it.
func ruleAdjustInterval(freq int64) int64 {
	// 10 corresponds to the SchedulerCfg, but TODO not worrying about fetching for now.
	const baseFreq int64 = 10
	if freq > baseFreq {
		return freq - freq%baseFreq
	}
	return baseFreq
}
// transNoData maps a legacy no-data setting onto the unified-alerting state
// name (values from ngalert/models/rule); unknown settings are an error.
func transNoData(s string) (string, error) {
	states := map[string]string{
		"ok":         "OK",
		"":           "NoData",
		"no_data":    "NoData",
		"alerting":   "Alerting",
		"keep_state": "KeepLastState",
	}
	if state, ok := states[s]; ok {
		return state, nil
	}
	return "", fmt.Errorf("unrecognized No Data setting %v", s)
}
// transExecErr maps a legacy execution-error setting onto the
// unified-alerting state name; unknown settings are an error.
func transExecErr(s string) (string, error) {
	states := map[string]string{
		"":              "Alerting",
		"alerting":      "Alerting",
		"KeepLastState": "KeepLastState",
	}
	if state, ok := states[s]; ok {
		return state, nil
	}
	return "", fmt.Errorf("unrecognized Execution Error setting %v", s)
}

View File

@ -0,0 +1,296 @@
package ualert
import (
"encoding/json"
"fmt"
"sort"
"strings"
"time"
)
// transConditions translates a legacy dashboard alert's conditions into a
// unified-alerting condition: one data source query per unique
// (refID, time range) pair, plus one classic-condition expression that
// references those queries.
func transConditions(set dashAlertSettings, orgID int64, dsIDMap map[[2]int64][2]string) (*condition, error) {
	refIDtoCondIdx := make(map[string][]int) // a map of original refIds to their corresponding condition index
	for i, cond := range set.Conditions {
		if len(cond.Query.Params) != 3 {
			return nil, fmt.Errorf("unexpected number of query parameters in cond %v, want 3 got %v", i+1, len(cond.Query.Params))
		}
		// Query.Params is [refID, from, to].
		refID := cond.Query.Params[0]
		refIDtoCondIdx[refID] = append(refIDtoCondIdx[refID], i)
	}

	newRefIDstoCondIdx := make(map[string][]int) // a map of the new refIds to their corresponding condition index

	refIDs := make([]string, 0, len(refIDtoCondIdx)) // a unique sorted list of the original refIDs
	for refID := range refIDtoCondIdx {
		refIDs = append(refIDs, refID)
	}
	sort.Strings(refIDs) // iterate in deterministic order

	newRefIDsToTimeRanges := make(map[string][2]string) // a map of new RefIDs to their time range string tuple representation

	for _, refID := range refIDs {
		condIdxes := refIDtoCondIdx[refID]

		if len(condIdxes) == 1 {
			// The refID is used by only one condition: keep the original letter as the new refID.
			newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[0])
			newRefIDsToTimeRanges[refID] = [2]string{set.Conditions[condIdxes[0]].Query.Params[1], set.Conditions[condIdxes[0]].Query.Params[2]}
			continue
		}

		// track unique time ranges within the same refID
		timeRangesToCondIdx := make(map[[2]string][]int) // a map of the time range tuple to the condition index
		for _, idx := range condIdxes {
			timeParamFrom := set.Conditions[idx].Query.Params[1]
			timeParamTo := set.Conditions[idx].Query.Params[2]
			key := [2]string{timeParamFrom, timeParamTo}
			timeRangesToCondIdx[key] = append(timeRangesToCondIdx[key], idx)
		}

		if len(timeRangesToCondIdx) == 1 {
			// if all conditions share one time range, no need to create a new query with a new RefID
			for i := range condIdxes {
				newRefIDstoCondIdx[refID] = append(newRefIDstoCondIdx[refID], condIdxes[i])
				newRefIDsToTimeRanges[refID] = [2]string{set.Conditions[condIdxes[i]].Query.Params[1], set.Conditions[condIdxes[i]].Query.Params[2]}
			}
			continue
		}

		// This referenced query/refID has different time ranges, so new queries are needed for each unique time range.
		timeRanges := make([][2]string, 0, len(timeRangesToCondIdx)) // a sorted list of unique time ranges for the query
		for tr := range timeRangesToCondIdx {
			timeRanges = append(timeRanges, tr)
		}
		// sort lexicographically by (from, to) so new refID assignment is deterministic
		sort.Slice(timeRanges, func(i, j int) bool {
			switch {
			case timeRanges[i][0] < timeRanges[j][0]:
				return true
			case timeRanges[i][0] > timeRanges[j][0]:
				return false
			default:
				return timeRanges[i][1] < timeRanges[j][1]
			}
		})

		for _, tr := range timeRanges {
			idxes := timeRangesToCondIdx[tr]
			for i := 0; i < len(idxes); i++ {
				newLetter, err := getNewRefID(newRefIDstoCondIdx)
				if err != nil {
					return nil, err
				}
				newRefIDstoCondIdx[newLetter] = append(newRefIDstoCondIdx[newLetter], idxes[i])
				newRefIDsToTimeRanges[newLetter] = [2]string{set.Conditions[idxes[i]].Query.Params[1], set.Conditions[idxes[i]].Query.Params[2]}
			}
		}
	}

	newRefIDs := make([]string, 0, len(newRefIDstoCondIdx)) // newRefIds is a sorted list of the unique refIds of new queries
	for refID := range newRefIDstoCondIdx {
		newRefIDs = append(newRefIDs, refID)
	}
	sort.Strings(newRefIDs)

	newCond := &condition{}
	condIdxToNewRefID := make(map[int]string) // a map of condition indices to the RefIDs of new queries

	// build the new data source queries
	for _, refID := range newRefIDs {
		condIdxes := newRefIDstoCondIdx[refID]
		for i, condIdx := range condIdxes {
			condIdxToNewRefID[condIdx] = refID
			if i > 0 {
				// only create each unique query once
				continue
			}

			var queryObj map[string]interface{} // copy the model
			err := json.Unmarshal(set.Conditions[condIdx].Query.Model, &queryObj)
			if err != nil {
				return nil, err
			}

			var queryType string
			if v, ok := queryObj["queryType"]; ok {
				if s, ok := v.(string); ok {
					queryType = s
				}
			}

			// one could have an alert saved but datasource deleted, so can not require match.
			dsInfo := dsIDMap[[2]int64{orgID, set.Conditions[condIdx].Query.DatasourceID}]
			queryObj["datasource"] = dsInfo[1] // name is needed for UI to load query editor
			queryObj["refId"] = refID

			encodedObj, err := json.Marshal(queryObj)
			if err != nil {
				return nil, err
			}

			rawFrom := newRefIDsToTimeRanges[refID][0]
			rawTo := newRefIDsToTimeRanges[refID][1]

			rTR, err := getRelativeDuration(rawFrom, rawTo)
			if err != nil {
				return nil, err
			}

			alertQuery := alertQuery{
				RefID:             refID,
				Model:             encodedObj,
				RelativeTimeRange: *rTR,
				DatasourceUID:     dsInfo[0],
				QueryType:         queryType,
			}
			newCond.Data = append(newCond.Data, alertQuery)
		}
	}

	// build the new classic condition pointing to our new equivalent queries
	conditions := make([]classicConditionJSON, len(set.Conditions))
	for i, cond := range set.Conditions {
		newCond := classicConditionJSON{}
		newCond.Evaluator = conditionEvalJSON{
			Type:   cond.Evaluator.Type,
			Params: cond.Evaluator.Params,
		}
		newCond.Operator.Type = cond.Operator.Type
		newCond.Query.Params = append(newCond.Query.Params, condIdxToNewRefID[i])
		newCond.Reducer.Type = cond.Reducer.Type

		conditions[i] = newCond
	}

	ccRefID, err := getNewRefID(newRefIDstoCondIdx) // get refID for the classic condition
	if err != nil {
		return nil, err
	}
	newCond.Condition = ccRefID // set the alert condition to point to the classic condition
	newCond.OrgID = orgID

	exprModel := struct {
		Type       string                 `json:"type"`
		RefID      string                 `json:"refId"`
		Datasource string                 `json:"datasource"`
		Conditions []classicConditionJSON `json:"conditions"`
	}{
		"classic_conditions",
		ccRefID,
		"__expr__",
		conditions,
	}

	exprModelJSON, err := json.Marshal(&exprModel)
	if err != nil {
		return nil, err
	}

	ccAlertQuery := alertQuery{
		RefID:         ccRefID,
		Model:         exprModelJSON,
		DatasourceUID: "-100", // presumably pairs with the "__expr__" datasource above — TODO confirm
	}

	newCond.Data = append(newCond.Data, ccAlertQuery)
	// keep queries sorted by RefID for stable output
	sort.Slice(newCond.Data, func(i, j int) bool {
		return newCond.Data[i].RefID < newCond.Data[j].RefID
	})

	return newCond, nil
}
// condition is the migration-local form of a unified-alerting condition.
type condition struct {
	// Condition is the RefID of the query or expression from
	// the Data property to get the results for.
	Condition string `json:"condition"`

	// OrgID is not serialized; it is carried along for convenience.
	OrgID int64 `json:"-"`

	// Data is an array of data source queries and/or server side expressions.
	Data []alertQuery `json:"data"`
}
const alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

// getNewRefID finds the first capital letter of the alphabet not yet in use
// and returns it as a new RefID. It errors once all 26 letters are taken.
//
// TODO: Research if there is a limit. If so enforce is by
// number of queries not letters. If no limit generate more types
// of refIDs.
func getNewRefID(refIDs map[string][]int) (string, error) {
	// alpha is pure ASCII, so byte slicing yields one letter per iteration.
	for i := 0; i < len(alpha); i++ {
		candidate := alpha[i : i+1]
		if _, taken := refIDs[candidate]; !taken {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("ran out of letters when creating expression")
}
// getRelativeDuration turns the alerting durations for dashboard conditions
// (e.g. "now-5m" / "now") into a relative time range.
func getRelativeDuration(rawFrom, rawTo string) (*relativeTimeRange, error) {
	from, err := getFrom(rawFrom)
	if err != nil {
		return nil, err
	}
	to, err := getTo(rawTo)
	if err != nil {
		return nil, err
	}
	return &relativeTimeRange{
		From: duration(from),
		To:   duration(to),
	}, nil
}
// getFrom converts a dashboard "from" string such as "now-5m" into a
// positive duration offset back from now.
func getFrom(from string) (time.Duration, error) {
	raw := strings.Replace(from, "now-", "", 1)
	parsed, err := time.ParseDuration("-" + raw)
	if err != nil {
		return 0, err
	}
	return -parsed, nil
}
// getTo converts a dashboard "to" string into a duration offset back from
// now: "now" is zero, "now-…" is a positive offset, and anything else is
// parsed directly and negated.
func getTo(to string) (time.Duration, error) {
	if to == "now" {
		return 0, nil
	}
	if strings.HasPrefix(to, "now-") {
		raw := strings.Replace(to, "now-", "", 1)
		parsed, err := time.ParseDuration("-" + raw)
		if err != nil {
			return 0, err
		}
		return -parsed, nil
	}
	parsed, err := time.ParseDuration(to)
	if err != nil {
		return 0, err
	}
	return -parsed, nil
}
// classicConditionJSON is the JSON model of a single condition inside a
// classic-conditions server-side expression.
type classicConditionJSON struct {
	Evaluator conditionEvalJSON `json:"evaluator"`

	Operator struct {
		Type string `json:"type"`
	} `json:"operator"`

	// Query.Params holds a single element: the RefID of the query the
	// condition evaluates.
	Query struct {
		Params []string `json:"params"`
	} `json:"query"`

	Reducer struct {
		// Params []interface{} `json:"params"` (Unused)
		Type string `json:"type"`
	} `json:"reducer"`
}

View File

@ -0,0 +1,123 @@
package ualert
import (
"encoding/json"
"time"
)
// dashAlert is a legacy dashboard alert as read from the alert table.
type dashAlert struct {
	Id          int64
	OrgId       int64
	DashboardId int64
	PanelId     int64
	Name        string
	Message     string
	Frequency   int64
	For         time.Duration

	Settings       json.RawMessage    // raw JSON from the settings column
	ParsedSettings *dashAlertSettings // Settings unmarshalled by slurpDashAlerts

	DashboardUID string // Set from separate call
}
// slurpDashSQL selects the legacy dashboard alerts to migrate.
// Fix: the original selected org_id twice (a copy-paste duplicate).
// NOTE(review): "for" is a reserved word in some SQL dialects; this relies on
// the configured dialects accepting it unquoted — confirm before reuse.
var slurpDashSQL = `
SELECT id,
	org_id,
	dashboard_id,
	panel_id,
	name,
	message,
	frequency,
	for,
	settings
FROM
	alert
`
// slurpDashAlerts loads all alerts from the alert database table into
// the dashAlert type.
// Additionally it unmarshals the json settings for the alert into the
// ParsedSettings property of the dash alert.
func (m *migration) slurpDashAlerts() ([]dashAlert, error) {
	dashAlerts := []dashAlert{}
	err := m.sess.SQL(slurpDashSQL).Find(&dashAlerts)
	if err != nil {
		return nil, err
	}
	// Parse the raw settings column for every loaded alert.
	for i := range dashAlerts {
		err = json.Unmarshal(dashAlerts[i].Settings, &dashAlerts[i].ParsedSettings)
		if err != nil {
			return nil, err
		}
	}
	return dashAlerts, nil
}
// dashAlertSettings is a type for the JSON that is in the settings field of
// the alert table.
type dashAlertSettings struct {
	NoDataState         string               `json:"noDataState"`
	ExecutionErrorState string               `json:"executionErrorState"`
	Conditions          []dashAlertCondition `json:"conditions"`
	AlertRuleTags       map[string]string    `json:"alertRuleTags"`
	Notifications       []dashAlertNot       `json:"notifications"`
}
// dashAlertNot is the object that represents the Notifications array in
// dashAlertSettings.
type dashAlertNot struct {
	// UID identifies the notification channel.
	UID string `json:"uid"`
}
// dashAlertCondition is like classic.ClassicConditionJSON except that it
// includes the model property with the query.
type dashAlertCondition struct {
	Evaluator conditionEvalJSON `json:"evaluator"`

	Operator struct {
		Type string `json:"type"`
	} `json:"operator"`

	Query struct {
		Params []string `json:"params"`

		DatasourceID int64 `json:"datasourceId"`

		// Model carries the raw data source query JSON; no tag is needed
		// since encoding/json matches field names case-insensitively.
		Model json.RawMessage
	} `json:"query"`

	// Reducer has no json tag; encoding/json still matches the lowercase
	// "reducer" key case-insensitively.
	Reducer struct {
		// Params []interface{} `json:"params"` (Unused)
		Type string `json:"type"`
	}
}
// conditionEvalJSON is the evaluator part of a condition: a comparison type
// and its threshold parameters.
type conditionEvalJSON struct {
	Params []float64 `json:"params"`
	Type   string    `json:"type"` // e.g. "gt"
}
// slurpDashUIDs returns a map of [orgID, dashboardId] -> dashUID.
func (m *migration) slurpDashUIDs() (map[[2]int64]string, error) {
	var rows []struct {
		OrgID int64  `xorm:"org_id"`
		ID    int64  `xorm:"id"`
		UID   string `xorm:"uid"`
	}
	if err := m.sess.SQL(`SELECT org_id, id, uid FROM dashboard`).Find(&rows); err != nil {
		return nil, err
	}
	uids := make(map[[2]int64]string, len(rows))
	for _, row := range rows {
		uids[[2]int64{row.OrgID, row.ID}] = row.UID
	}
	return uids, nil
}

View File

@ -0,0 +1,128 @@
package ualert
import (
"encoding/base64"
"strings"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/gosimple/slug"
)
// dashboard is a migration-local copy of the dashboard table row, kept here
// so this package does not use imported models.
type dashboard struct {
	Id       int64
	Uid      string
	Slug     string
	OrgId    int64
	GnetId   int64
	Version  int
	PluginId string
	Created  time.Time
	Updated  time.Time

	UpdatedBy int64
	CreatedBy int64
	FolderId  int64
	IsFolder  bool
	HasAcl    bool

	Title string
	Data  *simplejson.Json // the full dashboard JSON document
}
// setUid records the UID both on the struct and inside the dashboard JSON,
// keeping the two representations in sync.
func (d *dashboard) setUid(uid string) {
	d.Data.Set("uid", uid)
	d.Uid = uid
}
// setVersion records the version both on the struct and inside the dashboard
// JSON, keeping the two representations in sync.
func (d *dashboard) setVersion(version int) {
	d.Data.Set("version", version)
	d.Version = version
}
// updateSlug recomputes the URL slug from the title stored in the dashboard
// JSON.
func (d *dashboard) updateSlug() {
	d.Slug = slugifyTitle(d.Data.Get("title").MustString())
}
// slugifyTitle converts a dashboard title into a URL-safe slug, falling back
// to a base64 identifier when the title has no sluggable characters.
func slugifyTitle(title string) string {
	s := slug.Make(strings.ToLower(title))
	if s != "" {
		return s
	}
	// If the dashboard name is only characters outside of the
	// sluggable characters, the slug creation will return an
	// empty string which will mess up URLs. This failsafe picks
	// that up and creates the slug as a base64 identifier instead.
	encoded := base64.RawURLEncoding.EncodeToString([]byte(title))
	if slug.MaxLength != 0 && len(encoded) > slug.MaxLength {
		encoded = encoded[:slug.MaxLength]
	}
	return encoded
}
// newDashboardFromJson builds a dashboard model from its JSON definition,
// carrying over id/uid/version when they are present in the JSON.
func newDashboardFromJson(data *simplejson.Json) *dashboard {
	dash := &dashboard{}
	dash.Data = data
	dash.Title = dash.Data.Get("title").MustString()
	dash.updateSlug()
	update := false // true when the JSON refers to an existing dashboard (has id or uid)

	if id, err := dash.Data.Get("id").Float64(); err == nil {
		dash.Id = int64(id)
		update = true
	}

	if uid, err := dash.Data.Get("uid").String(); err == nil {
		dash.Uid = uid
		update = true
	}

	// Existing dashboards keep their JSON version; otherwise start at 0.
	if version, err := dash.Data.Get("version").Float64(); err == nil && update {
		dash.Version = int(version)
		dash.Updated = time.Now()
	} else {
		dash.Data.Set("version", 0)
		dash.Created = time.Now()
		dash.Updated = time.Now()
	}

	if gnetId, err := dash.Data.Get("gnetId").Float64(); err == nil {
		dash.GnetId = int64(gnetId)
	}

	return dash
}
// saveFolderCommand is the input for creating a folder during the migration.
type saveFolderCommand struct {
	Dashboard    *simplejson.Json `json:"dashboard" binding:"Required"`
	UserId       int64            `json:"userId"`
	Message      string           `json:"message"`
	OrgId        int64            `json:"-"`
	RestoredFrom int              `json:"-"`
	PluginId     string           `json:"-"`
	FolderId     int64            `json:"folderId"`
	IsFolder     bool             `json:"isFolder"`

	// Result is not populated in this file — presumably reserved for callers;
	// TODO confirm it is needed.
	Result *dashboard
}
// getDashboardModel turns the command into the saveable dashboard model.
func (cmd *saveFolderCommand) getDashboardModel() *dashboard {
	dash := newDashboardFromJson(cmd.Dashboard)
	dash.UpdatedBy = cmd.UserId
	if dash.UpdatedBy == 0 {
		// fall back to -1 when no user is set
		dash.UpdatedBy = -1
	}
	dash.OrgId = cmd.OrgId
	dash.PluginId = cmd.PluginId
	dash.IsFolder = cmd.IsFolder
	dash.FolderId = cmd.FolderId
	dash.updateSlug()
	return dash
}

View File

@ -0,0 +1,25 @@
package ualert
// slurpDSIDs returns a map of [orgID, dataSourceId] -> [UID, Name].
func (m *migration) slurpDSIDs() (map[[2]int64][2]string, error) {
	var rows []struct {
		OrgID int64  `xorm:"org_id"`
		ID    int64  `xorm:"id"`
		UID   string `xorm:"uid"`
		Name  string
	}
	if err := m.sess.SQL(`SELECT org_id, id, uid, name FROM data_source`).Find(&rows); err != nil {
		return nil, err
	}
	dsInfo := make(map[[2]int64][2]string, len(rows))
	for _, row := range rows {
		dsInfo[[2]int64{row.OrgID, row.ID}] = [2]string{row.UID, row.Name}
	}
	return dsInfo, nil
}

View File

@ -0,0 +1,161 @@
package ualert
import (
"fmt"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/util"
"github.com/grafana/grafana/pkg/infra/metrics"
"github.com/grafana/grafana/pkg/models"
)
// getOrCreateGeneralFolder returns the general alerting folder for the given
// organisation, creating it first when it does not exist yet.
func (m *migration) getOrCreateGeneralFolder(orgID int64) (*dashboard, error) {
	// There is a unique constraint on (org_id, folder_id, title); with no
	// nested folders, the parent folder id is always 0.
	folder := dashboard{OrgId: orgID, FolderId: 0, Title: GENERAL_FOLDER}
	has, err := m.sess.Get(&folder)
	if err != nil {
		return nil, err
	}
	if has {
		return &folder, nil
	}
	// The folder does not exist yet: create it.
	return m.createFolder(orgID, GENERAL_FOLDER)
}
// createFolder creates a new folder with the given title in the organisation
// and records an initial dashboard version for it.
//
// based on sqlstore.saveDashboard()
// it should be called from inside a transaction
func (m *migration) createFolder(orgID int64, title string) (*dashboard, error) {
	cmd := saveFolderCommand{
		OrgId:    orgID,
		FolderId: 0,
		IsFolder: true,
		Dashboard: simplejson.NewFromAny(map[string]interface{}{
			"title": title,
		}),
	}
	dash := cmd.getDashboardModel()

	// Folders created by the migration are not owned by a real user.
	var userId int64 = -1

	uid, err := m.generateNewDashboardUid(dash.OrgId)
	if err != nil {
		return nil, err
	}
	dash.setUid(uid)

	parentVersion := dash.Version
	dash.setVersion(1)
	dash.Created = time.Now()
	dash.CreatedBy = userId
	dash.Updated = time.Now()
	dash.UpdatedBy = userId
	metrics.MApiDashboardInsert.Inc()

	if _, err = m.sess.Insert(dash); err != nil {
		return nil, err
	}

	dashVersion := &models.DashboardVersion{
		DashboardId:   dash.Id,
		ParentVersion: parentVersion,
		RestoredFrom:  cmd.RestoredFrom,
		Version:       dash.Version,
		Created:       time.Now(),
		CreatedBy:     dash.UpdatedBy,
		Message:       cmd.Message,
		Data:          dash.Data,
	}

	// insert version entry
	if _, err := m.sess.Insert(dashVersion); err != nil {
		return nil, err
	}
	return dash, nil
}
// generateNewDashboardUid returns a short UID that no dashboard in the
// organisation uses yet, trying at most 3 random candidates.
func (m *migration) generateNewDashboardUid(orgId int64) (string, error) {
	for attempt := 0; attempt < 3; attempt++ {
		candidate := util.GenerateShortUID()
		taken, err := m.sess.Where("org_id=? AND uid=?", orgId, candidate).Get(&models.Dashboard{})
		if err != nil {
			return "", err
		}
		if !taken {
			return candidate, nil
		}
	}
	return "", models.ErrDashboardFailedGenerateUniqueUid
}
// setACL inserts the given permission items for the folder identified by
// dashboardID and flips the dashboard's has_acl flag.
//
// based on SQLStore.UpdateDashboardACL()
// it should be called from inside a transaction
func (m *migration) setACL(orgID int64, dashboardID int64, items []*models.DashboardAcl) error {
	if dashboardID <= 0 {
		return fmt.Errorf("folder id must be greater than zero for a folder permission")
	}
	for _, item := range items {
		// Each item must target a user, a team, or a valid role.
		if item.UserID == 0 && item.TeamID == 0 && (item.Role == nil || !item.Role.IsValid()) {
			return models.ErrDashboardAclInfoMissing
		}

		item.OrgID = orgID
		item.DashboardID = dashboardID
		item.Created = time.Now()
		item.Updated = time.Now()

		// Store NULL rather than 0 for unset user/team ids.
		m.sess.Nullable("user_id", "team_id")
		if _, err := m.sess.Insert(item); err != nil {
			return err
		}
	}

	// Update dashboard HasAcl flag
	dashboard := models.Dashboard{HasAcl: true}
	_, err := m.sess.Cols("has_acl").Where("id=?", dashboardID).Update(&dashboard)
	return err
}
// getACL returns the distinct permissions that apply to the dashboard,
// including those inherited from its parent folder and the defaults
// (org_id = -1) when neither has an explicit ACL.
//
// based on SQLStore.GetDashboardAclInfoList()
func (m *migration) getACL(orgID, dashboardID int64) ([]*models.DashboardAcl, error) {
	var err error

	falseStr := m.mg.Dialect.BooleanStr(false)

	result := make([]*models.DashboardAcl, 0)
	rawSQL := `
	-- get distinct permissions for the dashboard and its parent folder
	SELECT DISTINCT
		da.user_id,
		da.team_id,
		da.permission,
		da.role
	FROM dashboard as d
		LEFT JOIN dashboard folder on folder.id = d.folder_id
		LEFT JOIN dashboard_acl AS da ON
		da.dashboard_id = d.id OR
		da.dashboard_id = d.folder_id OR
		(
			-- include default permissions --
			da.org_id = -1 AND (
			  (folder.id IS NOT NULL AND folder.has_acl = ` + falseStr + `) OR
			  (folder.id IS NULL AND d.has_acl = ` + falseStr + `)
			)
		)
	WHERE d.org_id = ? AND d.id = ? AND da.id IS NOT NULL
	ORDER BY da.id ASC
	`
	err = m.sess.SQL(rawSQL, orgID, dashboardID).Find(&result)
	return result, err
}

View File

@ -0,0 +1,180 @@
package ualert
import (
"fmt"
"os"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"xorm.io/xorm"
)
// GENERAL_FOLDER is the title of the fallback folder used for migrated rules
// whose dashboard has no folder and no custom permissions.
const GENERAL_FOLDER = "General Alerting"

// DASHBOARD_FOLDER is the title template for the per-dashboard folder created
// when the source dashboard has its own ACL.
const DASHBOARD_FOLDER = "Migrated %s"
// MigrationError records the dashboard alert that failed to migrate together
// with the underlying cause.
type MigrationError struct {
	AlertId int64
	Err     error
}

// Error implements the error interface.
func (e MigrationError) Error() string {
	return fmt.Sprintf("failed to migrate alert %d: %s", e.AlertId, e.Err.Error())
}

// Unwrap exposes the wrapped error to errors.Is/errors.As.
// Fix: it uses a value receiver because this package creates and returns
// MigrationError by value; with a pointer receiver the value's method set
// would not include Unwrap and unwrapping would silently fail.
func (e MigrationError) Unwrap() error { return e.Err }
// AddMigration schedules the dashboard-alert migration, but only when the
// operator has explicitly opted in (confirming a backup) via the UALERT_MIG
// environment variable.
func AddMigration(mg *migrator.Migrator) {
	// TODO: the unified alerting DB schema needs to be extracted into
	// ../migrations.go so it runs and creates the tables before this
	// migration runs.
	if os.Getenv("UALERT_MIG") != "iDidBackup" {
		return
	}
	mg.AddMigration("move dashboard alerts to unified alerting", &migration{})
}
// migration is the code migration that moves legacy dashboard alerts into
// unified alerting; see Exec for the actual work.
type migration struct {
	migrator.MigrationBase
	// session and mg are attached for convenience.
	sess *xorm.Session
	mg   *migrator.Migrator
}
// SQL returns a placeholder description: this migration is implemented in
// code (Exec), not as a SQL statement.
func (m *migration) SQL(dialect migrator.Dialect) string {
	return "code migration"
}
// Exec runs the dashboard-alert -> unified-alert-rule migration. For every
// legacy dashboard alert it translates the conditions, resolves (or creates)
// a destination folder with matching permissions, and inserts the resulting
// alert rule.
//
// Fix: the "dashboard not found" error no longer wraps err with %w — err is
// always nil on that path, and wrapping nil rendered "%!w(<nil>)" in the
// message.
func (m *migration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
	// Keep the session and migrator on the receiver for the helper methods.
	m.sess = sess
	m.mg = mg

	dashAlerts, err := m.slurpDashAlerts()
	if err != nil {
		return err
	}

	// [orgID, dataSourceId] -> [UID, Name]
	dsIDMap, err := m.slurpDSIDs()
	if err != nil {
		return err
	}

	// [orgID, dashboardId] -> dashUID
	dashIDMap, err := m.slurpDashUIDs()
	if err != nil {
		return err
	}

	for _, da := range dashAlerts {
		newCond, err := transConditions(*da.ParsedSettings, da.OrgId, dsIDMap)
		if err != nil {
			return err
		}

		da.DashboardUID = dashIDMap[[2]int64{da.OrgId, da.DashboardId}]

		// get dashboard
		dash := dashboard{}
		exists, err := m.sess.Where("org_id=? AND uid=?", da.OrgId, da.DashboardUID).Get(&dash)
		if err != nil {
			return MigrationError{
				Err:     fmt.Errorf("failed to get dashboard %s under organisation %d: %w", da.DashboardUID, da.OrgId, err),
				AlertId: da.Id,
			}
		}
		if !exists {
			// err is nil here, so it must not be wrapped.
			return MigrationError{
				Err:     fmt.Errorf("dashboard with UID %v under organisation %d not found", da.DashboardUID, da.OrgId),
				AlertId: da.Id,
			}
		}

		// get folder if exists
		folder := dashboard{}
		if dash.FolderId > 0 {
			exists, err := m.sess.Where("id=?", dash.FolderId).Get(&folder)
			if err != nil {
				return MigrationError{
					Err:     fmt.Errorf("failed to get folder %d: %w", dash.FolderId, err),
					AlertId: da.Id,
				}
			}
			if !exists {
				return MigrationError{
					Err:     fmt.Errorf("folder with id %v not found", dash.FolderId),
					AlertId: da.Id,
				}
			}
			if !folder.IsFolder {
				return MigrationError{
					Err:     fmt.Errorf("id %v is a dashboard not a folder", dash.FolderId),
					AlertId: da.Id,
				}
			}
		}

		// Decide which folder the migrated rule lives in.
		switch {
		case dash.HasAcl:
			// create folder and assign the permissions of the dashboard (included default and inherited)
			ptr, err := m.createFolder(dash.OrgId, fmt.Sprintf(DASHBOARD_FOLDER, getMigrationInfo(da)))
			if err != nil {
				return MigrationError{
					Err:     fmt.Errorf("failed to create folder: %w", err),
					AlertId: da.Id,
				}
			}
			folder = *ptr
			permissions, err := m.getACL(dash.OrgId, dash.Id)
			if err != nil {
				return MigrationError{
					Err:     fmt.Errorf("failed to get dashboard %d under organisation %d permissions: %w", dash.Id, dash.OrgId, err),
					AlertId: da.Id,
				}
			}
			err = m.setACL(folder.OrgId, folder.Id, permissions)
			if err != nil {
				return MigrationError{
					Err:     fmt.Errorf("failed to set folder %d under organisation %d permissions: %w", folder.Id, folder.OrgId, err),
					AlertId: da.Id,
				}
			}
		case dash.FolderId > 0:
			// link the new rule to the existing folder
		default:
			// get or create general folder
			ptr, err := m.getOrCreateGeneralFolder(dash.OrgId)
			if err != nil {
				return MigrationError{
					Err:     fmt.Errorf("failed to get or create general folder under organisation %d: %w", dash.OrgId, err),
					AlertId: da.Id,
				}
			}
			// No need to assign default permissions to general folder
			// because they are included to the query result if it's a folder with no permissions
			// https://github.com/grafana/grafana/blob/076e2ce06a6ecf15804423fcc8dca1b620a321e5/pkg/services/sqlstore/dashboard_acl.go#L109
			folder = *ptr
		}

		if folder.Uid == "" {
			return MigrationError{
				Err:     fmt.Errorf("empty folder identifier"),
				AlertId: da.Id,
			}
		}
		rule, err := m.makeAlertRule(*newCond, da, folder.Uid)
		if err != nil {
			return err
		}

		_, err = m.sess.Insert(rule)
		if err != nil {
			// TODO better error handling, if constraint
			// De-duplicate the title/group with the rule UID and retry once.
			rule.Title += fmt.Sprintf(" %v", rule.Uid)
			rule.RuleGroup += fmt.Sprintf(" %v", rule.Uid)

			_, err = m.sess.Insert(rule)
			if err != nil {
				return err
			}
		}
	}

	return nil
}