Alerting: Remove ngalert feature toggle and introduce two new settings for enabling Grafana 8 alerts and disabling them for specific organisations (#38746)

* Remove `ngalert` feature toggle

* Update frontend

Remove all references of ngalert feature toggle

* Update docs

* Disable unified alerting for specific orgs

* Add backend tests

* Apply suggestions from code review

Co-authored-by: achatterjee-grafana <70489351+achatterjee-grafana@users.noreply.github.com>

* Disabled unified alerting by default

* Ensure backward compatibility with old ngalert feature toggle

* Apply suggestions from code review

Co-authored-by: gotjosh <josue@grafana.com>
Sofia Papagiannaki 2021-09-29 17:16:40 +03:00 committed by GitHub
parent 2dedbcd3c3
commit 012d4f0905
57 changed files with 705 additions and 183 deletions


@ -731,6 +731,12 @@ global_alert_rule = -1
#################################### Unified Alerting ####################
[unified_alerting]
# Enable the Unified Alerting sub-system and interface. When enabled we'll migrate all of your alert rules and notification channels to the new system. New alert rules will be created and your notification channels will be converted into an Alertmanager configuration. Previous data is preserved to enable backwards compatibility but new data is removed.
enabled = false
# Comma-separated list of organization IDs for which to disable unified alerting. Only supported if unified alerting is enabled.
disabled_orgs =
# Specify the frequency of polling for admin config changes.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
admin_config_poll_interval = 60s
@ -780,7 +786,7 @@ min_interval = 10s
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
# Disable legacy alerting engine & UI features
enabled = true
# Makes it possible to turn off alert execution but alerting UI is visible


@ -708,6 +708,12 @@
#################################### Unified Alerting ####################
[unified_alerting]
#Enable the Unified Alerting sub-system and interface. When enabled we'll migrate all of your alert rules and notification channels to the new system. New alert rules will be created and your notification channels will be converted into an Alertmanager configuration. Previous data is preserved to enable backwards compatibility but new data is removed.
;enabled = false
# Comma-separated list of organization IDs for which to disable unified alerting. Only supported if unified alerting is enabled.
;disabled_orgs =
# Specify the frequency of polling for admin config changes.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;admin_config_poll_interval = 60s
@ -757,7 +763,7 @@
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
# Disable legacy alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert execution but alerting UI is visible


@ -1113,6 +1113,16 @@ Sets a global limit on number of alert rules that can be created. Default is -1
For more information about the Grafana 8 alerts, refer to [Unified Alerting]({{< relref "../alerting/unified-alerting/_index.md" >}}).
### enabled
Enable the Unified Alerting sub-system and interface. When enabled we'll migrate all of your alert rules and notification channels to the new system. New alert rules will be created and your notification channels will be converted into an Alertmanager configuration. Previous data is preserved to enable backwards compatibility but new data is removed. The default value is `false`.
Alerting Rules migrated from dashboards and panels will include a link back via the `annotations`.
### disabled_orgs
Comma-separated list of organization IDs for which to disable Grafana 8 Unified Alerting.
### admin_config_poll_interval
Specify the frequency of polling for admin config changes. The default value is `60s`.
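The two new options arrive as plain strings from the INI file. Below is a minimal, standalone Go sketch of how such values can be interpreted; it is an illustration only, not the commit's code, which uses Grafana's util.SplitString and gtime.ParseDuration helpers (the latter also accepts the d suffix, which time.ParseDuration does not).

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseDisabledOrgs turns a value such as "2, 3" into the set of org IDs
// for which unified alerting should be skipped.
func parseDisabledOrgs(raw string) (map[int64]struct{}, error) {
	disabled := make(map[int64]struct{})
	for _, part := range strings.Split(raw, ",") {
		part = strings.TrimSpace(part)
		if part == "" {
			continue
		}
		orgID, err := strconv.ParseInt(part, 10, 64)
		if err != nil {
			return nil, err
		}
		disabled[orgID] = struct{}{}
	}
	return disabled, nil
}

func main() {
	orgs, err := parseDisabledOrgs("2, 3")
	fmt.Println(orgs, err) // map[2:{} 3:{}] <nil>

	// admin_config_poll_interval accepts duration strings such as "30s" or "1m".
	interval, err := time.ParseDuration("30s")
	fmt.Println(interval, err) // 30s <nil>
}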
@ -1189,7 +1199,7 @@ For more information about the Alerting feature in Grafana, refer to [Alerts ove
### enabled
Set to `false` to disable alerting engine and hide Alerting in the Grafana UI. Default is `true`.
Set to `false` to [enable Grafana 8 alerting]({{<relref "#unified_alerting">}}) and to disable legacy alerting engine. Default is `true`.
### execute_alerts
@ -1735,7 +1745,7 @@ For more information about Grafana Enterprise, refer to [Grafana Enterprise]({{<
### enable
Keys of alpha features to enable, separated by space. Available alpha features are: `ngalert`
Keys of alpha features to enable, separated by space.
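For context, the `enable` value is a space-separated list that ends up as a lookup map. A tiny Go sketch of that conversion, mirroring the readFeatureToggles helper factored out later in this commit (which uses Grafana's util.SplitString instead of strings.Fields):

package main

import (
	"fmt"
	"strings"
)

// toggleMap converts a space-separated feature list into a lookup map,
// e.g. "trimDefaults accesscontrol" -> map[accesscontrol:true trimDefaults:true].
func toggleMap(enable string) map[string]bool {
	toggles := make(map[string]bool)
	for _, feature := range strings.Fields(enable) {
		toggles[feature] = true
	}
	return toggles
}

func main() {
	fmt.Println(toggleMap("trimDefaults accesscontrol"))
}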
## [date_formats]


@ -7,9 +7,9 @@ weight = 110
Alerts allow you to know about problems in your systems moments after they occur. Robust and actionable alerts help you identify and resolve issues quickly, minimizing disruption to your services.
Grafana 8.0 has new and improved alerts. The new alerting system is an [opt-in]({{< relref "./unified-alerting/opt-in.md" >}}) feature that centralizes alerting information for Grafana managed alerts and alerts from Prometheus-compatible data sources in one UI and API.
Grafana 8.0 has new and improved alerts that centralize alerting information for Grafana managed alerts as well as alerts from Prometheus-compatible data sources into one user interface and API.
> **Note:** Out of the box, Grafana still supports old [legacy dashboard alerts]({{< relref "./old-alerting/_index.md" >}}). We encourage you to create issues in the Grafana GitHub repository for bugs found while testing Grafana 8 alerts.
> **Note:** Grafana 8 alerts is an [opt-in]({{< relref "./unified-alerting/opt-in.md" >}}) feature. Out of the box, Grafana still supports old [legacy dashboard alerts]({{< relref "./old-alerting/_index.md" >}}). We encourage you to create issues in the Grafana GitHub repository for bugs found while testing Grafana 8 alerts.
Alerts have four main components:


@ -1,13 +1,13 @@
+++
title = "What's New with Grafana 8 Alerts"
title = "What's New with Grafana 8 alerts"
description = "What's New with Grafana 8 Alerts"
keywords = ["grafana", "alerting", "guide"]
weight = 112
+++
# What's New with Grafana 8 Alerts
# What's New with Grafana 8 alerts
The Alerts released with Grafana 8.0 are an opt-in feature that centralizes alerting information for Grafana managed alerts and alerts from Prometheus-compatible datasources in one UI and API. You are able to create and edit alerting rules for Grafana managed alerts, Cortex alerts, and Loki alerts as well as see alerting information from prometheus-compatible datasources in a single, searchable view.
The alerts released with Grafana 8.0 centralize alerting information for Grafana managed alerts and alerts from Prometheus-compatible datasources in one UI and API. You can create and edit alerting rules for Grafana managed alerts, Cortex alerts, and Loki alerts as well as see alerting information from Prometheus-compatible datasources in a single, searchable view.
## Multi-dimensional alerting


@ -6,9 +6,9 @@ weight = 113
# Overview of Grafana 8 alerts
Alerts allow you to know about problems in your systems moments after they occur. Robust and actionable alerts help you identify and resolve issues quickly, minimizing disruption to your services.
Grafana 8.0 has a new and improved alerting sub-system that centralizes alerting information for Grafana managed alerts and alerts from Prometheus-compatible data sources into one user interface and API.
> **Note:** Grafana 8 alerts (beta) is an [opt-in]({{< relref"./opt-in.md" >}}) feature. Out of the box, Grafana still supports old [legacy dashboard alerts]({{< relref "../old-alerting/_index.md" >}}). We encourage you to create issues in the Grafana GitHub repository for bugs found while testing this new feature.
> **Note:** Grafana 8 alerts is an [opt-in]({{< relref "../unified-alerting/opt-in.md" >}}) feature. Out of the box, Grafana still supports old [legacy dashboard alerts]({{< relref "./old-alerting/_index.md" >}}). We encourage you to create issues in the Grafana GitHub repository for bugs found while testing Grafana 8 alerts.
Grafana 8 alerts have four main components:
@ -45,7 +45,7 @@ Alerting rules can only query backend data sources with alerting enabled:
## Metrics from the alerting engine
The alerting engine publishes some internal metrics about itself. You can read more about how Grafana publishes [internal metrics]({{< relref "../../administration/view-server/internal-metrics.md" >}}).
The alerting engine publishes some internal metrics about itself. You can read more about how Grafana publishes [internal metrics]({{< relref "../../administration/view-server/internal-metrics.md" >}}). See also, [View alert rules and their current state]({{< relref "alerting-rules/rule-list.md" >}}).
| Metric Name | Type | Description |
| ------------------------------------------------- | --------- | ---------------------------------------------------------------------------------------- |
@ -57,4 +57,6 @@ The alerting engine publishes some internal metrics about itself. You can read m
| `grafana_alerting_rule_evaluation_duration` | summary | The duration for a rule to execute |
| `grafana_alerting_rule_group_rules` | gauge | The number of rules |
- [View alert rules and their current state]({{< relref "alerting-rules/rule-list.md" >}})
## Limitation
Grafana 8 alerting system can retrieve rules from all available Prometheus, Loki, and Alertmanager data sources. It might not be able to fetch rules from all other supported data sources at this time.


@ -4,7 +4,7 @@ aliases = ["/docs/grafana/latest/alerting/rules/"]
weight = 130
+++
# Create and manage alerting Rules
# Create and manage alerting rules
One or more queries and/or expressions, a condition, the frequency of evaluation, and the (optional) duration that a condition must be met before creating an alert. Alerting rules are how you express the criteria for creating an alert. Queries and expressions select and can operate on the data you wish to alert on. A condition sets the threshold that an alert must meet or exceed to create an alert. The interval specifies how frequently the rule should be evaluated. The duration, when configured, sets a period that a condition must be met or exceeded before an alert is created. Alerting rules also can contain settings for what to do when your query does not return any data, or there is an error attempting to execute the query.


@ -4,33 +4,53 @@ description = "Enable Grafana 8 Alerts"
weight = 128
+++
# Enable Grafana 8 Alerts
# Opt-in to Grafana 8 alerts
Setting the `ngalert` feature toggle enables the new Grafana 8 alerting system.
This topic describes how to enable Grafana 8 alerts as well as the rules and restrictions that govern the migration of existing dashboard alerts to this new alerting system. You can also [disable Grafana 8 alerts]({{< relref "./opt-in.md#disable-grafana-8-alerts" >}}) if needed.
> **Note:** We recommend that you backup Grafana's database before enabling this feature. If you are using PostgreSQL as the backend data source, then the minimum required version is 9.5.
Before you begin, we recommend that you back up Grafana's database. If you are using PostgreSQL as the backend data source, then the minimum required version is 9.5.
At startup, when [the feature toggle is enabled]({{< relref "../../administration/configuration.md">}}#feature_toggles), the legacy Grafana dashboard alerting is disabled and existing dashboard alerts are migrated into a format that is compatible with the Grafana 8 alerting system. You can view these migrated rules, alongside any new alerts you create after the migration, from the Alerting page of your Grafana instance.
## Enable Grafana 8 alerts
> **Note - v8.2 or earlier:** Since the new system stores the notification log and silences on disk, we require the use of persistent disks for using Grafana 8 alerts. Otherwise, the silences and notification log will get lost on a restart, and you might get unwanted or duplicate notifications.
To enable Grafana 8 alerts:
> **Note - v8.3+**: We have removed the need of persistent disk. The notification log and silences are now stored in the database. If you used the file-based approach, we'll read those files and eventually (every 15 minutes) persist them to the database.
1. Go to your custom configuration file located in $WORKING_DIR/conf/custom.ini.
1. In the [unified alerts]({{< relref "../../administration/configuration.md#unified_alerting" >}}) section, set the `enabled` property to `true`.
1. Next, in the [alerting]({{< relref "../../administration/configuration.md#alerting" >}}) section of the configuration file, update the configuration for the legacy dashboard alerts by setting the `enabled` property to `false`.
1. Restart Grafana for the configuration changes to take effect.
Read and write access to dashboard alerts in Grafana versions 7 and earlier were governed by the dashboard and folder permissions under which the alerts were stored. In Grafana 8, alerts are stored in folders and inherit the permissions of those folders. During the migration, dashboard alert permissions are matched to the new rules permissions as follows:
> **Note:** Before Grafana v8.2, to enable or disable Grafana 8 alerts, users configured the `ngalert` feature toggle. This toggle option is no longer available.
Moreover, before v8.2, notification logs and silences were stored on disk. If you did not use persistent disks, any configured silences and notification logs would be lost on a restart, resulting in unwanted or duplicate notifications.
As of Grafana 8.2, we no longer require the use of a persistent disk. Instead, the notification logs and silences are stored in the database, both periodically (every 15 minutes) and on a clean shutdown. If you previously used the file-based approach, Grafana reads the existing files and eventually persists them to the database.
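The backward-compatibility and mutual-exclusion rules introduced by this commit (see ReadUnifiedAlertingSettings further down) can be summarized in a short Go sketch. This is a simplified illustration, not the shipped code, and it ignores how invalid INI values fall back to defaults:

package main

import (
	"errors"
	"fmt"
)

// resolveAlertingMode mirrors the precedence added in this commit: the
// deprecated ngalert feature toggle still forces unified alerting on (and
// legacy alerting off), while enabling both systems at once is rejected.
func resolveAlertingMode(unifiedEnabled, legacyEnabled, ngalertToggle bool) (bool, bool, error) {
	if !unifiedEnabled && ngalertToggle {
		unifiedEnabled = true
		legacyEnabled = false
	}
	if unifiedEnabled && legacyEnabled {
		return false, false, errors.New("both legacy and Grafana 8 Alerts are enabled")
	}
	return unifiedEnabled, legacyEnabled, nil
}

func main() {
	unified, legacy, err := resolveAlertingMode(false, true, true)
	fmt.Println(unified, legacy, err) // true false <nil>
}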
## Migrating legacy alerts to Grafana 8 alerting system
When Grafana 8 alerting is enabled, existing legacy dashboard alerts are migrated to a format compatible with the Grafana 8 alerting system. In the Alerting page of your Grafana instance, you can view the migrated alerts alongside new alerts.
Read and write access to legacy dashboard alerts was governed by the dashboard and folder permissions storing them. In Grafana 8, alerts inherit the permissions of the folders they are stored in. During migration, legacy dashboard alert permissions are matched to the new rules permissions as follows:
- If alert's dashboard has permissions, it will create a folder named like `Migrated {"dashboardUid": "UID", "panelId": 1, "alertId": 1}` to match permissions of the dashboard (including the inherited permissions from the folder).
- If there are no dashboard permissions and the dashboard is under a folder, then the rule is linked to this folder and inherits its permissions.
- If there are no dashboard permissions and the dashboard is under the General folder, then the rule is linked to the `General Alerting` folder and the rule inherits the default permissions.
- If there are no dashboard permissions and the dashboard is under the General folder, then the rule is linked to the `General Alerting` folder, and the rule inherits the default permissions.
During beta, Grafana 8 alerting system can retrieve rules from all available Prometheus, Loki, and Alertmanager data sources. It might not be able to fetch rules from all other supported data sources at this time.
Notification channels are migrated to an Alertmanager configuration with the appropriate routes and receivers. Default notification channels are added as contact points to the default route. Notification channels not associated with any Dashboard alert go to the `autogen-unlinked-channel-recv` route.
Also notification channels are migrated to an Alertmanager configuration with the appropriate routes and receivers. Default notification channels are added as contact points to the default route. Notification channels not associated with any Dashboard alert go to the `autogen-unlinked-channel-recv` route.
Since `Hipchat` and `Sensu` notification channels are no longer supported, legacy alerts associated with these channels are not automatically migrated to Grafana 8 alerting. Assign the legacy alerts to a supported notification channel so that you continue to receive notifications for those alerts.
Silences (expiring after one year) are created for all paused dashboard alerts.
Since `Hipchat` and `Sensu` are discontinued, they are not migrated to the new alerting. If you have dashboard alerts associated with those types of channels and you want to migrate to the new alerting, make sure you assign another supported notification channel, so that you continue to receive notifications for those alerts.
Finally, silences (expiring after one year) are created for all paused dashboard alerts.
### Limitation
## Disabling Grafana 8 Alerting after migration
Grafana 8 alerting system can retrieve rules from all available Prometheus, Loki, and Alertmanager data sources. It might not be able to fetch rules from all other supported data sources at this time.
To disable Grafana 8 Alerting, remove or disable the `ngalert` feature toggle. Dashboard alerts will be re-enabled and any alerts created during or after the migration are deleted.
## Disable Grafana 8 alerts
> **Note:** Any alerting rules created in the Grafana 8 Alerting system will be lost when migrating back to dashboard alerts
To disable Grafana 8 alerts and enable legacy dashboard alerts:
1. Go to your custom configuration file located in $WORKING_DIR/conf/custom.ini.
1. In the [unified alerts]({{< relref "../../administration/configuration.md#unified_alerting" >}}) section, set the `enabled` property to `false`.
1. Next, in the [alerting]({{< relref "../../administration/configuration.md#alerting" >}}) section of the configuration file, update the configuration for the legacy dashboard alerts by setting the `enabled` property to `true`.
1. Restart Grafana for the configuration changes to take effect.
> **Note:** If you choose to migrate from Grafana 8 alerts to legacy dashboard alerts, you will lose any new alerts that you created in the Grafana 8 alerting system.


@ -28,6 +28,7 @@ e2e.scenario({
e2e.components.QueryTab.content().should('be.visible');
e2e.components.TransformTab.content().should('not.exist');
e2e.components.AlertTab.content().should('not.exist');
e2e.components.PanelAlertTabContent.content().should('not.exist');
// Bottom pane tabs
// Can change to Transform tab
@ -38,6 +39,7 @@ e2e.scenario({
e2e.components.Transforms.card('Merge').scrollIntoView().should('be.visible');
e2e.components.QueryTab.content().should('not.exist');
e2e.components.AlertTab.content().should('not.exist');
e2e.components.PanelAlertTabContent.content().should('not.exist');
// Can change to Alerts tab (graph panel is the default vis so the alerts tab should be rendered)
e2e.components.Tab.title('Alert').should('be.visible').click();
@ -47,6 +49,7 @@ e2e.scenario({
e2e.components.AlertTab.content().should('be.visible');
e2e.components.QueryTab.content().should('not.exist');
e2e.components.TransformTab.content().should('not.exist');
e2e.components.PanelAlertTabContent.content().should('not.exist');
e2e.components.Tab.title('Query').should('be.visible').click();
});


@ -45,7 +45,6 @@ export enum GrafanaEdition {
export interface FeatureToggles {
[name: string]: boolean;
ngalert: boolean;
trimDefaults: boolean;
accesscontrol: boolean;
tempoServiceGraph: boolean;
@ -133,4 +132,5 @@ export interface GrafanaConfig {
customTheme?: any;
geomapDefaultBaseLayer?: MapLayerOptions;
geomapDisableCustomBaseLayer?: boolean;
unifiedAlertingEnabled: boolean;
}


@ -243,4 +243,7 @@ export const Components = {
name: 'data-testid-import-dashboard-title',
submit: 'data-testid-import-dashboard-submit',
},
PanelAlertTabContent: {
content: 'Unified alert editor tab content',
},
};


@ -60,7 +60,6 @@ export class GrafanaBootConfig implements GrafanaConfig {
theme2: GrafanaTheme2;
pluginsToPreload: string[] = [];
featureToggles: FeatureToggles = {
ngalert: false,
accesscontrol: false,
trimDefaults: false,
tempoServiceGraph: false,
@ -95,6 +94,7 @@ export class GrafanaBootConfig implements GrafanaConfig {
};
geomapDefaultBaseLayerConfig?: MapLayerOptions;
geomapDisableCustomBaseLayer?: boolean;
unifiedAlertingEnabled = false;
applicationInsightsConnectionString?: string;
applicationInsightsEndpointUrl?: string;


@ -387,7 +387,7 @@ func (hs *HTTPServer) registerRoutes() {
})
apiRoute.Get("/alert-notifiers", reqEditorRole, routing.Wrap(
GetAlertNotifiers(hs.Cfg.IsNgAlertEnabled())),
GetAlertNotifiers(hs.Cfg.UnifiedAlerting.Enabled)),
)
apiRoute.Group("/alert-notifications", func(alertNotifications routing.RouteRegister) {


@ -268,6 +268,7 @@ func (hs *HTTPServer) getFrontendSettingsMap(c *models.ReqContext) (map[string]i
"caching": map[string]bool{
"enabled": hs.Cfg.SectionWithEnvOverrides("caching").Key("enabled").MustBool(true),
},
"unifiedAlertingEnabled": hs.Cfg.UnifiedAlerting.Enabled,
}
if hs.Cfg.GeomapDefaultBaseLayerConfig != nil {


@ -205,16 +205,16 @@ func (hs *HTTPServer) getNavTree(c *models.ReqContext, hasEditPerm bool) ([]*dto
navTree = append(navTree, hs.getProfileNode(c))
}
if setting.AlertingEnabled {
if setting.AlertingEnabled || hs.Cfg.UnifiedAlerting.Enabled {
alertChildNavs := []*dtos.NavLink{
{Text: "Alert rules", Id: "alert-list", Url: hs.Cfg.AppSubURL + "/alerting/list", Icon: "list-ul"},
}
if hs.Cfg.IsNgAlertEnabled() {
if hs.Cfg.UnifiedAlerting.Enabled {
alertChildNavs = append(alertChildNavs, &dtos.NavLink{Text: "Alert groups", Id: "groups", Url: hs.Cfg.AppSubURL + "/alerting/groups", Icon: "layer-group"})
alertChildNavs = append(alertChildNavs, &dtos.NavLink{Text: "Silences", Id: "silences", Url: hs.Cfg.AppSubURL + "/alerting/silences", Icon: "bell-slash"})
}
if c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR {
if hs.Cfg.IsNgAlertEnabled() {
if hs.Cfg.UnifiedAlerting.Enabled {
alertChildNavs = append(alertChildNavs, &dtos.NavLink{
Text: "Contact points", Id: "receivers", Url: hs.Cfg.AppSubURL + "/alerting/notifications",
Icon: "comment-alt-share",
@ -227,7 +227,7 @@ func (hs *HTTPServer) getNavTree(c *models.ReqContext, hasEditPerm bool) ([]*dto
})
}
}
if c.OrgRole == models.ROLE_ADMIN && hs.Cfg.IsNgAlertEnabled() {
if c.OrgRole == models.ROLE_ADMIN && hs.Cfg.UnifiedAlerting.Enabled {
alertChildNavs = append(alertChildNavs, &dtos.NavLink{
Text: "Admin", Id: "alerting-admin", Url: hs.Cfg.AppSubURL + "/alerting/admin",
Icon: "cog",


@ -209,7 +209,7 @@ func TestMiddlewareQuota(t *testing.T) {
cfg.Quota.Enabled = false
})
middlewareScenario(t, "org alert quota reached and ngalert enabled", func(t *testing.T, sc *scenarioContext) {
middlewareScenario(t, "org alert quota reached and unified alerting is enabled", func(t *testing.T, sc *scenarioContext) {
setUp(sc)
quotaHandler := getQuotaHandler(sc, "alert_rule")
@ -219,11 +219,11 @@ func TestMiddlewareQuota(t *testing.T) {
}, func(cfg *setting.Cfg) {
configure(cfg)
cfg.FeatureToggles = map[string]bool{"ngalert": true}
cfg.UnifiedAlerting.Enabled = true
cfg.Quota.Org.AlertRule = quotaUsed
})
middlewareScenario(t, "org alert quota not reached and ngalert enabled", func(t *testing.T, sc *scenarioContext) {
middlewareScenario(t, "org alert quota not reached and unified alerting is enabled", func(t *testing.T, sc *scenarioContext) {
setUp(sc)
quotaHandler := getQuotaHandler(sc, "alert_rule")
@ -233,7 +233,7 @@ func TestMiddlewareQuota(t *testing.T) {
}, func(cfg *setting.Cfg) {
configure(cfg)
cfg.FeatureToggles = map[string]bool{"ngalert": true}
cfg.UnifiedAlerting.Enabled = true
cfg.Quota.Org.AlertRule = quotaUsed + 1
})


@ -44,38 +44,38 @@ type GlobalQuotaDTO struct {
}
type GetOrgQuotaByTargetQuery struct {
Target string
OrgId int64
Default int64
IsNgAlertEnabled bool
Result *OrgQuotaDTO
Target string
OrgId int64
Default int64
UnifiedAlertingEnabled bool
Result *OrgQuotaDTO
}
type GetOrgQuotasQuery struct {
OrgId int64
IsNgAlertEnabled bool
Result []*OrgQuotaDTO
OrgId int64
UnifiedAlertingEnabled bool
Result []*OrgQuotaDTO
}
type GetUserQuotaByTargetQuery struct {
Target string
UserId int64
Default int64
IsNgAlertEnabled bool
Result *UserQuotaDTO
Target string
UserId int64
Default int64
UnifiedAlertingEnabled bool
Result *UserQuotaDTO
}
type GetUserQuotasQuery struct {
UserId int64
IsNgAlertEnabled bool
Result []*UserQuotaDTO
UserId int64
UnifiedAlertingEnabled bool
Result []*UserQuotaDTO
}
type GetGlobalQuotaByTargetQuery struct {
Target string
Default int64
IsNgAlertEnabled bool
Result *GlobalQuotaDTO
Target string
Default int64
UnifiedAlertingEnabled bool
Result *GlobalQuotaDTO
}
type UpdateOrgQuotaCmd struct {


@ -42,7 +42,7 @@ type AlertEngine struct {
// IsDisabled returns true if the alerting service is disable for this instance.
func (e *AlertEngine) IsDisabled() bool {
return !setting.AlertingEnabled || !setting.ExecuteAlerts || e.Cfg.IsNgAlertEnabled()
return !setting.AlertingEnabled || !setting.ExecuteAlerts || e.Cfg.UnifiedAlerting.Enabled
}
// ProvideAlertEngine returns a new AlertEngine.


@ -0,0 +1,28 @@
###
# set external Alertmanager
POST http://admin:admin@localhost:3000/api/v1/ngalert/admin_config
content-type: application/json
{
"alertmanagers": ["http://localhost:9093"]
}
###
GET http://admin:admin@localhost:3000/api/v1/ngalert/admin_config
###
# after a few minutes it should be discovered
GET http://admin:admin@localhost:3000/api/v1/ngalert/alertmanagers
###
# remove it
POST http://admin:admin@localhost:3000/api/v1/ngalert/admin_config
content-type: application/json
{
"alertmanagers": []
}
###
# check again
GET http://admin:admin@localhost:3000/api/v1/ngalert/alertmanagers
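The same round trip can be scripted; a minimal Go sketch using only net/http, assuming a local Grafana on localhost:3000 with the default admin:admin credentials as in the requests above:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Point the org's admin configuration at an external Alertmanager,
	// mirroring the first request in the scratch file above.
	body := []byte(`{"alertmanagers": ["http://localhost:9093"]}`)
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:3000/api/v1/ngalert/admin_config", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("admin", "admin")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}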


@ -135,6 +135,7 @@ type GetAlertRuleByUIDQuery struct {
type ListAlertRulesQuery struct {
OrgID int64
NamespaceUIDs []string
ExcludeOrgs []int64
Result []*AlertRule
}


@ -122,6 +122,7 @@ func (ng *AlertNG) init() error {
MultiOrgNotifier: ng.MultiOrgAlertmanager,
Metrics: ng.Metrics.GetSchedulerMetrics(),
AdminConfigPollInterval: ng.Cfg.UnifiedAlerting.AdminConfigPollInterval,
DisabledOrgs: ng.Cfg.UnifiedAlerting.DisabledOrgs,
MinRuleInterval: ng.getRuleMinInterval(),
}
stateManager := state.NewManager(ng.Log, ng.Metrics.GetStateMetrics(), store, store)
@ -173,7 +174,7 @@ func (ng *AlertNG) IsDisabled() bool {
if ng.Cfg == nil {
return true
}
return !ng.Cfg.IsNgAlertEnabled()
return !ng.Cfg.UnifiedAlerting.Enabled
}
// getRuleDefaultIntervalSeconds returns the default rule interval if the interval is not set.


@ -149,9 +149,14 @@ func (moa *MultiOrgAlertmanager) SyncAlertmanagersForOrgs(ctx context.Context, o
}
moa.alertmanagersMtx.Lock()
for _, orgID := range orgIDs {
if _, isDisabledOrg := moa.settings.UnifiedAlerting.DisabledOrgs[orgID]; isDisabledOrg {
moa.logger.Debug("skipping syncing Alertmanager for disabled org", "org", orgID)
continue
}
orgsFound[orgID] = struct{}{}
alertmanager, found := moa.alertmanagers[orgID]
if !found {
// These metrics are not exported by Grafana and are mostly a placeholder.
// To export them, we need to translate the metrics from each individual registry and,


@ -32,8 +32,12 @@ func TestMultiOrgAlertmanager_SyncAlertmanagersForOrgs(t *testing.T) {
reg := prometheus.NewPedanticRegistry()
m := metrics.NewNGAlert(reg)
cfg := &setting.Cfg{
DataPath: tmpDir,
UnifiedAlerting: setting.UnifiedAlertingSettings{AlertmanagerConfigPollInterval: 3 * time.Minute, DefaultConfiguration: setting.GetAlertmanagerDefaultConfiguration()}, // do not poll in tests.
DataPath: tmpDir,
UnifiedAlerting: setting.UnifiedAlertingSettings{
AlertmanagerConfigPollInterval: 3 * time.Minute,
DefaultConfiguration: setting.GetAlertmanagerDefaultConfiguration(),
DisabledOrgs: map[int64]struct{}{5: {}},
}, // do not poll in tests.
}
mam, err := NewMultiOrgAlertmanager(cfg, configStore, orgStore, kvStore, m.GetMultiOrgAlertmanagerMetrics(), log.New("testlogger"))
require.NoError(t, err)
@ -82,6 +86,12 @@ grafana_alerting_active_configurations 4
grafana_alerting_discovered_configurations 4
`), "grafana_alerting_discovered_configurations", "grafana_alerting_active_configurations"))
}
// if the disabled org comes back, it should not detect it.
{
orgStore.orgs = []int64{1, 2, 3, 4, 5}
require.NoError(t, mam.LoadAndSyncAlertmanagersForOrgs(ctx))
require.Len(t, mam.alertmanagers, 4)
}
}
func TestMultiOrgAlertmanager_AlertmanagerFor(t *testing.T) {


@ -4,8 +4,10 @@ import (
"github.com/grafana/grafana/pkg/services/ngalert/models"
)
func (sch *schedule) fetchAllDetails() []*models.AlertRule {
q := models.ListAlertRulesQuery{}
func (sch *schedule) fetchAllDetails(disabledOrgs []int64) []*models.AlertRule {
q := models.ListAlertRulesQuery{
ExcludeOrgs: disabledOrgs,
}
err := sch.ruleStore.GetAlertRulesForScheduling(&q)
if err != nil {
sch.log.Error("failed to fetch alert definitions", "err", err)


@ -84,6 +84,7 @@ type schedule struct {
sendersCfgHash map[int64]string
senders map[int64]*sender.Sender
adminConfigPollInterval time.Duration
disabledOrgs map[int64]struct{}
minRuleInterval time.Duration
}
@ -103,6 +104,7 @@ type SchedulerCfg struct {
MultiOrgNotifier *notifier.MultiOrgAlertmanager
Metrics *metrics.Scheduler
AdminConfigPollInterval time.Duration
DisabledOrgs map[int64]struct{}
MinRuleInterval time.Duration
}
@ -132,6 +134,7 @@ func NewScheduler(cfg SchedulerCfg, dataService *tsdb.Service, appURL string, st
senders: map[int64]*sender.Sender{},
sendersCfgHash: map[int64]string{},
adminConfigPollInterval: cfg.AdminConfigPollInterval,
disabledOrgs: cfg.DisabledOrgs,
minRuleInterval: cfg.MinRuleInterval,
}
return &sch
@ -190,6 +193,12 @@ func (sch *schedule) SyncAndApplyConfigFromDatabase() error {
orgsFound := make(map[int64]struct{}, len(cfgs))
sch.sendersMtx.Lock()
for _, cfg := range cfgs {
_, isDisabledOrg := sch.disabledOrgs[cfg.OrgID]
if isDisabledOrg {
sch.log.Debug("skipping starting sender for disabled org", "org", cfg.OrgID)
continue
}
orgsFound[cfg.OrgID] = struct{}{} // keep track of which senders we need to keep.
existing, ok := sch.senders[cfg.OrgID]
@ -318,8 +327,12 @@ func (sch *schedule) ruleEvaluationLoop(ctx context.Context) error {
select {
case tick := <-sch.heartbeat.C:
tickNum := tick.Unix() / int64(sch.baseInterval.Seconds())
alertRules := sch.fetchAllDetails()
sch.log.Debug("alert rules fetched", "count", len(alertRules))
disabledOrgs := make([]int64, 0, len(sch.disabledOrgs))
for disabledOrg := range sch.disabledOrgs {
disabledOrgs = append(disabledOrgs, disabledOrg)
}
alertRules := sch.fetchAllDetails(disabledOrgs)
sch.log.Debug("alert rules fetched", "count", len(alertRules), "disabled_orgs", disabledOrgs)
// registeredDefinitions is a map used for finding deleted alert rules
// initially it is assigned to all known alert rules from the previous cycle


@ -37,7 +37,8 @@ func TestWarmStateCache(t *testing.T) {
require.NoError(t, err)
_, dbstore := tests.SetupTestEnv(t, 1)
rule := tests.CreateTestAlertRule(t, dbstore, 600)
const mainOrgID int64 = 1
rule := tests.CreateTestAlertRule(t, dbstore, 600, mainOrgID)
expectedEntries := []*state.State{
{
@ -123,8 +124,11 @@ func TestAlertingTicker(t *testing.T) {
alerts := make([]*models.AlertRule, 0)
// create alert rule with one second interval
alerts = append(alerts, tests.CreateTestAlertRule(t, dbstore, 1))
const mainOrgID int64 = 1
// create alert rule under main org with one second interval
alerts = append(alerts, tests.CreateTestAlertRule(t, dbstore, 1, mainOrgID))
const disabledOrgID int64 = 3
evalAppliedCh := make(chan evalAppliedInfo, len(alerts))
stopAppliedCh := make(chan models.AlertRuleKey, len(alerts))
@ -146,6 +150,9 @@ func TestAlertingTicker(t *testing.T) {
Logger: log.New("ngalert schedule test"),
Metrics: testMetrics.GetSchedulerMetrics(),
AdminConfigPollInterval: 10 * time.Minute, // do not poll in unit tests.
DisabledOrgs: map[int64]struct{}{
disabledOrgID: {},
},
}
st := state.NewManager(schedCfg.Logger, testMetrics.GetStateMetrics(), dbstore, dbstore)
sched := schedule.NewScheduler(schedCfg, nil, "http://localhost", st)
@ -164,9 +171,9 @@ func TestAlertingTicker(t *testing.T) {
assertEvalRun(t, evalAppliedCh, tick, expectedAlertRulesEvaluated...)
})
// change alert rule interval to three seconds
// add alert rule under main org with three seconds interval
var threeSecInterval int64 = 3
alerts = append(alerts, tests.CreateTestAlertRule(t, dbstore, threeSecInterval))
alerts = append(alerts, tests.CreateTestAlertRule(t, dbstore, threeSecInterval, mainOrgID))
t.Logf("alert rule: %v added with interval: %d", alerts[1].GetKey(), threeSecInterval)
expectedAlertRulesEvaluated = []models.AlertRuleKey{alerts[0].GetKey()}
@ -187,9 +194,10 @@ func TestAlertingTicker(t *testing.T) {
assertEvalRun(t, evalAppliedCh, tick, expectedAlertRulesEvaluated...)
})
key := alerts[0].GetKey()
err := dbstore.DeleteAlertRuleByUID(alerts[0].OrgID, alerts[0].UID)
require.NoError(t, err)
t.Logf("alert rule: %v deleted", alerts[1].GetKey())
t.Logf("alert rule: %v deleted", key)
expectedAlertRulesEvaluated = []models.AlertRuleKey{}
t.Run(fmt.Sprintf("on 5th tick alert rules: %s should be evaluated", concatenate(expectedAlertRulesEvaluated)), func(t *testing.T) {
@ -208,13 +216,22 @@ func TestAlertingTicker(t *testing.T) {
})
// create alert rule with one second interval
alerts = append(alerts, tests.CreateTestAlertRule(t, dbstore, 1))
alerts = append(alerts, tests.CreateTestAlertRule(t, dbstore, 1, mainOrgID))
expectedAlertRulesEvaluated = []models.AlertRuleKey{alerts[2].GetKey()}
t.Run(fmt.Sprintf("on 7th tick alert rules: %s should be evaluated", concatenate(expectedAlertRulesEvaluated)), func(t *testing.T) {
tick := advanceClock(t, mockedClock)
assertEvalRun(t, evalAppliedCh, tick, expectedAlertRulesEvaluated...)
})
// create alert rule with one second interval under disabled org
alerts = append(alerts, tests.CreateTestAlertRule(t, dbstore, 1, disabledOrgID))
expectedAlertRulesEvaluated = []models.AlertRuleKey{alerts[2].GetKey()}
t.Run(fmt.Sprintf("on 8th tick alert rules: %s should be evaluated", concatenate(expectedAlertRulesEvaluated)), func(t *testing.T) {
tick := advanceClock(t, mockedClock)
assertEvalRun(t, evalAppliedCh, tick, expectedAlertRulesEvaluated...)
})
}
func assertEvalRun(t *testing.T, ch <-chan evalAppliedInfo, tick time.Time, keys ...models.AlertRuleKey) {
@ -229,13 +246,12 @@ func assertEvalRun(t *testing.T, ch <-chan evalAppliedInfo, tick time.Time, keys
select {
case info := <-ch:
_, ok := expected[info.alertDefKey]
if !ok {
t.Fatal(fmt.Sprintf("alert rule: %v should not have been evaluated at: %v", info.alertDefKey, info.now))
}
t.Logf("alert rule: %v evaluated at: %v", info.alertDefKey, info.now)
assert.True(t, ok)
assert.Equal(t, tick, info.now)
delete(expected, info.alertDefKey)
if len(expected) == 0 {
return
}
case <-timeout:
if len(expected) == 0 {
return


@ -873,7 +873,8 @@ func TestStaleResultsHandler(t *testing.T) {
_, dbstore := tests.SetupTestEnv(t, 1)
rule := tests.CreateTestAlertRule(t, dbstore, 600)
const mainOrgID int64 = 1
rule := tests.CreateTestAlertRule(t, dbstore, 600, mainOrgID)
saveCmd1 := &models.SaveAlertInstanceCommand{
RuleOrgID: rule.OrgID,


@ -422,10 +422,12 @@ func (st DBstore) GetAlertRulesForScheduling(query *ngmodels.ListAlertRulesQuery
return st.SQLStore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
alerts := make([]*ngmodels.AlertRule, 0)
q := "SELECT uid, org_id, interval_seconds, version FROM alert_rule"
if len(query.ExcludeOrgs) > 0 {
q = fmt.Sprintf("%s WHERE org_id NOT IN (%s)", q, strings.Join(strings.Split(strings.Trim(fmt.Sprint(query.ExcludeOrgs), "[]"), " "), ","))
}
if err := sess.SQL(q).Find(&alerts); err != nil {
return err
}
query.Result = alerts
return nil
})
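The fmt.Sprint/Trim/Split/Join chain above turns the excluded org IDs into a comma-separated SQL literal; the following standalone sketch performs the equivalent conversion more explicitly (illustration only, not the commit's implementation):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// excludeClause renders []int64{2, 3} as "2,3", the same literal the store
// builds by round-tripping the slice through fmt.Sprint.
func excludeClause(orgIDs []int64) string {
	parts := make([]string, 0, len(orgIDs))
	for _, id := range orgIDs {
		parts = append(parts, strconv.FormatInt(id, 10))
	}
	return strings.Join(parts, ",")
}

func main() {
	q := "SELECT uid, org_id, interval_seconds, version FROM alert_rule"
	q = fmt.Sprintf("%s WHERE org_id NOT IN (%s)", q, excludeClause([]int64{2, 3}))
	fmt.Println(q)
}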


@ -28,16 +28,18 @@ func mockTimeNow() {
func TestAlertInstanceOperations(t *testing.T) {
_, dbstore := tests.SetupTestEnv(t, baseIntervalSeconds)
alertRule1 := tests.CreateTestAlertRule(t, dbstore, 60)
const mainOrgID int64 = 1
alertRule1 := tests.CreateTestAlertRule(t, dbstore, 60, mainOrgID)
orgID := alertRule1.OrgID
alertRule2 := tests.CreateTestAlertRule(t, dbstore, 60)
alertRule2 := tests.CreateTestAlertRule(t, dbstore, 60, mainOrgID)
require.Equal(t, orgID, alertRule2.OrgID)
alertRule3 := tests.CreateTestAlertRule(t, dbstore, 60)
alertRule3 := tests.CreateTestAlertRule(t, dbstore, 60, mainOrgID)
require.Equal(t, orgID, alertRule3.OrgID)
alertRule4 := tests.CreateTestAlertRule(t, dbstore, 60)
alertRule4 := tests.CreateTestAlertRule(t, dbstore, 60, mainOrgID)
require.Equal(t, orgID, alertRule4.OrgID)
t.Run("can save and read new alert instance", func(t *testing.T) {


@ -30,9 +30,8 @@ func SetupTestEnv(t *testing.T, baseInterval time.Duration) (*ngalert.AlertNG, *
cfg := setting.NewCfg()
cfg.AlertingBaseInterval = baseInterval
// AlertNG is disabled by default and only if it's enabled
// its database migrations run and the relative database tables are created
cfg.FeatureToggles = map[string]bool{"ngalert": true}
// AlertNG database migrations run and the relevant database tables are created only when it's enabled
cfg.UnifiedAlerting.Enabled = true
m := metrics.NewNGAlert(prometheus.NewRegistry())
ng, err := ngalert.ProvideService(cfg, nil, routing.NewRouteRegister(), sqlstore.InitTestDB(t), nil, nil, nil, nil, m)
@ -45,11 +44,11 @@ func SetupTestEnv(t *testing.T, baseInterval time.Duration) (*ngalert.AlertNG, *
}
// CreateTestAlertRule creates a dummy alert definition to be used by the tests.
func CreateTestAlertRule(t *testing.T, dbstore *store.DBstore, intervalSeconds int64) *models.AlertRule {
func CreateTestAlertRule(t *testing.T, dbstore *store.DBstore, intervalSeconds int64, orgID int64) *models.AlertRule {
d := rand.Intn(1000)
ruleGroup := fmt.Sprintf("ruleGroup-%d", d)
err := dbstore.UpdateRuleGroup(store.UpdateRuleGroupCmd{
OrgID: 1,
OrgID: orgID,
NamespaceUID: "namespace",
RuleGroupConfig: apimodels.PostableRuleGroupConfig{
Name: ruleGroup,
@ -84,7 +83,7 @@ func CreateTestAlertRule(t *testing.T, dbstore *store.DBstore, intervalSeconds i
require.NoError(t, err)
q := models.ListRuleGroupAlertRulesQuery{
OrgID: 1,
OrgID: orgID,
NamespaceUID: "namespace",
RuleGroup: ruleGroup,
}


@ -62,7 +62,7 @@ func (qs *QuotaService) QuotaReached(c *models.ReqContext, target string) (bool,
}
continue
}
query := models.GetGlobalQuotaByTargetQuery{Target: scope.Target, IsNgAlertEnabled: qs.Cfg.IsNgAlertEnabled()}
query := models.GetGlobalQuotaByTargetQuery{Target: scope.Target, UnifiedAlertingEnabled: qs.Cfg.UnifiedAlerting.Enabled}
if err := bus.DispatchCtx(c.Req.Context(), &query); err != nil {
return true, err
}
@ -74,10 +74,10 @@ func (qs *QuotaService) QuotaReached(c *models.ReqContext, target string) (bool,
continue
}
query := models.GetOrgQuotaByTargetQuery{
OrgId: c.OrgId,
Target: scope.Target,
Default: scope.DefaultLimit,
IsNgAlertEnabled: qs.Cfg.IsNgAlertEnabled(),
OrgId: c.OrgId,
Target: scope.Target,
Default: scope.DefaultLimit,
UnifiedAlertingEnabled: qs.Cfg.UnifiedAlerting.Enabled,
}
if err := bus.DispatchCtx(c.Req.Context(), &query); err != nil {
return true, err
@ -96,7 +96,7 @@ func (qs *QuotaService) QuotaReached(c *models.ReqContext, target string) (bool,
if !c.IsSignedIn || c.UserId == 0 {
continue
}
query := models.GetUserQuotaByTargetQuery{UserId: c.UserId, Target: scope.Target, Default: scope.DefaultLimit, IsNgAlertEnabled: qs.Cfg.IsNgAlertEnabled()}
query := models.GetUserQuotaByTargetQuery{UserId: c.UserId, Target: scope.Target, Default: scope.DefaultLimit, UnifiedAlertingEnabled: qs.Cfg.UnifiedAlerting.Enabled}
if err := bus.DispatchCtx(c.Req.Context(), &query); err != nil {
return true, err
}


@ -49,10 +49,8 @@ func AddDashAlertMigration(mg *migrator.Migrator) {
_, migrationRun := logs[migTitle]
ngEnabled := mg.Cfg.IsNgAlertEnabled()
switch {
case ngEnabled && !migrationRun:
case mg.Cfg.UnifiedAlerting.Enabled && !migrationRun:
// Remove the migration entry that removes all unified alerting data. This is so when the feature
// flag is removed in future the "remove unified alerting data" migration will be run again.
mg.AddMigration(fmt.Sprintf(clearMigrationEntryTitle, rmMigTitle), &clearMigrationEntry{
@ -67,7 +65,7 @@ func AddDashAlertMigration(mg *migrator.Migrator) {
portedChannelGroupsPerOrg: make(map[int64]map[string]string),
silences: make(map[int64][]*pb.MeshSilence),
})
case !ngEnabled && migrationRun:
case !mg.Cfg.UnifiedAlerting.Enabled && migrationRun:
// Remove the migration entry that creates unified alerting data. This is so when the feature
// flag is enabled in the future the migration "move dashboard alerts to unified alerting" will be run again.
mg.AddMigration(fmt.Sprintf(clearMigrationEntryTitle, migTitle), &clearMigrationEntry{
@ -92,7 +90,7 @@ func RerunDashAlertMigration(mg *migrator.Migrator) {
cloneMigTitle := fmt.Sprintf("clone %s", migTitle)
_, migrationRun := logs[cloneMigTitle]
ngEnabled := mg.Cfg.IsNgAlertEnabled()
ngEnabled := mg.Cfg.UnifiedAlerting.Enabled
switch {
case ngEnabled && !migrationRun:


@ -43,7 +43,7 @@ func (ss *SQLStore) GetOrgQuotaByTarget(ctx context.Context, query *models.GetOr
}
var used int64
if query.Target != alertRuleTarget || query.IsNgAlertEnabled {
if query.Target != alertRuleTarget || query.UnifiedAlertingEnabled {
// get quota used.
rawSQL := fmt.Sprintf("SELECT COUNT(*) AS count FROM %s WHERE org_id=?",
dialect.Quote(query.Target))
@ -97,7 +97,7 @@ func (ss *SQLStore) GetOrgQuotas(ctx context.Context, query *models.GetOrgQuotas
result := make([]*models.OrgQuotaDTO, len(quotas))
for i, q := range quotas {
var used int64
if q.Target != alertRuleTarget || query.IsNgAlertEnabled {
if q.Target != alertRuleTarget || query.UnifiedAlertingEnabled {
// get quota used.
rawSQL := fmt.Sprintf("SELECT COUNT(*) as count from %s where org_id=?", dialect.Quote(q.Target))
resp := make([]*targetCount, 0)
@ -163,7 +163,7 @@ func (ss *SQLStore) GetUserQuotaByTarget(ctx context.Context, query *models.GetU
}
var used int64
if query.Target != alertRuleTarget || query.IsNgAlertEnabled {
if query.Target != alertRuleTarget || query.UnifiedAlertingEnabled {
// get quota used.
rawSQL := fmt.Sprintf("SELECT COUNT(*) as count from %s where user_id=?", dialect.Quote(query.Target))
resp := make([]*targetCount, 0)
@ -211,7 +211,7 @@ func (ss *SQLStore) GetUserQuotas(ctx context.Context, query *models.GetUserQuot
result := make([]*models.UserQuotaDTO, len(quotas))
for i, q := range quotas {
var used int64
if q.Target != alertRuleTarget || query.IsNgAlertEnabled {
if q.Target != alertRuleTarget || query.UnifiedAlertingEnabled {
// get quota used.
rawSQL := fmt.Sprintf("SELECT COUNT(*) as count from %s where user_id=?", dialect.Quote(q.Target))
resp := make([]*targetCount, 0)
@ -266,7 +266,7 @@ func (ss *SQLStore) UpdateUserQuota(ctx context.Context, cmd *models.UpdateUserQ
func (ss *SQLStore) GetGlobalQuotaByTarget(ctx context.Context, query *models.GetGlobalQuotaByTargetQuery) error {
return ss.WithDbSession(ctx, func(sess *DBSession) error {
var used int64
if query.Target != alertRuleTarget || query.IsNgAlertEnabled {
if query.Target != alertRuleTarget || query.UnifiedAlertingEnabled {
// get quota used.
rawSQL := fmt.Sprintf("SELECT COUNT(*) AS count FROM %s",
dialect.Quote(query.Target))


@ -427,11 +427,6 @@ func (cfg Cfg) IsLiveConfigEnabled() bool {
return cfg.FeatureToggles["live-config"]
}
// IsNgAlertEnabled returns whether the standalone alerts feature is enabled.
func (cfg Cfg) IsNgAlertEnabled() bool {
return cfg.FeatureToggles["ngalert"]
}
// IsTrimDefaultsEnabled returns whether the standalone trim dashboard default feature is enabled.
func (cfg Cfg) IsTrimDefaultsEnabled() bool {
return cfg.FeatureToggles["trimDefaults"]
@ -938,12 +933,12 @@ func (cfg *Cfg) Load(args CommandLineArgs) error {
cfg.PluginAdminEnabled = pluginsSection.Key("plugin_admin_enabled").MustBool(false)
cfg.PluginAdminExternalManageEnabled = pluginsSection.Key("plugin_admin_external_manage_enabled").MustBool(false)
// Read and populate feature toggles list
featureTogglesSection := iniFile.Section("feature_toggles")
cfg.FeatureToggles = make(map[string]bool)
featuresTogglesStr := valueAsString(featureTogglesSection, "enable", "")
for _, feature := range util.SplitString(featuresTogglesStr) {
cfg.FeatureToggles[feature] = true
if err := cfg.readFeatureToggles(iniFile); err != nil {
return err
}
if err := cfg.ReadUnifiedAlertingSettings(iniFile); err != nil {
return err
}
// check old location for this option
@ -1372,6 +1367,17 @@ func (cfg *Cfg) readRenderingSettings(iniFile *ini.File) error {
return nil
}
func (cfg *Cfg) readFeatureToggles(iniFile *ini.File) error {
// Read and populate feature toggles list
featureTogglesSection := iniFile.Section("feature_toggles")
cfg.FeatureToggles = make(map[string]bool)
featuresTogglesStr := valueAsString(featureTogglesSection, "enable", "")
for _, feature := range util.SplitString(featuresTogglesStr) {
cfg.FeatureToggles[feature] = true
}
return nil
}
func readAlertingSettings(iniFile *ini.File) error {
alerting := iniFile.Section("alerting")
AlertingEnabled = alerting.Key("enabled").MustBool(true)


@ -68,7 +68,7 @@ func (cfg *Cfg) readQuotaSettings() {
var alertOrgQuota int64
var alertGlobalQuota int64
if cfg.IsNgAlertEnabled() {
if cfg.UnifiedAlerting.Enabled {
alertOrgQuota = quota.Key("org_alert_rule").MustInt64(100)
alertGlobalQuota = quota.Key("global_alert_rule").MustInt64(-1)
}


@ -11,6 +11,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/ini.v1"
@ -427,3 +428,253 @@ func TestGetCDNPathWithAlphaVersion(t *testing.T) {
require.Equal(t, "http://cdn.grafana.com/grafana-oss/v7.5.0-alpha.11124/", cfg.GetContentDeliveryURL("grafana-oss"))
require.Equal(t, "http://cdn.grafana.com/grafana/v7.5.0-alpha.11124/", cfg.GetContentDeliveryURL("grafana"))
}
func TestAlertingEnabled(t *testing.T) {
testCases := []struct {
desc string
unifiedAlertingEnabled string
legacyAlertingEnabled string
featureToggleSet bool
verifyCfg func(*testing.T, Cfg, *ini.File)
}{
{
desc: "when legacy alerting is enabled and unified is disabled",
legacyAlertingEnabled: "true",
unifiedAlertingEnabled: "false",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, false)
assert.Equal(t, AlertingEnabled, true)
},
},
{
desc: "when legacy alerting is disabled and unified is enabled",
legacyAlertingEnabled: "false",
unifiedAlertingEnabled: "true",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, true)
assert.Equal(t, AlertingEnabled, false)
},
},
{
desc: "when both alerting are enabled, it should error",
legacyAlertingEnabled: "true",
unifiedAlertingEnabled: "true",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.Error(t, err)
},
},
{
desc: "when legacy alerting is invalid and unified is disabled",
legacyAlertingEnabled: "invalid",
unifiedAlertingEnabled: "false",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, false)
assert.Equal(t, AlertingEnabled, true)
},
},
{
desc: "when legacy alerting is invalid and unified is enabled",
legacyAlertingEnabled: "invalid",
unifiedAlertingEnabled: "true",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.Error(t, err)
},
},
{
desc: "when legacy alerting is enabled and unified is invalid",
legacyAlertingEnabled: "true",
unifiedAlertingEnabled: "invalid",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, false)
assert.Equal(t, AlertingEnabled, true)
},
},
{
desc: "when legacy alerting is disabled and unified is invalid",
legacyAlertingEnabled: "false",
unifiedAlertingEnabled: "invalid",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, false)
assert.Equal(t, AlertingEnabled, false)
},
},
{
desc: "when both are invalid",
legacyAlertingEnabled: "invalid",
unifiedAlertingEnabled: "invalid",
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, false)
assert.Equal(t, AlertingEnabled, true)
},
},
{
desc: "when legacy alerting is enabled and unified is disabled and feature toggle is set",
legacyAlertingEnabled: "true",
unifiedAlertingEnabled: "false",
featureToggleSet: true,
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, true)
assert.Equal(t, AlertingEnabled, false)
},
},
{
desc: "when legacy alerting is disabled and unified is disabled and feature toggle is set",
legacyAlertingEnabled: "false",
unifiedAlertingEnabled: "false",
featureToggleSet: true,
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, true)
assert.Equal(t, AlertingEnabled, false)
},
},
{
desc: "when legacy alerting is disabled and unified is invalid and feature toggle is set",
legacyAlertingEnabled: "false",
unifiedAlertingEnabled: "invalid",
featureToggleSet: true,
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, true)
assert.Equal(t, AlertingEnabled, false)
},
},
{
desc: "when legacy alerting is invalid and unified is disabled and feature toggle is set",
legacyAlertingEnabled: "invalid",
unifiedAlertingEnabled: "false",
featureToggleSet: true,
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, true)
assert.Equal(t, AlertingEnabled, false)
},
},
{
desc: "when legacy alerting is invalid and unified is enabled and feature toggle is set",
legacyAlertingEnabled: "invalid",
unifiedAlertingEnabled: "true",
featureToggleSet: true,
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.Error(t, err)
},
},
{
desc: "when both are invalid and feature toggle is set",
legacyAlertingEnabled: "invalid",
unifiedAlertingEnabled: "invalid",
featureToggleSet: true,
verifyCfg: func(t *testing.T, cfg Cfg, f *ini.File) {
err := readAlertingSettings(f)
require.NoError(t, err)
err = cfg.readFeatureToggles(f)
require.NoError(t, err)
err = cfg.ReadUnifiedAlertingSettings(f)
require.NoError(t, err)
assert.Equal(t, cfg.UnifiedAlerting.Enabled, true)
assert.Equal(t, AlertingEnabled, false)
},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
t.Cleanup(func() {
AlertingEnabled = false
})
f := ini.Empty()
cfg := NewCfg()
unifiedAlertingSec, err := f.NewSection("unified_alerting")
require.NoError(t, err)
_, err = unifiedAlertingSec.NewKey("enabled", tc.unifiedAlertingEnabled)
require.NoError(t, err)
alertingSec, err := f.NewSection("alerting")
require.NoError(t, err)
_, err = alertingSec.NewKey("enabled", tc.legacyAlertingEnabled)
require.NoError(t, err)
if tc.featureToggleSet {
alertingSec, err := f.NewSection("feature_toggles")
require.NoError(t, err)
_, err = alertingSec.NewKey("enable", "ngalert")
require.NoError(t, err)
}
tc.verifyCfg(t, *cfg, f)
})
}
}


@ -1,10 +1,13 @@
package setting
import (
"errors"
"strconv"
"strings"
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
"github.com/grafana/grafana/pkg/util"
"github.com/prometheus/alertmanager/cluster"
"gopkg.in/ini.v1"
@ -60,6 +63,8 @@ type UnifiedAlertingSettings struct {
EvaluationTimeout time.Duration
ExecuteAlerts bool
DefaultConfiguration string
Enabled bool
DisabledOrgs map[int64]struct{}
}
// ReadUnifiedAlertingSettings reads both the `unified_alerting` and `alerting` sections of the configuration while preferring configuration the `alerting` section.
@ -67,6 +72,29 @@ type UnifiedAlertingSettings struct {
func (cfg *Cfg) ReadUnifiedAlertingSettings(iniFile *ini.File) error {
uaCfg := UnifiedAlertingSettings{}
ua := iniFile.Section("unified_alerting")
uaCfg.Enabled = ua.Key("enabled").MustBool(false)
// TODO: Deprecate this in v8.4, if the old feature toggle ngalert is set, enable Grafana 8 Unified Alerting anyway.
if !uaCfg.Enabled && cfg.FeatureToggles["ngalert"] {
cfg.Logger.Warn("ngalert feature flag is deprecated: use unified alerting enabled setting instead")
uaCfg.Enabled = true
AlertingEnabled = false
}
if uaCfg.Enabled && AlertingEnabled {
return errors.New("both legacy and Grafana 8 Alerts are enabled")
}
uaCfg.DisabledOrgs = make(map[int64]struct{})
orgsStr := valueAsString(ua, "disabled_orgs", "")
for _, org := range util.SplitString(orgsStr) {
orgID, err := strconv.ParseInt(org, 10, 64)
if err != nil {
return err
}
uaCfg.DisabledOrgs[orgID] = struct{}{}
}
var err error
uaCfg.AdminConfigPollInterval, err = gtime.ParseDuration(valueAsString(ua, "admin_config_poll_interval", (schedulerDefaultAdminConfigPollInterval).String()))
if err != nil {


@ -20,10 +20,13 @@ import (
)
func TestAdminConfiguration_SendingToExternalAlertmanagers(t *testing.T) {
const disableOrgID int64 = 3
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
NGAlertAdminConfigPollInterval: 2 * time.Second,
UnifiedAlertingDisabledOrgs: []int64{disableOrgID}, // disable unified alerting for organisation 3
})
grafanaListedAddr, s := testinfra.StartGrafana(t, dir, path)
@ -31,15 +34,29 @@ func TestAdminConfiguration_SendingToExternalAlertmanagers(t *testing.T) {
s.Bus = bus.GetBus()
// Create a user to make authenticated requests
createUser(t, s, models.CreateUserCommand{
userID := createUser(t, s, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_ADMIN),
Login: "grafana",
Password: "password",
})
// create another organisation
orgID := createOrg(t, s, "another org", userID)
// ensure that the orgID is 3 (the disabled org)
require.Equal(t, disableOrgID, orgID)
// create user under different organisation
createUser(t, s, models.CreateUserCommand{
DefaultOrgRole: string(models.ROLE_ADMIN),
Password: "admin-42",
Login: "admin-42",
OrgId: orgID,
})
// Create a couple of "fake" Alertmanagers
fakeAM1 := schedule.NewFakeExternalAlertmanager(t)
fakeAM2 := schedule.NewFakeExternalAlertmanager(t)
fakeAM3 := schedule.NewFakeExternalAlertmanager(t)
// Now, let's test the configuration API.
{
@ -50,7 +67,7 @@ func TestAdminConfiguration_SendingToExternalAlertmanagers(t *testing.T) {
require.JSONEq(t, string(b), "{\"message\": \"no admin configuration available\"}")
}
// Now, lets re-set external Alertmanagers.
// Now, let's re-set external Alertmanagers for the main organisation.
{
ac := apimodels.PostableNGalertConfig{
Alertmanagers: []string{fakeAM1.URL(), fakeAM2.URL()},
@ -76,7 +93,7 @@ func TestAdminConfiguration_SendingToExternalAlertmanagers(t *testing.T) {
require.JSONEq(t, string(b), fmt.Sprintf("{\"alertmanagers\":[\"%s\",\"%s\"]}\n", fakeAM1.URL(), fakeAM2.URL()))
}
// With the configuration set, we should eventually discover those Alertmanagers set.
// With the configuration set, we should eventually discover those Alertmanagers.
{
alertsURL := fmt.Sprintf("http://grafana:password@%s/api/v1/ngalert/alertmanagers", grafanaListedAddr)
require.Eventually(t, func() bool {
@ -88,7 +105,7 @@ func TestAdminConfiguration_SendingToExternalAlertmanagers(t *testing.T) {
require.NoError(t, json.Unmarshal(b, &alertmanagers))
return len(alertmanagers.Data.Active) == 2
}, 80*time.Second, 4*time.Second)
}, 16*time.Second, 8*time.Second) // the sync interval is 2s, so after 8s all Alertmanagers should most likely have started
}
// Now, let's set an alert that should fire as quickly as possible.
@ -148,4 +165,45 @@ func TestAdminConfiguration_SendingToExternalAlertmanagers(t *testing.T) {
return fakeAM1.AlertsCount() == 1 && fakeAM2.AlertsCount() == 1
}, 60*time.Second, 5*time.Second)
}
// Now, let's re-set external Alertmanagers for the other organisation.
{
ac := apimodels.PostableNGalertConfig{
Alertmanagers: []string{fakeAM3.URL()},
}
buf := bytes.Buffer{}
enc := json.NewEncoder(&buf)
err := enc.Encode(&ac)
require.NoError(t, err)
alertsURL := fmt.Sprintf("http://admin-42:admin-42@%s/api/v1/ngalert/admin_config", grafanaListedAddr)
resp := postRequest(t, alertsURL, buf.String(), http.StatusCreated) // nolint
b, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.JSONEq(t, string(b), "{\"message\": \"admin configuration updated\"}")
}
// If we get the configuration again, it shows us what we've set.
{
alertsURL := fmt.Sprintf("http://admin-42:admin-42@%s/api/v1/ngalert/admin_config", grafanaListedAddr)
resp := getRequest(t, alertsURL, http.StatusOK) // nolint
b, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.JSONEq(t, string(b), fmt.Sprintf("{\"alertmanagers\":[\"%s\"]}\n", fakeAM3.URL()))
}
// With the configuration set, we should still not discover any Alertmanagers, since unified alerting is disabled for this organisation.
{
alertsURL := fmt.Sprintf("http://admin-42:admin-42@%s/api/v1/ngalert/alertmanagers", grafanaListedAddr)
require.Eventually(t, func() bool {
resp := getRequest(t, alertsURL, http.StatusOK) // nolint
b, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
var alertmanagers apimodels.GettableAlertmanagers
require.NoError(t, json.Unmarshal(b, &alertmanagers))
return len(alertmanagers.Data.Active) == 0
}, 16*time.Second, 8*time.Second) // the sync interval is 2s, so after 8s all Alertmanagers (if any) should most likely have started
}
}

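The `createOrg`, `getRequest` and `postRequest` helpers used above are defined elsewhere in the test package and do not appear in this hunk. A hypothetical reconstruction of the two request helpers, inferred only from their call sites (names, signatures and behaviour here are assumptions, not code from the commit):
// Hypothetical request helpers: issue an HTTP request, assert the expected
// status code, and hand the response back for further inspection.
package alerting
import (
	"net/http"
	"strings"
	"testing"
	"github.com/stretchr/testify/require"
)
func getRequest(t *testing.T, url string, expStatusCode int) *http.Response {
	t.Helper()
	resp, err := http.Get(url) // nolint:gosec
	require.NoError(t, err)
	require.Equal(t, expStatusCode, resp.StatusCode)
	return resp
}
func postRequest(t *testing.T, url string, body string, expStatusCode int) *http.Response {
	t.Helper()
	resp, err := http.Post(url, "application/json", strings.NewReader(body)) // nolint:gosec
	require.NoError(t, err)
	require.Equal(t, expStatusCode, resp.StatusCode)
	return resp
}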
View File

@ -18,7 +18,8 @@ import (
func TestAlertmanagerConfigurationIsTransactional(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
NGAlertAlertmanagerConfigPollInterval: 2 * time.Second,
DisableAnonymous: true,
})
@ -127,8 +128,9 @@ func TestAlertmanagerConfigurationIsTransactional(t *testing.T) {
func TestAlertmanagerConfigurationPersistSecrets(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)

View File

@ -29,8 +29,9 @@ import (
func TestAMConfigAccess(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -387,8 +388,9 @@ func TestAMConfigAccess(t *testing.T) {
func TestAlertAndGroupsQuery(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -552,10 +554,11 @@ func TestAlertAndGroupsQuery(t *testing.T) {
func TestRulerAccess(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
EnableQuota: true,
DisableAnonymous: true,
ViewersCanEdit: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
EnableQuota: true,
DisableAnonymous: true,
ViewersCanEdit: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -678,10 +681,11 @@ func TestRulerAccess(t *testing.T) {
func TestDeleteFolderWithRules(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
EnableQuota: true,
DisableAnonymous: true,
ViewersCanEdit: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
EnableQuota: true,
DisableAnonymous: true,
ViewersCanEdit: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -837,9 +841,10 @@ func TestDeleteFolderWithRules(t *testing.T) {
func TestAlertRuleCRUD(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
EnableQuota: true,
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
EnableQuota: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -1905,7 +1910,8 @@ func TestAlertRuleCRUD(t *testing.T) {
func TestAlertmanagerStatus(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
})
grafanaListedAddr, _ := testinfra.StartGrafana(t, dir, path)
@ -1965,9 +1971,10 @@ func TestAlertmanagerStatus(t *testing.T) {
func TestQuota(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
EnableQuota: true,
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
EnableQuota: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -2207,9 +2214,10 @@ func TestQuota(t *testing.T) {
func TestEval(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
EnableQuota: true,
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
EnableQuota: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)

View File

@ -15,8 +15,9 @@ import (
func TestAvailableChannels(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)

View File

@ -34,7 +34,8 @@ func TestTestReceivers(t *testing.T) {
t.Run("assert no receivers returns 400 Bad Request", func(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -64,7 +65,8 @@ func TestTestReceivers(t *testing.T) {
t.Run("assert working receiver returns OK", func(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -131,7 +133,8 @@ func TestTestReceivers(t *testing.T) {
t.Run("assert invalid receiver returns 400 Bad Request", func(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -194,7 +197,8 @@ func TestTestReceivers(t *testing.T) {
t.Run("assert timed out receiver returns 408 Request Timeout", func(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -266,7 +270,8 @@ func TestTestReceivers(t *testing.T) {
t.Run("assert multiple different errors returns 207 Multi Status", func(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -359,8 +364,9 @@ func TestTestReceivers(t *testing.T) {
func TestNotificationChannels(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, s := testinfra.StartGrafana(t, dir, path)

View File

@ -21,8 +21,9 @@ import (
func TestPrometheusRules(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -265,8 +266,9 @@ func TestPrometheusRules(t *testing.T) {
func TestPrometheusRulesPermissions(t *testing.T) {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)

View File

@ -22,8 +22,9 @@ import (
func TestAlertRulePermissions(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
DisableAnonymous: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
DisableAnonymous: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)
@ -306,10 +307,11 @@ func createRule(t *testing.T, grafanaListedAddr string, folder string, user, pas
func TestAlertRuleConflictingTitle(t *testing.T) {
// Setup Grafana and its Database
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{"ngalert"},
EnableQuota: true,
DisableAnonymous: true,
ViewersCanEdit: true,
DisableLegacyAlerting: true,
EnableUnifiedAlerting: true,
EnableQuota: true,
DisableAnonymous: true,
ViewersCanEdit: true,
})
grafanaListedAddr, store := testinfra.StartGrafana(t, dir, path)

View File

@ -194,6 +194,14 @@ func CreateGrafDir(t *testing.T, opts ...GrafanaOpts) (string, string) {
_, err = alertingSect.NewKey("max_attempts", "3")
require.NoError(t, err)
getOrCreateSection := func(name string) (*ini.Section, error) {
section, err := cfg.GetSection(name)
if err != nil {
return cfg.NewSection(name)
}
return section, err
}
for _, o := range opts {
if o.EnableCSP {
securitySect, err := cfg.NewSection("security")
@ -214,7 +222,7 @@ func CreateGrafDir(t *testing.T, opts ...GrafanaOpts) (string, string) {
require.NoError(t, err)
}
if o.NGAlertAlertmanagerConfigPollInterval != 0 {
ngalertingSection, err := cfg.NewSection("unified_alerting")
ngalertingSection, err := getOrCreateSection("unified_alerting")
require.NoError(t, err)
_, err = ngalertingSection.NewKey("alertmanager_config_poll_interval", o.NGAlertAlertmanagerConfigPollInterval.String())
require.NoError(t, err)
@ -247,6 +255,25 @@ func CreateGrafDir(t *testing.T, opts ...GrafanaOpts) (string, string) {
_, err = usersSection.NewKey("viewers_can_edit", "true")
require.NoError(t, err)
}
if o.DisableLegacyAlerting {
alertingSection, err := cfg.GetSection("alerting")
require.NoError(t, err)
_, err = alertingSection.NewKey("enabled", "false")
require.NoError(t, err)
}
if o.EnableUnifiedAlerting {
unifiedAlertingSection, err := getOrCreateSection("unified_alerting")
require.NoError(t, err)
_, err = unifiedAlertingSection.NewKey("enabled", "true")
require.NoError(t, err)
}
if len(o.UnifiedAlertingDisabledOrgs) > 0 {
unifiedAlertingSection, err := getOrCreateSection("unified_alerting")
require.NoError(t, err)
disableOrgStr := strings.Join(strings.Split(strings.Trim(fmt.Sprint(o.UnifiedAlertingDisabledOrgs), "[]"), " "), ",")
_, err = unifiedAlertingSection.NewKey("disabled_orgs", disableOrgStr)
require.NoError(t, err)
}
}
cfgPath := filepath.Join(cfgDir, "test.ini")
@ -270,4 +297,7 @@ type GrafanaOpts struct {
CatalogAppEnabled bool
ViewersCanEdit bool
PluginAdminEnabled bool
DisableLegacyAlerting bool
EnableUnifiedAlerting bool
UnifiedAlertingDisabledOrgs []int64
}

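The `UnifiedAlertingDisabledOrgs` branch above serialises the org IDs by round-tripping through `fmt.Sprint`; a small standalone sketch (not part of the commit) shows the value this produces for the `disabled_orgs` key:
package main
import (
	"fmt"
	"strings"
)
func main() {
	orgs := []int64{3, 5}
	// fmt.Sprint renders the slice as "[3 5]"; trimming the brackets and
	// re-joining with commas yields the value written to disabled_orgs.
	disableOrgStr := strings.Join(strings.Split(strings.Trim(fmt.Sprint(orgs), "[]"), " "), ",")
	fmt.Println(disableOrgStr) // prints "3,5"
}
For two organisations the generated config therefore contains `disabled_orgs = 3,5`, the comma-separated format the settings reader splits with `util.SplitString`.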
View File

@ -112,7 +112,7 @@ export class AlertRuleListUnconnected extends PureComponent<Props> {
</div>
</div>
<div className="page-action-bar__spacer" />
{config.featureToggles.ngalert && (
{config.unifiedAlertingEnabled && (
<LinkButton variant="primary" href="alerting/ng/new">
Add NG Alert
</LinkButton>

View File

@ -4,4 +4,4 @@ import AlertRuleList from './AlertRuleList';
// route between unified and "old" alerting pages based on feature flag
export default config.featureToggles.ngalert ? RuleList : AlertRuleList;
export default config.unifiedAlertingEnabled ? RuleList : AlertRuleList;

View File

@ -4,4 +4,4 @@ import { PanelAlertTabContent } from './unified/PanelAlertTabContent';
// route between unified and "old" alerting pages based on feature flag
export default config.featureToggles.ngalert ? PanelAlertTabContent : AlertTab;
export default config.unifiedAlertingEnabled ? PanelAlertTabContent : AlertTab;

View File

@ -4,4 +4,4 @@ import Receivers from './unified/Receivers';
// route between unified and "old" alerting pages based on feature flag
export default config.featureToggles.ngalert ? Receivers : NotificationsListPage;
export default config.unifiedAlertingEnabled ? Receivers : NotificationsListPage;

View File

@ -6,6 +6,7 @@ import React, { FC } from 'react';
import { NewRuleFromPanelButton } from './components/panel-alerts-tab/NewRuleFromPanelButton';
import { RulesTable } from './components/rules/RulesTable';
import { usePanelCombinedRules } from './hooks/usePanelCombinedRules';
import { selectors } from '@grafana/e2e-selectors';
interface Props {
dashboard: DashboardModel;
@ -52,7 +53,7 @@ export const PanelAlertTabContent: FC<Props> = ({ dashboard, panel }) => {
}
return (
<div className={styles.noRulesWrapper}>
<div aria-label={selectors.components.PanelAlertTabContent.content} className={styles.noRulesWrapper}>
{alert}
{!!dashboard.uid && (
<>

View File

@ -40,7 +40,7 @@ export const PanelEditorTabs: FC<PanelEditorTabsProps> = React.memo(({ panel, da
<div className={styles.wrapper}>
<TabsBar className={styles.tabBar}>
{tabs.map((tab) => {
if (config.featureToggles.ngalert && tab.id === PanelEditorTabId.Alert) {
if (config.unifiedAlertingEnabled && tab.id === PanelEditorTabId.Alert) {
return (
<PanelAlertTab
key={tab.id}

View File

@ -449,7 +449,7 @@ export class PanelChrome extends Component<Props, State> {
const { errorMessage, data } = this.state;
const { transparent } = panel;
let alertState = config.featureToggles.ngalert ? undefined : data.alertState?.state;
let alertState = config.unifiedAlertingEnabled ? undefined : data.alertState?.state;
const containerClassNames = classNames({
'panel-container': true,

View File

@ -186,7 +186,7 @@ export class PanelChromeAngularUnconnected extends PureComponent<Props, State> {
const { errorMessage, data } = this.state;
const { transparent } = panel;
let alertState = config.featureToggles.ngalert ? undefined : data.alertState?.state;
let alertState = config.unifiedAlertingEnabled ? undefined : data.alertState?.state;
const containerClassNames = classNames({
'panel-container': true,

View File

@ -26,9 +26,7 @@ jest.mock('@grafana/runtime', () => ({
...jest.requireActual('@grafana/runtime').config,
buildInfo: {},
panels: {},
featureToggles: {
ngalert: true,
},
unifiedAlertingEnabled: true,
},
}));

View File

@ -17,7 +17,7 @@ import { useFilteredGroups } from './useFilteredGroups';
export const AlertGroupsPanel = (props: PanelProps<AlertGroupPanelOptions>) => {
const dispatch = useDispatch();
const isAlertingEnabled = config.featureToggles.ngalert;
const isAlertingEnabled = config.unifiedAlertingEnabled;
const expandAll = props.options.expandAll;
const alertManagerSourceName = props.options.alertmanager;

View File

@ -223,4 +223,4 @@ const unifiedAlertList = new PanelPlugin<UnifiedAlertListOptions>(UnifiedAlertLi
});
});
export const plugin = config.featureToggles.ngalert ? unifiedAlertList : alertList;
export const plugin = config.unifiedAlertingEnabled ? unifiedAlertList : alertList;

View File

@ -377,7 +377,7 @@ export function getAppRoutes(): RouteDescriptor[] {
},
{
path: '/alerting/notifications',
roles: config.featureToggles.ngalert ? () => ['Editor', 'Admin'] : undefined,
roles: config.unifiedAlertingEnabled ? () => ['Editor', 'Admin'] : undefined,
component: SafeDynamicImport(
() => import(/* webpackChunkName: "NotificationsListPage" */ 'app/features/alerting/NotificationsIndex')
),