# grafana/conf/provisioning/alerting/sample.yaml
# # config file version
apiVersion: 1
# # List of rule groups to import or update
# groups:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string, required> name of the rule group
#     name: my_rule_group
#     # <string, required> name of the folder the rule group will be stored in
#     folder: my_first_folder
#     # <duration, required> interval of the rule group evaluation
#     interval: 60s
#     # <list, required> list of rules that are part of the rule group
#     rules:
#       # <string, required> unique identifier for the rule
#       - uid: my_id_1
#         # <string, required> title of the rule, will be displayed in the UI
#         title: my_first_rule
#         # <string, required> query used for the condition
#         condition: A
#         # <list, required> list of query objects that should be executed on each
#         # evaluation - should be obtained via the API
#         # (a fuller commented example follows this rule group section)
#         data:
#           - refId: A
#             datasourceUid: "__expr__"
#             model:
#               conditions:
#                 - evaluator:
#                     params:
#                       - 3
#                     type: gt
#                   operator:
#                     type: and
#                   query:
#                     params:
#                       - A
#                   reducer:
#                     type: last
#                   type: query
#               datasource:
#                 type: __expr__
#                 uid: "__expr__"
#               expression: 1==0
#               intervalMs: 1000
#               maxDataPoints: 43200
#               refId: A
#               type: math
#         # <string> UID of a dashboard that the alert rule should be linked to
#         dashboardUid: my_dashboard
#         # <int> ID of the panel that the alert rule should be linked to
#         panelId: 123
#         # <string> state of the alert rule when no data is returned
#         # possible values: "NoData", "Alerting", "OK", default = NoData
#         noDataState: Alerting
#         # <string> state of the alert rule when the query execution
#         # fails - possible values: "Error", "Alerting", "OK"
#         # default = Alerting
#         execErrState: Alerting
#         # <duration, required> how long the alert condition should be breached before Firing.
#         # Before this time has elapsed, the alert is considered to be Pending
#         for: 60s
#         # <map<string, string>> map of strings to attach arbitrary custom data
#         annotations:
#           some_key: some_value
#         # <map<string, string>> map of strings to filter and
#         # route alerts
#         labels:
#           team: sre_team_1
#         # <bool> whether the rule should start paused, default = false
#         isPaused: false
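#       # A sketch (commented out) of a more typical rule entry for the rules list
#       # above: refId A queries a real datasource and refId B evaluates a
#       # server-side math expression that is used as the condition. The datasource
#       # UID and the Prometheus query are placeholders for illustration only.
#       - uid: my_id_2
#         title: high_error_rate
#         condition: B
#         data:
#           - refId: A
#             relativeTimeRange:
#               from: 600
#               to: 0
#             # placeholder - use the UID of your own datasource
#             datasourceUid: my_prometheus_uid
#             model:
#               refId: A
#               expr: 'sum(rate(http_requests_total{status=~"5.."}[5m]))'
#           - refId: B
#             datasourceUid: "__expr__"
#             model:
#               refId: B
#               type: math
#               expression: '$A > 10'
#         for: 5m
#         labels:
#           severity: warning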
# # List of alert rule UIDs that should be deleted
# deleteRules:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string, required> unique identifier for the rule
#     uid: my_id_1
# # List of contact points to import or update
# contactPoints:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string, required> name of the contact point
#     name: cp_1
#     # <list, required> list of receivers for this contact point
#     # (a second receiver is sketched below)
#     receivers:
#       # <string, required> unique identifier for the receiver
#       - uid: first_uid
#         # <string, required> type of the receiver
#         type: prometheus-alertmanager
#         # <object, required> settings for the specific receiver type
#         settings:
#           url: http://test:9000
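#       # A sketch (commented out) of an additional receiver for the same contact
#       # point, using the email integration; the address is a placeholder.
#       - uid: second_uid
#         type: email
#         settings:
#           addresses: oncall@example.com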
# # List of receivers that should be deleted
# deleteContactPoints:
#   - orgId: 1
#     uid: first_uid
# # List of notification policies to import or update
# policies:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string> name of the receiver that should be used for this route
#     receiver: grafana-default-email
#     # <list<string>> The labels by which incoming alerts are grouped together. For example,
#     # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
#     # be batched into a single group.
#     #
#     # To aggregate by all possible labels, use the special value '...' as
#     # the sole label name, for example:
#     # group_by: ['...']
#     # This effectively disables aggregation entirely, passing through all
#     # alerts as-is. This is unlikely to be what you want, unless you have
#     # a very low alert volume or your upstream notification system performs
#     # its own grouping.
#     group_by:
#       - grafana_folder
#       - alertname
#     # <list> a list of matchers that an alert has to fulfill to match the node
#     matchers:
#       - alertname = Watchdog
#       - severity =~ "warning|critical"
#     # <list> Times when the route should be muted. These must match the name of a
#     # mute time interval.
#     # Additionally, the root node cannot have any mute times.
#     # When a route is muted it will not send any notifications, but
#     # otherwise acts normally (including ending the route-matching process
#     # if the `continue` option is not set)
#     mute_time_intervals:
#       - abc
#     # <duration> How long to initially wait to send a notification for a group
#     # of alerts. Allows time to collect more initial alerts for the same group.
#     # (Usually ~0s to a few minutes), default = 30s
#     group_wait: 30s
#     # <duration> How long to wait before sending a notification about new alerts that
#     # are added to a group of alerts for which an initial notification has
#     # already been sent. (Usually ~5m or more), default = 5m
#     group_interval: 5m
#     # <duration> How long to wait before sending a notification again if it has already
#     # been sent successfully for an alert. (Usually ~3h or more), default = 4h
#     repeat_interval: 4h
#     # <list> Zero or more child routes (a child route is sketched below)
#     routes:
#       # ...
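#       # A sketch (commented out) of a child route for the routes list above:
#       # alerts with team=db go to a separate contact point. The contact point
#       # name "db-oncall" is a placeholder for illustration only.
#       - receiver: db-oncall
#         matchers:
#           - team = db
#         # stop evaluating sibling routes once this route matches (default)
#         continue: false
#         group_wait: 1m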
# # List of orgIds that should be reset to the default policy
# resetPolicies:
#   - 1
# # List of templates to import or update
# templates:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string, required> name of the template, must be unique
#     name: my_first_template
#     # <string, required> content of the template (a templated entry is sketched below)
#     template: Alerting with a custom text template
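#   # A sketch (commented out) of a second template entry that uses Grafana's Go
#   # templating syntax; the template name and body are placeholders.
#   - orgId: 1
#     name: my_second_template
#     template: |
#       {{ define "my_second_template" }}
#         [{{ .Status }}] {{ len .Alerts }} alert(s) in group {{ .GroupLabels.alertname }}
#       {{ end }}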
# # List of templates that should be deleted
# deleteTemplates:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string, required> name of the template, must be unique
#     name: my_first_template
# # List of mute time intervals to import or update
# muteTimes:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string, required> name of the mute time interval, must be unique;
#     # it is referenced by this name from a policy's mute_time_intervals (see below)
#     name: mti_1
#     # <list> time intervals that should trigger the muting
#     # refer to https://prometheus.io/docs/alerting/latest/configuration/#time_interval-0
#     time_intervals:
#       - times:
#           - start_time: '06:00'
#             end_time: '23:59'
#         weekdays: ['monday:wednesday', 'saturday', 'sunday']
#         months: ['1:3', 'may:august', 'december']
#         years: ['2020:2022', '2030']
#         days_of_month: ['1:5', '-3:-1']
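#   # A sketch of how the mute timing above takes effect: reference it by name from
#   # a notification policy route, for example:
#   #   mute_time_intervals:
#   #     - mti_1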
# # List of mute time intervals that should be deleted
# deleteMuteTimes:
#   # <int> organization ID, default = 1
#   - orgId: 1
#     # <string, required> name of the mute time interval, must be unique
#     name: mti_1