# my global config
global:
  scrape_interval: 10s # Scrape targets every 10 seconds (the Prometheus default is 1 minute).
  evaluation_interval: 10s # Evaluate rules every 10 seconds (the Prometheus default is 1 minute).
  # scrape_timeout is set to the global default (10s).

# Load rules from these files and evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "alert.yml"
  - "recording.yml"
  # - "second.rules"

alerting:
  alertmanagers:
    - scheme: http
      static_configs:
        - targets:
            - "alertmanager:9093"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node_exporter'
    static_configs:
      - targets: ['node_exporter:9100']

  - job_name: 'fake-data-gen'
    static_configs:
      - targets: ['fake-prometheus-data:9091']

  - job_name: 'grafana'
    static_configs:
      - targets: ['host.docker.internal:3000']

  - job_name: 'prometheus-random-data'
    static_configs:
      - targets: ['prometheus-random-data:8080']

  - job_name: 'mysql'
    static_configs:
      - targets: ['mysql-exporter:9104']

  # - job_name: 'grafana-test-datasource'
  #   metrics_path: /metrics/plugins/grafana-test-datasource
  #   static_configs:
  #     - targets: ['host.docker.internal:3000']
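
# The rule files listed under 'rule_files' above are separate files and are not
# shown here. As a rough sketch only (group names, metric names, and thresholds
# are assumptions, not this setup's actual rules), alert.yml and recording.yml
# could look like:
#
# # alert.yml -- example alerting rule: fire when a scraped target stops reporting
# groups:
#   - name: example-alerts
#     rules:
#       - alert: InstanceDown
#         expr: up == 0
#         for: 1m
#         labels:
#           severity: critical
#         annotations:
#           summary: "Instance {{ $labels.instance }} is down"
#
# # recording.yml -- example recording rule: precompute per-instance CPU utilisation
# groups:
#   - name: example-recordings
#     rules:
#       - record: instance:node_cpu_utilisation:rate5m
#         expr: 1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))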