[WIP] Add alerts on blackbox metrics for monitoring endpoints #803

Draft
prathamesh wants to merge 4 commits from deep-stack/stack-orchestrator:pm-endpoint-alerts into main
5 changed files with 137 additions and 13 deletions

View File

@@ -0,0 +1,115 @@
apiVersion: 1
groups:
  - orgId: 1
    name: blackbox
    folder: BlackboxAlerts
    interval: 30s
    rules:
      # Azimuth Gateway endpoint
      - uid: azimuth_gateway
        title: azimuth_gateway_endpoint_tracking
        condition: condition
        data:
          - refId: http_status_code
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              editorMode: code
              expr: probe_http_status_code{destination="azimuth_gateway"}
              instant: true
              intervalMs: 1000
              legendFormat: __auto
              maxDataPoints: 43200
              range: false
              refId: http_status_code
          - refId: condition
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: __expr__
            model:
              conditions:
                - evaluator:
                    params:
                      - 200
                      - 200
                    type: outside_range
                  operator:
                    type: and
                  query:
                    params:
                      - http_status_code
                  reducer:
                    params: []
                    type: last
                  type: query
              datasource:
                name: Expression
                type: __expr__
                uid: __expr__
              expression: ""
              hide: false
              refId: condition
              type: classic_conditions
        noDataState: Alerting
        execErrState: Alerting
        for: 5m
        annotations:
          summary: Probe failed for Azimuth gateway endpoint, http status {{ index $values "http_status_code" }}
        isPaused: false
      # Laconicd GQL endpoint
      - uid: laconicd_gql
        title: laconicd_gql_endpoint_tracking
        condition: condition
        data:
          - refId: http_status_code
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              editorMode: code
              expr: probe_http_status_code{destination="laconicd_gql"}
              instant: true
              intervalMs: 1000
              legendFormat: __auto
              maxDataPoints: 43200
              range: false
              refId: http_status_code
          - refId: condition
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: __expr__
            model:
              conditions:
                - evaluator:
                    params:
                      - 200
                      - 200
                    type: outside_range
                  operator:
                    type: and
                  query:
                    params:
                      - http_status_code
                  reducer:
                    params: []
                    type: last
                  type: query
              datasource:
                name: Expression
                type: __expr__
                uid: __expr__
              expression: ""
              hide: false
              refId: condition
              type: classic_conditions
        noDataState: Alerting
        execErrState: Alerting
        for: 5m
        annotations:
          summary: Probe failed for Laconicd GQL endpoint, http status {{ index $values "http_status_code" }}
        isPaused: false
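
For reference, covering an additional endpoint with these alerts only needs one more entry under the `rules:` list above; a minimal sketch, assuming a hypothetical target labelled `destination: 'my_service'` in the Prometheus blackbox scrape config (elided parts are identical to the two rules above):

```yml
      # Hypothetical extra rule (not part of this PR); the uid, title and the
      # destination label selector are the only fields that change
      - uid: my_service
        title: my_service_endpoint_tracking
        condition: condition
        data:
          - refId: http_status_code
            ...
            model:
              expr: probe_http_status_code{destination="my_service"}
              ...
          - refId: condition
            ...
        noDataState: Alerting
        execErrState: Alerting
        for: 5m
        annotations:
          summary: Probe failed for my_service endpoint, http status {{ index $values "http_status_code" }}
        isPaused: false
```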

View File

@@ -49,7 +49,7 @@
},
"gridPos": {
"h": 3,
"w": 3,
"w": 4,
"x": 0,
"y": 0
},

View File

@@ -24,9 +24,10 @@ scrape_configs:
params:
module: [http_2xx]
static_configs:
# Add URLs to be monitored below
- targets:
# - https://github.com
# Add URLs for targets to be monitored below
# - targets: [https://github.com]
# labels:
# destination: 'github'
relabel_configs:
- source_labels: [__address__]
regex: (.*)(:80)?
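
Filled in, the commented template in this scrape config might look like the sketch below (a hypothetical `github` target; the `destination` label is what the pre-configured blackbox alerts select on):

```yml
    static_configs:
      # Hypothetical example target, mirroring the commented template above
      - targets: [https://github.com]
        labels:
          destination: 'github'
```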

View File

@@ -123,8 +123,9 @@ laconic-so --stack monitoring deploy create --spec-file monitoring-spec.yml --de
```yml
...
- job_name: laconicd
static_configs:
- targets: ['example-host:1317']
...
static_configs:
- targets: ['example-host:1317']
...
```

View File

@@ -44,13 +44,18 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
- job_name: 'blackbox'
...
static_configs:
- targets:
- <AZIMUTH_GATEWAY_GQL_ENDPOINT>
- <LACONICD_GQL_ENDPOINT>
- targets: [<AZIMUTH_GATEWAY_GQL_ENDPOINT>]
labels:
# Add destination label for pre-configured alerts
destination: 'azimuth_gateway'
- targets: [<LACONICD_GQL_ENDPOINT>]
labels:
destination: 'laconicd_gql'
...
- job_name: laconicd
static_configs:
- targets: ['LACONICD_REST_HOST:LACONICD_REST_PORT']
...
static_configs:
- targets: ['LACONICD_REST_HOST:LACONICD_REST_PORT']
...
- job_name: azimuth
scrape_interval: 10s
@@ -119,10 +124,12 @@ Add scrape config as done above for any additional watcher to add it to the Watc
### Grafana alerts config
Place the pre-configured watcher alerts rules in Grafana provisioning directory:
Place the pre-configured watcher and blackbox endpoint alerts rules in Grafana provisioning directory:
```bash
cp monitoring-watchers-deployment/config/monitoring/watcher-alert-rules.yml monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/
cp monitoring-watchers-deployment/config/monitoring/blackbox-alert-rules.yml monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/
```
Update the alerting contact points config (`monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/contactpoints.yml`) with the desired contact points
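
As a rough sketch of that file (assuming a Slack webhook contact point; the `SlackNotifier` name must match the receiver referenced in the notification policies below, and the uid and webhook URL placeholder are illustrative):

```yml
apiVersion: 1
contactPoints:
  - orgId: 1
    name: SlackNotifier
    receivers:
      - uid: slack_receiver          # illustrative uid
        type: slack
        settings:
          url: <SLACK_WEBHOOK_URL>   # Slack incoming-webhook URL
```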
@@ -135,7 +142,7 @@ Add corresponding routes to the notification policies config (`monitoring-watche
- receiver: SlackNotifier
object_matchers:
# Add matchers below
- ['grafana_folder', '=', 'WatcherAlerts']
- ['grafana_folder', '=~', 'WatcherAlerts|BlackboxAlerts']
```
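
For context, the updated matcher sits inside the notification policies file roughly as follows (a sketch assuming Grafana's default `grafana-default-email` receiver as the root policy):

```yml
apiVersion: 1
policies:
  - orgId: 1
    receiver: grafana-default-email
    routes:
      - receiver: SlackNotifier
        object_matchers:
          - ['grafana_folder', '=~', 'WatcherAlerts|BlackboxAlerts']
```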
### Env