From b72e69e8efb8efc8b0c152c2d101611d1278438f Mon Sep 17 00:00:00 2001
From: Prathamesh Musale
Date: Thu, 14 Dec 2023 15:22:57 +0530
Subject: [PATCH] Separate out preconfigured watcher dashboards and add instructions

---
 .../data/compose/docker-compose-grafana.yml    |   6 +
 .../compose/docker-compose-prom-server.yml     |   6 +
 .../azimuth/azimuth-watcher-dashboard.json     |   0
 .../azimuth/censures-watcher-dashboard.json    |   0
 .../azimuth/claims-watcher-dashboard.json      |   0
 ...tional-star-release-watcher-dashboard.json  |   0
 ...ted-sending-watcher-watcher-dashboard.json  |   0
 .../azimuth/ecliptic-watcher-dashboard.json    |   0
 ...linear-star-release-watcher-dashboard.json  |   0
 .../azimuth/polls-watcher-dashboard.json       |   0
 .../merkl-sushiswap-watcher-dashboard.json     |   0
 .../sushi/sushiswap-watcher-dashboard.json     |   0
 .../monitoring/prometheus/prometheus.yml       |  41 ------
 .../data/stacks/monitoring/README.md           |  93 +++++++++++++
 .../stacks/monitoring/monitoring-watchers.md   | 124 ++++++++++++++++++
 15 files changed, 229 insertions(+), 41 deletions(-)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/azimuth-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/censures-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/claims-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/conditional-star-release-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/delegated-sending-watcher-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/ecliptic-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/linear-star-release-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/azimuth/polls-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/sushi/merkl-sushiswap-watcher-dashboard.json (100%)
 rename stack_orchestrator/data/config/monitoring/grafana/{dashboards => watcher-dashboards}/sushi/sushiswap-watcher-dashboard.json (100%)
 create mode 100644 stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md

diff --git a/stack_orchestrator/data/compose/docker-compose-grafana.yml b/stack_orchestrator/data/compose/docker-compose-grafana.yml
index 54fcd0d6..4aa8d1e1 100644
--- a/stack_orchestrator/data/compose/docker-compose-grafana.yml
+++ b/stack_orchestrator/data/compose/docker-compose-grafana.yml
@@ -10,6 +10,12 @@ services:
       - grafana_storage:/var/lib/grafana
     ports:
       - "3000"
+    healthcheck:
+      test: ["CMD", "nc", "-vz", "localhost", "3000"]
+      interval: 30s
+      timeout: 5s
+      retries: 10
+      start_period: 3s

 volumes:
   grafana_storage:
diff --git a/stack_orchestrator/data/compose/docker-compose-prom-server.yml b/stack_orchestrator/data/compose/docker-compose-prom-server.yml
index 3d46cac9..26a61e88 100644
--- a/stack_orchestrator/data/compose/docker-compose-prom-server.yml
+++ b/stack_orchestrator/data/compose/docker-compose-prom-server.yml
@@ -9,6 +9,12 @@ services:
       - prometheus_data:/prometheus
     ports:
       - "9090"
+    healthcheck:
+      test: ["CMD", "nc", "-vz", "localhost", "9090"]
+      interval: 30s
+      timeout: 5s
+      retries: 10
+      start_period: 3s
     extra_hosts:
       - "host.docker.internal:host-gateway"
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/azimuth-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/azimuth-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/azimuth-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/azimuth-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/censures-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/censures-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/censures-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/censures-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/claims-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/claims-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/claims-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/claims-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/conditional-star-release-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/conditional-star-release-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/conditional-star-release-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/conditional-star-release-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/delegated-sending-watcher-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/delegated-sending-watcher-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/delegated-sending-watcher-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/delegated-sending-watcher-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/ecliptic-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/ecliptic-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/ecliptic-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/ecliptic-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/linear-star-release-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/linear-star-release-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/linear-star-release-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/linear-star-release-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/polls-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/polls-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/azimuth/polls-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/azimuth/polls-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/sushi/merkl-sushiswap-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/sushi/merkl-sushiswap-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/sushi/merkl-sushiswap-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/sushi/merkl-sushiswap-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/sushi/sushiswap-watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/sushi/sushiswap-watcher-dashboard.json
similarity index 100%
rename from stack_orchestrator/data/config/monitoring/grafana/dashboards/sushi/sushiswap-watcher-dashboard.json
rename to stack_orchestrator/data/config/monitoring/grafana/watcher-dashboards/sushi/sushiswap-watcher-dashboard.json
diff --git a/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml b/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml
index 000b92bb..e8e7b8b8 100644
--- a/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml
+++ b/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml
@@ -10,44 +10,3 @@ scrape_configs:
   - job_name: prometheus
     static_configs:
       - targets: ['localhost:9090']
-
-  - job_name: azimuth
-    metrics_path: /metrics
-    scheme: http
-    static_configs:
-      - targets: ['host.docker.internal:9000']
-        labels:
-          instance: 'azimuth'
-      - targets: ['host.docker.internal:9002']
-        labels:
-          instance: 'censures'
-      - targets: ['host.docker.internal:9004']
-        labels:
-          instance: 'claims'
-      - targets: ['host.docker.internal:9006']
-        labels:
-          instance: 'conditional_star_release'
-      - targets: ['host.docker.internal:9008']
-        labels:
-          instance: 'delegated_sending_watcher'
-      - targets: ['host.docker.internal:9010']
-        labels:
-          instance: 'ecliptic'
-      - targets: ['host.docker.internal:9012']
-        labels:
-          instance: 'linear_star_release'
-      - targets: ['host.docker.internal:9014']
-        labels:
-          instance: 'polls'
-
-  - job_name: sushi
-    metrics_path: /metrics
-    scheme: http
-    static_configs:
-      # TODO: Replace address programmatically
-      - targets: ['host.docker.internal:9016']
-        labels:
-          instance: 'sushiswap'
-      - targets: ['host.docker.internal:9018']
-        labels:
-          instance: 'merkl_sushiswap'
diff --git a/stack_orchestrator/data/stacks/monitoring/README.md b/stack_orchestrator/data/stacks/monitoring/README.md
index 35325515..4e721126 100644
--- a/stack_orchestrator/data/stacks/monitoring/README.md
+++ b/stack_orchestrator/data/stacks/monitoring/README.md
@@ -1 +1,94 @@
 # monitoring
+
+* Instructions to set up and run a Prometheus server and a Grafana dashboard
+* See [monitoring-watchers.md](./monitoring-watchers.md) for example usage of the stack with pre-configured dashboards for watchers
+
+## Create a deployment
+
+First, create a spec file for the deployment, which will map the stack's ports and volumes to the host:
+
+```bash
+laconic-so --stack monitoring deploy init --output monitoring-spec.yml
+```
+
+### Ports
+
+Edit `network` in the spec file to map container ports to the same ports on the host:
+
+```
+...
+network:
+  ports:
+    prometheus:
+      - '9090:9090'
+    grafana:
+      - '3000:3000'
+...
+```
+
+### Data volumes
+
+Container data volumes are bind-mounted to specified paths in the host filesystem.
+The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
+
+---
+
+Once you've made any needed changes to the spec file, create a deployment from it:
+
+```bash
+laconic-so --stack monitoring deploy create --spec-file monitoring-spec.yml --deployment-dir monitoring-deployment
+```
+
+## Configure
+
+### Prometheus Config
+
+Add the desired scrape configs to the Prometheus config file (`monitoring-deployment/config/monitoring/prometheus/prometheus.yml`) in the deployment folder; for example:
+
+  ```yml
+    ...
+    - job_name: <job_name>
+      metrics_path: /metrics/path
+      scheme: http
+      static_configs:
+        - targets: ['<host>:<port>']
+  ```
+
+Note: Use `host.docker.internal` as the host to access ports on the host machine.
+
+### Grafana Config
+
+Place the dashboard config files (JSON) in the Grafana dashboards config directory (`monitoring-deployment/config/monitoring/grafana/dashboards`) in the deployment folder.
+
+## Start the stack
+
+Start the deployment:
+
+```bash
+laconic-so deployment --dir monitoring-deployment start
+```
+
+* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy`
+
+* Grafana should now be visible at http://localhost:3000 with the configured dashboards
+
+## Clean up
+
+To stop the monitoring services running in the background while preserving data:
+
+```bash
+# Only stop the docker containers
+laconic-so deployment --dir monitoring-deployment stop
+
+# Run 'start' to restart the deployment
+```
+
+To stop the monitoring services and also delete data:
+
+```bash
+# Stop the docker containers
+laconic-so deployment --dir monitoring-deployment stop --delete-volumes
+
+# Remove deployment directory (deployment will have to be recreated for a re-run)
+rm -rf monitoring-deployment
+```
diff --git a/stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md b/stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md
new file mode 100644
index 00000000..f49bdcda
--- /dev/null
+++ b/stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md
@@ -0,0 +1,124 @@
+# Monitoring Watchers
+
+Instructions to set up and run the monitoring stack with pre-configured watcher dashboards
+
+## Create a deployment
+
+First, create a spec file for the deployment, which will map the stack's ports and volumes to the host:
+
+```bash
+laconic-so --stack monitoring deploy init --output monitoring-watchers-spec.yml
+```
+
+### Ports
+
+Edit `network` in the spec file to map container ports to the same ports on the host:
+
+```
+...
+network:
+  ports:
+    prometheus:
+      - '9090:9090'
+    grafana:
+      - '3000:3000'
+...
+```
+
+---
+
+Once you've made any needed changes to the spec file, create a deployment from it:
+
+```bash
+laconic-so --stack monitoring deploy create --spec-file monitoring-watchers-spec.yml --deployment-dir monitoring-watchers-deployment
+```
+
+## Configure
+
+### Prometheus Config
+
+Add the following scrape configs to the Prometheus config file (`monitoring-watchers-deployment/config/monitoring/prometheus/prometheus.yml`) in the deployment folder:
+
+  ```yml
+    ...
+    - job_name: azimuth
+      metrics_path: /metrics
+      scheme: http
+      static_configs:
+        - targets: ['AZIMUTH_WATCHER_HOST:AZIMUTH_WATCHER_PORT']
+          labels:
+            instance: 'azimuth'
+        - targets: ['CENSURES_WATCHER_HOST:CENSURES_WATCHER_PORT']
+          labels:
+            instance: 'censures'
+        - targets: ['CLAIMS_WATCHER_HOST:CLAIMS_WATCHER_PORT']
+          labels:
+            instance: 'claims'
+        - targets: ['CONDITIONAL_STAR_RELEASE_WATCHER_HOST:CONDITIONAL_STAR_RELEASE_WATCHER_PORT']
+          labels:
+            instance: 'conditional_star_release'
+        - targets: ['DELEGATED_SENDING_WATCHER_HOST:DELEGATED_SENDING_WATCHER_PORT']
+          labels:
+            instance: 'delegated_sending_watcher'
+        - targets: ['ECLIPTIC_WATCHER_HOST:ECLIPTIC_WATCHER_PORT']
+          labels:
+            instance: 'ecliptic'
+        - targets: ['LINEAR_STAR_WATCHER_HOST:LINEAR_STAR_WATCHER_PORT']
+          labels:
+            instance: 'linear_star_release'
+        - targets: ['POLLS_WATCHER_HOST:POLLS_WATCHER_PORT']
+          labels:
+            instance: 'polls'
+
+    - job_name: sushi
+      metrics_path: /metrics
+      scheme: http
+      static_configs:
+        - targets: ['SUSHISWAP_WATCHER_HOST:SUSHISWAP_WATCHER_PORT']
+          labels:
+            instance: 'sushiswap'
+        - targets: ['MERKLE_SUSHISWAP_WATCHER_HOST:MERKLE_SUSHISWAP_WATCHER_PORT']
+          labels:
+            instance: 'merkl_sushiswap'
+  ```
+
+### Grafana Config
+
+In the deployment folder, copy the pre-configured watcher dashboard JSON files over to the Grafana dashboards config directory:
+
+```bash
+cp -r monitoring-watchers-deployment/config/monitoring/grafana/watcher-dashboards/* monitoring-watchers-deployment/config/monitoring/grafana/dashboards/
+```
+
+## Start the stack
+
+Start the deployment:
+
+```bash
+laconic-so deployment --dir monitoring-watchers-deployment start
+```
+
+* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy`
+
+* Grafana should now be visible at http://localhost:3000 with the configured dashboards
+
+## Clean up
+
+To stop the monitoring services running in the background while preserving data:
+
+```bash
+# Only stop the docker containers
+laconic-so deployment --dir monitoring-watchers-deployment stop
+
+# Run 'start' to restart the deployment
+```
+
+To stop the monitoring services and also delete data:
+
+```bash
+# Stop the docker containers
+laconic-so deployment --dir monitoring-watchers-deployment stop --delete-volumes
+
+# Remove deployment directory (deployment will have to be recreated for a re-run)
+rm -rf monitoring-watchers-deployment
+```
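
Once either deployment is up, one way to confirm that Prometheus has actually picked up the scrape targets configured above is to query its HTTP API from the host. This is a minimal sketch, not part of the patch itself; it assumes the `9090` port mapping shown in the spec files and that `curl` and `jq` are available on the host:

```bash
# List the active scrape targets and their health as seen by the Prometheus server
curl -s http://localhost:9090/api/v1/targets | \
  jq '.data.activeTargets[] | {job: .labels.job, instance: .labels.instance, health: .health}'
```

A target reported with `"health": "down"` usually means the corresponding watcher metrics endpoint is not reachable at the host/port given in `prometheus.yml`.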