diff --git a/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml b/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml
index b5e8d22c..4117c679 100644
--- a/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml
+++ b/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml
@@ -9,10 +9,6 @@ on:
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '2 14 * * *'
-# Needed until we can incorporate docker startup into the executor container
-env:
- DOCKER_HOST: unix:///var/run/dind.sock
-
jobs:
test:
diff --git a/.gitea/workflows/fixturenet-eth-plugeth-test.yml b/.gitea/workflows/fixturenet-eth-plugeth-test.yml
index f9db5e86..1ac20e30 100644
--- a/.gitea/workflows/fixturenet-eth-plugeth-test.yml
+++ b/.gitea/workflows/fixturenet-eth-plugeth-test.yml
@@ -9,10 +9,6 @@ on:
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '2 14 * * *'
-# Needed until we can incorporate docker startup into the executor container
-env:
- DOCKER_HOST: unix:///var/run/dind.sock
-
jobs:
test:
@@ -41,10 +37,6 @@ jobs:
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- - name: Start dockerd # Also needed until we can incorporate into the executor
- run: |
- dockerd -H $DOCKER_HOST --userland-proxy=false &
- sleep 5
- name: "Run fixturenet-eth tests"
run: ./tests/fixturenet-eth-plugeth/run-test.sh
- name: Notify Vulcanize Slack on CI failure
diff --git a/.gitea/workflows/fixturenet-eth-test.yml b/.gitea/workflows/fixturenet-eth-test.yml
index 671184a9..4d0f3503 100644
--- a/.gitea/workflows/fixturenet-eth-test.yml
+++ b/.gitea/workflows/fixturenet-eth-test.yml
@@ -7,10 +7,6 @@ on:
- '!**'
- '.gitea/workflows/triggers/fixturenet-eth-test'
-# Needed until we can incorporate docker startup into the executor container
-env:
- DOCKER_HOST: unix:///var/run/dind.sock
-
jobs:
test:
@@ -39,10 +35,6 @@ jobs:
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- - name: Start dockerd # Also needed until we can incorporate into the executor
- run: |
- dockerd -H $DOCKER_HOST --userland-proxy=false &
- sleep 5
- name: "Run fixturenet-eth tests"
run: ./tests/fixturenet-eth/run-test.sh
- name: Notify Vulcanize Slack on CI failure
diff --git a/.gitea/workflows/test-deploy.yml b/.gitea/workflows/test-deploy.yml
index 426b629b..ca32b876 100644
--- a/.gitea/workflows/test-deploy.yml
+++ b/.gitea/workflows/test-deploy.yml
@@ -10,9 +10,6 @@ on:
paths-ignore:
- '.gitea/workflows/triggers/*'
-# Needed until we can incorporate docker startup into the executor container
-env:
- DOCKER_HOST: unix:///var/run/dind.sock
jobs:
test:
@@ -41,10 +38,6 @@ jobs:
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- - name: Start dockerd # Also needed until we can incorporate into the executor
- run: |
- dockerd -H $DOCKER_HOST --userland-proxy=false &
- sleep 5
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh
- name: Notify Vulcanize Slack on CI failure
diff --git a/.gitea/workflows/test-external-stack.yml b/.gitea/workflows/test-external-stack.yml
new file mode 100644
index 00000000..1d6794c5
--- /dev/null
+++ b/.gitea/workflows/test-external-stack.yml
@@ -0,0 +1,58 @@
+name: External Stack Test
+
+on:
+ push:
+ branches: '*'
+ paths:
+ - '!**'
+ - '.gitea/workflows/triggers/test-external-stack'
+ - '.gitea/workflows/test-external-stack.yml'
+ - 'tests/external-stack/run-test.sh'
+ schedule: # Note: coordinate with other tests to not overload runners at the same time of day
+ - cron: '8 19 * * *'
+
+jobs:
+ test:
+ name: "Run external stack test suite"
+ runs-on: ubuntu-latest
+ steps:
+ - name: "Clone project repository"
+ uses: actions/checkout@v3
+ # At present the stock setup-python action fails on Linux/aarch64
+      # Conditional steps below workaround this by using deadsnakes for that case only
+ - name: "Install Python for ARM on Linux"
+ if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
+ uses: deadsnakes/action@v3.0.1
+ with:
+ python-version: '3.8'
+ - name: "Install Python cases other than ARM on Linux"
+ if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.8'
+ - name: "Print Python version"
+ run: python3 --version
+ - name: "Install shiv"
+ run: pip install shiv
+ - name: "Generate build version file"
+ run: ./scripts/create_build_tag_file.sh
+ - name: "Build local shiv package"
+ run: ./scripts/build_shiv_package.sh
+ - name: "Run external stack tests"
+ run: ./tests/external-stack/run-test.sh
+ - name: Notify Vulcanize Slack on CI failure
+ if: ${{ always() && github.ref_name == 'main' }}
+ uses: ravsamhq/notify-slack-action@v2
+ with:
+ status: ${{ job.status }}
+ notify_when: 'failure'
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+ - name: Notify DeepStack Slack on CI failure
+ if: ${{ always() && github.ref_name == 'main' }}
+ uses: ravsamhq/notify-slack-action@v2
+ with:
+ status: ${{ job.status }}
+ notify_when: 'failure'
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
diff --git a/.gitea/workflows/test-webapp.yml b/.gitea/workflows/test-webapp.yml
index 708c6b3d..65c0c043 100644
--- a/.gitea/workflows/test-webapp.yml
+++ b/.gitea/workflows/test-webapp.yml
@@ -10,10 +10,6 @@ on:
paths-ignore:
- '.gitea/workflows/triggers/*'
-# Needed until we can incorporate docker startup into the executor container
-env:
- DOCKER_HOST: unix:///var/run/dind.sock
-
jobs:
test:
name: "Run webapp test suite"
@@ -43,10 +39,6 @@ jobs:
run: ./scripts/build_shiv_package.sh
- name: "Install wget" # 20240109 - Only needed until the executors are updated.
run: apt update && apt install -y wget
- - name: Start dockerd # Also needed until we can incorporate into the executor
- run: |
- dockerd -H $DOCKER_HOST --userland-proxy=false &
- sleep 5
- name: "Run webapp tests"
run: ./tests/webapp-test/run-webapp-test.sh
- name: Notify Vulcanize Slack on CI failure
diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml
index f017dc49..b92dfae1 100644
--- a/.gitea/workflows/test.yml
+++ b/.gitea/workflows/test.yml
@@ -10,9 +10,6 @@ on:
paths-ignore:
- '.gitea/workflows/triggers/*'
-# Needed until we can incorporate docker startup into the executor container
-env:
- DOCKER_HOST: unix:///var/run/dind.sock
jobs:
test:
@@ -41,10 +38,6 @@ jobs:
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- - name: Start dockerd # Also needed until we can incorporate into the executor
- run: |
- dockerd -H $DOCKER_HOST --userland-proxy=false &
- sleep 5
- name: "Run smoke tests"
run: ./tests/smoke-test/run-smoke-test.sh
- name: Notify Vulcanize Slack on CI failure
diff --git a/.gitea/workflows/triggers/fixturenet-laconicd-test b/.gitea/workflows/triggers/fixturenet-laconicd-test
index 10db7fd7..f377ae09 100644
--- a/.gitea/workflows/triggers/fixturenet-laconicd-test
+++ b/.gitea/workflows/triggers/fixturenet-laconicd-test
@@ -4,3 +4,4 @@ Trigger
Trigger
Trigger
Trigger
+Trigger
diff --git a/.gitea/workflows/triggers/test-external-stack b/.gitea/workflows/triggers/test-external-stack
new file mode 100644
index 00000000..a92eb34f
--- /dev/null
+++ b/.gitea/workflows/triggers/test-external-stack
@@ -0,0 +1,2 @@
+Change this file to trigger running the external-stack CI job
+trigger
diff --git a/setup.py b/setup.py
index 773451f5..ace0d536 100644
--- a/setup.py
+++ b/setup.py
@@ -4,9 +4,11 @@ with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
with open("requirements.txt", "r", encoding="utf-8") as fh:
requirements = fh.read()
+with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
+ version = fh.readlines()[-1].strip(" \n")
setup(
name='laconic-stack-orchestrator',
- version='1.0.12',
+ version=version,
author='Cerc',
author_email='info@cerc.io',
license='GNU Affero General Public License',
diff --git a/stack_orchestrator/build/build_containers.py b/stack_orchestrator/build/build_containers.py
index 71debf09..2b78306b 100644
--- a/stack_orchestrator/build/build_containers.py
+++ b/stack_orchestrator/build/build_containers.py
@@ -71,7 +71,7 @@ def process_container(build_context: BuildContext) -> bool:
# Check if this is in an external stack
if stack_is_external(build_context.stack):
- container_parent_dir = Path(build_context.stack).joinpath("container-build")
+ container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
# Now check if the container exists in the external stack.
diff --git a/stack_orchestrator/build/build_util.py b/stack_orchestrator/build/build_util.py
index 7eb89ba9..15be1f9b 100644
--- a/stack_orchestrator/build/build_util.py
+++ b/stack_orchestrator/build/build_util.py
@@ -21,11 +21,6 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit
def get_containers_in_scope(stack: str):
- # See: https://stackoverflow.com/a/20885799/1701505
- from stack_orchestrator import data
- with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
- all_containers = container_list_file.read().splitlines()
-
containers_in_scope = []
if stack:
stack_config = get_parsed_stack_config(stack)
@@ -33,11 +28,14 @@ def get_containers_in_scope(stack: str):
warn_exit(f"stack {stack} does not define any containers")
containers_in_scope = stack_config['containers']
else:
- containers_in_scope = all_containers
+ # See: https://stackoverflow.com/a/20885799/1701505
+ from stack_orchestrator import data
+ with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
+ containers_in_scope = container_list_file.read().splitlines()
if opts.o.verbose:
print(f'Containers: {containers_in_scope}')
if stack:
print(f"Stack: {stack}")
- return containers_in_scope
\ No newline at end of file
+ return containers_in_scope
diff --git a/stack_orchestrator/data/compose/docker-compose-grafana.yml b/stack_orchestrator/data/compose/docker-compose-grafana.yml
index d559b246..504c8d8d 100644
--- a/stack_orchestrator/data/compose/docker-compose-grafana.yml
+++ b/stack_orchestrator/data/compose/docker-compose-grafana.yml
@@ -6,12 +6,20 @@ services:
restart: always
environment:
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL}
+ CERC_GRAFANA_ALERTS_SUBGRAPH_IDS: ${CERC_GRAFANA_ALERTS_SUBGRAPH_IDS}
volumes:
- ../config/monitoring/grafana/provisioning:/etc/grafana/provisioning
- ../config/monitoring/grafana/dashboards:/etc/grafana/dashboards
+ - ../config/monitoring/update-grafana-alerts-config.sh:/update-grafana-alerts-config.sh
- grafana_storage:/var/lib/grafana
+ user: root
+ entrypoint: ["bash", "-c"]
+ command: |
+ "/update-grafana-alerts-config.sh && /run.sh"
ports:
- "3000"
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3000"]
interval: 30s
diff --git a/stack_orchestrator/data/compose/docker-compose-graph-node.yml b/stack_orchestrator/data/compose/docker-compose-graph-node.yml
index e35ff494..023fe03b 100644
--- a/stack_orchestrator/data/compose/docker-compose-graph-node.yml
+++ b/stack_orchestrator/data/compose/docker-compose-graph-node.yml
@@ -16,8 +16,13 @@ services:
postgres_pass: password
postgres_db: graph-node
ethereum: ${ETH_NETWORKS:-lotus-fixturenet:http://lotus-node-1:1234/rpc/v1}
+      # Env variables reference: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
GRAPH_LOG: debug
ETHEREUM_REORG_THRESHOLD: 3
+ GRAPH_ETHEREUM_JSON_RPC_TIMEOUT: ${GRAPH_ETHEREUM_JSON_RPC_TIMEOUT:-180}
+ GRAPH_ETHEREUM_REQUEST_RETRIES: ${GRAPH_ETHEREUM_REQUEST_RETRIES:-10}
+ GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE: ${GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE:-2000}
+ GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS: ${GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS:-1000}
entrypoint: ["bash", "-c"]
# Wait for ETH RPC endpoint to be up when running with fixturenet-lotus
command: |
@@ -27,6 +32,7 @@ services:
- "8001"
- "8020"
- "8030"
+ - "8040"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "8020"]
interval: 30s
diff --git a/stack_orchestrator/data/compose/docker-compose-prom-server.yml b/stack_orchestrator/data/compose/docker-compose-prom-server.yml
index 9095b6dc..594c48f0 100644
--- a/stack_orchestrator/data/compose/docker-compose-prom-server.yml
+++ b/stack_orchestrator/data/compose/docker-compose-prom-server.yml
@@ -28,15 +28,37 @@ services:
extra_hosts:
- "host.docker.internal:host-gateway"
- chain-head-exporter:
+ ethereum-chain-head-exporter:
image: cerc/watcher-ts:local
restart: always
working_dir: /app/packages/cli
environment:
- ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
- FIL_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT}
+ ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT:-https://mainnet.infura.io/v3}
ETH_RPC_API_KEY: ${CERC_INFURA_KEY}
- PORT: ${CERC_METRICS_PORT}
+ command: ["sh", "-c", "yarn export-metrics:chain-heads"]
+ ports:
+ - '5000'
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+
+ filecoin-chain-head-exporter:
+ image: cerc/watcher-ts:local
+ restart: always
+ working_dir: /app/packages/cli
+ environment:
+ ETH_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT:-https://api.node.glif.io/rpc/v1}
+ command: ["sh", "-c", "yarn export-metrics:chain-heads"]
+ ports:
+ - '5000'
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+
+ graph-node-upstream-head-exporter:
+ image: cerc/watcher-ts:local
+ restart: always
+ working_dir: /app/packages/cli
+ environment:
+ ETH_RPC_ENDPOINT: ${GRAPH_NODE_RPC_ENDPOINT}
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
ports:
- '5000'
diff --git a/stack_orchestrator/data/compose/docker-compose-snowballtools-base-backend.yml b/stack_orchestrator/data/compose/docker-compose-snowballtools-base-backend.yml
deleted file mode 100644
index 3445ed9d..00000000
--- a/stack_orchestrator/data/compose/docker-compose-snowballtools-base-backend.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-services:
- snowballtools-base-backend:
- image: cerc/snowballtools-base-backend:local
- restart: always
- volumes:
- - data:/data
- - config:/config:ro
- ports:
- - 8000
-
-volumes:
- data:
- config:
diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml b/stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml
index b3fcaab5..84291ec4 100644
--- a/stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml
+++ b/stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml
@@ -29,7 +29,7 @@ services:
image: cerc/watcher-ajna:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-job-runner.sh"]
volumes:
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
@@ -37,7 +37,7 @@ services:
ports:
- "9000"
healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "9000"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
interval: 20s
timeout: 5s
retries: 15
@@ -55,16 +55,17 @@ services:
image: cerc/watcher-ajna:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-server.sh"]
volumes:
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-ajna/start-server.sh:/app/start-server.sh
+ - ajna_watcher_gql_logs_data:/app/gql-logs
ports:
- "3008"
- "9001"
healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "3008"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
interval: 20s
timeout: 5s
retries: 15
@@ -74,3 +75,4 @@ services:
volumes:
ajna_watcher_db_data:
+ ajna_watcher_gql_logs_data:
diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml
index 48e77082..5bbac851 100644
--- a/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml
+++ b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml
@@ -32,8 +32,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CERC_HISTORICAL_BLOCK_RANGE: 500
CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB
CONTRACT_NAME: Azimuth
@@ -47,7 +47,7 @@ services:
ports:
- "9000"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9000"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
interval: 20s
timeout: 5s
retries: 15
@@ -66,18 +66,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/azimuth-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/azimuth-watcher/start-server.sh
+ - azimuth_watcher_gql_logs_data:/app/packages/azimuth-watcher/gql-logs
ports:
- "3001"
+ - "9001"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3001"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3001"]
interval: 20s
timeout: 5s
retries: 15
@@ -94,8 +96,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189
CONTRACT_NAME: Censures
STARTING_BLOCK: 6784954
@@ -108,7 +110,7 @@ services:
ports:
- "9002"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9002"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9002"]
interval: 20s
timeout: 5s
retries: 15
@@ -127,18 +129,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/censures-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/censures-watcher/start-server.sh
+ - censures_watcher_gql_logs_data:/app/packages/censures-watcher/gql-logs
ports:
- "3002"
+ - "9003"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3002"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3002"]
interval: 20s
timeout: 5s
retries: 15
@@ -155,8 +159,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A
CONTRACT_NAME: Claims
STARTING_BLOCK: 6784941
@@ -169,7 +173,7 @@ services:
ports:
- "9004"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9004"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9004"]
interval: 20s
timeout: 5s
retries: 15
@@ -188,18 +192,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/claims-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/claims-watcher/start-server.sh
+ - claims_watcher_gql_logs_data:/app/packages/claims-watcher/gql-logs
ports:
- "3003"
+ - "9005"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3003"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3003"]
interval: 20s
timeout: 5s
retries: 15
@@ -216,8 +222,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA
CONTRACT_NAME: ConditionalStarRelease
STARTING_BLOCK: 6828004
@@ -230,7 +236,7 @@ services:
ports:
- "9006"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9006"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9006"]
interval: 20s
timeout: 5s
retries: 15
@@ -249,18 +255,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/conditional-star-release-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/conditional-star-release-watcher/start-server.sh
+ - conditional_star_release_watcher_gql_logs_data:/app/packages/conditional-star-release-watcher/gql-logs
ports:
- "3004"
+ - "9007"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3004"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3004"]
interval: 20s
timeout: 5s
retries: 15
@@ -277,8 +285,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76
CONTRACT_NAME: DelegatedSending
STARTING_BLOCK: 6784956
@@ -291,7 +299,7 @@ services:
ports:
- "9008"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9008"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9008"]
interval: 20s
timeout: 5s
retries: 15
@@ -310,18 +318,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/delegated-sending-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/delegated-sending-watcher/start-server.sh
+ - delegated_sending_watcher_gql_logs_data:/app/packages/delegated-sending-watcher/gql-logs
ports:
- "3005"
+ - "9009"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3005"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3005"]
interval: 20s
timeout: 5s
retries: 15
@@ -338,8 +348,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73
CONTRACT_NAME: Ecliptic
STARTING_BLOCK: 13692129
@@ -352,7 +362,7 @@ services:
ports:
- "9010"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9010"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9010"]
interval: 20s
timeout: 5s
retries: 15
@@ -371,18 +381,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/ecliptic-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/ecliptic-watcher/start-server.sh
+ - ecliptic_watcher_gql_logs_data:/app/packages/ecliptic-watcher/gql-logs
ports:
- "3006"
+ - "9011"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3006"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3006"]
interval: 20s
timeout: 5s
retries: 15
@@ -399,8 +411,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801
CONTRACT_NAME: LinearStarRelease
STARTING_BLOCK: 6784943
@@ -413,7 +425,7 @@ services:
ports:
- "9012"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9012"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9012"]
interval: 20s
timeout: 5s
retries: 15
@@ -432,18 +444,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/linear-star-release-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/linear-star-release-watcher/start-server.sh
+ - linear_star_release_watcher_gql_logs_data:/app/packages/linear-star-release-watcher/gql-logs
ports:
- "3007"
+ - "9013"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3007"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3007"]
interval: 20s
timeout: 5s
retries: 15
@@ -460,8 +474,8 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4
CONTRACT_NAME: Polls
STARTING_BLOCK: 6784912
@@ -474,7 +488,7 @@ services:
ports:
- "9014"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "9014"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9014"]
interval: 20s
timeout: 5s
retries: 15
@@ -493,18 +507,20 @@ services:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
- CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+ CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
working_dir: /app/packages/polls-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/polls-watcher/start-server.sh
+ - polls_watcher_gql_logs_data:/app/packages/polls-watcher/gql-logs
ports:
- "3008"
+ - "9015"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "3008"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
interval: 20s
timeout: 5s
retries: 15
@@ -542,7 +558,7 @@ services:
ports:
- "0.0.0.0:4000:4000"
healthcheck:
- test: ["CMD", "nc", "-vz", "localhost", "4000"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "4000"]
interval: 20s
timeout: 5s
retries: 15
@@ -552,3 +568,11 @@ services:
volumes:
watcher_db_data:
+ azimuth_watcher_gql_logs_data:
+ censures_watcher_gql_logs_data:
+ claims_watcher_gql_logs_data:
+ conditional_star_release_watcher_gql_logs_data:
+ delegated_sending_watcher_gql_logs_data:
+ ecliptic_watcher_gql_logs_data:
+ linear_star_release_watcher_gql_logs_data:
+ polls_watcher_gql_logs_data:
diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml
index aae7bb47..6a446b1f 100644
--- a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml
+++ b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml
@@ -29,7 +29,7 @@ services:
image: cerc/watcher-merkl-sushiswap-v3:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-job-runner.sh"]
volumes:
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
@@ -37,7 +37,7 @@ services:
ports:
- "9002:9000"
healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "9000"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
interval: 20s
timeout: 5s
retries: 15
@@ -55,16 +55,17 @@ services:
image: cerc/watcher-merkl-sushiswap-v3:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-server.sh"]
volumes:
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh
+ - merkl_sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
ports:
- "127.0.0.1:3007:3008"
- "9003:9001"
healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "3008"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
interval: 20s
timeout: 5s
retries: 15
@@ -74,3 +75,4 @@ services:
volumes:
merkl_sushiswap_v3_watcher_db_data:
+ merkl_sushiswap_v3_watcher_gql_logs_data:
diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml
index 6c39320c..5cb6176f 100644
--- a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml
+++ b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml
@@ -29,7 +29,7 @@ services:
image: cerc/watcher-sushiswap-v3:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-job-runner.sh"]
volumes:
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
@@ -37,7 +37,7 @@ services:
ports:
- "9000:9000"
healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "9000"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
interval: 20s
timeout: 5s
retries: 15
@@ -55,16 +55,17 @@ services:
image: cerc/watcher-sushiswap-v3:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
- CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+ CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-server.sh"]
volumes:
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh
+ - sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
ports:
- "127.0.0.1:3008:3008"
- "9001:9001"
healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "3008"]
+ test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
interval: 20s
timeout: 5s
retries: 15
@@ -74,3 +75,4 @@ services:
volumes:
sushiswap_v3_watcher_db_data:
+ sushiswap_v3_watcher_gql_logs_data:
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/subgraphs-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/dashboards/subgraphs-dashboard.json
new file mode 100644
index 00000000..2bdf04c8
--- /dev/null
+++ b/stack_orchestrator/data/config/monitoring/grafana/dashboards/subgraphs-dashboard.json
@@ -0,0 +1,1904 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 39,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Head block number for a deployment",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 0
+ },
+ "id": 62,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "deployment_head{deployment=~\"$subgraph_hash\"}",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Latest indexed block",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Upstream head from graph-node ETH RPC endpoint",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 4,
+ "y": 0
+ },
+ "id": 12,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "latest_block_number{job=\"chain_heads\", chain=\"filecoin\", instance=\"graph-node\", exported_chain=\"\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Upstream head",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Chain head from an external public endpoint",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 8,
+ "y": 0
+ },
+ "id": 65,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "latest_block_number{job=\"chain_heads\", chain=\"filecoin\", instance=\"external\", exported_chain=\"\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "External head (filecoin)",
+ "type": "stat"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 7,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.2.3",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/eth_getLogs/",
+ "color": "#5794F2"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "deployment_eth_rpc_request_duration{deployment=\"$subgraph_hash\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "legendFormat": "{{method}}, {{provider}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "ETH RPC Request Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Block number of the most recent block synced from Ethereum",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 3
+ },
+ "id": 25,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "ethereum_chain_head_number",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Graph Node Head",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 8
+ },
+ {
+ "color": "red",
+ "value": 16
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 2,
+ "w": 4,
+ "x": 4,
+ "y": 3
+ },
+ "id": 24,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "valueSize": 25
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "deployment_head{deployment=~\"$subgraph_hash\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "latest_block_number{job=\"chain_heads\", chain=\"filecoin\", instance=\"graph-node\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "diff_upstream",
+ "transformations": [
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "",
+ "binary": {
+ "left": "latest_block_number",
+ "operator": "-",
+ "right": "deployment_head"
+ },
+ "mode": "binary",
+ "reduce": {
+ "reducer": "sum"
+ },
+ "replaceFields": true
+ }
+ }
+ ],
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 8
+ },
+ {
+ "color": "red",
+ "value": 16
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 2,
+ "w": 4,
+ "x": 8,
+ "y": 3
+ },
+ "id": 66,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "valueSize": 25
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "deployment_head{deployment=~\"$subgraph_hash\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "latest_block_number{job=\"chain_heads\", chain=\"filecoin\", instance=\"external\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "diff_external",
+ "transformations": [
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "",
+ "binary": {
+ "left": "latest_block_number",
+ "operator": "-",
+ "right": "deployment_head"
+ },
+ "mode": "binary",
+ "reduce": {
+ "reducer": "sum"
+ },
+ "replaceFields": true
+ }
+ }
+ ],
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Block number of the latest block currently present in the chain head cache",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 4,
+ "y": 5
+ },
+ "id": 63,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "chain_head_cache_latest_block",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Chain Cache Latest",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "semi-dark-green",
+ "index": 2,
+ "text": "SUCCESS"
+ },
+ "1": {
+ "color": "semi-dark-red",
+ "index": 1,
+ "text": "FAILED"
+ }
+ },
+ "type": "value"
+ },
+ {
+ "options": {
+ "match": "null+nan",
+ "result": {
+ "color": "orange",
+ "index": 0,
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 8,
+ "y": 5
+ },
+ "id": 20,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "deployment_failed{deployment=~\"$subgraph_hash\",}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Deployment Status",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 8
+ },
+ {
+ "color": "red",
+ "value": 16
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 2,
+ "w": 4,
+ "x": 0,
+ "y": 6
+ },
+ "id": 22,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "text": {
+ "valueSize": 25
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "deployment_head{deployment=~\"$subgraph_hash\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "ethereum_chain_head_number",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "diff_node",
+ "transformations": [
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "",
+ "binary": {
+ "left": "ethereum_chain_head_number",
+ "operator": "-",
+ "right": "deployment_head"
+ },
+ "mode": "binary",
+ "reduce": {
+ "reducer": "sum"
+ },
+ "replaceFields": true
+ }
+ }
+ ],
+ "type": "stat"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 7,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "hiddenSeries": false,
+ "id": 34,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.2.3",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/value/",
+ "color": "#FFB357"
+ },
+ {}
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "deployment_block_processing_duration_sum{deployment=\"$subgraph_hash\"}",
+ "format": "time_series",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": " ",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Block Processing Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Rate of successful ETH RPC requests",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 15
+ },
+ "id": 28,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "rate(endpoint_request{conn_type=~\"rpc\", result=~\"success\"}[$__rate_interval])",
+ "format": "time_series",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{provider}}, {{req_type}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "ETH RPC successful requests rate",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Rate of failed ETH RPC requests",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 15
+ },
+ "id": 64,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "rate(endpoint_request{conn_type=~\"rpc\", result=~\"failure\"}[$__rate_interval])",
+ "format": "time_series",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{provider}}, {{req_type}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "ETH RPC failed requests rate",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "gridPos": {
+ "h": 2,
+ "w": 24,
+ "x": 0,
+ "y": 22
+ },
+ "id": 32,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+        "content": "\n          GraphQL Server\n",
+ "mode": "html"
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "refId": "A"
+ }
+ ],
+ "transparent": true,
+ "type": "text"
+ },
+ {
+ "cards": {},
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolatePlasma",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 24
+ },
+ "heatmap": {},
+ "hideZeroBuckets": true,
+ "highlightCards": true,
+ "id": 29,
+ "legend": {
+ "show": false
+ },
+ "links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Plasma",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "showColorScale": false,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false,
+ "unit": "short"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "repeatDirection": "v",
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(query_execution_time_bucket{deployment=\"[[subgraph_hash]]\"}[1m])) by (le)",
+ "format": "heatmap",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Query Execution Time Histogram ([[subgraph_name]])",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "yAxis": {
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ },
+ "yBucketBound": "auto"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineWidth": 1
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 6,
+ "y": 24
+ },
+ "id": 26,
+ "links": [],
+ "options": {
+ "bucketOffset": 0,
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "repeat": "subgraph_hash",
+ "repeatDirection": "v",
+ "targets": [
+ {
+ "datasource": {
+ "uid": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(query_execution_time_bucket{deployment=\"[[subgraph_hash]]\"}[1m])) by (le)",
+ "format": "heatmap",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Query Execution Time Histogram ([[subgraph_name]])",
+ "type": "histogram"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "locale"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 24
+ },
+ "id": 36,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "repeatDirection": "v",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum(increase(query_execution_time_count{deployment=\"$subgraph_hash\"}[1m]))",
+ "format": "time_series",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{subgraph_deployment}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Queries ([[subgraph_name]])",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "Queries per minute per minute",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "locale"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 32
+ },
+ "id": 27,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "repeatDirection": "v",
+ "targets": [
+ {
+ "datasource": {
+ "uid": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(query_execution_time_count[1m])) by (deployment)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{deployment}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Query Rate (All Subgraphs)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 80,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "locale"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "id": 67,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "sum"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "repeatDirection": "v",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(query_execution_time_count[1m])) by (deployment)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{deployment}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Queries (All Subgraphs)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 70,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "locale"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 40
+ },
+ "id": 41,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "repeatDirection": "v",
+ "targets": [
+ {
+ "datasource": {
+ "uid": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "increase(query_execution_time_count{status=\"failed\"}[1m])",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{deployment}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Failed Queries (All Subgraphs)",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "10s",
+ "schemaVersion": 39,
+ "tags": [
+ "graph-node",
+ "subgraph"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "isNone": true,
+ "selected": false,
+ "value": ""
+ },
+ "datasource": {
+ "type": "grafana-postgresql-datasource",
+ "uid": "PA8D093265C513DCC"
+ },
+ "definition": "SELECT name\nFROM subgraphs.subgraph\nWHERE current_version IS NOT NULL;",
+ "description": "Name of deployed subgraph",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Subgraph Name",
+ "multi": false,
+ "name": "subgraph_name",
+ "options": [],
+ "query": "SELECT name\nFROM subgraphs.subgraph\nWHERE current_version IS NOT NULL;",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "type": "query"
+ },
+ {
+ "datasource": {
+ "type": "grafana-postgresql-datasource",
+ "uid": "PA8D093265C513DCC"
+ },
+ "definition": "SELECT deployment\nFROM subgraphs.subgraph_version\n JOIN subgraphs.subgraph ON subgraph.current_version = subgraph_version.id\nWHERE subgraph.name = '$subgraph_name';",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Subgraph Deployment",
+ "multi": false,
+ "name": "subgraph_hash",
+ "query": "SELECT deployment\nFROM subgraphs.subgraph_version\n JOIN subgraphs.subgraph ON subgraph.current_version = subgraph_version.id\nWHERE subgraph.name = '$subgraph_name';",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-30m",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Graph Node Subgraphs",
+ "uid": "b54352dd-35f6-4151-97dc-265bab0c67e9",
+ "version": 18,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/stack_orchestrator/data/config/monitoring/grafana/dashboards/watcher-dashboard.json b/stack_orchestrator/data/config/monitoring/grafana/dashboards/watcher-dashboard.json
index fe5fd244..f93bbe36 100644
--- a/stack_orchestrator/data/config/monitoring/grafana/dashboards/watcher-dashboard.json
+++ b/stack_orchestrator/data/config/monitoring/grafana/dashboards/watcher-dashboard.json
@@ -18,7 +18,7 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 4,
+ "id": 2,
"links": [
{
"asDropdown": false,
@@ -55,7 +55,7 @@
"x": 0,
"y": 0
},
- "id": 1,
+ "id": 29,
"panels": [
{
"datasource": {
@@ -65,15 +65,21 @@
"fieldConfig": {
"defaults": {
"color": {
- "fixedColor": "blue",
- "mode": "shades"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "yellow",
"value": null
}
]
@@ -83,27 +89,25 @@
},
"gridPos": {
"h": 3,
- "w": 4,
+ "w": 12,
"x": 0,
"y": 1
},
- "id": 25,
+ "id": 34,
"options": {
- "colorMode": "background",
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "enablePagination": false,
"fields": "",
- "values": false
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "textMode": "auto",
- "wideLayout": true
+ "showHeader": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -111,158 +115,44 @@
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
- "editorMode": "builder",
- "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_indexed\"}",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "Latest indexed block",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "PBFA97CFB590B2093"
- },
- "description": "Chain head block number of the upstream endpoint",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "blue",
- "mode": "shades"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 4,
- "x": 4,
- "y": 1
- },
- "id": 11,
- "options": {
- "colorMode": "background",
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto",
- "wideLayout": true
- },
- "pluginVersion": "10.2.2",
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "PBFA97CFB590B2093"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "latest_upstream_block_number{job=~\"$job\", instance=~\"$watcher\"}",
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "max(watcher_info{job=\"$job\", instance=\"$watcher\"}) by (repository, version, commitHash)",
+ "format": "table",
"fullMetaSearch": false,
"includeNullMetadata": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
+ "instant": true,
+ "legendFormat": "",
+ "range": false,
"refId": "A",
"useBackend": false
}
],
- "title": "Upstream head",
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "PBFA97CFB590B2093"
- },
- "description": "Chain head from an external public endpoint",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "blue",
- "mode": "shades"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 4,
- "x": 8,
- "y": 1
- },
- "id": 12,
- "options": {
- "colorMode": "background",
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto",
- "wideLayout": true
- },
- "pluginVersion": "10.2.2",
- "targets": [
+ "transformations": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "PBFA97CFB590B2093"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "latest_block_number{job=\"chain_heads\", chain=\"$target_chain\"}",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Time": true,
+ "Value": true
+ },
+ "includeByName": {},
+ "indexByName": {
+ "Time": 0,
+ "Value": 4,
+ "commitHash": 3,
+ "repository": 1,
+ "version": 2
+ },
+ "renameByName": {
+ "commitHash": "Commit hash",
+ "repository": "Repository",
+ "version": "Release"
+ }
+ }
}
],
- "title": "External head ($target_chain)",
- "type": "stat"
+ "type": "table"
},
{
"datasource": {
@@ -325,7 +215,7 @@
"overrides": []
},
"gridPos": {
- "h": 6,
+ "h": 7,
"w": 12,
"x": 12,
"y": 1
@@ -366,6 +256,282 @@
"title": "Last block process duration (s)",
"type": "timeseries"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 0,
+ "y": 4
+ },
+ "id": 25,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_processed\"}",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Event processed block",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 3,
+ "y": 4
+ },
+ "id": 38,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_indexed\"}",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Inserted processed block",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Chain head block number of the upstream endpoint",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 6,
+ "y": 4
+ },
+ "id": 11,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "latest_upstream_block_number{job=~\"$job\", instance=~\"$watcher\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Upstream head",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Chain head from an external public endpoint",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "shades"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 9,
+ "y": 4
+ },
+ "id": 12,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "latest_block_number{job=\"chain_heads\", chain=\"$target_chain\", instance=\"external\", exported_chain=\"\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "External head ($target_chain)",
+ "type": "stat"
+ },
{
"datasource": {
"type": "prometheus",
@@ -420,7 +586,7 @@
"h": 2.5,
"w": 4,
"x": 0,
- "y": 4
+ "y": 7
},
"id": 20,
"options": {
@@ -438,7 +604,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -497,7 +663,7 @@
"h": 2,
"w": 4,
"x": 4,
- "y": 4
+ "y": 7
},
"id": 22,
"options": {
@@ -518,7 +684,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -542,7 +708,7 @@
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
- "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_indexed\"}",
+ "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_processed\"}",
"hide": false,
"instant": false,
"legendFormat": "{{__name__}}",
@@ -608,7 +774,7 @@
"h": 2,
"w": 4,
"x": 8,
- "y": 4
+ "y": 7
},
"id": 24,
"options": {
@@ -629,7 +795,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -638,7 +804,7 @@
},
"disableTextWrap": false,
"editorMode": "builder",
- "expr": "latest_block_number{job=\"chain_heads\", chain=\"$target_chain\"}",
+ "expr": "latest_block_number{job=\"chain_heads\", chain=\"$target_chain\", instance=\"external\"}",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
@@ -653,7 +819,7 @@
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
- "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_indexed\"}",
+ "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_processed\"}",
"hide": false,
"instant": false,
"legendFormat": "{{__name__}}",
@@ -682,6 +848,107 @@
],
"type": "stat"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "orange",
+ "mode": "shades"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 8
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.2.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "last_block_num_events_total{job=~\"$job\", instance=~\"$watcher\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Number of events in the last block",
+ "type": "timeseries"
+ },
{
"datasource": {
"type": "prometheus",
@@ -711,7 +978,7 @@
"h": 3,
"w": 8,
"x": 4,
- "y": 6
+ "y": 9
},
"id": 13,
"options": {
@@ -730,7 +997,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -738,12 +1005,12 @@
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
- "editorMode": "builder",
- "expr": "last_processed_block_number{job=~\"$job\", instance=~\"$watcher\"}",
+ "editorMode": "code",
+ "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_processed\"}",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
- "legendFormat": "latest_processed",
+ "legendFormat": "{{kind}}",
"range": true,
"refId": "A",
"useBackend": false
@@ -754,7 +1021,7 @@
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
- "editorMode": "builder",
+ "editorMode": "code",
"expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_canonical\"}",
"fullMetaSearch": false,
"hide": false,
@@ -836,7 +1103,7 @@
"h": 2.5,
"w": 4,
"x": 0,
- "y": 6.5
+ "y": 9.5
},
"id": 16,
"options": {
@@ -854,7 +1121,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -876,107 +1143,6 @@
"title": "Watcher sync mode",
"type": "stat"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "PBFA97CFB590B2093"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "orange",
- "mode": "shades"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 6,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "pluginVersion": "10.2.2",
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "PBFA97CFB590B2093"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "last_block_num_events_total{job=~\"$job\", instance=~\"$watcher\"}",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{__name__}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- }
- ],
- "title": "Number of events in the last block",
- "type": "timeseries"
- },
{
"datasource": {
"type": "prometheus",
@@ -1006,7 +1172,7 @@
"h": 3,
"w": 6,
"x": 0,
- "y": 9
+ "y": 12
},
"id": 7,
"options": {
@@ -1025,7 +1191,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -1092,7 +1258,7 @@
"h": 3,
"w": 6,
"x": 6,
- "y": 9
+ "y": 12
},
"id": 4,
"options": {
@@ -1111,7 +1277,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -1146,6 +1312,230 @@
"title": "DB size (MB)",
"type": "stat"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "dark-green",
+ "value": null
+ },
+ {
+ "color": "dark-orange",
+ "value": 8
+ },
+ {
+ "color": "dark-red",
+ "value": 16
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "diff_external"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "thresholds",
+ "seriesBy": "last"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "diff_external"
+ },
+ "properties": [
+ {
+ "id": "thresholds",
+ "value": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 8
+ },
+ {
+ "color": "red",
+ "value": 16
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 15
+ },
+ "id": 33,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "latest_upstream_block_number{job=~\"$job\", instance=~\"$watcher\"}",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "sync_status_block_number{job=~\"$job\", instance=~\"$watcher\", kind=\"latest_processed\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "latest_block_number{job=\"chain_heads\", chain=\"$target_chain\", instance=\"external\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "diff",
+ "transformations": [
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "diff_upstream",
+ "binary": {
+ "left": "latest_upstream_block_number",
+ "operator": "-",
+ "right": "sync_status_block_number"
+ },
+ "mode": "binary",
+ "reduce": {
+ "reducer": "sum"
+ },
+ "replaceFields": false
+ }
+ },
+ {
+ "id": "calculateField",
+ "options": {
+ "alias": "diff_external",
+ "binary": {
+ "left": "latest_block_number",
+ "operator": "-",
+ "right": "sync_status_block_number"
+ },
+ "mode": "binary",
+ "reduce": {
+ "reducer": "sum"
+ },
+ "replaceFields": false
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Time": false,
+ "latest_block_number": true,
+ "latest_upstream_block_number": true,
+ "sync_status_block_number": true
+ },
+ "includeByName": {},
+ "indexByName": {},
+ "renameByName": {}
+ }
+ }
+ ],
+ "type": "timeseries"
+ },
{
"datasource": {
"type": "prometheus",
@@ -1208,9 +1598,9 @@
},
"gridPos": {
"h": 6,
- "w": 12,
- "x": 0,
- "y": 12
+ "w": 8,
+ "x": 8,
+ "y": 15
},
"id": 5,
"options": {
@@ -1310,9 +1700,9 @@
},
"gridPos": {
"h": 6,
- "w": 12,
- "x": 12,
- "y": 12
+ "w": 8,
+ "x": 16,
+ "y": 15
},
"id": 15,
"options": {
@@ -1350,6 +1740,712 @@
"title": "Num event-processing jobs",
"type": "timeseries"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+      "description": "Total number of failed ETH RPC requests by method and provider endpoint",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 21
+ },
+ "id": 28,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "watcher_eth_rpc_errors{job=~\"$job\", instance=~\"$watcher\"}",
+ "format": "time_series",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{method}}, {{provider}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "ETH RPC request failures",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Time taken by last ETH RPC requests by method and provider endpoint",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 21
+ },
+ "id": 26,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "watcher_eth_rpc_request_duration{job=~\"$job\", instance=~\"$watcher\"}",
+ "instant": false,
+ "legendFormat": "{{method}}, {{provider}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "ETH RPC request durations",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Total number of failed ETH RPC requests by method and provider endpoint (across all watchers)",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 27
+ },
+ "id": 30,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (method, provider) (watcher_eth_rpc_errors{chain=\"$target_chain\"})",
+ "format": "time_series",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{method}}, {{provider}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Total ETH RPC request failures (across all watchers)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "Configured upstream ETH RPC endpoints",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": false,
+ "inspect": false
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "red",
+ "index": 0,
+ "text": "inactive"
+ },
+ "1": {
+ "color": "green",
+ "index": 1,
+ "text": "active"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "blue",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "status"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "type": "color-text"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 27
+ },
+ "id": 32,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "enablePagination": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "frameIndex": 0,
+ "showHeader": true,
+ "sortBy": [
+ {
+ "desc": true,
+ "displayName": "status"
+ }
+ ]
+ },
+ "pluginVersion": "10.2.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "watcher_config_upstream_endpoints{job=~\"$job\", instance=~\"$watcher\"}",
+ "format": "table",
+ "instant": true,
+ "legendFormat": "__auto",
+ "range": false,
+ "refId": "A"
+ }
+ ],
+ "title": "Configured upstream endpoints",
+ "transformations": [
+ {
+ "id": "labelsToFields",
+ "options": {
+ "mode": "columns"
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {
+ "Time": true,
+ "__name__": true,
+ "chain": true,
+ "instance": true,
+ "job": true
+ },
+ "includeByName": {},
+ "indexByName": {},
+ "renameByName": {
+ "Value": "status"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 33
+ },
+ "id": 35,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "gql_query_count_total{job=~\"$job\", instance=~\"$watcher\"}",
+ "instant": false,
+ "legendFormat": "{{__name__}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Total GQL query count",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 33
+ },
+ "id": 36,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "expr": "gql_query_count{job=~\"$job\", instance=~\"$watcher\"}",
+ "instant": false,
+ "legendFormat": "{{name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "GQL query count",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 16,
+ "y": 33
+ },
+ "id": 37,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "gql_query_duration_seconds{job=~\"$job\", instance=~\"$watcher\"}",
+ "instant": false,
+ "legendFormat": "{{name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "GQL queries duration",
+ "type": "timeseries"
+ },
{
"datasource": {
"type": "prometheus",
@@ -1377,7 +2473,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
}
@@ -1388,7 +2485,7 @@
"h": 5,
"w": 8,
"x": 0,
- "y": 18
+ "y": 39
},
"id": 18,
"options": {
@@ -1410,7 +2507,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -1522,7 +2619,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
}
@@ -1533,7 +2631,7 @@
"h": 5,
"w": 8,
"x": 8,
- "y": 18
+ "y": 39
},
"id": 17,
"options": {
@@ -1555,7 +2653,7 @@
"textMode": "value_and_name",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -1669,7 +2767,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
}
@@ -1680,7 +2779,7 @@
"h": 5,
"w": 8,
"x": 16,
- "y": 18
+ "y": 39
},
"id": 19,
"options": {
@@ -1702,7 +2801,7 @@
"textMode": "auto",
"wideLayout": true
},
- "pluginVersion": "10.2.2",
+ "pluginVersion": "10.2.3",
"targets": [
{
"datasource": {
@@ -1792,7 +2891,6 @@
"uid": "PBFA97CFB590B2093"
},
"description": "Total number of subgraph entities loaded in event processing",
- "hide": true,
"fieldConfig": {
"defaults": {
"color": {
@@ -1836,7 +2934,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
}
@@ -1847,8 +2946,9 @@
"h": 6,
"w": 8,
"x": 0,
- "y": 23
+ "y": 44
},
+ "hide": true,
"id": 8,
"options": {
"legend": {
@@ -1932,7 +3032,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
}
@@ -1943,7 +3044,7 @@
"h": 6,
"w": 8,
"x": 8,
- "y": 23
+ "y": 44
},
"id": 9,
"options": {
@@ -2028,7 +3129,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
}
@@ -2039,7 +3141,7 @@
"h": 6,
"w": 8,
"x": 16,
- "y": 23
+ "y": 44
},
"id": 10,
"options": {
@@ -2084,7 +3186,7 @@
}
],
"refresh": "10s",
- "schemaVersion": 38,
+ "schemaVersion": 39,
"tags": [
"watcher"
],
@@ -2182,6 +3284,6 @@
"timepicker": {},
"timezone": "",
"title": "Watchers",
- "version": 1,
+ "version": 5,
"weekStart": ""
}
diff --git a/stack_orchestrator/data/config/monitoring/grafana/provisioning/datasources/graph-node-postgres.yml b/stack_orchestrator/data/config/monitoring/grafana/provisioning/datasources/graph-node-postgres.yml
new file mode 100644
index 00000000..d1604b06
--- /dev/null
+++ b/stack_orchestrator/data/config/monitoring/grafana/provisioning/datasources/graph-node-postgres.yml
@@ -0,0 +1,20 @@
+apiVersion: 1
+
+datasources:
+ - name: Graph Node Postgres
+ type: postgres
+ jsonData:
+ database: graph-node
+ sslmode: 'disable'
+ maxOpenConns: 100
+ maxIdleConns: 100
+ maxIdleConnsAuto: true
+ connMaxLifetime: 14400
+ postgresVersion: 1411 # 903=9.3, 1000=10, 1411=14.11
+ timescaledb: false
+ user: graph-node
+ # # Add URL for graph-node database
+ # url: graph-node-db:5432
+ # # Set password for graph-node database
+ # secureJsonData:
+ # password: 'password'
diff --git a/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml b/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml
index 8229ff53..dea7052d 100644
--- a/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml
+++ b/stack_orchestrator/data/config/monitoring/prometheus/prometheus.yml
@@ -45,7 +45,18 @@ scrape_configs:
metrics_path: /metrics
scheme: http
static_configs:
- - targets: ['chain-head-exporter:5000']
+ - targets: ['ethereum-chain-head-exporter:5000']
+ labels:
+ instance: 'external'
+ chain: 'ethereum'
+ - targets: ['filecoin-chain-head-exporter:5000']
+ labels:
+ instance: 'external'
+ chain: 'filecoin'
+ - targets: ['graph-node-upstream-head-exporter:5000']
+ labels:
+ instance: 'graph-node'
+ chain: 'filecoin'
- job_name: 'postgres'
scrape_interval: 30s
@@ -74,3 +85,11 @@ scrape_configs:
# - targets: ['example-host:1317']
params:
format: ['prometheus']
+
+ - job_name: graph-node
+ metrics_path: /metrics
+ scrape_interval: 30s
+ scheme: http
+ static_configs:
+ # Add graph-node targets to be monitored below
+ # - targets: ['graph-node:8040']
diff --git a/stack_orchestrator/data/config/monitoring/subgraph-alert-rules.yml b/stack_orchestrator/data/config/monitoring/subgraph-alert-rules.yml
new file mode 100644
index 00000000..ed59e8ef
--- /dev/null
+++ b/stack_orchestrator/data/config/monitoring/subgraph-alert-rules.yml
@@ -0,0 +1,64 @@
+apiVersion: 1
+groups:
+ - orgId: 1
+ name: subgraph
+ folder: SubgraphAlerts
+ interval: 30s
+ rules:
+ - uid: b2a9144b-6104-46fc-92b5-352f4e643c4c
+ title: subgraph_head_tracking
+ condition: condition
+ data:
+ - refId: diff
+ relativeTimeRange:
+ from: 600
+ to: 0
+ datasourceUid: PBFA97CFB590B2093
+ model:
+ datasource:
+ type: prometheus
+ uid: PBFA97CFB590B2093
+ editorMode: code
+ expr: ethereum_chain_head_number - on(network) group_right deployment_head{deployment=~"REPLACE_WITH_SUBGRAPH_IDS"}
+ instant: true
+ intervalMs: 1000
+ legendFormat: __auto
+ maxDataPoints: 43200
+ range: false
+ refId: diff
+ - refId: condition
+ relativeTimeRange:
+ from: 600
+ to: 0
+ datasourceUid: __expr__
+ model:
+ conditions:
+ - evaluator:
+ params:
+ - 15
+ - 0
+ type: gt
+ operator:
+ type: and
+ query:
+ params: []
+ reducer:
+ params: []
+ type: avg
+ type: query
+ datasource:
+ name: Expression
+ type: __expr__
+ uid: __expr__
+ expression: diff
+ intervalMs: 1000
+ maxDataPoints: 43200
+ refId: condition
+ type: threshold
+ noDataState: OK
+ execErrState: Alerting
+ for: 5m
+ annotations:
+ summary: Subgraph deployment {{ index $labels "deployment" }} is falling behind head by {{ index $values "diff" }}
+ labels: {}
+ isPaused: false
diff --git a/stack_orchestrator/data/config/monitoring/update-grafana-alerts-config.sh b/stack_orchestrator/data/config/monitoring/update-grafana-alerts-config.sh
new file mode 100755
index 00000000..9f81203f
--- /dev/null
+++ b/stack_orchestrator/data/config/monitoring/update-grafana-alerts-config.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+echo Using CERC_GRAFANA_ALERTS_SUBGRAPH_IDS ${CERC_GRAFANA_ALERTS_SUBGRAPH_IDS}
+
+# Replace subgraph ids in subgraph alerting config
+# Note: Requires the grafana container to be run with user root
+if [ -n "$CERC_GRAFANA_ALERTS_SUBGRAPH_IDS" ]; then
+ sed -i "s/REPLACE_WITH_SUBGRAPH_IDS/$CERC_GRAFANA_ALERTS_SUBGRAPH_IDS/g" /etc/grafana/provisioning/alerting/subgraph-alert-rules.yml
+fi
diff --git a/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml b/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml
index c2025029..c1c3e8e2 100644
--- a/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml
+++ b/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml
@@ -24,7 +24,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -100,7 +100,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -176,7 +176,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -252,7 +252,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -328,7 +328,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -404,7 +404,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -480,7 +480,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -556,7 +556,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -634,7 +634,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -710,7 +710,7 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
@@ -788,7 +788,85 @@ groups:
uid: PBFA97CFB590B2093
disableTextWrap: false
editorMode: code
- expr: latest_block_number - on(chain) group_right sync_status_block_number{job="ajna", instance="ajna", kind="latest_indexed"}
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="ajna", instance="ajna", kind="latest_indexed"}
+ fullMetaSearch: false
+ includeNullMetadata: true
+ instant: true
+ intervalMs: 1000
+ legendFormat: __auto
+ maxDataPoints: 43200
+ range: false
+ refId: diff
+ useBackend: false
+ - refId: latest_external
+ relativeTimeRange:
+ from: 600
+ to: 0
+ datasourceUid: PBFA97CFB590B2093
+ model:
+ datasource:
+ type: prometheus
+ uid: PBFA97CFB590B2093
+ editorMode: code
+ expr: latest_block_number{chain="filecoin"}
+ hide: false
+ instant: true
+ legendFormat: __auto
+ range: false
+ refId: latest_external
+ - refId: condition
+ relativeTimeRange:
+ from: 600
+ to: 0
+ datasourceUid: __expr__
+ model:
+ conditions:
+ - evaluator:
+ params:
+ - 0
+ - 0
+ type: gt
+ operator:
+ type: and
+ query:
+ params: []
+ reducer:
+ params: []
+ type: avg
+ type: query
+ datasource:
+ name: Expression
+ type: __expr__
+ uid: __expr__
+ expression: ${diff} >= 16
+ intervalMs: 1000
+ maxDataPoints: 43200
+ refId: condition
+ type: math
+ noDataState: Alerting
+ execErrState: Alerting
+ for: 15m
+ annotations:
+ summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
+ isPaused: false
+
+ # Secured Finance
+ - uid: secured_finance_diff_external
+ title: secured_finance_watcher_head_tracking
+ condition: condition
+ data:
+ - refId: diff
+ relativeTimeRange:
+ from: 600
+ to: 0
+ datasourceUid: PBFA97CFB590B2093
+ model:
+ datasource:
+ type: prometheus
+ uid: PBFA97CFB590B2093
+ disableTextWrap: false
+ editorMode: code
+ expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="secured-finance", instance="secured-finance", kind="latest_indexed"}
fullMetaSearch: false
includeNullMetadata: true
instant: true
diff --git a/stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh b/stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh
index 819b1096..7a7a83b5 100755
--- a/stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh
+++ b/stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh
@@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
fi
set -u
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
diff --git a/stack_orchestrator/data/config/watcher-ajna/start-server.sh b/stack_orchestrator/data/config/watcher-ajna/start-server.sh
index e2bbdaad..9aaa77ec 100755
--- a/stack_orchestrator/data/config/watcher-ajna/start-server.sh
+++ b/stack_orchestrator/data/config/watcher-ajna/start-server.sh
@@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
fi
set -u
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
diff --git a/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml
index 70fc0466..daa5238d 100644
--- a/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml
+++ b/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml
@@ -2,7 +2,6 @@
host = "0.0.0.0"
port = 3008
kind = "active"
- gqlPath = "/"
# Checkpointing state.
checkpointing = true
@@ -22,23 +21,30 @@
# Interval in number of blocks at which to clear entities cache.
clearEntitiesCacheInterval = 1000
- # Max block range for which to return events in eventsInRange GQL query.
- # Use -1 for skipping check on block range.
- maxEventsBlockRange = 1000
-
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
rpcSupportsBlockHashParam = false
- # GQL cache settings
- [server.gqlCache]
- enabled = true
+ # Server GQL config
+ [server.gql]
+ path = "/"
- # Max in-memory cache size (in bytes) (default 8 MB)
- # maxCacheSize
+ # Max block range for which to return events in eventsInRange GQL query.
+ # Use -1 for skipping check on block range.
+ maxEventsBlockRange = 1000
- # GQL cache-control max-age settings (in seconds)
- maxAge = 15
- timeTravelMaxAge = 86400 # 1 day
+ # Log directory for GQL requests
+ logDir = "./gql-logs"
+
+ # GQL cache settings
+ [server.gql.cache]
+ enabled = true
+
+ # Max in-memory cache size (in bytes) (default 8 MB)
+ # maxCacheSize
+
+ # GQL cache-control max-age settings (in seconds)
+ maxAge = 15
+ timeTravelMaxAge = 86400 # 1 day
[metrics]
host = "0.0.0.0"
@@ -58,7 +64,7 @@
[upstream]
[upstream.ethServer]
- rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
+ rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
rpcClient = true
@@ -85,6 +91,9 @@
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
blockDelayInMilliSecs = 30000
+ # Number of blocks by which block processing lags behind head
+ blockProcessingOffset = 0
+
# Boolean to switch between modes of processing events when starting the server.
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
@@ -96,3 +105,6 @@
# Max block range of historical processing after which it waits for completion of events processing
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
historicalMaxFetchAhead = 10000
+
+ # Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
+ maxNewBlockRetries = 3
diff --git a/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh b/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh
index 4bcad74c..f59ef6c6 100755
--- a/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh
+++ b/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh
@@ -4,16 +4,19 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
-echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
-echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
+echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL_ENDPOINT}"
echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
# Replace env variables in template TOML file
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
- s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}|g; \
+ s|REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT|${CERC_IPLD_ETH_GQL_ENDPOINT}|g; \
s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
# Write the modified content to a new file
diff --git a/stack_orchestrator/data/config/watcher-azimuth/start-server.sh b/stack_orchestrator/data/config/watcher-azimuth/start-server.sh
index fa334653..4e6bbf59 100755
--- a/stack_orchestrator/data/config/watcher-azimuth/start-server.sh
+++ b/stack_orchestrator/data/config/watcher-azimuth/start-server.sh
@@ -4,16 +4,19 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
-echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
-echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
+echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL_ENDPOINT}"
echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
# Replace env variables in template TOML file
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
- s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}|g; \
+ s|REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT|${CERC_IPLD_ETH_GQL_ENDPOINT}|g; \
s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
# Write the modified content to a new file
diff --git a/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml
index 2a91fedf..40b7f80c 100644
--- a/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml
+++ b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml
@@ -1,6 +1,7 @@
[server]
host = "0.0.0.0"
- maxSimultaneousRequests = -1
+ [server.gql]
+ maxSimultaneousRequests = -1
[metrics]
host = "0.0.0.0"
@@ -13,8 +14,8 @@
[upstream]
[upstream.ethServer]
- gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL"
- rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"
+ gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT"
+ rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
[jobQueue]
historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE
diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh
index 819b1096..7a7a83b5 100755
--- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh
+++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh
@@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
fi
set -u
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh
index e2bbdaad..9aaa77ec 100755
--- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh
+++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh
@@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
fi
set -u
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml
index 57494ce3..035843ff 100644
--- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml
+++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml
@@ -2,7 +2,6 @@
host = "0.0.0.0"
port = 3008
kind = "active"
- gqlPath = '/'
# Checkpointing state.
checkpointing = true
@@ -22,23 +21,30 @@
# Interval in number of blocks at which to clear entities cache.
clearEntitiesCacheInterval = 1000
- # Max block range for which to return events in eventsInRange GQL query.
- # Use -1 for skipping check on block range.
- maxEventsBlockRange = 1000
-
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
rpcSupportsBlockHashParam = false
- # GQL cache settings
- [server.gqlCache]
- enabled = true
+ # Server GQL config
+ [server.gql]
+ path = "/"
- # Max in-memory cache size (in bytes) (default 8 MB)
- # maxCacheSize
+ # Max block range for which to return events in eventsInRange GQL query.
+ # Use -1 for skipping check on block range.
+ maxEventsBlockRange = 1000
- # GQL cache-control max-age settings (in seconds)
- maxAge = 15
- timeTravelMaxAge = 86400 # 1 day
+ # Log directory for GQL requests
+ logDir = "./gql-logs"
+
+ # GQL cache settings
+ [server.gql.cache]
+ enabled = true
+
+ # Max in-memory cache size (in bytes) (default 8 MB)
+ # maxCacheSize
+
+ # GQL cache-control max-age settings (in seconds)
+ maxAge = 15
+ timeTravelMaxAge = 86400 # 1 day
[metrics]
host = "0.0.0.0"
@@ -58,7 +64,7 @@
[upstream]
[upstream.ethServer]
- rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
+ rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
rpcClient = true
@@ -69,7 +75,7 @@
# Boolean flag to filter event logs by contracts
filterLogsByAddresses = true
# Boolean flag to filter event logs by topics
- filterLogsByTopics = false
+ filterLogsByTopics = true
[upstream.cache]
name = "requests"
@@ -85,6 +91,9 @@
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
blockDelayInMilliSecs = 30000
+ # Number of blocks by which block processing lags behind head
+ blockProcessingOffset = 0
+
# Boolean to switch between modes of processing events when starting the server.
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
@@ -96,3 +105,6 @@
# Max block range of historical processing after which it waits for completion of events processing
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
historicalMaxFetchAhead = 10000
+
+ # Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
+ maxNewBlockRetries = 3
diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh
index 819b1096..7a7a83b5 100755
--- a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh
+++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh
@@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
fi
set -u
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh
index e2bbdaad..9aaa77ec 100755
--- a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh
+++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh
@@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
fi
set -u
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
- sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+ sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml
index 24449d4f..817d5323 100644
--- a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml
+++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml
@@ -2,7 +2,6 @@
host = "0.0.0.0"
port = 3008
kind = "active"
- gqlPath = "/"
# Checkpointing state.
checkpointing = true
@@ -22,23 +21,30 @@
# Interval in number of blocks at which to clear entities cache.
clearEntitiesCacheInterval = 1000
- # Max block range for which to return events in eventsInRange GQL query.
- # Use -1 for skipping check on block range.
- maxEventsBlockRange = 1000
-
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
rpcSupportsBlockHashParam = false
- # GQL cache settings
- [server.gqlCache]
- enabled = true
+ # Server GQL config
+ [server.gql]
+ path = "/"
- # Max in-memory cache size (in bytes) (default 8 MB)
- # maxCacheSize
+ # Max block range for which to return events in eventsInRange GQL query.
+ # Use -1 for skipping check on block range.
+ maxEventsBlockRange = 1000
- # GQL cache-control max-age settings (in seconds)
- maxAge = 15
- timeTravelMaxAge = 86400 # 1 day
+ # Log directory for GQL requests
+ logDir = "./gql-logs"
+
+ # GQL cache settings
+ [server.gql.cache]
+ enabled = true
+
+ # Max in-memory cache size (in bytes) (default 8 MB)
+ # maxCacheSize
+
+ # GQL cache-control max-age settings (in seconds)
+ maxAge = 15
+ timeTravelMaxAge = 86400 # 1 day
[metrics]
host = "0.0.0.0"
@@ -58,7 +64,7 @@
[upstream]
[upstream.ethServer]
- rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
+ rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
rpcClient = true
@@ -85,6 +91,9 @@
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
blockDelayInMilliSecs = 30000
+ # Number of blocks by which block processing lags behind head
+ blockProcessingOffset = 0
+
# Boolean to switch between modes of processing events when starting the server.
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
@@ -96,3 +105,6 @@
# Max block range of historical processing after which it waits for completion of events processing
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
historicalMaxFetchAhead = 10000
+
+  # Max number of retries to fetch a new block, after which the watcher will fail over to other RPC endpoints
+ maxNewBlockRetries = 3
diff --git a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/Dockerfile b/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/Dockerfile
deleted file mode 100644
index ca5c4586..00000000
--- a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM cerc/snowballtools-base-backend-base:local
-
-WORKDIR /app/packages/backend
-COPY run.sh .
-
-ENTRYPOINT ["./run.sh"]
diff --git a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/Dockerfile-base b/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/Dockerfile-base
deleted file mode 100644
index 7a264ca3..00000000
--- a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/Dockerfile-base
+++ /dev/null
@@ -1,26 +0,0 @@
-FROM ubuntu:22.04 as builder
-
-RUN apt update && \
- apt install -y --no-install-recommends --no-install-suggests \
- ca-certificates curl gnupg
-
-# Node
-ARG NODE_MAJOR=20
-RUN curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \
- echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list && \
- apt update && apt install -y nodejs
-
-# npm setup
-RUN npm config set @cerc-io:registry https://git.vdb.to/api/packages/cerc-io/npm/ && npm install -g yarn
-
-COPY . /app/
-WORKDIR /app/
-
-RUN find . -name 'node_modules' | xargs -n1 rm -rf
-RUN yarn && yarn build --ignore frontend
-
-FROM cerc/webapp-base:local
-
-COPY --from=builder /app /app
-
-WORKDIR /app/packages/backend
diff --git a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/build.sh b/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/build.sh
deleted file mode 100755
index 4f7c7cdc..00000000
--- a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/build.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-# Build cerc/webapp-deployer-backend
-
-source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
-
-# See: https://stackoverflow.com/a/246128/1701505
-SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-
-docker build -t cerc/snowballtools-base-backend-base:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile-base ${CERC_REPO_BASE_DIR}/snowballtools-base
-docker build -t cerc/snowballtools-base-backend:local ${build_command_args} ${SCRIPT_DIR}
diff --git a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/run.sh b/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/run.sh
deleted file mode 100755
index ae14ed19..00000000
--- a/stack_orchestrator/data/container-build/cerc-snowballtools-base-backend/run.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-
-LACONIC_HOSTED_CONFIG_FILE=${LACONIC_HOSTED_CONFIG_FILE}
-if [ -z "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
- if [ -f "/config/laconic-hosted-config.yml" ]; then
- LACONIC_HOSTED_CONFIG_FILE="/config/laconic-hosted-config.yml"
- elif [ -f "/config/config.yml" ]; then
- LACONIC_HOSTED_CONFIG_FILE="/config/config.yml"
- fi
-fi
-
-if [ -f "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
- /scripts/apply-webapp-config.sh $LACONIC_HOSTED_CONFIG_FILE "`pwd`/dist"
-fi
-
-/scripts/apply-runtime-env.sh "`pwd`/dist"
-
-yarn start
diff --git a/stack_orchestrator/data/container-build/cerc-test-container/build.sh b/stack_orchestrator/data/container-build/cerc-test-container/build.sh
index ee56576a..fdc86a90 100755
--- a/stack_orchestrator/data/container-build/cerc-test-container/build.sh
+++ b/stack_orchestrator/data/container-build/cerc-test-container/build.sh
@@ -2,4 +2,4 @@
# Build cerc/test-container
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
\ No newline at end of file
+docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
diff --git a/stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile
index 4369d50a..6b20eff6 100644
--- a/stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile
+++ b/stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile
@@ -6,5 +6,10 @@ WORKDIR /app
COPY . .
+# Get the latest Git commit hash and set it in package.json
+RUN COMMIT_HASH=$(git rev-parse HEAD) && \
+ jq --arg commitHash "$COMMIT_HASH" '.commitHash = $commitHash' package.json > tmp.json && \
+ mv tmp.json package.json
+
RUN echo "Installing dependencies and building ajna-watcher-ts" && \
yarn && yarn build
diff --git a/stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile
index eaf717aa..81253895 100644
--- a/stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile
+++ b/stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile
@@ -1,11 +1,20 @@
FROM node:18.16.0-alpine3.16
-RUN apk --update --no-cache add git python3 alpine-sdk
+RUN apk --update --no-cache add git python3 alpine-sdk jq
WORKDIR /app
COPY . .
+# Get the latest Git commit hash and set it in package.json of all watchers
+RUN COMMIT_HASH=$(git rev-parse HEAD) && \
+ find . -name 'package.json' -exec sh -c ' \
+ for packageFile; do \
+ jq --arg commitHash "$0" ".commitHash = \$commitHash" "$packageFile" > "$packageFile.tmp" && \
+ mv "$packageFile.tmp" "$packageFile"; \
+ done \
+ ' "$COMMIT_HASH" {} \;
+
RUN echo "Building azimuth-watcher-ts" && \
yarn && yarn build
diff --git a/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile
index e09738ac..6ca9c7de 100644
--- a/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile
+++ b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile
@@ -6,5 +6,10 @@ WORKDIR /app
COPY . .
+# Get the latest Git commit hash and set it in package.json
+RUN COMMIT_HASH=$(git rev-parse HEAD) && \
+ jq --arg commitHash "$COMMIT_HASH" '.commitHash = $commitHash' package.json > tmp.json && \
+ mv tmp.json package.json
+
RUN echo "Installing dependencies and building merkl-sushiswap-v3-watcher-ts" && \
yarn && yarn build
diff --git a/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile
index ac6241c4..62e580a8 100644
--- a/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile
+++ b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile
@@ -6,5 +6,10 @@ WORKDIR /app
COPY . .
+# Get the latest Git commit hash and set it in package.json
+RUN COMMIT_HASH=$(git rev-parse HEAD) && \
+ jq --arg commitHash "$COMMIT_HASH" '.commitHash = $commitHash' package.json > tmp.json && \
+ mv tmp.json package.json
+
RUN echo "Installing dependencies and building sushiswap-v3-watcher-ts" && \
yarn && yarn build
diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile
index b9e4740a..856cc778 100644
--- a/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile
+++ b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile
@@ -34,7 +34,7 @@ RUN \
# [Optional] Uncomment this section to install additional OS packages.
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
- && apt-get -y install --no-install-recommends jq gettext-base
+ && apt-get -y install --no-install-recommends jq gettext-base git
# [Optional] Uncomment if you want to install an additional version of node using nvm
# ARG EXTRA_NODE_VERSION=10
diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/start-serving-app.sh
index 4fa1dc03..3a114ee0 100755
--- a/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/start-serving-app.sh
+++ b/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/start-serving-app.sh
@@ -8,21 +8,27 @@ CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"
CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"
-if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
- if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ] && [ -d "${CERC_WEBAPP_FILES_DIR}/static" ]; then
+if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
+ # If there is only one HTML file, assume an SPA.
+ if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ]; then
CERC_SINGLE_PAGE_APP=true
else
CERC_SINGLE_PAGE_APP=false
fi
fi
-if [ "true" == "$CERC_ENABLE_CORS" ]; then
+# ${var,,} is a lower-case comparison
+if [ "true" == "${CERC_ENABLE_CORS,,}" ]; then
CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --cors"
fi
-if [ "true" == "$CERC_SINGLE_PAGE_APP" ]; then
+# ${var,,} is a lower-case comparison
+if [ "true" == "${CERC_SINGLE_PAGE_APP,,}" ]; then
+ echo "Serving content as single-page app. If this is wrong, set 'CERC_SINGLE_PAGE_APP=false'"
# Create a catchall redirect back to /
CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --proxy http://localhost:${CERC_LISTEN_PORT}?"
+else
+ echo "Serving content normally. If this is a single-page app, set 'CERC_SINGLE_PAGE_APP=true'"
fi
LACONIC_HOSTED_CONFIG_FILE=${LACONIC_HOSTED_CONFIG_FILE}
@@ -39,4 +45,4 @@ if [ -f "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
fi
/scripts/apply-runtime-env.sh ${CERC_WEBAPP_FILES_DIR}
-http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT} "${CERC_WEBAPP_FILES_DIR}"
\ No newline at end of file
+http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT} "${CERC_WEBAPP_FILES_DIR}"
diff --git a/stack_orchestrator/data/stacks/ajna/README.md b/stack_orchestrator/data/stacks/ajna/README.md
index 6f88ec0a..137e9c89 100644
--- a/stack_orchestrator/data/stacks/ajna/README.md
+++ b/stack_orchestrator/data/stacks/ajna/README.md
@@ -53,7 +53,7 @@ Inside deployment directory, open the `config.env` file and set following env v
```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
-CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+CERC_ETH_RPC_ENDPOINTS=https://example-lotus-endpoint-1/rpc/v1,https://example-lotus-endpoint-2/rpc/v1
```
### Start the deployment
diff --git a/stack_orchestrator/data/stacks/ajna/stack.yml b/stack_orchestrator/data/stacks/ajna/stack.yml
index 4d64559e..c6ba0c04 100644
--- a/stack_orchestrator/data/stacks/ajna/stack.yml
+++ b/stack_orchestrator/data/stacks/ajna/stack.yml
@@ -2,7 +2,7 @@ version: "1.0"
name: ajna
description: "Ajna watcher stack"
repos:
- - git.vdb.to/cerc-io/ajna-watcher-ts@v0.1.1
+ - git.vdb.to/cerc-io/ajna-watcher-ts@v0.1.13
containers:
- cerc/watcher-ajna
pods:
diff --git a/stack_orchestrator/data/stacks/azimuth/README.md b/stack_orchestrator/data/stacks/azimuth/README.md
index 21200369..37876f66 100644
--- a/stack_orchestrator/data/stacks/azimuth/README.md
+++ b/stack_orchestrator/data/stacks/azimuth/README.md
@@ -4,7 +4,7 @@ Instructions to setup and deploy Azimuth Watcher stack
## Setup
-Prerequisite: `ipld-eth-server` RPC and GQL endpoints
+Prerequisite: External RPC endpoints
Clone required repositories:
@@ -44,34 +44,42 @@ network:
- 0.0.0.0:9000:9000
azimuth-watcher-server:
- 0.0.0.0:3001:3001
+ - 0.0.0.0:9001:9001
censures-watcher-job-runner:
- 0.0.0.0:9002:9002
censures-watcher-server:
- 0.0.0.0:3002:3002
+ - 0.0.0.0:9003:9003
claims-watcher-job-runner:
- 0.0.0.0:9004:9004
claims-watcher-server:
- 0.0.0.0:3003:3003
+ - 0.0.0.0:9005:9005
conditional-star-release-watcher-job-runner:
- 0.0.0.0:9006:9006
conditional-star-release-watcher-server:
- 0.0.0.0:3004:3004
+ - 0.0.0.0:9007:9007
delegated-sending-watcher-job-runner:
- 0.0.0.0:9008:9008
delegated-sending-watcher-server:
- 0.0.0.0:3005:3005
+ - 0.0.0.0:9009:9009
ecliptic-watcher-job-runner:
- 0.0.0.0:9010:9010
ecliptic-watcher-server:
- 0.0.0.0:3006:3006
+ - 0.0.0.0:9011:9011
linear-star-release-watcher-job-runner:
- 0.0.0.0:9012:9012
linear-star-release-watcher-server:
- 0.0.0.0:3007:3007
+ - 0.0.0.0:9013:9013
polls-watcher-job-runner:
- 0.0.0.0:9014:9014
polls-watcher-server:
- 0.0.0.0:3008:3008
+ - 0.0.0.0:9015:9015
gateway-server:
- 0.0.0.0:4000:4000
...
@@ -94,7 +102,7 @@ Inside the deployment directory, open the file `config.env` and add variable to
```bash
# External RPC endpoints
- CERC_IPLD_ETH_RPC=
+ CERC_ETH_RPC_ENDPOINTS=https://example-rpc-endpoint-1,https://example-rpc-endpoint-2
```
* NOTE: If RPC endpoint is on the host machine, use `host.docker.internal` as the hostname to access the host port, or use the `ip a` command to find the IP address of the `docker0` interface (this will usually be something like `172.17.0.1` or `172.18.0.1`)
@@ -120,4 +128,7 @@ To stop all azimuth services and also delete data:
```bash
laconic-so deployment --dir azimuth-deployment stop --delete-volumes
+
+# Remove deployment directory (deployment will have to be recreated for a re-run)
+rm -r azimuth-deployment
```
diff --git a/stack_orchestrator/data/stacks/azimuth/stack.yml b/stack_orchestrator/data/stacks/azimuth/stack.yml
index 374f2af0..8c942319 100644
--- a/stack_orchestrator/data/stacks/azimuth/stack.yml
+++ b/stack_orchestrator/data/stacks/azimuth/stack.yml
@@ -1,7 +1,7 @@
version: "1.0"
name: azimuth
repos:
- - github.com/cerc-io/azimuth-watcher-ts@v0.1.3
+ - github.com/cerc-io/azimuth-watcher-ts@0.1.6
containers:
- cerc/watcher-azimuth
pods:
diff --git a/stack_orchestrator/data/stacks/graph-node/README.md b/stack_orchestrator/data/stacks/graph-node/README.md
index df3ae1eb..47728ed8 100644
--- a/stack_orchestrator/data/stacks/graph-node/README.md
+++ b/stack_orchestrator/data/stacks/graph-node/README.md
@@ -43,16 +43,18 @@ customized by editing the "spec" file generated by `laconic-so deploy init`.
```
$ cat graph-node-spec.yml
stack: graph-node
-ports:
- graph-node:
- - '8000:8000'
- - '8001'
- - '8020:8020'
- - '8030'
- ipfs:
- - '8080'
- - '4001'
- - '5001:5001'
+network:
+ ports:
+ graph-node:
+ - '8000:8000'
+ - '8001'
+ - '8020:8020'
+ - '8030'
+ - '8040'
+ ipfs:
+ - '8080'
+ - '4001'
+ - '5001:5001'
...
```
@@ -64,7 +66,7 @@ laconic-so --stack graph-node deploy create --spec-file graph-node-spec.yml --de
## Start the stack
-Create an env file with the following values to be set before starting the stack:
+Update `config.env` file inside deployment directory with the following values before starting the stack:
```bash
# Set ETH RPC endpoint the graph node will use
@@ -76,21 +78,35 @@ export ETH_RPC_PORT=
# The etherum network(s) graph-node will connect to
# Set this to a space-separated list of the networks where each entry has the form NAME:URL
export ETH_NETWORKS=
+
+# Optional:
+
+# Timeout for ETH RPC requests in seconds (default: 180s)
+export GRAPH_ETHEREUM_JSON_RPC_TIMEOUT=
+
+# Number of times to retry ETH RPC requests (default: 10)
+export GRAPH_ETHEREUM_REQUEST_RETRIES=
+
+# Maximum number of blocks to scan for triggers in each request (default: 2000)
+export GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE=
+
+# Maximum number of concurrent requests made against Ethereum for requesting transaction receipts during block ingestion (default: 1000)
+export GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS=
+
+# Ref: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
```
-Example env file:
+Example `config.env` file:
```bash
export ETH_RPC_HOST=filecoin.chainup.net
export ETH_RPC_PORT=443
export ETH_NETWORKS=filecoin:https://filecoin.chainup.net/rpc/v1
-```
-Set the environment variables:
-
-```bash
-source
+export GRAPH_ETHEREUM_JSON_RPC_TIMEOUT=360
+export GRAPH_ETHEREUM_REQUEST_RETRIES=5
+export GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE=50
```
Deploy the stack:
diff --git a/stack_orchestrator/data/stacks/graph-node/stack.yml b/stack_orchestrator/data/stacks/graph-node/stack.yml
index ce45e965..262b7626 100644
--- a/stack_orchestrator/data/stacks/graph-node/stack.yml
+++ b/stack_orchestrator/data/stacks/graph-node/stack.yml
@@ -3,7 +3,9 @@ name: graph-node
description: "Stack for running graph-node"
repos:
- github.com/graphprotocol/graph-node
+ - github.com/cerc-io/watcher-ts@v0.2.92
containers:
- cerc/graph-node
+ - cerc/watcher-ts
pods:
- graph-node
diff --git a/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py
index b611a0d6..337b72ab 100644
--- a/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py
+++ b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py
@@ -14,10 +14,11 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from stack_orchestrator.util import get_yaml
-from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext
+from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand
+from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.stack_state import State
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
-from stack_orchestrator.command_types import CommandOptions
+from stack_orchestrator.opts import opts
from enum import Enum
from pathlib import Path
from shutil import copyfile, copytree
@@ -61,7 +62,7 @@ def _get_node_moniker_from_config(network_dir: Path):
return moniker
-def _get_node_key_from_gentx(options: CommandOptions, gentx_file_name: str):
+def _get_node_key_from_gentx(gentx_file_name: str):
gentx_file_path = Path(gentx_file_name)
if gentx_file_path.exists():
with open(Path(gentx_file_name), "rb") as f:
@@ -76,24 +77,24 @@ def _comma_delimited_to_list(list_str: str):
return list_str.split(",") if list_str else []
-def _get_node_keys_from_gentx_files(options: CommandOptions, gentx_file_list: str):
+def _get_node_keys_from_gentx_files(gentx_file_list: str):
node_keys = []
gentx_files = _comma_delimited_to_list(gentx_file_list)
for gentx_file in gentx_files:
- node_key = _get_node_key_from_gentx(options, gentx_file)
+ node_key = _get_node_key_from_gentx(gentx_file)
if node_key:
node_keys.append(node_key)
return node_keys
-def _copy_gentx_files(options: CommandOptions, network_dir: Path, gentx_file_list: str):
+def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
gentx_files = _comma_delimited_to_list(gentx_file_list)
for gentx_file in gentx_files:
gentx_file_path = Path(gentx_file)
copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)))
-def _remove_persistent_peers(options: CommandOptions, network_dir: Path):
+def _remove_persistent_peers(network_dir: Path):
config_file_path = _config_toml_path(network_dir)
if not config_file_path.exists():
print("Error: config.toml not found")
@@ -107,7 +108,7 @@ def _remove_persistent_peers(options: CommandOptions, network_dir: Path):
output_file.write(config_file_content)
-def _insert_persistent_peers(options: CommandOptions, config_dir: Path, new_persistent_peers: str):
+def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str):
config_file_path = config_dir.joinpath("config.toml")
if not config_file_path.exists():
print("Error: config.toml not found")
@@ -150,7 +151,7 @@ def _phase_from_params(parameters):
def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args):
- options = command_context.cluster_context.options
+ options = opts.o
currency = "stake" # Does this need to be a parameter?
@@ -237,7 +238,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
print("Error: --gentx-files must be supplied")
sys.exit(1)
# First look in the supplied gentx files for the other nodes' keys
- other_node_keys = _get_node_keys_from_gentx_files(options, parameters.gentx_file_list)
+ other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_file_list)
# Add those keys to our genesis, with balances we determine here (why?)
for other_node_key in other_node_keys:
outputk, statusk = run_container_command(
@@ -246,7 +247,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
if options.debug:
print(f"Command output: {outputk}")
# Copy the gentx json files into our network dir
- _copy_gentx_files(options, network_dir, parameters.gentx_file_list)
+ _copy_gentx_files(network_dir, parameters.gentx_file_list)
# Now we can run collect-gentxs
output1, status1 = run_container_command(
command_context, "laconicd", f"laconicd collect-gentxs --home {laconicd_home_path_in_container}", mounts)
@@ -255,7 +256,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
print(f"Generated genesis file, please copy to other nodes as required: \
{os.path.join(network_dir, 'config', 'genesis.json')}")
# Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now
- _remove_persistent_peers(options, network_dir)
+ _remove_persistent_peers(network_dir)
# In both cases we validate the genesis file now
output2, status1 = run_container_command(
command_context, "laconicd", f"laconicd validate-genesis --home {laconicd_home_path_in_container}", mounts)
@@ -266,7 +267,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
sys.exit(1)
-def create(context: DeploymentContext, extra_args):
+def create(deployment_context: DeploymentContext, extra_args):
network_dir = extra_args[0]
if network_dir is None:
print("Error: --network-dir must be supplied")
@@ -285,15 +286,15 @@ def create(context: DeploymentContext, extra_args):
sys.exit(1)
# Copy the network directory contents into our deployment
# TODO: change this to work with non local paths
- deployment_config_dir = context.deployment_dir.joinpath("data", "laconicd-config")
+ deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config")
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
# If supplied, add the initial persistent peers to the config file
if extra_args[1]:
initial_persistent_peers = extra_args[1]
- _insert_persistent_peers(context.command_context.cluster_context.options, deployment_config_dir, initial_persistent_peers)
+ _insert_persistent_peers(deployment_config_dir, initial_persistent_peers)
# Copy the data directory contents into our deployment
# TODO: change this to work with non local paths
- deployment_data_dir = context.deployment_dir.joinpath("data", "laconicd-data")
+ deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md
index ddd8ecf6..124fc380 100644
--- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md
+++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md
@@ -53,7 +53,7 @@ Inside deployment directory, open the `config.env` file and set following env v
```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
-CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+CERC_ETH_RPC_ENDPOINTS=https://example-lotus-endpoint-1/rpc/v1,https://example-lotus-endpoint-2/rpc/v1
```
### Start the deployment
diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml
index c080d324..779cb2e6 100644
--- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml
+++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml
@@ -2,7 +2,7 @@ version: "1.0"
name: merkl-sushiswap-v3
description: "SushiSwap v3 watcher stack"
repos:
- - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.7
+ - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.14
containers:
- cerc/watcher-merkl-sushiswap-v3
pods:
diff --git a/stack_orchestrator/data/stacks/monitoring/README.md b/stack_orchestrator/data/stacks/monitoring/README.md
index 99502902..f0d39be6 100644
--- a/stack_orchestrator/data/stacks/monitoring/README.md
+++ b/stack_orchestrator/data/stacks/monitoring/README.md
@@ -134,6 +134,29 @@ Note: Use `host.docker.internal` as host to access ports on the host machine
Place the dashboard json files in grafana dashboards config directory (`monitoring-deployment/config/monitoring/grafana/dashboards`) in the deployment folder
+#### Graph Node Config
+
+For the graph-node dashboard, a Postgres datasource needs to be set up in `monitoring-deployment/config/monitoring/grafana/provisioning/datasources/graph-node-postgres.yml` (in the deployment folder)
+
+```yml
+# graph-node-postgres.yml
+...
+datasources:
+ - name: Graph Node Postgres
+ type: postgres
+ jsonData:
+ # Set name to remote graph-node database name
+ database: graph-node
+ ...
+ # Set user to remote graph-node database username
+ user: graph-node
+ # Add URL for remote graph-node database
+ url: graph-node-db:5432
+ # Set password for graph-node database
+ secureJsonData:
+ password: 'password'
+```
+
### Env
Set the following env variables in the deployment env config file (`monitoring-deployment/config.env`):
@@ -156,6 +179,11 @@ Set the following env variables in the deployment env config file (`monitoring-d
# Grafana server host URL (used in various links in alerts, etc.)
# (Optional, default: http://localhost:3000)
GF_SERVER_ROOT_URL=
+
+
+ # RPC endpoint used by graph-node for upstream head metric
+ # (Optional, default: https://mainnet.infura.io/v3)
+ GRAPH_NODE_RPC_ENDPOINT=
```
## Start the stack
diff --git a/stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md b/stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md
index 2f057c3c..158da503 100644
--- a/stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md
+++ b/stack_orchestrator/data/stacks/monitoring/monitoring-watchers.md
@@ -57,35 +57,35 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
metrics_path: /metrics
scheme: http
static_configs:
- - targets: ['AZIMUTH_WATCHER_HOST:AZIMUTH_WATCHER_PORT']
+ - targets: ['AZIMUTH_WATCHER_HOST:AZIMUTH_WATCHER_METRICS_PORT', 'AZIMUTH_WATCHER_HOST:AZIMUTH_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'azimuth'
chain: 'ethereum'
- - targets: ['CENSURES_WATCHER_HOST:CENSURES_WATCHER_PORT']
+ - targets: ['CENSURES_WATCHER_HOST:CENSURES_WATCHER_METRICS_PORT', 'CENSURES_WATCHER_HOST:CENSURES_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'censures'
chain: 'ethereum'
- - targets: ['CLAIMS_WATCHER_HOST:CLAIMS_WATCHER_PORT']
+ - targets: ['CLAIMS_WATCHER_HOST:CLAIMS_WATCHER_METRICS_PORT', 'CLAIMS_WATCHER_HOST:CLAIMS_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'claims'
chain: 'ethereum'
- - targets: ['CONDITIONAL_STAR_RELEASE_WATCHER_HOST:CONDITIONAL_STAR_RELEASE_WATCHER_PORT']
+ - targets: ['CONDITIONAL_STAR_RELEASE_WATCHER_HOST:CONDITIONAL_STAR_RELEASE_WATCHER_METRICS_PORT', 'CONDITIONAL_STAR_RELEASE_WATCHER_HOST:CONDITIONAL_STAR_RELEASE_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'conditional_star_release'
chain: 'ethereum'
- - targets: ['DELEGATED_SENDING_WATCHER_HOST:DELEGATED_SENDING_WATCHER_PORT']
+ - targets: ['DELEGATED_SENDING_WATCHER_HOST:DELEGATED_SENDING_WATCHER_METRICS_PORT', 'DELEGATED_SENDING_WATCHER_HOST:DELEGATED_SENDING_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'delegated_sending'
chain: 'ethereum'
- - targets: ['ECLIPTIC_WATCHER_HOST:ECLIPTIC_WATCHER_PORT']
+ - targets: ['ECLIPTIC_WATCHER_HOST:ECLIPTIC_WATCHER_METRICS_PORT', 'ECLIPTIC_WATCHER_HOST:ECLIPTIC_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'ecliptic'
chain: 'ethereum'
- - targets: ['LINEAR_STAR_WATCHER_HOST:LINEAR_STAR_WATCHER_PORT']
+ - targets: ['LINEAR_STAR_WATCHER_HOST:LINEAR_STAR_WATCHER_METRICS_PORT', 'LINEAR_STAR_WATCHER_HOST:LINEAR_STAR_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'linear_star_release'
chain: 'ethereum'
- - targets: ['POLLS_WATCHER_HOST:POLLS_WATCHER_PORT']
+ - targets: ['POLLS_WATCHER_HOST:POLLS_WATCHER_METRICS_PORT', 'POLLS_WATCHER_HOST:POLLS_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'polls'
chain: 'ethereum'
@@ -95,11 +95,11 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
metrics_path: /metrics
scheme: http
static_configs:
- - targets: ['SUSHISWAP_WATCHER_HOST:SUSHISWAP_WATCHER_PORT']
+ - targets: ['SUSHISWAP_WATCHER_HOST:SUSHISWAP_WATCHER_METRICS_PORT', 'SUSHISWAP_WATCHER_HOST:SUSHISWAP_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'sushiswap'
chain: 'filecoin'
- - targets: ['MERKLE_SUSHISWAP_WATCHER_HOST:MERKLE_SUSHISWAP_WATCHER_PORT']
+ - targets: ['MERKLE_SUSHISWAP_WATCHER_HOST:MERKLE_SUSHISWAP_WATCHER_METRICS_PORT', 'MERKLE_SUSHISWAP_WATCHER_HOST:MERKLE_SUSHISWAP_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'merkl_sushiswap'
chain: 'filecoin'
@@ -109,25 +109,35 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
metrics_path: /metrics
scheme: http
static_configs:
- - targets: ['AJNA_WATCHER_HOST:AJNA_WATCHER_PORT']
+ - targets: ['AJNA_WATCHER_HOST:AJNA_WATCHER_METRICS_PORT', 'AJNA_WATCHER_HOST:AJNA_WATCHER_GQL_METRICS_PORT']
labels:
instance: 'ajna'
chain: 'filecoin'
+
+ - job_name: graph-node
+ metrics_path: /metrics
+ scrape_interval: 30s
+ static_configs:
+ - targets: ['GRAPH_NODE_HOST:GRAPH_NODE_HOST_METRICS_PORT']
```
Add scrape config as done above for any additional watcher to add it to the Watchers dashboard.
### Grafana alerts config
-Place the pre-configured watcher alerts rules in Grafana provisioning directory:
+Place the pre-configured alert rules in the Grafana provisioning directory:
```bash
+ # watcher alert rules
cp monitoring-watchers-deployment/config/monitoring/watcher-alert-rules.yml monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/
+
+ # subgraph alert rules
+ cp monitoring-watchers-deployment/config/monitoring/subgraph-alert-rules.yml monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/
```
Update the alerting contact points config (`monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/contactpoints.yml`) with desired contact points
-Add corresponding routes to the notification policies config (`monitoring-watchers-deployment/monitoring/grafana/provisioning/alerting/policies.yaml`) with appropriate object-matchers:
+Add corresponding routes to the notification policies config (`monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/policies.yml`) with appropriate object-matchers:
```yml
...
@@ -135,7 +145,7 @@ Add corresponding routes to the notification policies config (`monitoring-watche
- receiver: SlackNotifier
object_matchers:
# Add matchers below
- - ['grafana_folder', '=', 'WatcherAlerts']
+ - ['grafana_folder', '=~', 'WatcherAlerts|SubgraphAlerts']
```
### Env
@@ -149,6 +159,9 @@ Set the following env variables in the deployment env config file (`monitoring-w
# Grafana server host URL to be used
# (Optional, default: http://localhost:3000)
GF_SERVER_ROOT_URL=
+
+ # List of subgraph ids to configure alerts for (separated by |)
+ CERC_GRAFANA_ALERTS_SUBGRAPH_IDS=
```
## Start the stack
diff --git a/stack_orchestrator/data/stacks/monitoring/stack.yml b/stack_orchestrator/data/stacks/monitoring/stack.yml
index 48605dc3..88a84b80 100644
--- a/stack_orchestrator/data/stacks/monitoring/stack.yml
+++ b/stack_orchestrator/data/stacks/monitoring/stack.yml
@@ -1,7 +1,7 @@
version: "0.1"
name: monitoring
repos:
- - github.com/cerc-io/watcher-ts@v0.2.81
+ - github.com/cerc-io/watcher-ts@v0.2.92
containers:
- cerc/watcher-ts
pods:
diff --git a/stack_orchestrator/data/stacks/snowballtools-base-backend/stack.yml b/stack_orchestrator/data/stacks/snowballtools-base-backend/stack.yml
deleted file mode 100644
index 3ee19b05..00000000
--- a/stack_orchestrator/data/stacks/snowballtools-base-backend/stack.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-version: "1.0"
-name: snowballtools-base-backend
-description: "snowballtools-base-backend"
-repos:
- - github.com/snowball-tools/snowballtools-base
-containers:
- - cerc/webapp-base
- - cerc/snowballtools-base-backend
-pods:
- - snowballtools-base-backend
diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/README.md b/stack_orchestrator/data/stacks/sushiswap-v3/README.md
index 6bcbb54c..cec8a825 100644
--- a/stack_orchestrator/data/stacks/sushiswap-v3/README.md
+++ b/stack_orchestrator/data/stacks/sushiswap-v3/README.md
@@ -53,7 +53,7 @@ Inside deployment directory, open the `config.env` file and set following env v
```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
-CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+CERC_ETH_RPC_ENDPOINTS=https://example-lotus-endpoint-1/rpc/v1,https://example-lotus-endpoint-2/rpc/v1
```
### Start the deployment
diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml
index 248cb381..ac29632b 100644
--- a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml
+++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml
@@ -2,7 +2,7 @@ version: "1.0"
name: sushiswap-v3
description: "SushiSwap v3 watcher stack"
repos:
- - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.7
+ - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.14
containers:
- cerc/watcher-sushiswap-v3
pods:
diff --git a/stack_orchestrator/data/stacks/webapp-deployer-backend/stack.yml b/stack_orchestrator/data/stacks/webapp-deployer-backend/stack.yml
index 04000a1b..dad4b773 100644
--- a/stack_orchestrator/data/stacks/webapp-deployer-backend/stack.yml
+++ b/stack_orchestrator/data/stacks/webapp-deployer-backend/stack.yml
@@ -2,10 +2,10 @@ version: "1.0"
name: webapp-deployer-backend
description: "Deployer for webapps"
repos:
- - git.vdb.to/telackey/webapp-deployment-status-api
+ - git.vdb.to/cerc-io/webapp-deployment-status-api
containers:
- cerc/webapp-deployer-backend
pods:
- name: webapp-deployer-backend
- repository: git.vdb.to/telackey/webapp-deployment-status-api
+ repository: git.vdb.to/cerc-io/webapp-deployment-status-api
path: ./
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index 29afcf13..deb32d63 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -26,7 +26,15 @@ import click
from pathlib import Path
from stack_orchestrator import constants
from stack_orchestrator.opts import opts
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
+from stack_orchestrator.util import (
+ get_stack_path,
+ include_exclude_check,
+ get_parsed_stack_config,
+ global_options2,
+ get_dev_root_path,
+ stack_is_in_deployment,
+ resolve_compose_file,
+)
from stack_orchestrator.deploy.deployer import Deployer, DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer
from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
@@ -59,6 +67,7 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
if deploy_to is None:
deploy_to = "compose"
+ stack = get_stack_path(stack)
ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to)
# Subcommand is executed now, by the magic of click
@@ -273,16 +282,12 @@ def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
-
dev_root_path = get_dev_root_path(ctx)
- # TODO: huge hack, fix this
- # If the caller passed a path for the stack file, then we know that we can get the compose files
- # from the same directory
- deployment = False
- if isinstance(stack, os.PathLike):
- compose_dir = stack.parent.joinpath("compose")
- deployment = True
+ # TODO: hack, this should be encapsulated by the deployment context.
+ deployment = stack_is_in_deployment(stack)
+ if deployment:
+ compose_dir = stack.joinpath("compose")
else:
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
@@ -324,7 +329,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
pod_path = pod["path"]
if include_exclude_check(pod_name, include, exclude):
if pod_repository is None or pod_repository == "internal":
- compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+ if deployment:
+ compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+ else:
+ compose_file_name = resolve_compose_file(stack, pod_name)
else:
if deployment:
compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
@@ -336,6 +344,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
if pod_post_start_command is not None:
post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
else:
+ # TODO: fix this code for external stack with scripts
pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
pod_pre_start_command = pod.get("pre_start_command")
diff --git a/stack_orchestrator/deploy/deploy_util.py b/stack_orchestrator/deploy/deploy_util.py
index 8b812d3a..9ee09619 100644
--- a/stack_orchestrator/deploy/deploy_util.py
+++ b/stack_orchestrator/deploy/deploy_util.py
@@ -16,7 +16,7 @@
import os
from typing import List, Any
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
-from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list
+from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
from stack_orchestrator.opts import opts
@@ -27,7 +27,7 @@ def _container_image_from_service(stack: str, service: str):
pods = get_pod_list(parsed_stack)
yaml = get_yaml()
for pod in pods:
- pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
+ pod_file_path = resolve_compose_file(stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "services" in parsed_pod_file:
services = parsed_pod_file["services"]
diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py
index f364121f..a7fd8bb2 100644
--- a/stack_orchestrator/deploy/deployment.py
+++ b/stack_orchestrator/deploy/deployment.py
@@ -50,15 +50,15 @@ def command(ctx, dir):
def make_deploy_context(ctx) -> DeployCommandContext:
context: DeploymentContext = ctx.obj
- stack_file_path = context.get_stack_file()
env_file = context.get_env_file()
cluster_name = context.get_cluster_id()
if constants.deploy_to_key in context.spec.obj:
deployment_type = context.spec.obj[constants.deploy_to_key]
else:
deployment_type = constants.compose_deploy_type
- return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file,
- deployment_type)
+ stack = context.deployment_dir
+ return create_deploy_context(ctx.parent.parent.obj, context, stack, None, None,
+ cluster_name, env_file, deployment_type)
@command.command()
@@ -123,6 +123,7 @@ def push_images(ctx):
@click.argument('extra_args', nargs=-1) # help: command: port
@click.pass_context
def port(ctx, extra_args):
+ ctx.obj = make_deploy_context(ctx)
port_operation(ctx, extra_args)
diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py
index 8da93f7a..5f565854 100644
--- a/stack_orchestrator/deploy/deployment_create.py
+++ b/stack_orchestrator/deploy/deployment_create.py
@@ -24,9 +24,10 @@ from secrets import token_hex
import sys
from stack_orchestrator import constants
from stack_orchestrator.opts import opts
-from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
+from stack_orchestrator.util import (get_stack_path, get_parsed_deployment_spec, get_parsed_stack_config,
global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
- get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file)
+ get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file,
+ resolve_config_dir)
from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
@@ -43,7 +44,7 @@ def _get_ports(stack):
pods = get_pod_list(parsed_stack)
yaml = get_yaml()
for pod in pods:
- pod_file_path = get_pod_file_path(parsed_stack, pod)
+ pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "services" in parsed_pod_file:
for svc_name, svc in parsed_pod_file["services"].items():
@@ -79,7 +80,7 @@ def _get_named_volumes(stack):
return ret
for pod in pods:
- pod_file_path = get_pod_file_path(parsed_stack, pod)
+ pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "volumes" in parsed_pod_file:
volumes = parsed_pod_file["volumes"]
@@ -237,6 +238,11 @@ def _find_extra_config_dirs(parsed_pod_file, pod):
config_dir = host_path.split("/")[2]
if config_dir != pod:
config_dirs.add(config_dir)
+ for env_file in service_info.get("env_file", []):
+ if env_file.startswith("../config"):
+ config_dir = env_file.split("/")[2]
+ if config_dir != pod:
+ config_dirs.add(config_dir)
return config_dirs
@@ -453,7 +459,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
_check_volume_definitions(parsed_spec)
stack_name = parsed_spec["stack"]
deployment_type = parsed_spec[constants.deploy_to_key]
- stack_file = get_stack_file_path(stack_name)
+ stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
parsed_stack = get_parsed_stack_config(stack_name)
if opts.o.debug:
print(f"parsed spec: {parsed_spec}")
@@ -466,7 +472,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
os.mkdir(deployment_dir_path)
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
- copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file)))
+ copyfile(stack_file, deployment_dir_path.joinpath(constants.stack_file_name))
_create_deployment_file(deployment_dir_path)
# Copy any config varibles from the spec file into an env file suitable for compose
_write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name))
@@ -480,10 +486,9 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
os.mkdir(destination_compose_dir)
destination_pods_dir = deployment_dir_path.joinpath("pods")
os.mkdir(destination_pods_dir)
- data_dir = Path(__file__).absolute().parent.parent.joinpath("data")
yaml = get_yaml()
for pod in pods:
- pod_file_path = get_pod_file_path(parsed_stack, pod)
+ pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
destination_pod_dir = destination_pods_dir.joinpath(pod)
@@ -497,7 +502,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
config_dirs = {pod}
config_dirs = config_dirs.union(extra_config_dirs)
for config_dir in config_dirs:
- source_config_dir = data_dir.joinpath("config", config_dir)
+ source_config_dir = resolve_config_dir(stack_name, config_dir)
if os.path.exists(source_config_dir):
destination_config_dir = deployment_dir_path.joinpath("config", config_dir)
# If the same config dir appears in multiple pods, it may already have been copied
diff --git a/stack_orchestrator/deploy/images.py b/stack_orchestrator/deploy/images.py
index 77713d18..f2af1c09 100644
--- a/stack_orchestrator/deploy/images.py
+++ b/stack_orchestrator/deploy/images.py
@@ -29,16 +29,29 @@ def _image_needs_pushed(image: str):
return image.endswith(":local")
+def _remote_tag_for_image(image: str, remote_repo_url: str):
+ # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
+ major_parts = image.split("/", 2)
+ image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
+ (image_name, image_version) = image_name_with_version.split(":")
+ if image_version == "local":
+ return f"{remote_repo_url}/{image_name}:deploy"
+ else:
+ return image
+
+
+# Note: do not add any calls this function
def remote_image_exists(remote_repo_url: str, local_tag: str):
docker = DockerClient()
try:
- remote_tag = remote_tag_for_image(local_tag, remote_repo_url)
+ remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
result = docker.manifest.inspect(remote_tag)
return True if result else False
except Exception: # noqa: E722
return False
+# Note: do not add any calls this function
def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
if not additional_tags:
return
@@ -47,18 +60,20 @@ def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
raise Exception(f"{local_tag} does not exist in {remote_repo_url}")
docker = DockerClient()
- remote_tag = remote_tag_for_image(local_tag, remote_repo_url)
- new_remote_tags = [remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
+ remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
+ new_remote_tags = [_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
-def remote_tag_for_image(image: str, remote_repo_url: str):
+def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
major_parts = image.split("/", 2)
image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
(image_name, image_version) = image_name_with_version.split(":")
if image_version == "local":
- return f"{remote_repo_url}/{image_name}:deploy"
+ # Salt the tag with part of the deployment id to make it unique to this deployment
+ deployment_tag = deployment_id[-8:]
+ return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
else:
return image
@@ -73,14 +88,14 @@ def push_images_operation(command_context: DeployCommandContext, deployment_cont
docker = DockerClient()
for image in images:
if _image_needs_pushed(image):
- remote_tag = remote_tag_for_image(image, remote_repo_url)
+ remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
if opts.o.verbose:
print(f"Tagging {image} to {remote_tag}")
docker.image.tag(image, remote_tag)
# Run docker push commands to upload
for image in images:
if _image_needs_pushed(image):
- remote_tag = remote_tag_for_image(image, remote_repo_url)
+ remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
if opts.o.verbose:
print(f"Pushing image {remote_tag}")
docker.image.push(remote_tag)
diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py
index dbf7c907..7c696691 100644
--- a/stack_orchestrator/deploy/k8s/cluster_info.py
+++ b/stack_orchestrator/deploy/k8s/cluster_info.py
@@ -26,7 +26,7 @@ from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variable
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
-from stack_orchestrator.deploy.images import remote_tag_for_image
+from stack_orchestrator.deploy.images import remote_tag_for_image_unique
DEFAULT_VOLUME_RESOURCES = Resources({
"reservations": {"storage": "2Gi"}
@@ -326,8 +326,11 @@ class ClusterInfo:
if opts.o.debug:
print(f"Merged envs: {envs}")
# Re-write the image tag for remote deployment
- image_to_use = remote_tag_for_image(
- image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image
+ # Note self.app_name has the same value as deployment_id
+ image_to_use = remote_tag_for_image_unique(
+ image,
+ self.spec.get_image_registry(),
+ self.app_name) if self.spec.get_image_registry() is not None else image
volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
container = client.V1Container(
name=container_name,
diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py b/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py
index 2cc704ff..ba01c9e5 100644
--- a/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py
+++ b/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py
@@ -24,7 +24,7 @@ import uuid
import click
-from stack_orchestrator.deploy.images import remote_image_exists, add_tags_to_image
+from stack_orchestrator.deploy.images import remote_image_exists
from stack_orchestrator.deploy.webapp import deploy_webapp
from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient, TimedLogger,
build_container_image, push_container_image,
@@ -39,11 +39,12 @@ def process_app_deployment_request(
app_deployment_request,
deployment_record_namespace,
dns_record_namespace,
- dns_suffix,
+ default_dns_suffix,
deployment_parent_dir,
kube_config,
image_registry,
force_rebuild,
+ fqdn_policy,
logger
):
logger.log("BEGIN - process_app_deployment_request")
@@ -56,14 +57,15 @@ def process_app_deployment_request(
requested_name = hostname_for_deployment_request(app_deployment_request, laconic)
logger.log(f"Determined requested name: {requested_name}")
- # HACK
if "." in requested_name:
- raise Exception("Only unqualified hostnames allowed at this time.")
-
- fqdn = f"{requested_name}.{dns_suffix}"
+ if "allow" == fqdn_policy or "preexisting" == fqdn_policy:
+ fqdn = requested_name
+ else:
+ raise Exception(f"{requested_name} is invalid: only unqualified hostnames are allowed.")
+ else:
+ fqdn = f"{requested_name}.{default_dns_suffix}"
# 3. check ownership of existing dnsrecord vs this request
- # TODO: Support foreign DNS
dns_crn = f"{dns_record_namespace}/{fqdn}"
dns_record = laconic.get_record(dns_crn)
if dns_record:
@@ -75,7 +77,9 @@ def process_app_deployment_request(
logger.log(f"Matched DnsRecord ownership: {matched_owner}")
else:
raise Exception("Unable to confirm ownership of DnsRecord %s for request %s" %
- (dns_record.id, app_deployment_request.id))
+ (dns_crn, app_deployment_request.id))
+ elif "preexisting" == fqdn_policy:
+ raise Exception(f"No pre-existing DnsRecord {dns_crn} could be found for request {app_deployment_request.id}.")
# 4. get build and runtime config from request
env_filename = None
@@ -95,44 +99,62 @@ def process_app_deployment_request(
deployment_record = laconic.get_record(app_deployment_crn)
deployment_dir = os.path.join(deployment_parent_dir, fqdn)
+ # At present we use this to generate a unique but stable ID for the app's host container
+ # TODO: implement support to derive this transparently from the already-unique deployment id
+ unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
deployment_config_file = os.path.join(deployment_dir, "config.env")
- # TODO: Is there any reason not to simplify the hash input to the app_deployment_crn?
- deployment_container_tag = "laconic-webapp/%s:local" % hashlib.md5(deployment_dir.encode()).hexdigest()
+ deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id
app_image_shared_tag = f"laconic-webapp/{app.id}:local"
# b. check for deployment directory (create if necessary)
if not os.path.exists(deployment_dir):
if deployment_record:
raise Exception("Deployment record %s exists, but not deployment dir %s. Please remove name." %
(app_deployment_crn, deployment_dir))
- print("deploy_webapp", deployment_dir)
+ logger.log(f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}")
deploy_webapp.create_deployment(ctx, deployment_dir, deployment_container_tag,
f"https://{fqdn}", kube_config, image_registry, env_filename)
elif env_filename:
shutil.copyfile(env_filename, deployment_config_file)
needs_k8s_deploy = False
+ if force_rebuild:
+ logger.log("--force-rebuild is enabled so the container will always be built now, even if nothing has changed in the app")
# 6. build container (if needed)
- if not deployment_record or deployment_record.attributes.application != app.id:
+ # TODO: add a comment that explains what this code is doing (not clear to me)
+ if not deployment_record or deployment_record.attributes.application != app.id or force_rebuild:
needs_k8s_deploy = True
# check if the image already exists
shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
+ # Note: in the code below, calls to add_tags_to_image() won't work at present.
+ # This is because SO deployment code in general re-names the container image
+ # to be unique to the deployment. This is done transparently
+ # and so when we call add_tags_to_image() here and try to add tags to the remote image,
+ # we get the image name wrong. Accordingly I've disabled the relevant code for now.
+ # This is safe because we are running with --force-rebuild at present
if shared_tag_exists and not force_rebuild:
# simply add our unique tag to the existing image and we are done
- logger.log(f"Using existing app image {app_image_shared_tag} for {deployment_container_tag}")
- add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
+ logger.log(
+ f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} "
+ "tagging it with: {deployment_container_tag} to use in this deployment"
+ )
+ # add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
logger.log("Tag complete")
else:
extra_build_args = [] # TODO: pull from request
- logger.log(f"Building container image {deployment_container_tag}")
+ logger.log(f"Building container image: {deployment_container_tag}")
build_container_image(app, deployment_container_tag, extra_build_args, logger)
logger.log("Build complete")
- logger.log(f"Pushing container image {deployment_container_tag}")
+ logger.log(f"Pushing container image: {deployment_container_tag}")
push_container_image(deployment_dir, logger)
logger.log("Push complete")
# The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
- logger.log(f"Updating app image tag {app_image_shared_tag} from build of {deployment_container_tag}")
- add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
+ logger.log(
+ f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}"
+ )
+ # add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
logger.log("Tag complete")
+ else:
+ logger.log("Requested app is already deployed, skipping build and image push")
# 7. update config (if needed)
if not deployment_record or file_hash(deployment_config_file) != deployment_record.attributes.meta.config:
@@ -191,6 +213,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
@click.option("--state-file", help="File to store state about previously seen requests.")
@click.option("--only-update-state", help="Only update the state file, don't process any requests anything.", is_flag=True)
@click.option("--dns-suffix", help="DNS domain to use eg, laconic.servesthe.world")
+@click.option("--fqdn-policy", help="How to handle requests with an FQDN: prohibit, allow, preexisting", default="prohibit")
@click.option("--record-namespace-dns", help="eg, crn://laconic/dns")
@click.option("--record-namespace-deployments", help="eg, crn://laconic/deployments")
@click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
@@ -201,7 +224,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
@click.pass_context
def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir, # noqa: C901
request_id, discover, state_file, only_update_state,
- dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run,
+ dns_suffix, fqdn_policy, record_namespace_dns, record_namespace_deployments, dry_run,
include_tags, exclude_tags, force_rebuild, log_dir):
if request_id and discover:
print("Cannot specify both --request-id and --discover", file=sys.stderr)
@@ -220,6 +243,10 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
print("--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required", file=sys.stderr)
sys.exit(2)
+ if fqdn_policy not in ["prohibit", "allow", "preexisting"]:
+ print("--fqdn-policy must be one of 'prohibit', 'allow', or 'preexisting'", file=sys.stderr)
+ sys.exit(2)
+
# Split CSV and clean up values.
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
@@ -247,7 +274,10 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
requests_by_name = {}
skipped_by_name = {}
for r in requests:
- # TODO: Do this _after_ filtering deployments and cancellations to minimize round trips.
+ if r.id in previous_requests and previous_requests[r.id].get("status", "") != "RETRY":
+ print(f"Skipping request {r.id}, we've already seen it.")
+ continue
+
app = laconic.get_record(r.attributes.application)
if not app:
print("Skipping request %s, cannot locate app." % r.id)
@@ -334,6 +364,7 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
kube_config,
image_registry,
force_rebuild,
+ fqdn_policy,
logger
)
status = "DEPLOYED"
diff --git a/stack_orchestrator/deploy/webapp/util.py b/stack_orchestrator/deploy/webapp/util.py
index 5c484ed1..97e756d9 100644
--- a/stack_orchestrator/deploy/webapp/util.py
+++ b/stack_orchestrator/deploy/webapp/util.py
@@ -242,6 +242,7 @@ def determine_base_container(clone_dir, app_type="webapp"):
def build_container_image(app_record, tag, extra_build_args=[], logger=None):
tmpdir = tempfile.mkdtemp()
+ # TODO: determine if this code could be calling into the Python git library like setup-repositories
try:
record_id = app_record["id"]
ref = app_record.attributes.repository_ref
@@ -249,6 +250,16 @@ def build_container_image(app_record, tag, extra_build_args=[], logger=None):
clone_dir = os.path.join(tmpdir, record_id)
logger.log(f"Cloning repository {repo} to {clone_dir} ...")
+ # Set github credentials if present running a command like:
+ # git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
+ github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
+ if github_token:
+ logger.log("Github token detected, setting it in the git environment")
+ git_config_args = [
+ "git", "config", "--global", f"url.https://{github_token}:@github.com/.insteadOf", "https://github.com/"
+ ]
+ result = subprocess.run(git_config_args, stdout=logger.file, stderr=logger.file)
+ result.check_returncode()
if ref:
# TODO: Determing branch or hash, and use depth 1 if we can.
git_env = dict(os.environ.copy())
@@ -265,6 +276,7 @@ def build_container_image(app_record, tag, extra_build_args=[], logger=None):
logger.log(f"git checkout failed. Does ref {ref} exist?")
raise e
else:
+ # TODO: why is this code different vs the branch above (run vs check_call, and no prompt disable)?
result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=logger.file, stderr=logger.file)
result.check_returncode()
@@ -299,11 +311,12 @@ def push_container_image(deployment_dir, logger):
def deploy_to_k8s(deploy_record, deployment_dir, logger):
if not deploy_record:
- command = "up"
+ command = "start"
else:
command = "update"
logger.log("Deploying to k8s ...")
+ logger.log(f"Running {command} command on deployment dir: {deployment_dir}")
result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command],
stdout=logger.file, stderr=logger.file)
result.check_returncode()
diff --git a/stack_orchestrator/main.py b/stack_orchestrator/main.py
index c0a49689..06fe4ec7 100644
--- a/stack_orchestrator/main.py
+++ b/stack_orchestrator/main.py
@@ -17,6 +17,7 @@ import click
from stack_orchestrator.command_types import CommandOptions
from stack_orchestrator.repos import setup_repositories
+from stack_orchestrator.repos import fetch_stack
from stack_orchestrator.build import build_containers, fetch_containers
from stack_orchestrator.build import build_npms
from stack_orchestrator.build import build_webapp
@@ -50,6 +51,7 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
ctx.obj = command_options
+cli.add_command(fetch_stack.command, "fetch-stack")
cli.add_command(setup_repositories.command, "setup-repositories")
cli.add_command(build_containers.command, "build-containers")
cli.add_command(fetch_containers.command, "fetch-containers")
diff --git a/stack_orchestrator/repos/fetch_stack.py b/stack_orchestrator/repos/fetch_stack.py
new file mode 100644
index 00000000..9566e48f
--- /dev/null
+++ b/stack_orchestrator/repos/fetch_stack.py
@@ -0,0 +1,45 @@
+# Copyright © 2022, 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+# env vars:
+# CERC_REPO_BASE_DIR defaults to ~/cerc
+
+
+import click
+import os
+
+from decouple import config
+from git import exc
+
+from stack_orchestrator.opts import opts
+from stack_orchestrator.repos.setup_repositories import process_repo
+from stack_orchestrator.util import error_exit
+
+
+@click.command()
+@click.argument('stack-locator')
+@click.option('--git-ssh', is_flag=True, default=False)
+@click.option('--check-only', is_flag=True, default=False)
+@click.option('--pull', is_flag=True, default=False)
+@click.pass_context
+def command(ctx, stack_locator, git_ssh, check_only, pull):
+ '''optionally resolve then git clone a repository containing one or more stack definitions'''
+ dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+ if not opts.o.quiet:
+ print(f"Dev Root is: {dev_root_path}")
+ try:
+ process_repo(pull, check_only, git_ssh, dev_root_path, None, stack_locator)
+ except exc.GitCommandError as error:
+ error_exit(f"\n******* git command returned error exit status:\n{error}")
diff --git a/stack_orchestrator/repos/setup_repositories.py b/stack_orchestrator/repos/setup_repositories.py
index a137d645..83075647 100644
--- a/stack_orchestrator/repos/setup_repositories.py
+++ b/stack_orchestrator/repos/setup_repositories.py
@@ -20,13 +20,12 @@ import os
import sys
from decouple import config
import git
+from git.exc import GitCommandError
from tqdm import tqdm
import click
import importlib.resources
-from pathlib import Path
-import yaml
-from stack_orchestrator.constants import stack_file_name
-from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit, warn_exit
+from stack_orchestrator.opts import opts
+from stack_orchestrator.util import get_parsed_stack_config, include_exclude_check, error_exit, warn_exit
class GitProgress(git.RemoteProgress):
@@ -80,15 +79,19 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
except TypeError:
# This means that the current ref is not a branch, so possibly a tag
# Let's try to get the tag
- current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
- # Note that git is assymetric -- the tag you told it to check out may not be the one
- # you get back here (if there are multiple tags associated with the same commit)
+ try:
+ current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
+ # Note that git is asymmetric -- the tag you told it to check out may not be the one
+ # you get back here (if there are multiple tags associated with the same commit)
+ except GitCommandError:
+ # If there is no matching branch or tag checked out, just use the current SHA
+ current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
return current_repo_branch_or_tag, is_branch
# TODO: fix the messy arg list here
-def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
- if verbose:
+def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
+ if opts.o.verbose:
print(f"Processing repo: {fully_qualified_repo}")
repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
git_ssh_prefix = f"git@{repo_host}:"
@@ -100,8 +103,8 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
(current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(
full_filesystem_repo_path
) if is_present else (None, None)
- if not quiet:
- present_text = f"already exists active {'branch' if is_branch else 'tag'}: {current_repo_branch_or_tag}" if is_present \
+ if not opts.o.quiet:
+ present_text = f"already exists active {'branch' if is_branch else 'ref'}: {current_repo_branch_or_tag}" if is_present \
else 'Needs to be fetched'
print(f"Checking: {full_filesystem_repo_path}: {present_text}")
# Quick check that it's actually a repo
@@ -111,25 +114,25 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
sys.exit(1)
else:
if pull:
- if verbose:
+ if opts.o.verbose:
print(f"Running git pull for {full_filesystem_repo_path}")
if not check_only:
if is_branch:
git_repo = git.Repo(full_filesystem_repo_path)
origin = git_repo.remotes.origin
- origin.pull(progress=None if quiet else GitProgress())
+ origin.pull(progress=None if opts.o.quiet else GitProgress())
else:
- print("skipping pull because this repo checked out a tag")
+ print("skipping pull because this repo is not on a branch")
else:
print("(git pull skipped)")
if not is_present:
# Clone
- if verbose:
+ if opts.o.verbose:
print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
- if not dry_run:
+ if not opts.o.dry_run:
git.Repo.clone_from(full_github_repo_path,
full_filesystem_repo_path,
- progress=None if quiet else GitProgress())
+ progress=None if opts.o.quiet else GitProgress())
else:
print("(git clone skipped)")
# Checkout the requested branch, if one was specified
@@ -150,13 +153,13 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
current_repo_branch_or_tag and (
current_repo_branch_or_tag != branch_to_checkout)
):
- if not quiet:
+ if not opts.o.quiet:
print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
git_repo = git.Repo(full_filesystem_repo_path)
# git checkout works for both branches and tags
git_repo.git.checkout(branch_to_checkout)
else:
- if verbose:
+ if opts.o.verbose:
print(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")
@@ -182,36 +185,18 @@ def parse_branches(branches_string):
@click.option('--check-only', is_flag=True, default=False)
@click.option('--pull', is_flag=True, default=False)
@click.option("--branches", help="override branches for repositories")
-@click.option('--branches-file', help="checkout branches specified in this file")
@click.pass_context
-def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches_file):
+def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
'''git clone the set of repositories required to build the complete system from source'''
- quiet = ctx.obj.quiet
- verbose = ctx.obj.verbose
- dry_run = ctx.obj.dry_run
- stack = ctx.obj.stack
+ quiet = opts.o.quiet
+ verbose = opts.o.verbose
+ stack = opts.o.stack
branches_array = []
- # TODO: branches file needs to be re-worked in the context of stacks
- if branches_file:
- if branches:
- print("Error: can't specify both --branches and --branches-file")
- sys.exit(1)
- else:
- if verbose:
- print(f"loading branches from: {branches_file}")
- with open(branches_file) as branches_file_open:
- branches_array = branches_file_open.read().splitlines()
-
- print(f"branches: {branches}")
if branches:
- if branches_file:
- print("Error: can't specify both --branches and --branches-file")
- sys.exit(1)
- else:
- branches_array = parse_branches(branches)
+ branches_array = parse_branches(branches)
if branches_array and verbose:
print(f"Branches are: {branches_array}")
@@ -239,20 +224,10 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
repos_in_scope = []
if stack:
- if stack_is_external(stack):
- stack_file_path = Path(stack).joinpath(stack_file_name)
- else:
- # In order to be compatible with Python 3.8 we need to use this hack to get the path:
- # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
- stack_file_path = Path(__file__).absolute().parent.parent.joinpath("data", "stacks", stack, stack_file_name)
- if not stack_file_path.exists():
- error_exit(f"stack {stack} does not exist")
- with stack_file_path:
- stack_config = yaml.safe_load(open(stack_file_path, "r"))
- if "repos" not in stack_config or stack_config["repos"] is None:
- warn_exit(f"stack {stack} does not define any repositories")
- else:
- repos_in_scope = stack_config["repos"]
+ stack_config = get_parsed_stack_config(stack)
+ if "repos" not in stack_config or stack_config["repos"] is None:
+ warn_exit(f"stack {stack} does not define any repositories")
+ repos_in_scope = stack_config["repos"]
else:
repos_in_scope = all_repos
@@ -271,7 +246,6 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
for repo in repos:
try:
- process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, repo)
+ process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, repo)
except git.exc.GitCommandError as error:
- print(f"\n******* git command returned error exit status:\n{error}")
- sys.exit(1)
+ error_exit(f"\n******* git command returned error exit status:\n{error}")
diff --git a/stack_orchestrator/util.py b/stack_orchestrator/util.py
index d03753c3..d2dd0425 100644
--- a/stack_orchestrator/util.py
+++ b/stack_orchestrator/util.py
@@ -20,6 +20,7 @@ import ruamel.yaml
from pathlib import Path
from dotenv import dotenv_values
from typing import Mapping, Set, List
+from stack_orchestrator.constants import stack_file_name, deployment_file_name
def include_exclude_check(s, include, exclude):
@@ -33,11 +34,14 @@ def include_exclude_check(s, include, exclude):
return s not in exclude_list
-def get_stack_file_path(stack):
- # In order to be compatible with Python 3.8 we need to use this hack to get the path:
- # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
- stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
- return stack_file_path
+def get_stack_path(stack):
+ if stack_is_external(stack):
+ stack_path = Path(stack)
+ else:
+ # In order to be compatible with Python 3.8 we need to use this hack to get the path:
+ # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+ stack_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack)
+ return stack_path
def get_dev_root_path(ctx):
@@ -52,21 +56,14 @@ def get_dev_root_path(ctx):
# Caller can pass either the name of a stack, or a path to a stack file
def get_parsed_stack_config(stack):
- stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_file_path(stack)
- try:
- with stack_file_path:
- stack_config = get_yaml().load(open(stack_file_path, "r"))
- return stack_config
- except FileNotFoundError as error:
- # We try here to generate a useful diagnostic error
- # First check if the stack directory is present
- stack_directory = stack_file_path.parent
- if os.path.exists(stack_directory):
- print(f"Error: stack.yml file is missing from stack: {stack}")
- else:
- print(f"Error: stack: {stack} does not exist")
- print(f"Exiting, error: {error}")
- sys.exit(1)
+ stack_file_path = get_stack_path(stack).joinpath(stack_file_name)
+ if stack_file_path.exists():
+ return get_yaml().load(open(stack_file_path, "r"))
+ # We try here to generate a useful diagnostic error
+ # First check if the stack directory is present
+ if stack_file_path.parent.exists():
+ error_exit(f"stack.yml file is missing from stack: {stack}")
+ error_exit(f"stack {stack} does not exist")
def get_pod_list(parsed_stack):
@@ -87,17 +84,45 @@ def get_plugin_code_paths(stack) -> List[Path]:
result: Set[Path] = set()
for pod in pods:
if type(pod) is str:
- result.add(get_stack_file_path(stack).parent)
+ result.add(get_stack_path(stack))
else:
pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
result.add(Path(os.path.join(pod_root_dir, "stack")))
return list(result)
-def get_pod_file_path(parsed_stack, pod_name: str):
+# Find a config directory, looking first in any external stack
+# and if not found there, internally
+def resolve_config_dir(stack, config_dir_name: str):
+ if stack_is_external(stack):
+ # First try looking in the external stack for the compose file
+ config_base = Path(stack).parent.parent.joinpath("config")
+ proposed_dir = config_base.joinpath(config_dir_name)
+ if proposed_dir.exists():
+ return proposed_dir
+ # If we don't find it fall through to the internal case
+ config_base = get_internal_config_dir()
+ return config_base.joinpath(config_dir_name)
+
+
+# Find a compose file, looking first in any external stack
+# and if not found there, internally
+def resolve_compose_file(stack, pod_name: str):
+ if stack_is_external(stack):
+ # First try looking in the external stack for the compose file
+ compose_base = Path(stack).parent.parent.joinpath("compose")
+ proposed_file = compose_base.joinpath(f"docker-compose-{pod_name}.yml")
+ if proposed_file.exists():
+ return proposed_file
+ # If we don't find it fall through to the internal case
+ compose_base = get_internal_compose_file_dir()
+ return compose_base.joinpath(f"docker-compose-{pod_name}.yml")
+
+
+def get_pod_file_path(stack, parsed_stack, pod_name: str):
pods = parsed_stack["pods"]
if type(pods[0]) is str:
- result = os.path.join(get_compose_file_dir(), f"docker-compose-{pod_name}.yml")
+ result = resolve_compose_file(stack, pod_name)
else:
for pod in pods:
if pod["name"] == pod_name:
@@ -131,7 +156,7 @@ def pod_has_scripts(parsed_stack, pod_name: str):
return result
-def get_compose_file_dir():
+def get_internal_compose_file_dir():
# TODO: refactor to use common code with deploy command
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
data_dir = Path(__file__).absolute().parent.joinpath("data")
@@ -139,7 +164,7 @@ def get_compose_file_dir():
return source_compose_dir
-def get_config_file_dir():
+def get_internal_config_dir():
# TODO: refactor to use common code with deploy command
data_dir = Path(__file__).absolute().parent.joinpath("data")
source_config_dir = data_dir.joinpath("config")
@@ -171,6 +196,10 @@ def stack_is_external(stack: str):
return Path(stack).exists() if stack is not None else False
+def stack_is_in_deployment(stack: Path):
+ return stack.joinpath(deployment_file_name).exists()
+
+
def get_yaml():
# See: https://stackoverflow.com/a/45701840/1701505
yaml = ruamel.yaml.YAML()
diff --git a/stack_orchestrator/version.py b/stack_orchestrator/version.py
index 68e47b44..541e5580 100644
--- a/stack_orchestrator/version.py
+++ b/stack_orchestrator/version.py
@@ -14,7 +14,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
-import importlib.resources
+from importlib import resources, metadata
@click.command()
@@ -24,8 +24,11 @@ def command(ctx):
# See: https://stackoverflow.com/a/20885799/1701505
from stack_orchestrator import data
- with importlib.resources.open_text(data, "build_tag.txt") as version_file:
- # TODO: code better version that skips comment lines
- version_string = version_file.read().splitlines()[1]
+ if resources.is_resource(data, "build_tag.txt"):
+ with resources.open_text(data, "build_tag.txt") as version_file:
+ # TODO: code better version that skips comment lines
+ version_string = version_file.read().splitlines()[1]
+ else:
+ version_string = metadata.version("laconic-stack-orchestrator") + "-unknown"
- print(f"Version: {version_string}")
+ print(version_string)
diff --git a/tests/external-stack/run-test.sh b/tests/external-stack/run-test.sh
new file mode 100755
index 00000000..084f3b9d
--- /dev/null
+++ b/tests/external-stack/run-test.sh
@@ -0,0 +1,185 @@
+#!/usr/bin/env bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+ set -x
+fi
+# Dump environment variables for debugging
+echo "Environment variables:"
+env
+
+if [ "$1" == "from-path" ]; then
+ TEST_TARGET_SO="laconic-so"
+else
+ TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+fi
+
+delete_cluster_exit () {
+ $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+ exit 1
+}
+
+# Test basic stack-orchestrator deploy
+echo "Running stack-orchestrator external stack deploy test"
+# Set a non-default repo dir
+export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
+echo "Testing this package: $TEST_TARGET_SO"
+echo "Test version command"
+reported_version_string=$( $TEST_TARGET_SO version )
+echo "Version reported is: ${reported_version_string}"
+echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+rm -rf $CERC_REPO_BASE_DIR
+mkdir -p $CERC_REPO_BASE_DIR
+# Clone the external test stack
+$TEST_TARGET_SO fetch-stack git.vdb.to/cerc-io/test-external-stack
+stack_name="$CERC_REPO_BASE_DIR/test-external-stack/stack-orchestrator/stacks/test-external-stack"
+TEST_TARGET_SO_STACK="$TEST_TARGET_SO --stack ${stack_name}"
+# Test bringing the test container up and down
+# with and without volume removal
+$TEST_TARGET_SO_STACK setup-repositories
+$TEST_TARGET_SO_STACK build-containers
+# Test deploy command execution
+$TEST_TARGET_SO_STACK deploy setup $CERC_REPO_BASE_DIR
+# Check that we now have the expected output directory
+container_output_dir=$CERC_REPO_BASE_DIR/container-output-dir
+if [ ! -d "$container_output_dir" ]; then
+ echo "deploy setup test: output directory not present"
+ echo "deploy setup test: FAILED"
+ exit 1
+fi
+if [ ! -f "$container_output_dir/output-file" ]; then
+ echo "deploy setup test: output file not present"
+ echo "deploy setup test: FAILED"
+ exit 1
+fi
+output_file_content=$(<$container_output_dir/output-file)
+if [ ! "$output_file_content" == "output-data" ]; then
+ echo "deploy setup test: output file contents not correct"
+ echo "deploy setup test: FAILED"
+ exit 1
+fi
+# Check that we now have the expected output file
+$TEST_TARGET_SO_STACK deploy up
+# Test deploy port command
+deploy_port_output=$( $TEST_TARGET_SO_STACK deploy port test 80 )
+if [[ "$deploy_port_output" =~ ^0.0.0.0:[1-9][0-9]* ]]; then
+ echo "Deploy port test: passed"
+else
+ echo "Deploy port test: FAILED"
+ exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down
+# The next time we bring the container up the volume will be old (from the previous run above)
+$TEST_TARGET_SO_STACK deploy up
+log_output_1=$( $TEST_TARGET_SO_STACK deploy logs )
+if [[ "$log_output_1" == *"filesystem is old"* ]]; then
+ echo "Retain volumes test: passed"
+else
+ echo "Retain volumes test: FAILED"
+ exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down --delete-volumes
+# Now when we bring the container up the volume will be new again
+$TEST_TARGET_SO_STACK deploy up
+log_output_2=$( $TEST_TARGET_SO_STACK deploy logs )
+if [[ "$log_output_2" == *"filesystem is fresh"* ]]; then
+ echo "Delete volumes test: passed"
+else
+ echo "Delete volumes test: FAILED"
+ exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down --delete-volumes
+# Basic test of creating a deployment
+test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
+test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
+$TEST_TARGET_SO_STACK deploy init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED,CERC_TEST_PARAM_3=FAST
+# Check the file now exists
+if [ ! -f "$test_deployment_spec" ]; then
+ echo "deploy init test: spec file not present"
+ echo "deploy init test: FAILED"
+ exit 1
+fi
+echo "deploy init test: passed"
+$TEST_TARGET_SO_STACK deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
+# Check the deployment dir exists
+if [ ! -d "$test_deployment_dir" ]; then
+ echo "deploy create test: deployment directory not present"
+ echo "deploy create test: FAILED"
+ exit 1
+fi
+echo "deploy create test: passed"
+# Check the file written by the create command in the stack now exists
+if [ ! -f "$test_deployment_dir/create-file" ]; then
+ echo "deploy create test: create output file not present"
+ echo "deploy create test: FAILED"
+ exit 1
+fi
+# And has the right content
+create_file_content=$(<$test_deployment_dir/create-file)
+if [ ! "$create_file_content" == "create-command-output-data" ]; then
+ echo "deploy create test: create output file contents not correct"
+ echo "deploy create test: FAILED"
+ exit 1
+fi
+
+# Add a config file to be picked up by the ConfigMap before starting.
+echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
+
+echo "deploy create output file test: passed"
+# Try to start the deployment
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+# Check logs command works
+log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
+ echo "deployment logs test: passed"
+else
+ echo "deployment logs test: FAILED"
+ exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_1 was passed correctly
+if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
+ echo "deployment config test: passed"
+else
+ echo "deployment config test: FAILED"
+ exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
+if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then
+ echo "deployment compose config test: passed"
+else
+ echo "deployment compose config test: FAILED"
+ exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_3 was passed correctly
+if [[ "$log_output_3" == *"Test-param-3: FAST"* ]]; then
+ echo "deployment config test: passed"
+else
+ echo "deployment config test: FAILED"
+ exit 1
+fi
+
+# Check that the ConfigMap is mounted and contains the expected content.
+log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
+ echo "deployment ConfigMap test: passed"
+else
+ echo "deployment ConfigMap test: FAILED"
+ delete_cluster_exit
+fi
+
+# Stop then start again and check the volume was preserved
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
+# Sleep a bit just in case
+# sleep for longer to check if that's why the subsequent create cluster fails
+sleep 20
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_5" == *"filesystem is old"* ]]; then
+ echo "Retain volumes test: passed"
+else
+ echo "Retain volumes test: FAILED"
+ delete_cluster_exit
+fi
+
+# Stop and clean up
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+echo "Test passed"
diff --git a/tests/fixturenet-laconicd/run-cli-test.sh b/tests/fixturenet-laconicd/run-cli-test.sh
index 8a5dcb42..28670390 100755
--- a/tests/fixturenet-laconicd/run-cli-test.sh
+++ b/tests/fixturenet-laconicd/run-cli-test.sh
@@ -22,16 +22,16 @@ echo "$(date +"%Y-%m-%d %T"): Stack started"
# Verify that the fixturenet is up and running
$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd ps
+# Wait for the laconicd endpoint to come up
+echo "Waiting for the RPC endpoint to come up"
+docker exec laconicd-laconicd-1 sh -c "curl --retry 20 --retry-delay 3 --retry-connrefused http://127.0.0.1:9473/api"
+
# Get the fixturenet account address
laconicd_account_address=$(docker exec laconicd-laconicd-1 laconicd keys list | awk '/- address:/ {print $3}')
# Copy over config
docker exec laconicd-cli-1 cp config.yml laconic-registry-cli/
-# Wait for the laconid endpoint to come up
-echo "Waiting for the RPC endpoint to come up"
-docker exec laconicd-laconicd-1 sh -c "curl --retry 20 --retry-delay 3 --retry-connrefused http://127.0.0.1:9473/api"
-
# Run the tests
echo "Running the tests"
docker exec -e TEST_ACCOUNT=$laconicd_account_address laconicd-cli-1 sh -c 'cd laconic-registry-cli && yarn && yarn test'