Add stack for azimuth watchers with gateway-server #379
app/data/compose/docker-compose-watcher-azimuth.yml (new file, 98 lines)
@@ -0,0 +1,98 @@
version: '3.2'

services:
  # Starts the PostgreSQL database for watchers
  watcher-db:
    restart: unless-stopped
    image: postgres:14-alpine
    environment:
      - POSTGRES_USER=vdbm
      - POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-watcher,delegated-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue
      - POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto
      - POSTGRES_PASSWORD=password
    volumes:
      - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
      - watcher_db_data:/var/lib/postgresql/data
    ports:
      - "0.0.0.0:15432:5432"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "5432"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s

  # Starts the azimuth-watcher server
  azimuth-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    working_dir: /app/packages/azimuth-watcher
    command: "yarn server"
    volumes:
      - ../config/watcher-azimuth/watcher-configs/azimuth-watcher.toml:/app/packages/azimuth-watcher/environments/local.toml
    # ports:
    #   - "0.0.0.0:3001:3001"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3001"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the censures-watcher server
  censures-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    working_dir: /app/packages/censures-watcher
    command: "yarn server"
    volumes:
      - ../config/watcher-azimuth/watcher-configs/censures-watcher.toml:/app/packages/censures-watcher/environments/local.toml
    # ports:
    #   - "0.0.0.0:3002:3002"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3002"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the gateway-server for proxying queries
  gateway-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      azimuth-watcher-server:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    working_dir: /app/packages/gateway-server
    command: "yarn server"
    volumes:
      - ../config/watcher-azimuth/gateway-watchers.json:/app/packages/gateway-server/dist/watchers.json
    ports:
      - "0.0.0.0:4000:4000"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "4000"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

volumes:
  watcher_db_data:
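Once the stack is up, the healthchecks and the database init script can be verified from the host. A minimal sketch, assuming `docker` and a local `psql` client are available; port and credentials are taken from the compose file above:

```bash
# Check the health status reported by each container's healthcheck
docker ps --format 'table {{.Names}}\t{{.Status}}'

# List the databases the multiple-databases init script should have created
# (published host port 15432 and vdbm/password per the compose file)
PGPASSWORD=password psql -h localhost -p 15432 -U vdbm -l
```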
app/data/config/watcher-azimuth/gateway-watchers.json (new file, 10 lines)
@@ -0,0 +1,10 @@
[
  {
    "endpoint": "http://azimuth-watcher-server:3001/graphql",
    "prefix": "azimuth"
  },
  {
    "endpoint": "http://censures-watcher-server:3002/graphql",
    "prefix": "censures"
  }
]
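The gateway stitches these watcher endpoints into a single GraphQL schema, namespacing each watcher's fields by its `prefix`. A quick way to see the stitched fields once the stack is running (a sketch; assumes introspection is left enabled on the gateway):

```bash
# List the top-level query fields exposed by the gateway on port 4000;
# the azimuth and censures fields should appear under their configured prefixes
curl -s http://localhost:4000/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ __schema { queryType { fields { name } } } }"}'
```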
app/data/config/watcher-azimuth/watcher-configs/azimuth-watcher.toml (new file, 66 lines)
@@ -0,0 +1,66 @@
[server]
  host = "0.0.0.0"
  port = 3001
  kind = "lazy"

  # Checkpointing state.
  checkpointing = true

  # Checkpoint interval in number of blocks.
  checkpointInterval = 2000

  # Enable state creation
  # CAUTION: Disable only if state creation is not desired or can be filled subsequently
  enableState = true

  # Boolean to filter logs by contract.
  filterLogs = false

  # Max block range for which to return events in eventsInRange GQL query.
  # Use -1 for skipping check on block range.
  maxEventsBlockRange = 1000

  # GQL cache settings
  [server.gqlCache]
    enabled = true

    # Max in-memory cache size (in bytes) (default 8 MB)
    # maxCacheSize

    # GQL cache-control max-age settings (in seconds)
    maxAge = 15

[metrics]
  host = "127.0.0.1"
  port = 9000
  [metrics.gql]
    port = 9001

[database]
  type = "postgres"
  host = "watcher-db"
  port = 5432
  database = "azimuth-watcher"
  username = "vdbm"
  password = "password"
  synchronize = true
  logging = false

[upstream]
  [upstream.ethServer]
    gqlApiEndpoint = "http://host.docker.internal:8083/graphql"
    rpcProviderEndpoint = "http://host.docker.internal:8082"

  [upstream.cache]
    name = "requests"
    enabled = false
    deleteOnStart = false

[jobQueue]
  dbConnectionString = "postgres://vdbm:password@watcher-db/azimuth-watcher-job-queue"
  maxCompletionLagInSecs = 300
  jobDelayInMilliSecs = 100
  eventsInBatch = 50
  blockDelayInMilliSecs = 2000
  prefetchBlocksInMem = true
  prefetchBlockCount = 10
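The `[upstream]` endpoints resolve to services on the Docker host via the `host.docker.internal` mapping in the compose file, so a reachability check from inside the watcher container is a useful first debugging step. A sketch; the container-name filter is an assumption and may differ per compose project:

```bash
# From inside the watcher container, confirm the upstream GQL and RPC
# endpoints on the Docker host are reachable
docker exec $(docker ps -qf "name=azimuth-watcher-server") \
  nc -vz host.docker.internal 8083
docker exec $(docker ps -qf "name=azimuth-watcher-server") \
  nc -vz host.docker.internal 8082
```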
app/data/config/watcher-azimuth/watcher-configs/censures-watcher.toml (new file, 66 lines)
@@ -0,0 +1,66 @@
[server]
  host = "0.0.0.0"
  port = 3002
  kind = "lazy"

  # Checkpointing state.
  checkpointing = true

  # Checkpoint interval in number of blocks.
  checkpointInterval = 2000

  # Enable state creation
  # CAUTION: Disable only if state creation is not desired or can be filled subsequently
  enableState = true

  # Boolean to filter logs by contract.
  filterLogs = false

  # Max block range for which to return events in eventsInRange GQL query.
  # Use -1 for skipping check on block range.
  maxEventsBlockRange = 1000

  # GQL cache settings
  [server.gqlCache]
    enabled = true

    # Max in-memory cache size (in bytes) (default 8 MB)
    # maxCacheSize

    # GQL cache-control max-age settings (in seconds)
    maxAge = 15

[metrics]
  host = "127.0.0.1"
  port = 9000
  [metrics.gql]
    port = 9001

[database]
  type = "postgres"
  host = "watcher-db"
  port = 5432
  database = "censures-watcher"
  username = "vdbm"
  password = "password"
  synchronize = true
  logging = false

[upstream]
  [upstream.ethServer]
    gqlApiEndpoint = "http://host.docker.internal:8083/graphql"
    rpcProviderEndpoint = "http://host.docker.internal:8082"

  [upstream.cache]
    name = "requests"
    enabled = false
    deleteOnStart = false

[jobQueue]
  dbConnectionString = "postgres://vdbm:password@watcher-db/censures-watcher-job-queue"
  maxCompletionLagInSecs = 300
  jobDelayInMilliSecs = 100
  eventsInBatch = 50
  blockDelayInMilliSecs = 2000
  prefetchBlocksInMem = true
  prefetchBlockCount = 10
app/data/container-build/cerc-watcher-azimuth/Dockerfile (new file, 10 lines)
@@ -0,0 +1,10 @@
FROM node:18.16.0-alpine3.16

RUN apk --update --no-cache add git python3 alpine-sdk

WORKDIR /app

COPY . .

RUN echo "Building azimuth-watcher-ts" && \
    yarn && yarn build
app/data/container-build/cerc-watcher-azimuth/build.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Build cerc/watcher-azimuth

source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

docker build -t cerc/watcher-azimuth:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/azimuth-watcher-ts
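This script is normally invoked through `laconic-so --stack azimuth build-containers`; a direct run would need the variables that the orchestrator and `build-base.sh` otherwise provide. A sketch with assumed paths, not the canonical invocation:

```bash
# Assumed locations -- adjust to where the orchestrator data and repos live
export CERC_CONTAINER_BASE_DIR=app/data/container-build
export CERC_REPO_BASE_DIR=~/cerc
app/data/container-build/cerc-watcher-azimuth/build.sh
```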
@@ -37,3 +37,4 @@ cerc/optimism-op-batcher
 cerc/optimism-op-node
 cerc/optimism-op-proposer
 cerc/pocket
+cerc/watcher-azimuth
@@ -25,3 +25,4 @@ kubo
 foundry
 fixturenet-optimism
 fixturenet-pocket
+watcher-azimuth
@@ -29,3 +29,4 @@ ethereum-optimism/op-geth
 ethereum-optimism/optimism
 pokt-network/pocket-core
 pokt-network/pocket-core-deployments
+cerc-io/azimuth-watcher-ts
app/data/stacks/azimuth/README.md (new file, 58 lines)
@@ -0,0 +1,58 @@
# Azimuth Watcher

Instructions to set up and deploy the Azimuth Watcher stack.

## Setup

Clone the required repositories:

```bash
laconic-so --stack azimuth setup-repositories
```

NOTE: If the repository already exists and is checked out to a different version, the `setup-repositories` command will throw an error.
To get around this, remove the `azimuth-watcher-ts` repository and re-run the command.

Check out the required versions and branches in the repos:

```bash
# azimuth-watcher-ts
cd ~/cerc/azimuth-watcher-ts
# git checkout v0.1.0
```

Build the container images:

```bash
laconic-so --stack azimuth build-containers
```

This should create the required docker images in the local image registry.

Deploy the stack:

* Deploy the containers:

  ```bash
  laconic-so --stack azimuth deploy-system up
  ```

* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy`; a sample check is shown below.
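For example, once the containers report healthy, the gateway should answer a trivial GraphQL query on port 4000 (a minimal smoke test, assuming the default port mapping from the compose file):

```bash
# Check the gateway container's health status
docker ps --filter "name=gateway-server" --format '{{.Names}}: {{.Status}}'

# Trivial GraphQL query that any live endpoint should answer
curl -s http://localhost:4000/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ __typename }"}'
```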
## Clean up

To stop all the services running in the background, run:

```bash
laconic-so --stack azimuth deploy-system down 30
```

Clear the volumes created by this stack:

```bash
# List all relevant volumes
docker volume ls -q --filter "name=.*watcher_db_data"

# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=.*watcher_db_data")
```
app/data/stacks/azimuth/stack.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
version: "1.0"
name: azimuth
repos:
  - cerc-io/azimuth-watcher-ts
containers:
  - cerc/watcher-azimuth
pods:
  - watcher-azimuth