Merge branch 'main' into VPhung24/bun-for-nextjs-and-webapps
All checks were successful
Lint Checks / Run linter (pull_request) Successful in 36s
Webapp Test / Run webapp test suite (pull_request) Successful in 4m31s
Smoke Test / Run basic test suite (pull_request) Successful in 4m23s
Deploy Test / Run deploy test suite (pull_request) Successful in 5m3s
K8s Deploy Test / Run deploy test suite on kind/k8s (pull_request) Successful in 7m41s
Commit 17277730fb
@@ -9,10 +9,6 @@ on:
   schedule: # Note: coordinate with other tests to not overload runners at the same time of day
     - cron: '2 14 * * *'
 
-# Needed until we can incorporate docker startup into the executor container
-env:
-  DOCKER_HOST: unix:///var/run/dind.sock
-
 
 jobs:
   test:
@@ -41,10 +37,6 @@ jobs:
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
-     - name: Start dockerd # Also needed until we can incorporate into the executor
-       run: |
-         dockerd -H $DOCKER_HOST --userland-proxy=false &
-         sleep 5
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth-plugeth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
@@ -7,10 +7,6 @@ on:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-test'
 
-# Needed until we can incorporate docker startup into the executor container
-env:
-  DOCKER_HOST: unix:///var/run/dind.sock
-
 
 jobs:
   test:
@@ -39,10 +35,6 @@ jobs:
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
-     - name: Start dockerd # Also needed until we can incorporate into the executor
-       run: |
-         dockerd -H $DOCKER_HOST --userland-proxy=false &
-         sleep 5
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
@@ -10,9 +10,6 @@ on:
    paths-ignore:
      - '.gitea/workflows/triggers/*'
 
-# Needed until we can incorporate docker startup into the executor container
-env:
-  DOCKER_HOST: unix:///var/run/dind.sock
 
 jobs:
   test:
@@ -41,10 +38,6 @@ jobs:
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
-     - name: Start dockerd # Also needed until we can incorporate into the executor
-       run: |
-         dockerd -H $DOCKER_HOST --userland-proxy=false &
-         sleep 5
      - name: "Run deploy tests"
        run: ./tests/deploy/run-deploy-test.sh
      - name: Notify Vulcanize Slack on CI failure
.gitea/workflows/test-external-stack.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
+name: External Stack Test
+
+on:
+  push:
+    branches: '*'
+    paths:
+      - '!**'
+      - '.gitea/workflows/triggers/test-external-stack'
+      - '.gitea/workflows/test-external-stack.yml'
+      - 'tests/external-stack/run-test.sh'
+  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
+    - cron: '8 19 * * *'
+
+jobs:
+  test:
+    name: "Run external stack test suite"
+    runs-on: ubuntu-latest
+    steps:
+      - name: "Clone project repository"
+        uses: actions/checkout@v3
+      # At present the stock setup-python action fails on Linux/aarch64
+      # Conditional steps below workaroud this by using deadsnakes for that case only
+      - name: "Install Python for ARM on Linux"
+        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
+        uses: deadsnakes/action@v3.0.1
+        with:
+          python-version: '3.8'
+      - name: "Install Python cases other than ARM on Linux"
+        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.8'
+      - name: "Print Python version"
+        run: python3 --version
+      - name: "Install shiv"
+        run: pip install shiv
+      - name: "Generate build version file"
+        run: ./scripts/create_build_tag_file.sh
+      - name: "Build local shiv package"
+        run: ./scripts/build_shiv_package.sh
+      - name: "Run external stack tests"
+        run: ./tests/external-stack/run-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -10,10 +10,6 @@ on:
    paths-ignore:
      - '.gitea/workflows/triggers/*'
 
-# Needed until we can incorporate docker startup into the executor container
-env:
-  DOCKER_HOST: unix:///var/run/dind.sock
-
 jobs:
   test:
    name: "Run webapp test suite"
@@ -43,10 +39,6 @@ jobs:
        run: ./scripts/build_shiv_package.sh
      - name: "Install wget" # 20240109 - Only needed until the executors are updated.
        run: apt update && apt install -y wget
-     - name: Start dockerd # Also needed until we can incorporate into the executor
-       run: |
-         dockerd -H $DOCKER_HOST --userland-proxy=false &
-         sleep 5
      - name: "Run webapp tests"
        run: ./tests/webapp-test/run-webapp-test.sh
      - name: Notify Vulcanize Slack on CI failure
@@ -10,9 +10,6 @@ on:
    paths-ignore:
      - '.gitea/workflows/triggers/*'
 
-# Needed until we can incorporate docker startup into the executor container
-env:
-  DOCKER_HOST: unix:///var/run/dind.sock
 
 jobs:
   test:
@@ -41,10 +38,6 @@ jobs:
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
-     - name: Start dockerd # Also needed until we can incorporate into the executor
-       run: |
-         dockerd -H $DOCKER_HOST --userland-proxy=false &
-         sleep 5
      - name: "Run smoke tests"
        run: ./tests/smoke-test/run-smoke-test.sh
      - name: Notify Vulcanize Slack on CI failure
@@ -4,3 +4,4 @@ Trigger
 Trigger
 Trigger
 Trigger
+Trigger
.gitea/workflows/triggers/test-external-stack (new file, 2 lines)
@@ -0,0 +1,2 @@
+Change this file to trigger running the external-stack CI job
+trigger
setup.py (4 changed lines)
@@ -4,9 +4,11 @@ with open("README.md", "r", encoding="utf-8") as fh:
     long_description = fh.read()
 with open("requirements.txt", "r", encoding="utf-8") as fh:
     requirements = fh.read()
+with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
+    version = fh.readlines()[-1].strip(" \n")
 setup(
     name='laconic-stack-orchestrator',
-    version='1.0.12',
+    version=version,
     author='Cerc',
     author_email='info@cerc.io',
     license='GNU Affero General Public License',
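The hunk above derives the package version from the last line of stack_orchestrator/data/version.txt instead of a hardcoded string. A minimal sketch of that parsing logic (the file contents below are hypothetical stand-ins for version.txt):

    import io

    # Sketch only: mirrors the version parsing added to setup.py above.
    # StringIO stands in for the opened version.txt file handle.
    fh = io.StringIO("# version history\n1.0.12\n")
    version = fh.readlines()[-1].strip(" \n")
    print(version)  # -> 1.0.12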
@@ -71,7 +71,7 @@ def process_container(build_context: BuildContext) -> bool:
 
     # Check if this is in an external stack
     if stack_is_external(build_context.stack):
-        container_parent_dir = Path(build_context.stack).joinpath("container-build")
+        container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
         temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
         temp_build_script_filename = temp_build_dir.joinpath("build.sh")
        # Now check if the container exists in the external stack.
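The one-line change above resolves container-build from the stack's grandparent directory rather than from the stack directory itself. A sketch of the difference, assuming a hypothetical external stack laid out as <stack-root>/stacks/<stack-name> (the path shape implied by parent.parent):

    from pathlib import Path

    # Hypothetical external stack path: <stack-root>/stacks/<stack-name>
    stack = Path("/home/user/my-external-stack/stacks/my-stack")

    old = stack.joinpath("container-build")
    new = stack.parent.parent.joinpath("container-build")
    print(old)  # /home/user/my-external-stack/stacks/my-stack/container-build
    print(new)  # /home/user/my-external-stack/container-build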
@@ -21,11 +21,6 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit
 
 def get_containers_in_scope(stack: str):
 
-    # See: https://stackoverflow.com/a/20885799/1701505
-    from stack_orchestrator import data
-    with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
-        all_containers = container_list_file.read().splitlines()
-
     containers_in_scope = []
     if stack:
         stack_config = get_parsed_stack_config(stack)
@@ -33,11 +28,14 @@ def get_containers_in_scope(stack: str):
             warn_exit(f"stack {stack} does not define any containers")
         containers_in_scope = stack_config['containers']
     else:
-        containers_in_scope = all_containers
+        # See: https://stackoverflow.com/a/20885799/1701505
+        from stack_orchestrator import data
+        with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
+            containers_in_scope = container_list_file.read().splitlines()
 
     if opts.o.verbose:
         print(f'Containers: {containers_in_scope}')
     if stack:
         print(f"Stack: {stack}")
 
     return containers_in_scope
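Net effect of the two hunks above: the packaged default container list is now read only in the else branch, i.e. when no stack is specified, instead of unconditionally at the top of the function. For reference, a self-contained sketch of the importlib.resources pattern being moved (assumes the stack_orchestrator package is importable):

    import importlib.resources

    # Sketch only: read a text file shipped inside a package into a list of
    # lines, as get_containers_in_scope now does only when no stack is given.
    from stack_orchestrator import data

    with importlib.resources.open_text(data, "container-image-list.txt") as f:
        containers = f.read().splitlines()
    print(f"{len(containers)} containers in the default list")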
@@ -6,12 +6,20 @@ services:
    restart: always
    environment:
      GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL}
+     CERC_GRAFANA_ALERTS_SUBGRAPH_IDS: ${CERC_GRAFANA_ALERTS_SUBGRAPH_IDS}
    volumes:
      - ../config/monitoring/grafana/provisioning:/etc/grafana/provisioning
      - ../config/monitoring/grafana/dashboards:/etc/grafana/dashboards
+     - ../config/monitoring/update-grafana-alerts-config.sh:/update-grafana-alerts-config.sh
      - grafana_storage:/var/lib/grafana
+   user: root
+   entrypoint: ["bash", "-c"]
+   command: |
+     "/update-grafana-alerts-config.sh && /run.sh"
    ports:
      - "3000"
+   extra_hosts:
+     - "host.docker.internal:host-gateway"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3000"]
      interval: 30s
@@ -16,8 +16,13 @@ services:
      postgres_pass: password
      postgres_db: graph-node
      ethereum: ${ETH_NETWORKS:-lotus-fixturenet:http://lotus-node-1:1234/rpc/v1}
+     # Env varaibles reference: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
      GRAPH_LOG: debug
      ETHEREUM_REORG_THRESHOLD: 3
+     GRAPH_ETHEREUM_JSON_RPC_TIMEOUT: ${GRAPH_ETHEREUM_JSON_RPC_TIMEOUT:-180}
+     GRAPH_ETHEREUM_REQUEST_RETRIES: ${GRAPH_ETHEREUM_REQUEST_RETRIES:-10}
+     GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE: ${GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE:-2000}
+     GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS: ${GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS:-1000}
    entrypoint: ["bash", "-c"]
    # Wait for ETH RPC endpoint to be up when running with fixturenet-lotus
    command: |
@@ -27,6 +32,7 @@ services:
      - "8001"
      - "8020"
      - "8030"
+     - "8040"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "8020"]
      interval: 30s
@@ -28,15 +28,37 @@ services:
    extra_hosts:
      - "host.docker.internal:host-gateway"
 
-  chain-head-exporter:
+  ethereum-chain-head-exporter:
    image: cerc/watcher-ts:local
    restart: always
    working_dir: /app/packages/cli
    environment:
-     ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
-     FIL_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT}
+     ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT:-https://mainnet.infura.io/v3}
      ETH_RPC_API_KEY: ${CERC_INFURA_KEY}
-     PORT: ${CERC_METRICS_PORT}
+   command: ["sh", "-c", "yarn export-metrics:chain-heads"]
+   ports:
+     - '5000'
+   extra_hosts:
+     - "host.docker.internal:host-gateway"
+
+  filecoin-chain-head-exporter:
+   image: cerc/watcher-ts:local
+   restart: always
+   working_dir: /app/packages/cli
+   environment:
+     ETH_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT:-https://api.node.glif.io/rpc/v1}
+   command: ["sh", "-c", "yarn export-metrics:chain-heads"]
+   ports:
+     - '5000'
+   extra_hosts:
+     - "host.docker.internal:host-gateway"
+
+  graph-node-upstream-head-exporter:
+   image: cerc/watcher-ts:local
+   restart: always
+   working_dir: /app/packages/cli
+   environment:
+     ETH_RPC_ENDPOINT: ${GRAPH_NODE_RPC_ENDPOINT}
    command: ["sh", "-c", "yarn export-metrics:chain-heads"]
    ports:
      - '5000'
@@ -1,13 +0,0 @@
-services:
-  snowballtools-base-backend:
-    image: cerc/snowballtools-base-backend:local
-    restart: always
-    volumes:
-      - data:/data
-      - config:/config:ro
-    ports:
-      - 8000
-
-volumes:
-  data:
-  config:
@@ -29,7 +29,7 @@ services:
    image: cerc/watcher-ajna:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
    command: ["bash", "./start-job-runner.sh"]
    volumes:
      - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
@@ -37,7 +37,7 @@ services:
    ports:
      - "9000"
    healthcheck:
-     test: ["CMD", "nc", "-v", "localhost", "9000"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -55,16 +55,17 @@ services:
    image: cerc/watcher-ajna:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
    command: ["bash", "./start-server.sh"]
    volumes:
      - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
      - ../config/watcher-ajna/start-server.sh:/app/start-server.sh
+     - ajna_watcher_gql_logs_data:/app/gql-logs
    ports:
      - "3008"
      - "9001"
    healthcheck:
-     test: ["CMD", "nc", "-v", "localhost", "3008"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -74,3 +75,4 @@ services:
 
 volumes:
   ajna_watcher_db_data:
+  ajna_watcher_gql_logs_data:
@@ -32,8 +32,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CERC_HISTORICAL_BLOCK_RANGE: 500
      CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB
      CONTRACT_NAME: Azimuth
@@ -47,7 +47,7 @@ services:
    ports:
      - "9000"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9000"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -66,18 +66,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/azimuth-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/azimuth-watcher/start-server.sh
+     - azimuth_watcher_gql_logs_data:/app/packages/azimuth-watcher/gql-logs
    ports:
      - "3001"
+     - "9001"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3001"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3001"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -94,8 +96,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189
      CONTRACT_NAME: Censures
      STARTING_BLOCK: 6784954
@@ -108,7 +110,7 @@ services:
    ports:
      - "9002"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9002"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9002"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -127,18 +129,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/censures-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/censures-watcher/start-server.sh
+     - censures_watcher_gql_logs_data:/app/packages/censures-watcher/gql-logs
    ports:
      - "3002"
+     - "9003"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3002"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3002"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -155,8 +159,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A
      CONTRACT_NAME: Claims
      STARTING_BLOCK: 6784941
@@ -169,7 +173,7 @@ services:
    ports:
      - "9004"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9004"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9004"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -188,18 +192,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/claims-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/claims-watcher/start-server.sh
+     - claims_watcher_gql_logs_data:/app/packages/claims-watcher/gql-logs
    ports:
      - "3003"
+     - "9005"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3003"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3003"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -216,8 +222,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA
      CONTRACT_NAME: ConditionalStarRelease
      STARTING_BLOCK: 6828004
@@ -230,7 +236,7 @@ services:
    ports:
      - "9006"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9006"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9006"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -249,18 +255,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/conditional-star-release-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/conditional-star-release-watcher/start-server.sh
+     - conditional_star_release_watcher_gql_logs_data:/app/packages/conditional-star-release-watcher/gql-logs
    ports:
      - "3004"
+     - "9007"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3004"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3004"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -277,8 +285,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76
      CONTRACT_NAME: DelegatedSending
      STARTING_BLOCK: 6784956
@@ -291,7 +299,7 @@ services:
    ports:
      - "9008"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9008"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9008"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -310,18 +318,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/delegated-sending-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/delegated-sending-watcher/start-server.sh
+     - delegated_sending_watcher_gql_logs_data:/app/packages/delegated-sending-watcher/gql-logs
    ports:
      - "3005"
+     - "9009"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3005"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3005"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -338,8 +348,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73
      CONTRACT_NAME: Ecliptic
      STARTING_BLOCK: 13692129
@@ -352,7 +362,7 @@ services:
    ports:
      - "9010"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9010"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9010"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -371,18 +381,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/ecliptic-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/ecliptic-watcher/start-server.sh
+     - ecliptic_watcher_gql_logs_data:/app/packages/ecliptic-watcher/gql-logs
    ports:
      - "3006"
+     - "9011"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3006"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3006"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -399,8 +411,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801
      CONTRACT_NAME: LinearStarRelease
      STARTING_BLOCK: 6784943
@@ -413,7 +425,7 @@ services:
    ports:
      - "9012"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9012"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9012"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -432,18 +444,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/linear-star-release-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/linear-star-release-watcher/start-server.sh
+     - linear_star_release_watcher_gql_logs_data:/app/packages/linear-star-release-watcher/gql-logs
    ports:
      - "3007"
+     - "9013"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3007"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3007"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -460,8 +474,8 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
      CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4
      CONTRACT_NAME: Polls
      STARTING_BLOCK: 6784912
@@ -474,7 +488,7 @@ services:
    ports:
      - "9014"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "9014"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9014"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -493,18 +507,20 @@ services:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-     CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+     CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
    working_dir: /app/packages/polls-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/polls-watcher/start-server.sh
+     - polls_watcher_gql_logs_data:/app/packages/polls-watcher/gql-logs
    ports:
      - "3008"
+     - "9015"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "3008"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -542,7 +558,7 @@ services:
    ports:
      - "0.0.0.0:4000:4000"
    healthcheck:
-     test: ["CMD", "nc", "-vz", "localhost", "4000"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "4000"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -552,3 +568,11 @@ services:
 
 volumes:
   watcher_db_data:
+  azimuth_watcher_gql_logs_data:
+  censures_watcher_gql_logs_data:
+  claims_watcher_gql_logs_data:
+  conditional_star_release_watcher_gql_logs_data:
+  delegated_sending_watcher_gql_logs_data:
+  ecliptic_watcher_gql_logs_data:
+  linear_star_release_watcher_gql_logs_data:
+  polls_watcher_gql_logs_data:
@@ -29,7 +29,7 @@ services:
    image: cerc/watcher-merkl-sushiswap-v3:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
    command: ["bash", "./start-job-runner.sh"]
    volumes:
      - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
@@ -37,7 +37,7 @@ services:
    ports:
      - "9002:9000"
    healthcheck:
-     test: ["CMD", "nc", "-v", "localhost", "9000"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -55,16 +55,17 @@ services:
    image: cerc/watcher-merkl-sushiswap-v3:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
    command: ["bash", "./start-server.sh"]
    volumes:
      - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
      - ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh
+     - merkl_sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
    ports:
      - "127.0.0.1:3007:3008"
      - "9003:9001"
    healthcheck:
-     test: ["CMD", "nc", "-v", "localhost", "3008"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -74,3 +75,4 @@ services:
 
 volumes:
   merkl_sushiswap_v3_watcher_db_data:
+  merkl_sushiswap_v3_watcher_gql_logs_data:
@@ -29,7 +29,7 @@ services:
    image: cerc/watcher-sushiswap-v3:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
    command: ["bash", "./start-job-runner.sh"]
    volumes:
      - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
@@ -37,7 +37,7 @@ services:
    ports:
      - "9000:9000"
    healthcheck:
-     test: ["CMD", "nc", "-v", "localhost", "9000"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -55,16 +55,17 @@ services:
    image: cerc/watcher-sushiswap-v3:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-     CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+     CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
    command: ["bash", "./start-server.sh"]
    volumes:
      - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
      - ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh
+     - sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
    ports:
      - "127.0.0.1:3008:3008"
      - "9001:9001"
    healthcheck:
-     test: ["CMD", "nc", "-v", "localhost", "3008"]
+     test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
      interval: 20s
      timeout: 5s
      retries: 15
@@ -74,3 +75,4 @@ services:
 
 volumes:
   sushiswap_v3_watcher_db_data:
+  sushiswap_v3_watcher_gql_logs_data:
File diff suppressed because it is too large
@@ -0,0 +1,20 @@
+apiVersion: 1
+
+datasources:
+  - name: Graph Node Postgres
+    type: postgres
+    jsonData:
+      database: graph-node
+      sslmode: 'disable'
+      maxOpenConns: 100
+      maxIdleConns: 100
+      maxIdleConnsAuto: true
+      connMaxLifetime: 14400
+      postgresVersion: 1411 # 903=9.3, 1000=10, 1411=14.11
+      timescaledb: false
+    user: graph-node
+    # # Add URL for graph-node database
+    # url: graph-node-db:5432
+    # # Set password for graph-node database
+    # secureJsonData:
+    #   password: 'password'
@@ -45,7 +45,18 @@ scrape_configs:
    metrics_path: /metrics
    scheme: http
    static_configs:
-     - targets: ['chain-head-exporter:5000']
+     - targets: ['ethereum-chain-head-exporter:5000']
+       labels:
+         instance: 'external'
+         chain: 'ethereum'
+     - targets: ['filecoin-chain-head-exporter:5000']
+       labels:
+         instance: 'external'
+         chain: 'filecoin'
+     - targets: ['graph-node-upstream-head-exporter:5000']
+       labels:
+         instance: 'graph-node'
+         chain: 'filecoin'
 
  - job_name: 'postgres'
    scrape_interval: 30s
@@ -74,3 +85,11 @@ scrape_configs:
      # - targets: ['example-host:1317']
    params:
      format: ['prometheus']
+
+  - job_name: graph-node
+    metrics_path: /metrics
+    scrape_interval: 30s
+    scheme: http
+    static_configs:
+      # Add graph-node targets to be monitored below
+      # - targets: ['graph-node:8040']
@@ -0,0 +1,64 @@
+apiVersion: 1
+groups:
+  - orgId: 1
+    name: subgraph
+    folder: SubgraphAlerts
+    interval: 30s
+    rules:
+      - uid: b2a9144b-6104-46fc-92b5-352f4e643c4c
+        title: subgraph_head_tracking
+        condition: condition
+        data:
+          - refId: diff
+            relativeTimeRange:
+              from: 600
+              to: 0
+            datasourceUid: PBFA97CFB590B2093
+            model:
+              datasource:
+                type: prometheus
+                uid: PBFA97CFB590B2093
+              editorMode: code
+              expr: ethereum_chain_head_number - on(network) group_right deployment_head{deployment=~"REPLACE_WITH_SUBGRAPH_IDS"}
+              instant: true
+              intervalMs: 1000
+              legendFormat: __auto
+              maxDataPoints: 43200
+              range: false
+              refId: diff
+          - refId: condition
+            relativeTimeRange:
+              from: 600
+              to: 0
+            datasourceUid: __expr__
+            model:
+              conditions:
+                - evaluator:
+                    params:
+                      - 15
+                      - 0
+                    type: gt
+                  operator:
+                    type: and
+                  query:
+                    params: []
+                  reducer:
+                    params: []
+                    type: avg
+                  type: query
+              datasource:
+                name: Expression
+                type: __expr__
+                uid: __expr__
+              expression: diff
+              intervalMs: 1000
+              maxDataPoints: 43200
+              refId: condition
+              type: threshold
+        noDataState: OK
+        execErrState: Alerting
+        for: 5m
+        annotations:
+          summary: Subgraph deployment {{ index $labels "deployment" }} is falling behind head by {{ index $values "diff" }}
+        labels: {}
+        isPaused: false
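The new rule above fires when a subgraph deployment's head lags the chain head by more than 15 blocks (the evaluator's gt threshold) sustained for 5 minutes. A toy illustration of that condition, not Grafana code; the sample metric values are hypothetical:

    # Toy illustration of the subgraph_head_tracking alert condition above.
    LAG_THRESHOLD = 15            # from evaluator params [15, 0], type: gt
    chain_head = 19_000_000       # hypothetical ethereum_chain_head_number
    deployment_head = 18_999_980  # hypothetical deployment_head sample

    diff = chain_head - deployment_head
    would_alert = diff > LAG_THRESHOLD  # must also persist for 5m ("for: 5m")
    print(diff, would_alert)  # -> 20 True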
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+echo Using CERC_GRAFANA_ALERTS_SUBGRAPH_IDS ${CERC_GRAFANA_ALERTS_SUBGRAPH_IDS}
+
+# Replace subgraph ids in subgraph alerting config
+# Note: Requires the grafana container to be run with user root
+if [ -n "$CERC_GRAFANA_ALERTS_SUBGRAPH_IDS" ]; then
+  sed -i "s/REPLACE_WITH_SUBGRAPH_IDS/$CERC_GRAFANA_ALERTS_SUBGRAPH_IDS/g" /etc/grafana/provisioning/alerting/subgraph-alert-rules.yml
+fi
@ -24,7 +24,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -100,7 +100,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -176,7 +176,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -252,7 +252,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -328,7 +328,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -404,7 +404,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -480,7 +480,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -556,7 +556,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -634,7 +634,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -710,7 +710,7 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
@ -788,7 +788,85 @@ groups:
             uid: PBFA97CFB590B2093
             disableTextWrap: false
             editorMode: code
-            expr: latest_block_number - on(chain) group_right sync_status_block_number{job="ajna", instance="ajna", kind="latest_indexed"}
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="ajna", instance="ajna", kind="latest_indexed"}
+            fullMetaSearch: false
+            includeNullMetadata: true
+            instant: true
+            intervalMs: 1000
+            legendFormat: __auto
+            maxDataPoints: 43200
+            range: false
+            refId: diff
+            useBackend: false
+        - refId: latest_external
+          relativeTimeRange:
+            from: 600
+            to: 0
+          datasourceUid: PBFA97CFB590B2093
+          model:
+            datasource:
+              type: prometheus
+              uid: PBFA97CFB590B2093
+            editorMode: code
+            expr: latest_block_number{chain="filecoin"}
+            hide: false
+            instant: true
+            legendFormat: __auto
+            range: false
+            refId: latest_external
+        - refId: condition
+          relativeTimeRange:
+            from: 600
+            to: 0
+          datasourceUid: __expr__
+          model:
+            conditions:
+              - evaluator:
+                  params:
+                    - 0
+                    - 0
+                  type: gt
+                operator:
+                  type: and
+                query:
+                  params: []
+                reducer:
+                  params: []
+                  type: avg
+                type: query
+            datasource:
+              name: Expression
+              type: __expr__
+              uid: __expr__
+            expression: ${diff} >= 16
+            intervalMs: 1000
+            maxDataPoints: 43200
+            refId: condition
+            type: math
+      noDataState: Alerting
+      execErrState: Alerting
+      for: 15m
+      annotations:
+        summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
+      isPaused: false
+
+    # Secured Finance
+    - uid: secured_finance_diff_external
+      title: secured_finance_watcher_head_tracking
+      condition: condition
+      data:
+        - refId: diff
+          relativeTimeRange:
+            from: 600
+            to: 0
+          datasourceUid: PBFA97CFB590B2093
+          model:
+            datasource:
+              type: prometheus
+              uid: PBFA97CFB590B2093
+            disableTextWrap: false
+            editorMode: code
+            expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="secured-finance", instance="secured-finance", kind="latest_indexed"}
             fullMetaSearch: false
             includeNullMetadata: true
             instant: true
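Each rule above makes the same change: the left-hand side of the lag expression is pinned to `latest_block_number{instance="external"}` so that only the external head series is matched against each watcher's `sync_status_block_number`. A quick way to sanity-check the updated expression against a running Prometheus (the `localhost:9090` address is an assumption for illustration):

```bash
# Evaluate the new alert expression directly; the result is the number of
# blocks the watcher lags behind the external head, one series per chain.
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}' \
  | jq '.data.result'
```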
@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi
 set -u
 
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
 
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
 
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
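The `tr`/`awk`/`paste`/`sed` pipeline introduced above quotes each comma-separated endpoint and wraps the list in brackets, producing an array literal the TOML template can consume. A standalone sketch with made-up endpoint values:

```bash
CERC_ETH_RPC_ENDPOINTS="https://rpc-1.example/rpc/v1,https://rpc-2.example/rpc/v1"

# Split on commas, quote each entry, rejoin with commas, wrap in [ ]
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')

echo "$RPC_ENDPOINTS_ARRAY"
# ["https://rpc-1.example/rpc/v1","https://rpc-2.example/rpc/v1"]
```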
@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi
 set -u
 
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
 
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
 
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
@ -2,7 +2,6 @@
 host = "0.0.0.0"
 port = 3008
 kind = "active"
-gqlPath = "/"
 
 # Checkpointing state.
 checkpointing = true
@ -22,23 +21,30 @@
 # Interval in number of blocks at which to clear entities cache.
 clearEntitiesCacheInterval = 1000
 
-# Max block range for which to return events in eventsInRange GQL query.
-# Use -1 for skipping check on block range.
-maxEventsBlockRange = 1000
-
 # Flag to specify whether RPC endpoint supports block hash as block tag parameter
 rpcSupportsBlockHashParam = false
 
-# GQL cache settings
-[server.gqlCache]
-enabled = true
+# Server GQL config
+[server.gql]
+path = "/"
 
-# Max in-memory cache size (in bytes) (default 8 MB)
-# maxCacheSize
+# Max block range for which to return events in eventsInRange GQL query.
+# Use -1 for skipping check on block range.
+maxEventsBlockRange = 1000
 
-# GQL cache-control max-age settings (in seconds)
-maxAge = 15
-timeTravelMaxAge = 86400 # 1 day
+# Log directory for GQL requests
+logDir = "./gql-logs"
+
+# GQL cache settings
+[server.gql.cache]
+enabled = true
+
+# Max in-memory cache size (in bytes) (default 8 MB)
+# maxCacheSize
+
+# GQL cache-control max-age settings (in seconds)
+maxAge = 15
+timeTravelMaxAge = 86400 # 1 day
 
 [metrics]
 host = "0.0.0.0"
@ -58,7 +64,7 @@
 
 [upstream]
 [upstream.ethServer]
-rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
+rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
 
 # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
 rpcClient = true
@ -85,6 +91,9 @@
 # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
 blockDelayInMilliSecs = 30000
 
+# Number of blocks by which block processing lags behind head
+blockProcessingOffset = 0
+
 # Boolean to switch between modes of processing events when starting the server.
 # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
 # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
@ -96,3 +105,6 @@
 # Max block range of historical processing after which it waits for completion of events processing
 # If set to -1 historical processing does not wait for events processing and completes till latest canonical block
 historicalMaxFetchAhead = 10000
+
+# Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
+maxNewBlockRetries = 3
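Note that the new `rpcProviderEndpoints` placeholder is deliberately unquoted: the start script substitutes in the pre-built array, so the rendered `environments/local.toml` should contain a valid TOML array (endpoint values below are examples, not from this change):

```bash
# After the watcher start script has run its sed substitution:
grep rpcProviderEndpoints environments/local.toml
# rpcProviderEndpoints = ["https://rpc-1.example/rpc/v1","https://rpc-2.example/rpc/v1"]
```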
@ -4,16 +4,19 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
   set -x
 fi
 
-echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
-echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
+echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL_ENDPOINT}"
 echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 # Replace env variables in template TOML file
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
-      s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}|g; \
+      s|REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT|${CERC_IPLD_ETH_GQL_ENDPOINT}|g; \
       s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
 
 # Write the modified content to a new file
@ -4,16 +4,19 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
   set -x
 fi
 
-echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
-echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
+echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL_ENDPOINT}"
 echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 # Replace env variables in template TOML file
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
-      s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}|g; \
+      s|REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT|${CERC_IPLD_ETH_GQL_ENDPOINT}|g; \
       s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
 
 # Write the modified content to a new file
@ -1,6 +1,7 @@
 [server]
 host = "0.0.0.0"
-maxSimultaneousRequests = -1
+[server.gql]
+maxSimultaneousRequests = -1
 
 [metrics]
 host = "0.0.0.0"
@ -13,8 +14,8 @@
 
 [upstream]
 [upstream.ethServer]
-gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL"
-rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"
+gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT"
+rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
 
 [jobQueue]
 historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE
@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi
 set -u
 
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
 
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
 
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi
 set -u
 
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
 
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
 
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
@ -2,7 +2,6 @@
 host = "0.0.0.0"
 port = 3008
 kind = "active"
-gqlPath = '/'
 
 # Checkpointing state.
 checkpointing = true
@ -22,23 +21,30 @@
 # Interval in number of blocks at which to clear entities cache.
 clearEntitiesCacheInterval = 1000
 
-# Max block range for which to return events in eventsInRange GQL query.
-# Use -1 for skipping check on block range.
-maxEventsBlockRange = 1000
-
 # Flag to specify whether RPC endpoint supports block hash as block tag parameter
 rpcSupportsBlockHashParam = false
 
-# GQL cache settings
-[server.gqlCache]
-enabled = true
+# Server GQL config
+[server.gql]
+path = "/"
 
-# Max in-memory cache size (in bytes) (default 8 MB)
-# maxCacheSize
+# Max block range for which to return events in eventsInRange GQL query.
+# Use -1 for skipping check on block range.
+maxEventsBlockRange = 1000
 
-# GQL cache-control max-age settings (in seconds)
-maxAge = 15
-timeTravelMaxAge = 86400 # 1 day
+# Log directory for GQL requests
+logDir = "./gql-logs"
+
+# GQL cache settings
+[server.gql.cache]
+enabled = true
+
+# Max in-memory cache size (in bytes) (default 8 MB)
+# maxCacheSize
+
+# GQL cache-control max-age settings (in seconds)
+maxAge = 15
+timeTravelMaxAge = 86400 # 1 day
 
 [metrics]
 host = "0.0.0.0"
@ -58,7 +64,7 @@
 
 [upstream]
 [upstream.ethServer]
-rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
+rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
 
 # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
 rpcClient = true
@ -69,7 +75,7 @@
 # Boolean flag to filter event logs by contracts
 filterLogsByAddresses = true
 # Boolean flag to filter event logs by topics
-filterLogsByTopics = false
+filterLogsByTopics = true
 
 [upstream.cache]
 name = "requests"
@ -85,6 +91,9 @@
 # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
 blockDelayInMilliSecs = 30000
 
+# Number of blocks by which block processing lags behind head
+blockProcessingOffset = 0
+
 # Boolean to switch between modes of processing events when starting the server.
 # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
 # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
@ -96,3 +105,6 @@
 # Max block range of historical processing after which it waits for completion of events processing
 # If set to -1 historical processing does not wait for events processing and completes till latest canonical block
 historicalMaxFetchAhead = 10000
+
+# Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
+maxNewBlockRetries = 3
@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi
 set -u
 
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
 
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
 
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
@ -6,12 +6,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi
 set -u
 
-echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
 
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 
+# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
+RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
+
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
-  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
 
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
@ -2,7 +2,6 @@
 host = "0.0.0.0"
 port = 3008
 kind = "active"
-gqlPath = "/"
 
 # Checkpointing state.
 checkpointing = true
@ -22,23 +21,30 @@
 # Interval in number of blocks at which to clear entities cache.
 clearEntitiesCacheInterval = 1000
 
-# Max block range for which to return events in eventsInRange GQL query.
-# Use -1 for skipping check on block range.
-maxEventsBlockRange = 1000
-
 # Flag to specify whether RPC endpoint supports block hash as block tag parameter
 rpcSupportsBlockHashParam = false
 
-# GQL cache settings
-[server.gqlCache]
-enabled = true
+# Server GQL config
+[server.gql]
+path = "/"
 
-# Max in-memory cache size (in bytes) (default 8 MB)
-# maxCacheSize
+# Max block range for which to return events in eventsInRange GQL query.
+# Use -1 for skipping check on block range.
+maxEventsBlockRange = 1000
 
-# GQL cache-control max-age settings (in seconds)
-maxAge = 15
-timeTravelMaxAge = 86400 # 1 day
+# Log directory for GQL requests
+logDir = "./gql-logs"
+
+# GQL cache settings
+[server.gql.cache]
+enabled = true
+
+# Max in-memory cache size (in bytes) (default 8 MB)
+# maxCacheSize
+
+# GQL cache-control max-age settings (in seconds)
+maxAge = 15
+timeTravelMaxAge = 86400 # 1 day
 
 [metrics]
 host = "0.0.0.0"
@ -58,7 +64,7 @@
 
 [upstream]
 [upstream.ethServer]
-rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
+rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
 
 # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
 rpcClient = true
@ -85,6 +91,9 @@
 # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
 blockDelayInMilliSecs = 30000
 
+# Number of blocks by which block processing lags behind head
+blockProcessingOffset = 0
+
 # Boolean to switch between modes of processing events when starting the server.
 # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
 # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
@ -96,3 +105,6 @@
 # Max block range of historical processing after which it waits for completion of events processing
 # If set to -1 historical processing does not wait for events processing and completes till latest canonical block
 historicalMaxFetchAhead = 10000
+
+# Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
+maxNewBlockRetries = 3
@ -1,6 +0,0 @@
-FROM cerc/snowballtools-base-backend-base:local
-
-WORKDIR /app/packages/backend
-COPY run.sh .
-
-ENTRYPOINT ["./run.sh"]
@ -1,26 +0,0 @@
-FROM ubuntu:22.04 as builder
-
-RUN apt update && \
-  apt install -y --no-install-recommends --no-install-suggests \
-  ca-certificates curl gnupg
-
-# Node
-ARG NODE_MAJOR=20
-RUN curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \
-  echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list && \
-  apt update && apt install -y nodejs
-
-# npm setup
-RUN npm config set @cerc-io:registry https://git.vdb.to/api/packages/cerc-io/npm/ && npm install -g yarn
-
-COPY . /app/
-WORKDIR /app/
-
-RUN find . -name 'node_modules' | xargs -n1 rm -rf
-RUN yarn && yarn build --ignore frontend
-
-FROM cerc/webapp-base:local
-
-COPY --from=builder /app /app
-
-WORKDIR /app/packages/backend
@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-# Build cerc/webapp-deployer-backend
-
-source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
-
-# See: https://stackoverflow.com/a/246128/1701505
-SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-
-docker build -t cerc/snowballtools-base-backend-base:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile-base ${CERC_REPO_BASE_DIR}/snowballtools-base
-docker build -t cerc/snowballtools-base-backend:local ${build_command_args} ${SCRIPT_DIR}
@ -1,19 +0,0 @@
-#!/bin/bash
-
-
-LACONIC_HOSTED_CONFIG_FILE=${LACONIC_HOSTED_CONFIG_FILE}
-if [ -z "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
-  if [ -f "/config/laconic-hosted-config.yml" ]; then
-    LACONIC_HOSTED_CONFIG_FILE="/config/laconic-hosted-config.yml"
-  elif [ -f "/config/config.yml" ]; then
-    LACONIC_HOSTED_CONFIG_FILE="/config/config.yml"
-  fi
-fi
-
-if [ -f "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
-  /scripts/apply-webapp-config.sh $LACONIC_HOSTED_CONFIG_FILE "`pwd`/dist"
-fi
-
-/scripts/apply-runtime-env.sh "`pwd`/dist"
-
-yarn start
@ -2,4 +2,4 @@
 # Build cerc/test-container
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
@ -6,5 +6,10 @@ WORKDIR /app
 
 COPY . .
 
+# Get the latest Git commit hash and set in package.json
+RUN COMMIT_HASH=$(git rev-parse HEAD) && \
+  jq --arg commitHash "$COMMIT_HASH" '.commitHash = $commitHash' package.json > tmp.json && \
+  mv tmp.json package.json
+
 RUN echo "Installing dependencies and building ajna-watcher-ts" && \
   yarn && yarn build
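The added `RUN` step stamps the image's Git revision into `package.json`; since `jq` has no in-place edit mode, the output goes through a temp file. The same steps can be tried in any git checkout that has a `package.json`:

```bash
COMMIT_HASH=$(git rev-parse HEAD)
# Write the updated JSON to a temp file, then replace the original
jq --arg commitHash "$COMMIT_HASH" '.commitHash = $commitHash' package.json > tmp.json && \
  mv tmp.json package.json

jq -r .commitHash package.json   # prints the stamped revision
```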
@ -1,11 +1,20 @@
 FROM node:18.16.0-alpine3.16
 
-RUN apk --update --no-cache add git python3 alpine-sdk
+RUN apk --update --no-cache add git python3 alpine-sdk jq
 
 WORKDIR /app
 
 COPY . .
 
+# Get the latest Git commit hash and set it in package.json of all watchers
+RUN COMMIT_HASH=$(git rev-parse HEAD) && \
+  find . -name 'package.json' -exec sh -c ' \
+    for packageFile; do \
+      jq --arg commitHash "$0" ".commitHash = \$commitHash" "$packageFile" > "$packageFile.tmp" && \
+      mv "$packageFile.tmp" "$packageFile"; \
+    done \
+  ' "$COMMIT_HASH" {} \;
+
 RUN echo "Building azimuth-watcher-ts" && \
   yarn && yarn build
 
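A note on the `find ... -exec sh -c '...' "$COMMIT_HASH" {} \;` idiom used here: the first argument after the quoted script becomes `$0` inside `sh -c`, which is why the jq filter reads the hash from `$0` while the bare `for packageFile` loop walks the remaining positional parameters (the matched files). A minimal sketch of the same binding:

```bash
# "$0" receives the first argument after the script; the rest become "$@",
# which an `in`-less for-loop iterates.
sh -c 'for f; do echo "hash=$0 file=$f"; done' deadbeef a/package.json b/package.json
# hash=deadbeef file=a/package.json
# hash=deadbeef file=b/package.json
```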
@ -6,5 +6,10 @@ WORKDIR /app
 
 COPY . .
 
+# Get the latest Git commit hash and set in package.json
+RUN COMMIT_HASH=$(git rev-parse HEAD) && \
+  jq --arg commitHash "$COMMIT_HASH" '.commitHash = $commitHash' package.json > tmp.json && \
+  mv tmp.json package.json
+
 RUN echo "Installing dependencies and building merkl-sushiswap-v3-watcher-ts" && \
   yarn && yarn build
|
@ -6,5 +6,10 @@ WORKDIR /app
|
|||||||
|
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
|
# Get the latest Git commit hash and set in package.json
|
||||||
|
RUN COMMIT_HASH=$(git rev-parse HEAD) && \
|
||||||
|
jq --arg commitHash "$COMMIT_HASH" '.commitHash = $commitHash' package.json > tmp.json && \
|
||||||
|
mv tmp.json package.json
|
||||||
|
|
||||||
RUN echo "Installing dependencies and building sushiswap-v3-watcher-ts" && \
|
RUN echo "Installing dependencies and building sushiswap-v3-watcher-ts" && \
|
||||||
yarn && yarn build
|
yarn && yarn build
|
||||||
|
@ -34,7 +34,7 @@ RUN \
|
|||||||
|
|
||||||
# [Optional] Uncomment this section to install additional OS packages.
|
# [Optional] Uncomment this section to install additional OS packages.
|
||||||
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
|
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
|
||||||
&& apt-get -y install --no-install-recommends jq gettext-base
|
&& apt-get -y install --no-install-recommends jq gettext-base git
|
||||||
|
|
||||||
# [Optional] Uncomment if you want to install an additional version of node using nvm
|
# [Optional] Uncomment if you want to install an additional version of node using nvm
|
||||||
# ARG EXTRA_NODE_VERSION=10
|
# ARG EXTRA_NODE_VERSION=10
|
||||||
|
@ -8,21 +8,27 @@ CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"
 CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
 CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"
 
 if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
-  if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ] && [ -d "${CERC_WEBAPP_FILES_DIR}/static" ]; then
+  # If there is only one HTML file, assume an SPA.
+  if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ]; then
     CERC_SINGLE_PAGE_APP=true
   else
     CERC_SINGLE_PAGE_APP=false
   fi
 fi
 
-if [ "true" == "$CERC_ENABLE_CORS" ]; then
+# ${var,,} is a lower-case comparison
+if [ "true" == "${CERC_ENABLE_CORS,,}" ]; then
   CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --cors"
 fi
 
-if [ "true" == "$CERC_SINGLE_PAGE_APP" ]; then
+# ${var,,} is a lower-case comparison
+if [ "true" == "${CERC_SINGLE_PAGE_APP,,}" ]; then
+  echo "Serving content as single-page app. If this is wrong, set 'CERC_SINGLE_PAGE_APP=false'"
   # Create a catchall redirect back to /
   CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --proxy http://localhost:${CERC_LISTEN_PORT}?"
+else
+  echo "Serving content normally. If this is a single-page app, set 'CERC_SINGLE_PAGE_APP=true'"
 fi
 
 LACONIC_HOSTED_CONFIG_FILE=${LACONIC_HOSTED_CONFIG_FILE}
@ -39,4 +45,4 @@ if [ -f "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
 fi
 
 /scripts/apply-runtime-env.sh ${CERC_WEBAPP_FILES_DIR}
 http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT} "${CERC_WEBAPP_FILES_DIR}"
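The `${var,,}` expansion adopted in the comparisons above is bash's (4.0+) lower-casing parameter expansion, so values like `TRUE` or `True` now enable the flags as well. A quick sketch:

```bash
CERC_ENABLE_CORS="TRUE"
# Lower-case the value before comparing, making the check case-insensitive
if [ "true" == "${CERC_ENABLE_CORS,,}" ]; then
  echo "CORS enabled"
fi
```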
@ -53,7 +53,7 @@ Inside deployment directory, open the `config.env` file and set following env v
 
 ```bash
 # External Filecoin (ETH RPC) endpoint to point the watcher to
-CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+CERC_ETH_RPC_ENDPOINTS=https://example-lotus-endpoint-1/rpc/v1,https://example-lotus-endpoint-2/rpc/v1
 ```
 
 ### Start the deployment
@ -2,7 +2,7 @@ version: "1.0"
 name: ajna
 description: "Ajna watcher stack"
 repos:
-  - git.vdb.to/cerc-io/ajna-watcher-ts@v0.1.1
+  - git.vdb.to/cerc-io/ajna-watcher-ts@v0.1.13
 containers:
   - cerc/watcher-ajna
 pods:
@ -4,7 +4,7 @@ Instructions to setup and deploy Azimuth Watcher stack
 
 ## Setup
 
-Prerequisite: `ipld-eth-server` RPC and GQL endpoints
+Prerequisite: External RPC endpoints
 
 Clone required repositories:
 
@ -44,34 +44,42 @@ network:
       - 0.0.0.0:9000:9000
     azimuth-watcher-server:
       - 0.0.0.0:3001:3001
+      - 0.0.0.0:9001:9001
     censures-watcher-job-runner:
       - 0.0.0.0:9002:9002
     censures-watcher-server:
      - 0.0.0.0:3002:3002
+      - 0.0.0.0:9003:9003
     claims-watcher-job-runner:
       - 0.0.0.0:9004:9004
     claims-watcher-server:
       - 0.0.0.0:3003:3003
+      - 0.0.0.0:9005:9005
     conditional-star-release-watcher-job-runner:
       - 0.0.0.0:9006:9006
     conditional-star-release-watcher-server:
       - 0.0.0.0:3004:3004
+      - 0.0.0.0:9007:9007
     delegated-sending-watcher-job-runner:
       - 0.0.0.0:9008:9008
     delegated-sending-watcher-server:
       - 0.0.0.0:3005:3005
+      - 0.0.0.0:9009:9009
     ecliptic-watcher-job-runner:
       - 0.0.0.0:9010:9010
     ecliptic-watcher-server:
       - 0.0.0.0:3006:3006
+      - 0.0.0.0:9011:9011
     linear-star-release-watcher-job-runner:
       - 0.0.0.0:9012:9012
     linear-star-release-watcher-server:
       - 0.0.0.0:3007:3007
+      - 0.0.0.0:9013:9013
     polls-watcher-job-runner:
       - 0.0.0.0:9014:9014
     polls-watcher-server:
       - 0.0.0.0:3008:3008
+      - 0.0.0.0:9015:9015
     gateway-server:
       - 0.0.0.0:4000:4000
 ...
@ -94,7 +102,7 @@ Inside the deployment directory, open the file `config.env` and add variable to
 
 ```bash
 # External RPC endpoints
-CERC_IPLD_ETH_RPC=
+CERC_ETH_RPC_ENDPOINTS=https://example-rpc-endpoint-1,https://example-rpc-endpoint-2
 ```
 
 * NOTE: If RPC endpoint is on the host machine, use `host.docker.internal` as the hostname to access the host port, or use the `ip a` command to find the IP address of the `docker0` interface (this will usually be something like `172.17.0.1` or `172.18.0.1`)
@ -120,4 +128,7 @@ To stop all azimuth services and also delete data:
 
 ```bash
 laconic-so deployment --dir azimuth-deployment stop --delete-volumes
+
+# Remove deployment directory (deployment will have to be recreated for a re-run)
+rm -r azimuth-deployment
 ```
@ -1,7 +1,7 @@
 version: "1.0"
 name: azimuth
 repos:
-  - github.com/cerc-io/azimuth-watcher-ts@v0.1.3
+  - github.com/cerc-io/azimuth-watcher-ts@0.1.6
 containers:
   - cerc/watcher-azimuth
 pods:
@ -43,16 +43,18 @@ customized by editing the "spec" file generated by `laconic-so deploy init`.
 ```
 $ cat graph-node-spec.yml
 stack: graph-node
-ports:
-  graph-node:
-    - '8000:8000'
-    - '8001'
-    - '8020:8020'
-    - '8030'
-  ipfs:
-    - '8080'
-    - '4001'
-    - '5001:5001'
+network:
+  ports:
+    graph-node:
+      - '8000:8000'
+      - '8001'
+      - '8020:8020'
+      - '8030'
+      - '8040'
+    ipfs:
+      - '8080'
+      - '4001'
+      - '5001:5001'
 ...
 ```
@ -64,7 +66,7 @@ laconic-so --stack graph-node deploy create --spec-file graph-node-spec.yml --de
 
 ## Start the stack
 
-Create an env file with the following values to be set before starting the stack:
+Update `config.env` file inside deployment directory with the following values before starting the stack:
 
 ```bash
 # Set ETH RPC endpoint the graph node will use
@ -76,21 +78,35 @@ export ETH_RPC_PORT=
 # The etherum network(s) graph-node will connect to
 # Set this to a space-separated list of the networks where each entry has the form NAME:URL
 export ETH_NETWORKS=
+
+# Optional:
+
+# Timeout for ETH RPC requests in seconds (default: 180s)
+export GRAPH_ETHEREUM_JSON_RPC_TIMEOUT=
+
+# Number of times to retry ETH RPC requests (default: 10)
+export GRAPH_ETHEREUM_REQUEST_RETRIES=
+
+# Maximum number of blocks to scan for triggers in each request (default: 2000)
+export GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE=
+
+# Maximum number of concurrent requests made against Ethereum for requesting transaction receipts during block ingestion (default: 1000)
+export GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS=
+
+# Ref: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
 ```
 
-Example env file:
+Example `config.env` file:
 
 ```bash
 export ETH_RPC_HOST=filecoin.chainup.net
 export ETH_RPC_PORT=443
 
 export ETH_NETWORKS=filecoin:https://filecoin.chainup.net/rpc/v1
-```
-
-Set the environment variables:
-
-```bash
-source <PATH_TO_ENV_FILE>
+
+export GRAPH_ETHEREUM_JSON_RPC_TIMEOUT=360
+export GRAPH_ETHEREUM_REQUEST_RETRIES=5
+export GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE=50
 ```
 
 Deploy the stack:
|
@ -3,7 +3,9 @@ name: graph-node
|
|||||||
description: "Stack for running graph-node"
|
description: "Stack for running graph-node"
|
||||||
repos:
|
repos:
|
||||||
- github.com/graphprotocol/graph-node
|
- github.com/graphprotocol/graph-node
|
||||||
|
- github.com/cerc-io/watcher-ts@v0.2.92
|
||||||
containers:
|
containers:
|
||||||
- cerc/graph-node
|
- cerc/graph-node
|
||||||
|
- cerc/watcher-ts
|
||||||
pods:
|
pods:
|
||||||
- graph-node
|
- graph-node
|
||||||
|
@ -14,10 +14,11 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
 from stack_orchestrator.util import get_yaml
-from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext
+from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand
+from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.deploy.stack_state import State
 from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
-from stack_orchestrator.command_types import CommandOptions
+from stack_orchestrator.opts import opts
 from enum import Enum
 from pathlib import Path
 from shutil import copyfile, copytree
@ -61,7 +62,7 @@ def _get_node_moniker_from_config(network_dir: Path):
     return moniker
 
 
-def _get_node_key_from_gentx(options: CommandOptions, gentx_file_name: str):
+def _get_node_key_from_gentx(gentx_file_name: str):
     gentx_file_path = Path(gentx_file_name)
     if gentx_file_path.exists():
         with open(Path(gentx_file_name), "rb") as f:
@ -76,24 +77,24 @@ def _comma_delimited_to_list(list_str: str):
     return list_str.split(",") if list_str else []
 
 
-def _get_node_keys_from_gentx_files(options: CommandOptions, gentx_file_list: str):
+def _get_node_keys_from_gentx_files(gentx_file_list: str):
     node_keys = []
     gentx_files = _comma_delimited_to_list(gentx_file_list)
     for gentx_file in gentx_files:
-        node_key = _get_node_key_from_gentx(options, gentx_file)
+        node_key = _get_node_key_from_gentx(gentx_file)
         if node_key:
             node_keys.append(node_key)
     return node_keys
 
 
-def _copy_gentx_files(options: CommandOptions, network_dir: Path, gentx_file_list: str):
+def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
     gentx_files = _comma_delimited_to_list(gentx_file_list)
     for gentx_file in gentx_files:
         gentx_file_path = Path(gentx_file)
         copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)))
 
 
-def _remove_persistent_peers(options: CommandOptions, network_dir: Path):
+def _remove_persistent_peers(network_dir: Path):
     config_file_path = _config_toml_path(network_dir)
     if not config_file_path.exists():
         print("Error: config.toml not found")
@ -107,7 +108,7 @@ def _remove_persistent_peers(options: CommandOptions, network_dir: Path):
         output_file.write(config_file_content)
 
 
-def _insert_persistent_peers(options: CommandOptions, config_dir: Path, new_persistent_peers: str):
+def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str):
     config_file_path = config_dir.joinpath("config.toml")
     if not config_file_path.exists():
         print("Error: config.toml not found")
@ -150,7 +151,7 @@ def _phase_from_params(parameters):
 
 def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args):
 
-    options = command_context.cluster_context.options
+    options = opts.o
 
     currency = "stake" # Does this need to be a parameter?
 
@ -237,7 +238,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
             print("Error: --gentx-files must be supplied")
             sys.exit(1)
         # First look in the supplied gentx files for the other nodes' keys
-        other_node_keys = _get_node_keys_from_gentx_files(options, parameters.gentx_file_list)
+        other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_file_list)
         # Add those keys to our genesis, with balances we determine here (why?)
         for other_node_key in other_node_keys:
             outputk, statusk = run_container_command(
@ -246,7 +247,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
             if options.debug:
                 print(f"Command output: {outputk}")
         # Copy the gentx json files into our network dir
-        _copy_gentx_files(options, network_dir, parameters.gentx_file_list)
+        _copy_gentx_files(network_dir, parameters.gentx_file_list)
         # Now we can run collect-gentxs
         output1, status1 = run_container_command(
             command_context, "laconicd", f"laconicd collect-gentxs --home {laconicd_home_path_in_container}", mounts)
@ -255,7 +256,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
         print(f"Generated genesis file, please copy to other nodes as required: \
             {os.path.join(network_dir, 'config', 'genesis.json')}")
         # Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now
-        _remove_persistent_peers(options, network_dir)
+        _remove_persistent_peers(network_dir)
         # In both cases we validate the genesis file now
        output2, status1 = run_container_command(
             command_context, "laconicd", f"laconicd validate-genesis --home {laconicd_home_path_in_container}", mounts)
@ -266,7 +267,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
         sys.exit(1)
 
 
-def create(context: DeploymentContext, extra_args):
+def create(deployment_context: DeploymentContext, extra_args):
     network_dir = extra_args[0]
     if network_dir is None:
         print("Error: --network-dir must be supplied")
@ -285,15 +286,15 @@ def create(context: DeploymentContext, extra_args):
         sys.exit(1)
     # Copy the network directory contents into our deployment
     # TODO: change this to work with non local paths
-    deployment_config_dir = context.deployment_dir.joinpath("data", "laconicd-config")
+    deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config")
     copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
     # If supplied, add the initial persistent peers to the config file
     if extra_args[1]:
         initial_persistent_peers = extra_args[1]
-        _insert_persistent_peers(context.command_context.cluster_context.options, deployment_config_dir, initial_persistent_peers)
+        _insert_persistent_peers(deployment_config_dir, initial_persistent_peers)
     # Copy the data directory contents into our deployment
     # TODO: change this to work with non local paths
-    deployment_data_dir = context.deployment_dir.joinpath("data", "laconicd-data")
+    deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
     copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
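The `_remove_persistent_peers` / `_insert_persistent_peers` pair above rewrites the `persistent_peers` entry in the node's `config.toml`; this commit simply drops their unused `options` parameter. For orientation, a minimal sketch of what such a helper does, assuming the plain `persistent_peers = "..."` line format that CometBFT/Tendermint config files use (the real implementation is not shown in this diff and may differ):

```python
import re
from pathlib import Path


def _insert_persistent_peers_sketch(config_dir: Path, new_persistent_peers: str):
    # Hypothetical illustration: replace the persistent_peers value in config.toml
    config_file_path = config_dir.joinpath("config.toml")
    if not config_file_path.exists():
        print("Error: config.toml not found")
        return
    content = config_file_path.read_text()
    # persistent_peers is a quoted, comma-delimited list of id@host:port entries
    content = re.sub(r'^persistent_peers = ".*"$',
                     f'persistent_peers = "{new_persistent_peers}"',
                     content, flags=re.MULTILINE)
    config_file_path.write_text(content)
```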
@@ -53,7 +53,7 @@ Inside deployment directory, open the `config.env` file and set following env variables

 ```bash
 # External Filecoin (ETH RPC) endpoint to point the watcher to
-CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+CERC_ETH_RPC_ENDPOINTS=https://example-lotus-endpoint-1/rpc/v1,https://example-lotus-endpoint-2/rpc/v1
 ```

 ### Start the deployment
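The config variable changes from a single `CERC_ETH_RPC_ENDPOINT` to a comma-delimited `CERC_ETH_RPC_ENDPOINTS` list, which lets the watcher fall back between upstream RPC endpoints. A minimal sketch of how such a list might be consumed (the parsing shown here is an illustration of the convention, not the watcher's actual code):

```python
import os


def rpc_endpoints_from_env() -> list[str]:
    # Split the comma-delimited endpoint list, dropping empty entries
    raw = os.environ.get("CERC_ETH_RPC_ENDPOINTS", "")
    return [endpoint.strip() for endpoint in raw.split(",") if endpoint.strip()]
```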
@@ -2,7 +2,7 @@ version: "1.0"
 name: merkl-sushiswap-v3
 description: "SushiSwap v3 watcher stack"
 repos:
-  - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.7
+  - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.14
 containers:
   - cerc/watcher-merkl-sushiswap-v3
 pods:
@@ -134,6 +134,29 @@ Note: Use `host.docker.internal` as host to access ports on the host machine

 Place the dashboard json files in grafana dashboards config directory (`monitoring-deployment/config/monitoring/grafana/dashboards`) in the deployment folder

+#### Graph Node Config
+
+For the graph-node dashboard, a postgres datasource needs to be set up in `monitoring-deployment/config/monitoring/grafana/provisioning/datasources/graph-node-postgres.yml` (in the deployment folder)
+
+```yml
+# graph-node-postgres.yml
+...
+datasources:
+  - name: Graph Node Postgres
+    type: postgres
+    jsonData:
+      # Set name to remote graph-node database name
+      database: graph-node
+      ...
+    # Set user to remote graph-node database username
+    user: graph-node
+    # Add URL for remote graph-node database
+    url: graph-node-db:5432
+    # Set password for graph-node database
+    secureJsonData:
+      password: 'password'
+```
+
 ### Env

 Set the following env variables in the deployment env config file (`monitoring-deployment/config.env`):
@@ -156,6 +179,11 @@ Set the following env variables in the deployment env config file (`monitoring-deployment/config.env`):
 # Grafana server host URL (used in various links in alerts, etc.)
 # (Optional, default: http://localhost:3000)
 GF_SERVER_ROOT_URL=
+
+# RPC endpoint used by graph-node for upstream head metric
+# (Optional, default: https://mainnet.infura.io/v3)
+GRAPH_NODE_RPC_ENDPOINT=
 ```

 ## Start the stack

@@ -57,35 +57,35 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
     metrics_path: /metrics
     scheme: http
     static_configs:
-      - targets: ['AZIMUTH_WATCHER_HOST:AZIMUTH_WATCHER_PORT']
+      - targets: ['AZIMUTH_WATCHER_HOST:AZIMUTH_WATCHER_METRICS_PORT', 'AZIMUTH_WATCHER_HOST:AZIMUTH_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'azimuth'
           chain: 'ethereum'
-      - targets: ['CENSURES_WATCHER_HOST:CENSURES_WATCHER_PORT']
+      - targets: ['CENSURES_WATCHER_HOST:CENSURES_WATCHER_METRICS_PORT', 'CENSURES_WATCHER_HOST:CENSURES_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'censures'
           chain: 'ethereum'
-      - targets: ['CLAIMS_WATCHER_HOST:CLAIMS_WATCHER_PORT']
+      - targets: ['CLAIMS_WATCHER_HOST:CLAIMS_WATCHER_METRICS_PORT', 'CLAIMS_WATCHER_HOST:CLAIMS_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'claims'
           chain: 'ethereum'
-      - targets: ['CONDITIONAL_STAR_RELEASE_WATCHER_HOST:CONDITIONAL_STAR_RELEASE_WATCHER_PORT']
+      - targets: ['CONDITIONAL_STAR_RELEASE_WATCHER_HOST:CONDITIONAL_STAR_RELEASE_WATCHER_METRICS_PORT', 'CONDITIONAL_STAR_RELEASE_WATCHER_HOST:CONDITIONAL_STAR_RELEASE_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'conditional_star_release'
           chain: 'ethereum'
-      - targets: ['DELEGATED_SENDING_WATCHER_HOST:DELEGATED_SENDING_WATCHER_PORT']
+      - targets: ['DELEGATED_SENDING_WATCHER_HOST:DELEGATED_SENDING_WATCHER_METRICS_PORT', 'DELEGATED_SENDING_WATCHER_HOST:DELEGATED_SENDING_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'delegated_sending'
           chain: 'ethereum'
-      - targets: ['ECLIPTIC_WATCHER_HOST:ECLIPTIC_WATCHER_PORT']
+      - targets: ['ECLIPTIC_WATCHER_HOST:ECLIPTIC_WATCHER_METRICS_PORT', 'ECLIPTIC_WATCHER_HOST:ECLIPTIC_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'ecliptic'
           chain: 'ethereum'
-      - targets: ['LINEAR_STAR_WATCHER_HOST:LINEAR_STAR_WATCHER_PORT']
+      - targets: ['LINEAR_STAR_WATCHER_HOST:LINEAR_STAR_WATCHER_METRICS_PORT', 'LINEAR_STAR_WATCHER_HOST:LINEAR_STAR_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'linear_star_release'
           chain: 'ethereum'
-      - targets: ['POLLS_WATCHER_HOST:POLLS_WATCHER_PORT']
+      - targets: ['POLLS_WATCHER_HOST:POLLS_WATCHER_METRICS_PORT', 'POLLS_WATCHER_HOST:POLLS_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'polls'
           chain: 'ethereum'
@@ -95,11 +95,11 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
     metrics_path: /metrics
     scheme: http
     static_configs:
-      - targets: ['SUSHISWAP_WATCHER_HOST:SUSHISWAP_WATCHER_PORT']
+      - targets: ['SUSHISWAP_WATCHER_HOST:SUSHISWAP_WATCHER_METRICS_PORT', 'SUSHISWAP_WATCHER_HOST:SUSHISWAP_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'sushiswap'
           chain: 'filecoin'
-      - targets: ['MERKLE_SUSHISWAP_WATCHER_HOST:MERKLE_SUSHISWAP_WATCHER_PORT']
+      - targets: ['MERKLE_SUSHISWAP_WATCHER_HOST:MERKLE_SUSHISWAP_WATCHER_METRICS_PORT', 'MERKLE_SUSHISWAP_WATCHER_HOST:MERKLE_SUSHISWAP_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'merkl_sushiswap'
           chain: 'filecoin'
@@ -109,25 +109,35 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
     metrics_path: /metrics
     scheme: http
     static_configs:
-      - targets: ['AJNA_WATCHER_HOST:AJNA_WATCHER_PORT']
+      - targets: ['AJNA_WATCHER_HOST:AJNA_WATCHER_METRICS_PORT', 'AJNA_WATCHER_HOST:AJNA_WATCHER_GQL_METRICS_PORT']
         labels:
           instance: 'ajna'
           chain: 'filecoin'
+
+  - job_name: graph-node
+    metrics_path: /metrics
+    scrape_interval: 30s
+    static_configs:
+      - targets: ['GRAPH_NODE_HOST:GRAPH_NODE_HOST_METRICS_PORT']
 ```

 Add scrape config as done above for any additional watcher to add it to the Watchers dashboard.
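Each watcher now exposes two scrape targets, the server metrics port and a separate GQL metrics port. Before reloading Prometheus it can be handy to confirm both endpoints answer; a quick check along these lines works (the hostnames and ports below are placeholders for the values substituted into the config above):

```python
import urllib.request


def check_metrics_endpoints(targets: list[str]) -> None:
    # Probe each watcher metrics endpoint the way Prometheus will: GET /metrics
    for target in targets:
        url = f"http://{target}/metrics"
        try:
            with urllib.request.urlopen(url, timeout=5) as response:
                print(f"{url}: HTTP {response.status}")
        except OSError as e:
            print(f"{url}: unreachable ({e})")


# Example with placeholder host/port values
check_metrics_endpoints(["localhost:9000", "localhost:9001"])
```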
 ### Grafana alerts config

-Place the pre-configured watcher alerts rules in Grafana provisioning directory:
+Place the pre-configured alerts rules in Grafana provisioning directory:

 ```bash
+# watcher alert rules
 cp monitoring-watchers-deployment/config/monitoring/watcher-alert-rules.yml monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/
+
+# subgraph alert rules
+cp monitoring-watchers-deployment/config/monitoring/subgraph-alert-rules.yml monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/
 ```

 Update the alerting contact points config (`monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/contactpoints.yml`) with desired contact points

-Add corresponding routes to the notification policies config (`monitoring-watchers-deployment/monitoring/grafana/provisioning/alerting/policies.yaml`) with appropriate object-matchers:
+Add corresponding routes to the notification policies config (`monitoring-watchers-deployment/config/monitoring/grafana/provisioning/alerting/policies.yml`) with appropriate object-matchers:

 ```yml
 ...
@@ -135,7 +145,7 @@ Add corresponding routes to the notification policies config (`monitoring-watche
   - receiver: SlackNotifier
     object_matchers:
       # Add matchers below
-      - ['grafana_folder', '=', 'WatcherAlerts']
+      - ['grafana_folder', '=~', 'WatcherAlerts|SubgraphAlerts']
 ```

 ### Env
@@ -149,6 +159,9 @@ Set the following env variables in the deployment env config file (`monitoring-w
 # Grafana server host URL to be used
 # (Optional, default: http://localhost:3000)
 GF_SERVER_ROOT_URL=
+
+# List of subgraph ids to configure alerts for (separated by |)
+CERC_GRAFANA_ALERTS_SUBGRAPH_IDS=
 ```
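`CERC_GRAFANA_ALERTS_SUBGRAPH_IDS` takes a `|`-separated list of subgraph ids, matching the `=~` regex object-matcher added above. A sketch of how such a list could be split for per-subgraph alert provisioning (an illustration only; the deployment's own templating may differ, and the ids shown are hypothetical):

```python
def subgraph_ids_from_env(raw: str) -> list[str]:
    # Ids are separated by '|', so the raw string can double as a regex alternation
    return [subgraph_id.strip() for subgraph_id in raw.split("|") if subgraph_id.strip()]


# Example with two made-up subgraph deployment ids
print(subgraph_ids_from_env("subgraph-id-one|subgraph-id-two"))
```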
 ## Start the stack

@@ -1,7 +1,7 @@
 version: "0.1"
 name: monitoring
 repos:
-  - github.com/cerc-io/watcher-ts@v0.2.81
+  - github.com/cerc-io/watcher-ts@v0.2.92
 containers:
   - cerc/watcher-ts
 pods:

@@ -1,10 +0,0 @@
-version: "1.0"
-name: snowballtools-base-backend
-description: "snowballtools-base-backend"
-repos:
-  - github.com/snowball-tools/snowballtools-base
-containers:
-  - cerc/webapp-base
-  - cerc/snowballtools-base-backend
-pods:
-  - snowballtools-base-backend

@@ -53,7 +53,7 @@ Inside deployment directory, open the `config.env` file and set following env variables

 ```bash
 # External Filecoin (ETH RPC) endpoint to point the watcher to
-CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+CERC_ETH_RPC_ENDPOINTS=https://example-lotus-endpoint-1/rpc/v1,https://example-lotus-endpoint-2/rpc/v1
 ```

 ### Start the deployment

@@ -2,7 +2,7 @@ version: "1.0"
 name: sushiswap-v3
 description: "SushiSwap v3 watcher stack"
 repos:
-  - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.7
+  - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.14
 containers:
   - cerc/watcher-sushiswap-v3
 pods:

@@ -2,10 +2,10 @@ version: "1.0"
 name: webapp-deployer-backend
 description: "Deployer for webapps"
 repos:
-  - git.vdb.to/telackey/webapp-deployment-status-api
+  - git.vdb.to/cerc-io/webapp-deployment-status-api
 containers:
   - cerc/webapp-deployer-backend
 pods:
   - name: webapp-deployer-backend
-    repository: git.vdb.to/telackey/webapp-deployment-status-api
+    repository: git.vdb.to/cerc-io/webapp-deployment-status-api
     path: ./
@@ -26,7 +26,15 @@ import click
 from pathlib import Path
 from stack_orchestrator import constants
 from stack_orchestrator.opts import opts
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
+from stack_orchestrator.util import (
+    get_stack_path,
+    include_exclude_check,
+    get_parsed_stack_config,
+    global_options2,
+    get_dev_root_path,
+    stack_is_in_deployment,
+    resolve_compose_file,
+)
 from stack_orchestrator.deploy.deployer import Deployer, DeployerException
 from stack_orchestrator.deploy.deployer_factory import getDeployer
 from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
@@ -59,6 +67,7 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
     if deploy_to is None:
         deploy_to = "compose"

+    stack = get_stack_path(stack)
     ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to)
     # Subcommand is executed now, by the magic of click

@@ -273,16 +282,12 @@ def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):

 # stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
 def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):

     dev_root_path = get_dev_root_path(ctx)

-    # TODO: huge hack, fix this
-    # If the caller passed a path for the stack file, then we know that we can get the compose files
-    # from the same directory
-    deployment = False
-    if isinstance(stack, os.PathLike):
-        compose_dir = stack.parent.joinpath("compose")
-        deployment = True
+    # TODO: hack, this should be encapsulated by the deployment context.
+    deployment = stack_is_in_deployment(stack)
+    if deployment:
+        compose_dir = stack.joinpath("compose")
     else:
         # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
         compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
@@ -324,7 +329,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
             pod_path = pod["path"]
             if include_exclude_check(pod_name, include, exclude):
                 if pod_repository is None or pod_repository == "internal":
-                    compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+                    if deployment:
+                        compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+                    else:
+                        compose_file_name = resolve_compose_file(stack, pod_name)
                 else:
                     if deployment:
                         compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
@@ -336,6 +344,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
                     if pod_post_start_command is not None:
                         post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
             else:
+                # TODO: fix this code for external stack with scripts
                 pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
                 compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
                 pod_pre_start_command = pod.get("pre_start_command")
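The refactor above routes compose-file lookup through two new utility helpers, `stack_is_in_deployment` and `resolve_compose_file`. Their implementations live in `stack_orchestrator/util.py` and are not shown in this diff; a plausible sketch of the behavior the call sites imply, offered purely as an assumption for the reader:

```python
from pathlib import Path


def stack_is_in_deployment_sketch(stack) -> bool:
    # A deployment directory carries its own compose files; assume some marker
    # file inside the directory distinguishes it from a plain stack name
    return isinstance(stack, Path) and stack.joinpath("deployment.yml").exists()


def resolve_compose_file_sketch(stack, pod_name: str) -> str:
    # For an external stack path, look next to the stack definition;
    # otherwise fall back to the built-in data/compose directory
    if isinstance(stack, Path):
        candidate = stack.parent.joinpath("compose", f"docker-compose-{pod_name}.yml")
        if candidate.exists():
            return str(candidate)
    builtin = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
    return str(builtin.joinpath(f"docker-compose-{pod_name}.yml"))
```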
@@ -16,7 +16,7 @@
 import os
 from typing import List, Any
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
-from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list
+from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
 from stack_orchestrator.opts import opts

@@ -27,7 +27,7 @@ def _container_image_from_service(stack: str, service: str):
     pods = get_pod_list(parsed_stack)
     yaml = get_yaml()
     for pod in pods:
-        pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
+        pod_file_path = resolve_compose_file(stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "services" in parsed_pod_file:
             services = parsed_pod_file["services"]
@@ -50,15 +50,15 @@ def command(ctx, dir):

 def make_deploy_context(ctx) -> DeployCommandContext:
     context: DeploymentContext = ctx.obj
-    stack_file_path = context.get_stack_file()
     env_file = context.get_env_file()
     cluster_name = context.get_cluster_id()
     if constants.deploy_to_key in context.spec.obj:
         deployment_type = context.spec.obj[constants.deploy_to_key]
     else:
         deployment_type = constants.compose_deploy_type
-    return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file,
-                                 deployment_type)
+    stack = context.deployment_dir
+    return create_deploy_context(ctx.parent.parent.obj, context, stack, None, None,
+                                 cluster_name, env_file, deployment_type)


 @command.command()
@@ -123,6 +123,7 @@ def push_images(ctx):
 @click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
 @click.pass_context
 def port(ctx, extra_args):
+    ctx.obj = make_deploy_context(ctx)
     port_operation(ctx, extra_args)
@@ -24,9 +24,10 @@ from secrets import token_hex
 import sys
 from stack_orchestrator import constants
 from stack_orchestrator.opts import opts
-from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
+from stack_orchestrator.util import (get_stack_path, get_parsed_deployment_spec, get_parsed_stack_config,
                                      global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
-                                     get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file)
+                                     get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file,
+                                     resolve_config_dir)
 from stack_orchestrator.deploy.spec import Spec
 from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
 from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
@@ -43,7 +44,7 @@ def _get_ports(stack):
     pods = get_pod_list(parsed_stack)
     yaml = get_yaml()
     for pod in pods:
-        pod_file_path = get_pod_file_path(parsed_stack, pod)
+        pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "services" in parsed_pod_file:
             for svc_name, svc in parsed_pod_file["services"].items():
@@ -79,7 +80,7 @@ def _get_named_volumes(stack):
         return ret

     for pod in pods:
-        pod_file_path = get_pod_file_path(parsed_stack, pod)
+        pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "volumes" in parsed_pod_file:
             volumes = parsed_pod_file["volumes"]
@@ -237,6 +238,11 @@ def _find_extra_config_dirs(parsed_pod_file, pod):
                     config_dir = host_path.split("/")[2]
                     if config_dir != pod:
                         config_dirs.add(config_dir)
+            for env_file in service_info.get("env_file", []):
+                if env_file.startswith("../config"):
+                    config_dir = env_file.split("/")[2]
+                    if config_dir != pod:
+                        config_dirs.add(config_dir)
     return config_dirs
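With the new loop, `_find_extra_config_dirs` discovers config directories referenced from a service's `env_file` entries as well as from its volume host paths. Pieced together from the fragments visible in this hunk, the whole function plausibly looks like the following; the volume-scanning half is inferred from context, so treat it as a sketch:

```python
def _find_extra_config_dirs_sketch(parsed_pod_file, pod) -> set:
    config_dirs = set()
    services = parsed_pod_file.get("services", {})
    for service_info in services.values():
        # Volume host paths like ../config/<dir>/... reference a config dir
        for volume in service_info.get("volumes", []):
            host_path = volume.split(":")[0]
            if host_path.startswith("../config"):
                config_dir = host_path.split("/")[2]
                if config_dir != pod:
                    config_dirs.add(config_dir)
        # env_file entries like ../config/<dir>/foo.env reference one too
        for env_file in service_info.get("env_file", []):
            if env_file.startswith("../config"):
                config_dir = env_file.split("/")[2]
                if config_dir != pod:
                    config_dirs.add(config_dir)
    return config_dirs
```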
@@ -453,7 +459,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
     _check_volume_definitions(parsed_spec)
     stack_name = parsed_spec["stack"]
     deployment_type = parsed_spec[constants.deploy_to_key]
-    stack_file = get_stack_file_path(stack_name)
+    stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
     parsed_stack = get_parsed_stack_config(stack_name)
     if opts.o.debug:
         print(f"parsed spec: {parsed_spec}")
@@ -466,7 +472,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
     os.mkdir(deployment_dir_path)
     # Copy spec file and the stack file into the deployment dir
     copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
-    copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file)))
+    copyfile(stack_file, deployment_dir_path.joinpath(constants.stack_file_name))
     _create_deployment_file(deployment_dir_path)
     # Copy any config varibles from the spec file into an env file suitable for compose
     _write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name))
@@ -480,10 +486,9 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
     os.mkdir(destination_compose_dir)
     destination_pods_dir = deployment_dir_path.joinpath("pods")
     os.mkdir(destination_pods_dir)
-    data_dir = Path(__file__).absolute().parent.parent.joinpath("data")
     yaml = get_yaml()
     for pod in pods:
-        pod_file_path = get_pod_file_path(parsed_stack, pod)
+        pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
         destination_pod_dir = destination_pods_dir.joinpath(pod)
@@ -497,7 +502,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
     config_dirs = {pod}
     config_dirs = config_dirs.union(extra_config_dirs)
     for config_dir in config_dirs:
-        source_config_dir = data_dir.joinpath("config", config_dir)
+        source_config_dir = resolve_config_dir(stack_name, config_dir)
         if os.path.exists(source_config_dir):
             destination_config_dir = deployment_dir_path.joinpath("config", config_dir)
             # If the same config dir appears in multiple pods, it may already have been copied
@@ -29,16 +29,29 @@ def _image_needs_pushed(image: str):
     return image.endswith(":local")


+def _remote_tag_for_image(image: str, remote_repo_url: str):
+    # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
+    major_parts = image.split("/", 2)
+    image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
+    (image_name, image_version) = image_name_with_version.split(":")
+    if image_version == "local":
+        return f"{remote_repo_url}/{image_name}:deploy"
+    else:
+        return image
+
+
+# Note: do not add any calls to this function
 def remote_image_exists(remote_repo_url: str, local_tag: str):
     docker = DockerClient()
     try:
-        remote_tag = remote_tag_for_image(local_tag, remote_repo_url)
+        remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
         result = docker.manifest.inspect(remote_tag)
         return True if result else False
     except Exception: # noqa: E722
         return False


+# Note: do not add any calls to this function
 def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
     if not additional_tags:
         return
@@ -47,18 +60,20 @@ def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
         raise Exception(f"{local_tag} does not exist in {remote_repo_url}")

     docker = DockerClient()
-    remote_tag = remote_tag_for_image(local_tag, remote_repo_url)
-    new_remote_tags = [remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
+    remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
+    new_remote_tags = [_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
     docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)


-def remote_tag_for_image(image: str, remote_repo_url: str):
+def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
     # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
     major_parts = image.split("/", 2)
     image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
     (image_name, image_version) = image_name_with_version.split(":")
     if image_version == "local":
-        return f"{remote_repo_url}/{image_name}:deploy"
+        # Salt the tag with part of the deployment id to make it unique to this deployment
+        deployment_tag = deployment_id[-8:]
+        return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
     else:
         return image

@@ -73,14 +88,14 @@ def push_images_operation(command_context: DeployCommandContext, deployment_cont
     docker = DockerClient()
     for image in images:
         if _image_needs_pushed(image):
-            remote_tag = remote_tag_for_image(image, remote_repo_url)
+            remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
             if opts.o.verbose:
                 print(f"Tagging {image} to {remote_tag}")
             docker.image.tag(image, remote_tag)
     # Run docker push commands to upload
     for image in images:
         if _image_needs_pushed(image):
-            remote_tag = remote_tag_for_image(image, remote_repo_url)
+            remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
             if opts.o.verbose:
                 print(f"Pushing image {remote_tag}")
             docker.image.push(remote_tag)
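The effect of the tagging helpers is easiest to see with concrete values. Both functions are pure string manipulation, so their behavior can be demonstrated directly (the registry URL and deployment id below are made-up examples):

```python
from stack_orchestrator.deploy.images import remote_tag_for_image_unique

# A :local image gets re-homed into the registry and salted with the
# last 8 characters of the deployment id
assert remote_tag_for_image_unique(
    "cerc/test-container:local", "registry.example.com/org", "1234567890abcdef"
) == "registry.example.com/org/test-container:deploy-90abcdef"

# Non-:local images pass through unchanged
assert remote_tag_for_image_unique(
    "postgres:15", "registry.example.com/org", "1234567890abcdef"
) == "postgres:15"
```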
@@ -26,7 +26,7 @@ from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variable
 from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
 from stack_orchestrator.deploy.deploy_types import DeployEnvVars
 from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
-from stack_orchestrator.deploy.images import remote_tag_for_image
+from stack_orchestrator.deploy.images import remote_tag_for_image_unique

 DEFAULT_VOLUME_RESOURCES = Resources({
     "reservations": {"storage": "2Gi"}
@@ -326,8 +326,11 @@ class ClusterInfo:
         if opts.o.debug:
             print(f"Merged envs: {envs}")
         # Re-write the image tag for remote deployment
-        image_to_use = remote_tag_for_image(
-            image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image
+        # Note self.app_name has the same value as deployment_id
+        image_to_use = remote_tag_for_image_unique(
+            image,
+            self.spec.get_image_registry(),
+            self.app_name) if self.spec.get_image_registry() is not None else image
         volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
         container = client.V1Container(
             name=container_name,
@@ -24,7 +24,7 @@ import uuid

 import click

-from stack_orchestrator.deploy.images import remote_image_exists, add_tags_to_image
+from stack_orchestrator.deploy.images import remote_image_exists
 from stack_orchestrator.deploy.webapp import deploy_webapp
 from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient, TimedLogger,
                                                    build_container_image, push_container_image,
@@ -39,11 +39,12 @@ def process_app_deployment_request(
     app_deployment_request,
     deployment_record_namespace,
     dns_record_namespace,
-    dns_suffix,
+    default_dns_suffix,
     deployment_parent_dir,
     kube_config,
     image_registry,
     force_rebuild,
+    fqdn_policy,
     logger
 ):
     logger.log("BEGIN - process_app_deployment_request")
@@ -56,14 +57,15 @@ def process_app_deployment_request(
     requested_name = hostname_for_deployment_request(app_deployment_request, laconic)
     logger.log(f"Determined requested name: {requested_name}")

-    # HACK
     if "." in requested_name:
-        raise Exception("Only unqualified hostnames allowed at this time.")
-
-    fqdn = f"{requested_name}.{dns_suffix}"
+        if "allow" == fqdn_policy or "preexisting" == fqdn_policy:
+            fqdn = requested_name
+        else:
+            raise Exception(f"{requested_name} is invalid: only unqualified hostnames are allowed.")
+    else:
+        fqdn = f"{requested_name}.{default_dns_suffix}"
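The new `--fqdn-policy` option changes hostname handling: `prohibit` (the default) keeps the old behavior of rejecting dotted names, `allow` accepts a fully-qualified name as-is, and `preexisting` additionally requires (further down) that a DnsRecord already exists for it. Distilled into a standalone helper for clarity, this is a restatement of the logic above, not code from the diff:

```python
def resolve_fqdn(requested_name: str, default_dns_suffix: str, fqdn_policy: str) -> str:
    # Dotted names are FQDNs; whether they are accepted depends on the policy
    if "." in requested_name:
        if fqdn_policy in ("allow", "preexisting"):
            return requested_name
        raise Exception(f"{requested_name} is invalid: only unqualified hostnames are allowed.")
    # Unqualified names always get the default suffix appended
    return f"{requested_name}.{default_dns_suffix}"
```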

     # 3. check ownership of existing dnsrecord vs this request
-    # TODO: Support foreign DNS
     dns_crn = f"{dns_record_namespace}/{fqdn}"
     dns_record = laconic.get_record(dns_crn)
     if dns_record:
@@ -75,7 +77,9 @@ def process_app_deployment_request(
             logger.log(f"Matched DnsRecord ownership: {matched_owner}")
         else:
             raise Exception("Unable to confirm ownership of DnsRecord %s for request %s" %
-                            (dns_record.id, app_deployment_request.id))
+                            (dns_crn, app_deployment_request.id))
+    elif "preexisting" == fqdn_policy:
+        raise Exception(f"No pre-existing DnsRecord {dns_crn} could be found for request {app_deployment_request.id}.")

     # 4. get build and runtime config from request
     env_filename = None
@@ -95,44 +99,62 @@ def process_app_deployment_request(

     deployment_record = laconic.get_record(app_deployment_crn)
     deployment_dir = os.path.join(deployment_parent_dir, fqdn)
+    # At present we use this to generate a unique but stable ID for the app's host container
+    # TODO: implement support to derive this transparently from the already-unique deployment id
+    unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
     deployment_config_file = os.path.join(deployment_dir, "config.env")
-    # TODO: Is there any reason not to simplify the hash input to the app_deployment_crn?
-    deployment_container_tag = "laconic-webapp/%s:local" % hashlib.md5(deployment_dir.encode()).hexdigest()
+    deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id
     app_image_shared_tag = f"laconic-webapp/{app.id}:local"
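The container tag is now derived from a 16-character truncation of the fqdn's md5 digest rather than from the full digest of the deployment directory path, so the same fqdn always yields the same, shorter tag. Since this is pure stdlib it can be verified directly (the fqdn below is an example, not a value from the diff):

```python
import hashlib

fqdn = "myapp.laconic.example"  # an example fqdn
unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id
print(deployment_container_tag)  # laconic-webapp/<16 hex chars>:local
```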
     # b. check for deployment directory (create if necessary)
     if not os.path.exists(deployment_dir):
         if deployment_record:
             raise Exception("Deployment record %s exists, but not deployment dir %s. Please remove name." %
                             (app_deployment_crn, deployment_dir))
-        print("deploy_webapp", deployment_dir)
+        logger.log(f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}")
         deploy_webapp.create_deployment(ctx, deployment_dir, deployment_container_tag,
                                         f"https://{fqdn}", kube_config, image_registry, env_filename)
     elif env_filename:
         shutil.copyfile(env_filename, deployment_config_file)

     needs_k8s_deploy = False
+    if force_rebuild:
+        logger.log("--force-rebuild is enabled so the container will always be built now, even if nothing has changed in the app")
     # 6. build container (if needed)
-    if not deployment_record or deployment_record.attributes.application != app.id:
+    # TODO: add a comment that explains what this code is doing (not clear to me)
+    if not deployment_record or deployment_record.attributes.application != app.id or force_rebuild:
         needs_k8s_deploy = True
         # check if the image already exists
         shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
+        # Note: in the code below, calls to add_tags_to_image() won't work at present.
+        # This is because SO deployment code in general re-names the container image
+        # to be unique to the deployment. This is done transparently
+        # and so when we call add_tags_to_image() here and try to add tags to the remote image,
+        # we get the image name wrong. Accordingly I've disabled the relevant code for now.
+        # This is safe because we are running with --force-rebuild at present
         if shared_tag_exists and not force_rebuild:
             # simply add our unique tag to the existing image and we are done
-            logger.log(f"Using existing app image {app_image_shared_tag} for {deployment_container_tag}")
-            add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
+            logger.log(
+                f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} "
+                "tagging it with: {deployment_container_tag} to use in this deployment"
+            )
+            # add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
             logger.log("Tag complete")
         else:
             extra_build_args = [] # TODO: pull from request
-            logger.log(f"Building container image {deployment_container_tag}")
+            logger.log(f"Building container image: {deployment_container_tag}")
             build_container_image(app, deployment_container_tag, extra_build_args, logger)
             logger.log("Build complete")
-            logger.log(f"Pushing container image {deployment_container_tag}")
+            logger.log(f"Pushing container image: {deployment_container_tag}")
             push_container_image(deployment_dir, logger)
             logger.log("Push complete")
             # The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
-            logger.log(f"Updating app image tag {app_image_shared_tag} from build of {deployment_container_tag}")
-            add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
+            logger.log(
+                f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}"
+            )
+            # add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
             logger.log("Tag complete")
+    else:
+        logger.log("Requested app is already deployed, skipping build and image push")

     # 7. update config (if needed)
     if not deployment_record or file_hash(deployment_config_file) != deployment_record.attributes.meta.config:
@@ -191,6 +213,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
 @click.option("--state-file", help="File to store state about previously seen requests.")
 @click.option("--only-update-state", help="Only update the state file, don't process any requests anything.", is_flag=True)
 @click.option("--dns-suffix", help="DNS domain to use eg, laconic.servesthe.world")
+@click.option("--fqdn-policy", help="How to handle requests with an FQDN: prohibit, allow, preexisting", default="prohibit")
 @click.option("--record-namespace-dns", help="eg, crn://laconic/dns")
 @click.option("--record-namespace-deployments", help="eg, crn://laconic/deployments")
 @click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
@@ -201,7 +224,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
 @click.pass_context
 def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,  # noqa: C901
             request_id, discover, state_file, only_update_state,
-            dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run,
+            dns_suffix, fqdn_policy, record_namespace_dns, record_namespace_deployments, dry_run,
             include_tags, exclude_tags, force_rebuild, log_dir):
     if request_id and discover:
         print("Cannot specify both --request-id and --discover", file=sys.stderr)
@@ -220,6 +243,10 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
         print("--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required", file=sys.stderr)
         sys.exit(2)

+    if fqdn_policy not in ["prohibit", "allow", "preexisting"]:
+        print("--fqdn-policy must be one of 'prohibit', 'allow', or 'preexisting'", file=sys.stderr)
+        sys.exit(2)
+
     # Split CSV and clean up values.
     include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
     exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
@@ -247,7 +274,10 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
         requests_by_name = {}
         skipped_by_name = {}
         for r in requests:
-            # TODO: Do this _after_ filtering deployments and cancellations to minimize round trips.
+            if r.id in previous_requests and previous_requests[r.id].get("status", "") != "RETRY":
+                print(f"Skipping request {r.id}, we've already seen it.")
+                continue
+
             app = laconic.get_record(r.attributes.application)
             if not app:
                 print("Skipping request %s, cannot locate app." % r.id)
@@ -334,6 +364,7 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
                     kube_config,
                     image_registry,
                     force_rebuild,
+                    fqdn_policy,
                     logger
                 )
                 status = "DEPLOYED"
@@ -242,6 +242,7 @@ def determine_base_container(clone_dir, app_type="webapp"):
 def build_container_image(app_record, tag, extra_build_args=[], logger=None):
     tmpdir = tempfile.mkdtemp()

+    # TODO: determine if this code could be calling into the Python git library like setup-repositories
     try:
         record_id = app_record["id"]
         ref = app_record.attributes.repository_ref
@@ -249,6 +250,16 @@ def build_container_image(app_record, tag, extra_build_args=[], logger=None):
         clone_dir = os.path.join(tmpdir, record_id)

         logger.log(f"Cloning repository {repo} to {clone_dir} ...")
+        # Set github credentials if present running a command like:
+        # git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
+        github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
+        if github_token:
+            logger.log("Github token detected, setting it in the git environment")
+            git_config_args = [
+                "git", "config", "--global", f"url.https://{github_token}:@github.com/.insteadOf", "https://github.com/"
+            ]
+            result = subprocess.run(git_config_args, stdout=logger.file, stderr=logger.file)
+            result.check_returncode()
         if ref:
             # TODO: Determing branch or hash, and use depth 1 if we can.
             git_env = dict(os.environ.copy())
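The git `url.<base>.insteadOf` setting makes git transparently rewrite any `https://github.com/...` clone URL to include the token, so private app repositories can be cloned without changing the recorded repository URL. A quick way to confirm the rewrite is in place after the deployer has run; this is a verification sketch, with `DEPLOYER_GITHUB_TOKEN` coming from the diff above:

```python
import subprocess

# List any insteadOf URL rewrites currently set in the global git config
result = subprocess.run(
    ["git", "config", "--global", "--get-regexp", r"url\..*\.insteadof"],
    capture_output=True, text=True,
)
print(result.stdout)  # expected: a url.https://<token>:@github.com/.insteadof entry
```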
@ -265,6 +276,7 @@ def build_container_image(app_record, tag, extra_build_args=[], logger=None):
|
|||||||
logger.log(f"git checkout failed. Does ref {ref} exist?")
|
logger.log(f"git checkout failed. Does ref {ref} exist?")
|
||||||
raise e
|
raise e
|
||||||
else:
|
else:
|
||||||
|
# TODO: why is this code different vs the branch above (run vs check_call, and no prompt disable)?
|
||||||
result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=logger.file, stderr=logger.file)
|
result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=logger.file, stderr=logger.file)
|
||||||
result.check_returncode()
|
result.check_returncode()
|
||||||
|
|
||||||
@ -299,11 +311,12 @@ def push_container_image(deployment_dir, logger):
|
|||||||
|
|
||||||
def deploy_to_k8s(deploy_record, deployment_dir, logger):
|
def deploy_to_k8s(deploy_record, deployment_dir, logger):
|
||||||
if not deploy_record:
|
if not deploy_record:
|
||||||
command = "up"
|
command = "start"
|
||||||
else:
|
else:
|
||||||
command = "update"
|
command = "update"
|
||||||
|
|
||||||
logger.log("Deploying to k8s ...")
|
logger.log("Deploying to k8s ...")
|
||||||
|
logger.log(f"Running {command} command on deployment dir: {deployment_dir}")
|
||||||
result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command],
|
result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command],
|
||||||
stdout=logger.file, stderr=logger.file)
|
stdout=logger.file, stderr=logger.file)
|
||||||
result.check_returncode()
|
result.check_returncode()
|
||||||
|
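
The DEPLOYER_GITHUB_TOKEN hunk above relies on git's insteadOf URL rewriting. A minimal standalone sketch of the same mechanism (the token value and repository URL below are hypothetical, not from this change):

    import os
    import subprocess

    token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
    if token:
        # Rewrite any https://github.com/ URL to embed the token, so that
        # subsequent clones of private repos authenticate transparently.
        subprocess.run(
            ["git", "config", "--global",
             f"url.https://{token}:@github.com/.insteadOf", "https://github.com/"],
            check=True)
        # After this, "git clone https://github.com/some-org/some-private-repo"
        # carries the token without the caller changing the URL it passes.

The "up" to "start" rename in deploy_to_k8s matches the deployment subcommand name used by the new external-stack test script later in this change.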
@@ -17,6 +17,7 @@ import click

 from stack_orchestrator.command_types import CommandOptions
 from stack_orchestrator.repos import setup_repositories
+from stack_orchestrator.repos import fetch_stack
 from stack_orchestrator.build import build_containers, fetch_containers
 from stack_orchestrator.build import build_npms
 from stack_orchestrator.build import build_webapp
@@ -50,6 +51,7 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
     ctx.obj = command_options


+cli.add_command(fetch_stack.command, "fetch-stack")
 cli.add_command(setup_repositories.command, "setup-repositories")
 cli.add_command(build_containers.command, "build-containers")
 cli.add_command(fetch_containers.command, "fetch-containers")
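
For readers unfamiliar with Click, the registration pattern above is the standard one: a command object defined in another module is attached to the top-level group under an explicit CLI name. A minimal sketch (the names here are illustrative, not from the codebase):

    import click

    @click.group()
    def cli():
        pass

    @click.command()
    def example():
        '''hypothetical subcommand'''
        click.echo("ran example-command")

    cli.add_command(example, "example-command")

    if __name__ == "__main__":
        cli()  # "python thisfile.py example-command" dispatches to example()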
stack_orchestrator/repos/fetch_stack.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+# Copyright © 2022, 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http:#www.gnu.org/licenses/>.
+
+# env vars:
+# CERC_REPO_BASE_DIR defaults to ~/cerc
+
+
+import click
+import os
+
+from decouple import config
+from git import exc
+
+from stack_orchestrator.opts import opts
+from stack_orchestrator.repos.setup_repositories import process_repo
+from stack_orchestrator.util import error_exit
+
+
+@click.command()
+@click.argument('stack-locator')
+@click.option('--git-ssh', is_flag=True, default=False)
+@click.option('--check-only', is_flag=True, default=False)
+@click.option('--pull', is_flag=True, default=False)
+@click.pass_context
+def command(ctx, stack_locator, git_ssh, check_only, pull):
+    '''optionally resolve then git clone a repository containing one or more stack definitions'''
+    dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+    if not opts.o.quiet:
+        print(f"Dev Root is: {dev_root_path}")
+    try:
+        process_repo(pull, check_only, git_ssh, dev_root_path, None, stack_locator)
+    except exc.GitCommandError as error:
+        error_exit(f"\n******* git command returned error exit status:\n{error}")
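
The only configuration the new command reads is CERC_REPO_BASE_DIR; a minimal sketch of that resolution, assuming python-decouple is installed:

    import os
    from decouple import config

    # The environment variable wins; otherwise clone under ~/cerc
    dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
    print(f"stacks will be cloned under: {dev_root_path}")

The new tests/external-stack/run-test.sh later in this change exercises the command end to end with: laconic-so fetch-stack git.vdb.to/cerc-io/test-external-stack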
@@ -20,13 +20,12 @@ import os
 import sys
 from decouple import config
 import git
+from git.exc import GitCommandError
 from tqdm import tqdm
 import click
 import importlib.resources
-from pathlib import Path
-import yaml
-from stack_orchestrator.constants import stack_file_name
-from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit, warn_exit
+from stack_orchestrator.opts import opts
+from stack_orchestrator.util import get_parsed_stack_config, include_exclude_check, error_exit, warn_exit


 class GitProgress(git.RemoteProgress):
@@ -80,15 +79,19 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
     except TypeError:
         # This means that the current ref is not a branch, so possibly a tag
         # Let's try to get the tag
-        current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
-        # Note that git is assymetric -- the tag you told it to check out may not be the one
-        # you get back here (if there are multiple tags associated with the same commit)
+        try:
+            current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
+            # Note that git is asymmetric -- the tag you told it to check out may not be the one
+            # you get back here (if there are multiple tags associated with the same commit)
+        except GitCommandError:
+            # If there is no matching branch or tag checked out, just use the current SHA
+            current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
     return current_repo_branch_or_tag, is_branch


 # TODO: fix the messy arg list here
-def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
-    if verbose:
+def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
+    if opts.o.verbose:
         print(f"Processing repo: {fully_qualified_repo}")
     repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
     git_ssh_prefix = f"git@{repo_host}:"
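
The new try/except turns ref detection into a three-step fallback: branch name, else exact-match tag, else bare commit SHA. An illustrative standalone sketch using GitPython (the repo path is hypothetical, and the branch probe shown assumes the surrounding code uses active_branch):

    import git
    from git.exc import GitCommandError

    repo = git.Repo("/path/to/some/checkout")
    try:
        ref = repo.active_branch.name                           # on a branch
        is_branch = True
    except TypeError:                                           # detached HEAD
        is_branch = False
        try:
            ref = repo.git.describe("--tags", "--exact-match")  # exactly on a tag
        except GitCommandError:
            ref = repo.commit("HEAD").hexsha                    # plain commit SHA
    print(ref, is_branch)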
@@ -100,8 +103,8 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
     (current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(
         full_filesystem_repo_path
     ) if is_present else (None, None)
-    if not quiet:
-        present_text = f"already exists active {'branch' if is_branch else 'tag'}: {current_repo_branch_or_tag}" if is_present \
+    if not opts.o.quiet:
+        present_text = f"already exists active {'branch' if is_branch else 'ref'}: {current_repo_branch_or_tag}" if is_present \
             else 'Needs to be fetched'
         print(f"Checking: {full_filesystem_repo_path}: {present_text}")
     # Quick check that it's actually a repo
@@ -111,25 +114,25 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
             sys.exit(1)
         else:
             if pull:
-                if verbose:
+                if opts.o.verbose:
                     print(f"Running git pull for {full_filesystem_repo_path}")
                 if not check_only:
                     if is_branch:
                         git_repo = git.Repo(full_filesystem_repo_path)
                         origin = git_repo.remotes.origin
-                        origin.pull(progress=None if quiet else GitProgress())
+                        origin.pull(progress=None if opts.o.quiet else GitProgress())
                     else:
-                        print("skipping pull because this repo checked out a tag")
+                        print("skipping pull because this repo is not on a branch")
                 else:
                     print("(git pull skipped)")
     if not is_present:
         # Clone
-        if verbose:
+        if opts.o.verbose:
             print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
-        if not dry_run:
+        if not opts.o.dry_run:
             git.Repo.clone_from(full_github_repo_path,
                                 full_filesystem_repo_path,
-                                progress=None if quiet else GitProgress())
+                                progress=None if opts.o.quiet else GitProgress())
         else:
             print("(git clone skipped)")
     # Checkout the requested branch, if one was specified
@@ -150,13 +153,13 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
                 current_repo_branch_or_tag and (
                     current_repo_branch_or_tag != branch_to_checkout)
             ):
-                if not quiet:
+                if not opts.o.quiet:
                     print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
                 git_repo = git.Repo(full_filesystem_repo_path)
                 # git checkout works for both branches and tags
                 git_repo.git.checkout(branch_to_checkout)
             else:
-                if verbose:
+                if opts.o.verbose:
                     print(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")


@@ -182,36 +185,18 @@ def parse_branches(branches_string):
 @click.option('--check-only', is_flag=True, default=False)
 @click.option('--pull', is_flag=True, default=False)
 @click.option("--branches", help="override branches for repositories")
-@click.option('--branches-file', help="checkout branches specified in this file")
 @click.pass_context
-def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches_file):
+def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
     '''git clone the set of repositories required to build the complete system from source'''

-    quiet = ctx.obj.quiet
-    verbose = ctx.obj.verbose
-    dry_run = ctx.obj.dry_run
-    stack = ctx.obj.stack
+    quiet = opts.o.quiet
+    verbose = opts.o.verbose
+    stack = opts.o.stack

     branches_array = []

-    # TODO: branches file needs to be re-worked in the context of stacks
-    if branches_file:
-        if branches:
-            print("Error: can't specify both --branches and --branches-file")
-            sys.exit(1)
-        else:
-            if verbose:
-                print(f"loading branches from: {branches_file}")
-            with open(branches_file) as branches_file_open:
-                branches_array = branches_file_open.read().splitlines()
-
-    print(f"branches: {branches}")
     if branches:
-        if branches_file:
-            print("Error: can't specify both --branches and --branches-file")
-            sys.exit(1)
-        else:
-            branches_array = parse_branches(branches)
+        branches_array = parse_branches(branches)

     if branches_array and verbose:
         print(f"Branches are: {branches_array}")
@@ -239,20 +224,10 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches_file):

     repos_in_scope = []
     if stack:
-        if stack_is_external(stack):
-            stack_file_path = Path(stack).joinpath(stack_file_name)
-        else:
-            # In order to be compatible with Python 3.8 we need to use this hack to get the path:
-            # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-            stack_file_path = Path(__file__).absolute().parent.parent.joinpath("data", "stacks", stack, stack_file_name)
-        if not stack_file_path.exists():
-            error_exit(f"stack {stack} does not exist")
-        with stack_file_path:
-            stack_config = yaml.safe_load(open(stack_file_path, "r"))
-            if "repos" not in stack_config or stack_config["repos"] is None:
-                warn_exit(f"stack {stack} does not define any repositories")
-            else:
-                repos_in_scope = stack_config["repos"]
+        stack_config = get_parsed_stack_config(stack)
+        if "repos" not in stack_config or stack_config["repos"] is None:
+            warn_exit(f"stack {stack} does not define any repositories")
+        repos_in_scope = stack_config["repos"]
     else:
         repos_in_scope = all_repos

@@ -271,7 +246,6 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches_file):

     for repo in repos:
         try:
-            process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, repo)
+            process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, repo)
         except git.exc.GitCommandError as error:
-            print(f"\n******* git command returned error exit status:\n{error}")
-            sys.exit(1)
+            error_exit(f"\n******* git command returned error exit status:\n{error}")
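
The trimmed process_repo signature works because global flags now live on a shared options object instead of being threaded through every call. A sketch of that pattern, where any field beyond quiet/verbose/dry_run/stack is an assumption:

    class CommandOptions:
        def __init__(self, quiet=False, verbose=False, dry_run=False, stack=None):
            self.quiet = quiet
            self.verbose = verbose
            self.dry_run = dry_run
            self.stack = stack

    class opts:
        o = None  # populated once by the top-level CLI callback

    # at CLI startup:
    opts.o = CommandOptions(verbose=True)

    # anywhere else, e.g. inside process_repo():
    if opts.o.verbose:
        print("Processing repo ...")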
@@ -20,6 +20,7 @@ import ruamel.yaml
 from pathlib import Path
 from dotenv import dotenv_values
 from typing import Mapping, Set, List
+from stack_orchestrator.constants import stack_file_name, deployment_file_name


 def include_exclude_check(s, include, exclude):
@@ -33,11 +34,14 @@ def include_exclude_check(s, include, exclude):
     return s not in exclude_list


-def get_stack_file_path(stack):
-    # In order to be compatible with Python 3.8 we need to use this hack to get the path:
-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-    stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
-    return stack_file_path
+def get_stack_path(stack):
+    if stack_is_external(stack):
+        stack_path = Path(stack)
+    else:
+        # In order to be compatible with Python 3.8 we need to use this hack to get the path:
+        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+        stack_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack)
+    return stack_path


 def get_dev_root_path(ctx):
@@ -52,21 +56,14 @@ def get_dev_root_path(ctx):

 # Caller can pass either the name of a stack, or a path to a stack file
 def get_parsed_stack_config(stack):
-    stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_file_path(stack)
-    try:
-        with stack_file_path:
-            stack_config = get_yaml().load(open(stack_file_path, "r"))
-            return stack_config
-    except FileNotFoundError as error:
-        # We try here to generate a useful diagnostic error
-        # First check if the stack directory is present
-        stack_directory = stack_file_path.parent
-        if os.path.exists(stack_directory):
-            print(f"Error: stack.yml file is missing from stack: {stack}")
-        else:
-            print(f"Error: stack: {stack} does not exist")
-        print(f"Exiting, error: {error}")
-        sys.exit(1)
+    stack_file_path = get_stack_path(stack).joinpath(stack_file_name)
+    if stack_file_path.exists():
+        return get_yaml().load(open(stack_file_path, "r"))
+    # We try here to generate a useful diagnostic error
+    # First check if the stack directory is present
+    if stack_file_path.parent.exists():
+        error_exit(f"stack.yml file is missing from stack: {stack}")
+    error_exit(f"stack {stack} does not exist")


 def get_pod_list(parsed_stack):
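
With get_stack_path, one function now serves both addressing modes: an external stack is a filesystem path that exists and is returned as-is, anything else is treated as the name of a built-in stack under the packaged data/stacks directory. A usage sketch (the external path below is hypothetical):

    from stack_orchestrator.util import get_stack_path

    # External: the locator is an existing directory, returned unchanged
    get_stack_path("/home/me/cerc/test-external-stack/stack-orchestrator/stacks/test-external-stack")

    # Internal: resolved relative to the installed package
    get_stack_path("fixturenet-laconicd")  # -> <package>/data/stacks/fixturenet-laconicd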
@@ -87,17 +84,45 @@ def get_plugin_code_paths(stack) -> List[Path]:
     result: Set[Path] = set()
     for pod in pods:
         if type(pod) is str:
-            result.add(get_stack_file_path(stack).parent)
+            result.add(get_stack_path(stack))
         else:
             pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
             result.add(Path(os.path.join(pod_root_dir, "stack")))
     return list(result)


-def get_pod_file_path(parsed_stack, pod_name: str):
+# Find a config directory, looking first in any external stack
+# and if not found there, internally
+def resolve_config_dir(stack, config_dir_name: str):
+    if stack_is_external(stack):
+        # First try looking in the external stack for the compose file
+        config_base = Path(stack).parent.parent.joinpath("config")
+        proposed_dir = config_base.joinpath(config_dir_name)
+        if proposed_dir.exists():
+            return proposed_dir
+        # If we don't find it fall through to the internal case
+    config_base = get_internal_config_dir()
+    return config_base.joinpath(config_dir_name)
+
+
+# Find a compose file, looking first in any external stack
+# and if not found there, internally
+def resolve_compose_file(stack, pod_name: str):
+    if stack_is_external(stack):
+        # First try looking in the external stack for the compose file
+        compose_base = Path(stack).parent.parent.joinpath("compose")
+        proposed_file = compose_base.joinpath(f"docker-compose-{pod_name}.yml")
+        if proposed_file.exists():
+            return proposed_file
+        # If we don't find it fall through to the internal case
+    compose_base = get_internal_compose_file_dir()
+    return compose_base.joinpath(f"docker-compose-{pod_name}.yml")
+
+
+def get_pod_file_path(stack, parsed_stack, pod_name: str):
     pods = parsed_stack["pods"]
     if type(pods[0]) is str:
-        result = os.path.join(get_compose_file_dir(), f"docker-compose-{pod_name}.yml")
+        result = resolve_compose_file(stack, pod_name)
     else:
         for pod in pods:
             if pod["name"] == pod_name:
@@ -131,7 +156,7 @@ def pod_has_scripts(parsed_stack, pod_name: str):
     return result


-def get_compose_file_dir():
+def get_internal_compose_file_dir():
     # TODO: refactor to use common code with deploy command
     # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
     data_dir = Path(__file__).absolute().parent.joinpath("data")
@@ -139,7 +164,7 @@ def get_compose_file_dir():
     return source_compose_dir


-def get_config_file_dir():
+def get_internal_config_dir():
     # TODO: refactor to use common code with deploy command
     data_dir = Path(__file__).absolute().parent.joinpath("data")
     source_config_dir = data_dir.joinpath("config")
@@ -171,6 +196,10 @@ def stack_is_external(stack: str):
     return Path(stack).exists() if stack is not None else False


+def stack_is_in_deployment(stack: Path):
+    return stack.joinpath(deployment_file_name).exists()
+
+
 def get_yaml():
     # See: https://stackoverflow.com/a/45701840/1701505
     yaml = ruamel.yaml.YAML()
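
The two resolvers give external stacks first claim on compose and config files, falling back to the files shipped inside the package. A usage sketch for the compose case, assuming the hypothetical external layout used above (a stacks/<name> directory sitting beside a compose/ directory):

    from stack_orchestrator.util import resolve_compose_file

    stack = "/home/me/cerc/test-external-stack/stack-orchestrator/stacks/test-external-stack"
    # Looks for .../stack-orchestrator/compose/docker-compose-test.yml first;
    # if absent, falls back to the packaged data/compose/docker-compose-test.yml
    print(resolve_compose_file(stack, "test"))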
@@ -14,7 +14,7 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.

 import click
-import importlib.resources
+from importlib import resources, metadata


 @click.command()
@@ -24,8 +24,11 @@ def command(ctx):

     # See: https://stackoverflow.com/a/20885799/1701505
     from stack_orchestrator import data
-    with importlib.resources.open_text(data, "build_tag.txt") as version_file:
-        # TODO: code better version that skips comment lines
-        version_string = version_file.read().splitlines()[1]
+    if resources.is_resource(data, "build_tag.txt"):
+        with resources.open_text(data, "build_tag.txt") as version_file:
+            # TODO: code better version that skips comment lines
+            version_string = version_file.read().splitlines()[1]
+    else:
+        version_string = metadata.version("laconic-stack-orchestrator") + "-unknown"

-    print(f"Version: {version_string}")
+    print(version_string)
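
Dropping the "Version: " prefix makes the output directly consumable by scripts; the new external-stack test below captures it verbatim. The same capture from Python, as a sketch (assuming laconic-so is on PATH):

    import subprocess

    # Capture the bare version string printed by the reworked command
    version = subprocess.run(["laconic-so", "version"],
                             capture_output=True, text=True).stdout.strip()
    print(f"Version reported is: {version}")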
tests/external-stack/run-test.sh (new executable file, 185 lines)
@@ -0,0 +1,185 @@
+#!/usr/bin/env bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+    set -x
+fi
+# Dump environment variables for debugging
+echo "Environment variables:"
+env
+
+if [ "$1" == "from-path" ]; then
+    TEST_TARGET_SO="laconic-so"
+else
+    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+fi
+
+delete_cluster_exit () {
+    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+    exit 1
+}
+
+# Test basic stack-orchestrator deploy
+echo "Running stack-orchestrator external stack deploy test"
+# Set a non-default repo dir
+export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
+echo "Testing this package: $TEST_TARGET_SO"
+echo "Test version command"
+reported_version_string=$( $TEST_TARGET_SO version )
+echo "Version reported is: ${reported_version_string}"
+echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+rm -rf $CERC_REPO_BASE_DIR
+mkdir -p $CERC_REPO_BASE_DIR
+# Clone the external test stack
+$TEST_TARGET_SO fetch-stack git.vdb.to/cerc-io/test-external-stack
+stack_name="$CERC_REPO_BASE_DIR/test-external-stack/stack-orchestrator/stacks/test-external-stack"
+TEST_TARGET_SO_STACK="$TEST_TARGET_SO --stack ${stack_name}"
+# Test bringing the test container up and down
+# with and without volume removal
+$TEST_TARGET_SO_STACK setup-repositories
+$TEST_TARGET_SO_STACK build-containers
+# Test deploy command execution
+$TEST_TARGET_SO_STACK deploy setup $CERC_REPO_BASE_DIR
+# Check that we now have the expected output directory
+container_output_dir=$CERC_REPO_BASE_DIR/container-output-dir
+if [ ! -d "$container_output_dir" ]; then
+    echo "deploy setup test: output directory not present"
+    echo "deploy setup test: FAILED"
+    exit 1
+fi
+if [ ! -f "$container_output_dir/output-file" ]; then
+    echo "deploy setup test: output file not present"
+    echo "deploy setup test: FAILED"
+    exit 1
+fi
+output_file_content=$(<$container_output_dir/output-file)
+if [ ! "$output_file_content" == "output-data" ]; then
+    echo "deploy setup test: output file contents not correct"
+    echo "deploy setup test: FAILED"
+    exit 1
+fi
+# Check that we now have the expected output file
+$TEST_TARGET_SO_STACK deploy up
+# Test deploy port command
+deploy_port_output=$( $TEST_TARGET_SO_STACK deploy port test 80 )
+if [[ "$deploy_port_output" =~ ^0.0.0.0:[1-9][0-9]* ]]; then
+    echo "Deploy port test: passed"
+else
+    echo "Deploy port test: FAILED"
+    exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down
+# The next time we bring the container up the volume will be old (from the previous run above)
+$TEST_TARGET_SO_STACK deploy up
+log_output_1=$( $TEST_TARGET_SO_STACK deploy logs )
+if [[ "$log_output_1" == *"filesystem is old"* ]]; then
+    echo "Retain volumes test: passed"
+else
+    echo "Retain volumes test: FAILED"
+    exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down --delete-volumes
+# Now when we bring the container up the volume will be new again
+$TEST_TARGET_SO_STACK deploy up
+log_output_2=$( $TEST_TARGET_SO_STACK deploy logs )
+if [[ "$log_output_2" == *"filesystem is fresh"* ]]; then
+    echo "Delete volumes test: passed"
+else
+    echo "Delete volumes test: FAILED"
+    exit 1
+fi
+$TEST_TARGET_SO_STACK deploy down --delete-volumes
+# Basic test of creating a deployment
+test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
+test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
+$TEST_TARGET_SO_STACK deploy init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED,CERC_TEST_PARAM_3=FAST
+# Check the file now exists
+if [ ! -f "$test_deployment_spec" ]; then
+    echo "deploy init test: spec file not present"
+    echo "deploy init test: FAILED"
+    exit 1
+fi
+echo "deploy init test: passed"
+$TEST_TARGET_SO_STACK deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
+# Check the deployment dir exists
+if [ ! -d "$test_deployment_dir" ]; then
+    echo "deploy create test: deployment directory not present"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+echo "deploy create test: passed"
+# Check the file writted by the create command in the stack now exists
+if [ ! -f "$test_deployment_dir/create-file" ]; then
+    echo "deploy create test: create output file not present"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+# And has the right content
+create_file_content=$(<$test_deployment_dir/create-file)
+if [ ! "$create_file_content" == "create-command-output-data" ]; then
+    echo "deploy create test: create output file contents not correct"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+
+# Add a config file to be picked up by the ConfigMap before starting.
+echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
+
+echo "deploy create output file test: passed"
+# Try to start the deployment
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+# Check logs command works
+log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
+    echo "deployment logs test: passed"
+else
+    echo "deployment logs test: FAILED"
+    exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_1 was passed correctly
+if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
+    echo "deployment config test: passed"
+else
+    echo "deployment config test: FAILED"
+    exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
+if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then
+    echo "deployment compose config test: passed"
+else
+    echo "deployment compose config test: FAILED"
+    exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_3 was passed correctly
+if [[ "$log_output_3" == *"Test-param-3: FAST"* ]]; then
+    echo "deployment config test: passed"
+else
+    echo "deployment config test: FAILED"
+    exit 1
+fi
+
+# Check that the ConfigMap is mounted and contains the expected content.
+log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
+    echo "deployment ConfigMap test: passed"
+else
+    echo "deployment ConfigMap test: FAILED"
+    delete_cluster_exit
+fi
+
+# Stop then start again and check the volume was preserved
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
+# Sleep a bit just in case
+# sleep for longer to check if that's why the subsequent create cluster fails
+sleep 20
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_5" == *"filesystem is old"* ]]; then
+    echo "Retain volumes test: passed"
+else
+    echo "Retain volumes test: FAILED"
+    delete_cluster_exit
+fi
+
+# Stop and clean up
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+echo "Test passed"
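
The lifecycle the script drives can be summarized as init, create, start, logs, stop. The same sequence as a compact Python sketch (the stack locator is abbreviated to a placeholder here; the real test passes the full external stack path):

    import subprocess

    def so(*args):
        # Thin wrapper around the laconic-so CLI, failing fast on errors
        subprocess.run(["laconic-so", *args], check=True)

    so("--stack", "<stack-path>", "deploy", "init", "--output", "spec.yml",
       "--config", "CERC_TEST_PARAM_1=PASSED,CERC_TEST_PARAM_3=FAST")
    so("--stack", "<stack-path>", "deploy", "create",
       "--spec-file", "spec.yml", "--deployment-dir", "deployment-dir")
    so("deployment", "--dir", "deployment-dir", "start")
    so("deployment", "--dir", "deployment-dir", "logs")
    so("deployment", "--dir", "deployment-dir", "stop", "--delete-volumes")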
@@ -22,16 +22,16 @@ echo "$(date +"%Y-%m-%d %T"): Stack started"
 # Verify that the fixturenet is up and running
 $TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd ps

+# Wait for the laconid endpoint to come up
+echo "Waiting for the RPC endpoint to come up"
+docker exec laconicd-laconicd-1 sh -c "curl --retry 20 --retry-delay 3 --retry-connrefused http://127.0.0.1:9473/api"
+
 # Get the fixturenet account address
 laconicd_account_address=$(docker exec laconicd-laconicd-1 laconicd keys list | awk '/- address:/ {print $3}')

 # Copy over config
 docker exec laconicd-cli-1 cp config.yml laconic-registry-cli/

-# Wait for the laconid endpoint to come up
-echo "Waiting for the RPC endpoint to come up"
-docker exec laconicd-laconicd-1 sh -c "curl --retry 20 --retry-delay 3 --retry-connrefused http://127.0.0.1:9473/api"
-
 # Run the tests
 echo "Running the tests"
 docker exec -e TEST_ACCOUNT=$laconicd_account_address laconicd-cli-1 sh -c 'cd laconic-registry-cli && yarn && yarn test'
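
Moving the curl wait ahead of the other steps ensures the laconicd RPC endpoint is live before the account address is queried and the config copied. The same wait expressed as a standalone Python sketch, for illustration only (the script itself uses curl's built-in retry flags inside the container):

    import time
    import urllib.request
    from urllib.error import URLError

    def wait_for_endpoint(url, attempts=20, delay=3):
        # Poll until the endpoint accepts connections, mirroring
        # curl --retry 20 --retry-delay 3 --retry-connrefused
        for _ in range(attempts):
            try:
                urllib.request.urlopen(url, timeout=5)
                return True
            except URLError:
                time.sleep(delay)
        return False

    wait_for_endpoint("http://127.0.0.1:9473/api")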