forked from cerc-io/stack-orchestrator
Merge branch 'main' into blast-stack
commit 108a5a3440
@@ -43,3 +43,19 @@ jobs:
         run: ./scripts/build_shiv_package.sh
       - name: "Run fixturenet-eth tests"
         run: ./tests/fixturenet-eth-plugeth/run-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -47,3 +47,19 @@ jobs:
           sleep 5
       - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth-plugeth/run-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -45,4 +45,19 @@ jobs:
           sleep 5
       - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth/run-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -11,7 +11,7 @@ on:

 jobs:
   test:
-    name: "Run an Laconicd fixturenet test"
+    name: "Run Laconicd fixturenet and Laconic CLI tests"
     runs-on: ubuntu-latest
     steps:
       - name: 'Update'
@@ -46,3 +46,21 @@ jobs:
         run: ./scripts/build_shiv_package.sh
       - name: "Run fixturenet-laconicd tests"
        run: ./tests/fixturenet-laconicd/run-test.sh
+      - name: "Run laconic CLI tests"
+        run: ./tests/fixturenet-laconicd/run-cli-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -19,3 +19,19 @@ jobs:
           python-version: '3.8'
       - name : "Run flake8"
        uses: py-actions/flake8@v2
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -54,3 +54,19 @@ jobs:
           # Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
           draft: ${{ endsWith('publish-test', github.ref ) }}
           files: ./laconic-so
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -51,4 +51,19 @@ jobs:
           source /opt/bash-utils/cgroup-helper.sh
           join_cgroup
           ./tests/container-registry/run-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -49,4 +49,19 @@ jobs:
           source /opt/bash-utils/cgroup-helper.sh
           join_cgroup
           ./tests/database/run-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -47,3 +47,19 @@ jobs:
           sleep 5
       - name: "Run deploy tests"
        run: ./tests/deploy/run-deploy-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -51,4 +51,19 @@ jobs:
           source /opt/bash-utils/cgroup-helper.sh
           join_cgroup
           ./tests/k8s-deploy/run-deploy-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -49,3 +49,19 @@ jobs:
           sleep 5
       - name: "Run webapp tests"
        run: ./tests/webapp-test/run-webapp-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -47,5 +47,19 @@ jobs:
           sleep 5
       - name: "Run smoke tests"
        run: ./tests/smoke-test/run-smoke-test.sh
+      - name: Notify Vulcanize Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
+      - name: Notify DeepStack Slack on CI failure
+        if: ${{ always() && github.ref_name == 'main' }}
+        uses: ravsamhq/notify-slack-action@v2
+        with:
+          status: ${{ job.status }}
+          notify_when: 'failure'
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -1,3 +1,6 @@
 Change this file to trigger running the fixturenet-laconicd-test CI job
 Trigger
 Trigger
+Trigger
+Trigger
+Trigger
@@ -3,6 +3,9 @@ services:
     restart: unless-stopped
     image: cerc/laconicd:local
     command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
+    environment:
+      TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED}
+      TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY}
     volumes:
       # The cosmos-sdk node's database directory:
       - laconicd-data:/root/.laconicd
@@ -25,6 +28,7 @@ services:
     image: cerc/laconic-registry-cli:local
     volumes:
       - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
+      - ${BASE_DIR:-~/cerc}/laconic-registry-cli:/laconic-registry-cli

 volumes:
   laconicd-data:
@@ -0,0 +1,76 @@
+version: '3.2'
+
+services:
+  ajna-watcher-db:
+    restart: unless-stopped
+    image: postgres:14-alpine
+    environment:
+      - POSTGRES_USER=vdbm
+      - POSTGRES_MULTIPLE_DATABASES=ajna-watcher,ajna-watcher-job-queue
+      - POSTGRES_EXTENSION=ajna-watcher-job-queue:pgcrypto
+      - POSTGRES_PASSWORD=password
+    volumes:
+      - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
+      - ajna_watcher_db_data:/var/lib/postgresql/data
+    ports:
+      - "5432"
+    healthcheck:
+      test: ["CMD", "nc", "-v", "localhost", "5432"]
+      interval: 20s
+      timeout: 5s
+      retries: 15
+      start_period: 10s
+
+  ajna-watcher-job-runner:
+    restart: unless-stopped
+    depends_on:
+      ajna-watcher-db:
+        condition: service_healthy
+    image: cerc/watcher-ajna:local
+    environment:
+      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
+      CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+    command: ["bash", "./start-job-runner.sh"]
+    volumes:
+      - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
+      - ../config/watcher-ajna/start-job-runner.sh:/app/start-job-runner.sh
+    ports:
+      - "9000"
+    healthcheck:
+      test: ["CMD", "nc", "-v", "localhost", "9000"]
+      interval: 20s
+      timeout: 5s
+      retries: 15
+      start_period: 5s
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+
+  ajna-watcher-server:
+    restart: unless-stopped
+    depends_on:
+      ajna-watcher-db:
+        condition: service_healthy
+      ajna-watcher-job-runner:
+        condition: service_healthy
+    image: cerc/watcher-ajna:local
+    environment:
+      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
+      CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+    command: ["bash", "./start-server.sh"]
+    volumes:
+      - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
+      - ../config/watcher-ajna/start-server.sh:/app/start-server.sh
+    ports:
+      - "3008"
+      - "9001"
+    healthcheck:
+      test: ["CMD", "nc", "-v", "localhost", "3008"]
+      interval: 20s
+      timeout: 5s
+      retries: 15
+      start_period: 5s
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+
+volumes:
+  ajna_watcher_db_data:
@@ -50,22 +50,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -142,22 +126,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -234,22 +202,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -326,22 +278,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -418,22 +354,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -510,22 +430,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -602,22 +506,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -694,22 +582,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -788,22 +660,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600
@@ -880,22 +736,6 @@ groups:
               legendFormat: __auto
               range: false
               refId: latest_external
-          - refId: latest_indexed
-            relativeTimeRange:
-              from: 600
-              to: 0
-            datasourceUid: PBFA97CFB590B2093
-            model:
-              datasource:
-                type: prometheus
-                uid: PBFA97CFB590B2093
-              editorMode: code
-              expr: sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
-              hide: false
-              instant: true
-              legendFormat: __auto
-              range: false
-              refId: latest_indexed
           - refId: condition
             relativeTimeRange:
               from: 600

stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+set -u
+
+echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+
+# Read in the config template TOML file and modify it
+WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+
+# Write the modified content to a new file
+echo "$WATCHER_CONFIG" > environments/local.toml
+
+echo "Running job-runner..."
+DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js

stack_orchestrator/data/config/watcher-ajna/start-server.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+set -u
+
+echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
+
+# Read in the config template TOML file and modify it
+WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
+WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
+  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
+
+# Write the modified content to a new file
+echo "$WATCHER_CONFIG" > environments/local.toml
+
+echo "Running server..."
+DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js
@@ -0,0 +1,98 @@
+[server]
+  host = "0.0.0.0"
+  port = 3008
+  kind = "active"
+  gqlPath = "/"
+
+  # Checkpointing state.
+  checkpointing = true
+
+  # Checkpoint interval in number of blocks.
+  checkpointInterval = 2000
+
+  # Enable state creation
+  # CAUTION: Disable only if state creation is not desired or can be filled subsequently
+  enableState = false
+
+  subgraphPath = "./subgraph-build"
+
+  # Interval to restart wasm instance periodically
+  wasmRestartBlocksInterval = 20
+
+  # Interval in number of blocks at which to clear entities cache.
+  clearEntitiesCacheInterval = 1000
+
+  # Max block range for which to return events in eventsInRange GQL query.
+  # Use -1 for skipping check on block range.
+  maxEventsBlockRange = 1000
+
+  # Flag to specify whether RPC endpoint supports block hash as block tag parameter
+  rpcSupportsBlockHashParam = false
+
+  # GQL cache settings
+  [server.gqlCache]
+    enabled = true
+
+    # Max in-memory cache size (in bytes) (default 8 MB)
+    # maxCacheSize
+
+    # GQL cache-control max-age settings (in seconds)
+    maxAge = 15
+    timeTravelMaxAge = 86400 # 1 day
+
+[metrics]
+  host = "0.0.0.0"
+  port = 9000
+  [metrics.gql]
+    port = 9001
+
+[database]
+  type = "postgres"
+  host = "ajna-watcher-db"
+  port = 5432
+  database = "ajna-watcher"
+  username = "vdbm"
+  password = "password"
+  synchronize = true
+  logging = false
+
+[upstream]
+  [upstream.ethServer]
+    rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
+
+    # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
+    rpcClient = true
+
+    # Boolean flag to specify if rpcProviderEndpoint is an FEVM RPC endpoint
+    isFEVM = true
+
+    # Boolean flag to filter event logs by contracts
+    filterLogsByAddresses = true
+    # Boolean flag to filter event logs by topics
+    filterLogsByTopics = true
+
+  [upstream.cache]
+    name = "requests"
+    enabled = false
+    deleteOnStart = false
+
+[jobQueue]
+  dbConnectionString = "postgres://vdbm:password@ajna-watcher-db/ajna-watcher-job-queue"
+  maxCompletionLagInSecs = 300
+  jobDelayInMilliSecs = 100
+  eventsInBatch = 50
+  subgraphEventsOrder = true
+  # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
+  blockDelayInMilliSecs = 30000
+
+  # Boolean to switch between modes of processing events when starting the server.
+  # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
+  # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
+  useBlockRanges = true
+
+  # Block range in which logs are fetched during historical blocks processing
+  historicalLogsBlockRange = 2000
+
+  # Max block range of historical processing after which it waits for completion of events processing
+  # If set to -1 historical processing does not wait for events processing and completes till latest canonical block
+  historicalMaxFetchAhead = 10000
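Note: the `REPLACE_WITH_CERC_ETH_RPC_ENDPOINT` placeholder in this template is substituted with `sed` by the start scripts above. A minimal sketch of a sanity check on the rendered `environments/local.toml` (a hypothetical helper, not part of the stack; requires Python 3.11+ for `tomllib`):

```python
import tomllib  # stdlib TOML parser, Python 3.11+

def check_rendered_config(path="environments/local.toml"):
    """Fail fast if the sed substitution left the placeholder behind."""
    with open(path, "rb") as f:
        config = tomllib.load(f)
    endpoint = config["upstream"]["ethServer"]["rpcProviderEndpoint"]
    assert "REPLACE_WITH_" not in endpoint, "placeholder was not substituted"
    return endpoint
```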
@@ -11,8 +11,14 @@ CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/D
 CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/nextjs-base:local}

 docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR
+rc=$?

-if [ $? -eq 0 ] && [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/nextjs-base:local" ]; then
+if [ $rc -ne 0 ]; then
+  echo "BUILD FAILED" 1>&2
+  exit $rc
+fi
+
+if [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/nextjs-base:local" ]; then
   cat <<EOF

 #################################################################
@@ -0,0 +1,10 @@
+FROM node:18.17.1-alpine3.18
+
+RUN apk --update --no-cache add git python3 alpine-sdk bash curl jq
+
+WORKDIR /app
+
+COPY . .
+
+RUN echo "Installing dependencies and building ajna-watcher-ts" && \
+  yarn && yarn build

stack_orchestrator/data/container-build/cerc-watcher-ajna/build.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# Build cerc/watcher-ajna
+
+source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
+
+# See: https://stackoverflow.com/a/246128/1701505
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+docker build -t cerc/watcher-ajna:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/ajna-watcher-ts
@@ -11,8 +11,14 @@ CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/D
 CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/webapp-base:local}

 docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR
+rc=$?

-if [ $? -eq 0 ] && [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/webapp-base:local" ]; then
+if [ $rc -ne 0 ]; then
+  echo "BUILD FAILED" 1>&2
+  exit $rc
+fi
+
+if [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/webapp-base:local" ]; then
   cat <<EOF

 #################################################################

stack_orchestrator/data/stacks/ajna/README.md (new file, 118 lines)
@@ -0,0 +1,118 @@
+# Ajna Watcher
+
+## Setup
+
+Clone required repositories:
+
+```bash
+laconic-so --stack ajna setup-repositories --git-ssh --pull
+```
+
+Build the container images:
+
+```bash
+laconic-so --stack ajna build-containers
+```
+
+## Deploy
+
+Create a spec file for the deployment:
+
+```bash
+laconic-so --stack ajna deploy init --output ajna-spec.yml
+```
+
+### Ports
+
+Edit `network` in the spec file to map container ports to host ports as required:
+
+```yml
+...
+network:
+  ports:
+    ajna-watcher-db:
+      - 15432:5432
+    ajna-watcher-job-runner:
+      - 9000:9000
+    ajna-watcher-server:
+      - 3008:3008
+      - 9001:9001
+```
+
+### Create a deployment
+
+Create a deployment from the spec file:
+
+```bash
+laconic-so --stack ajna deploy create --spec-file ajna-spec.yml --deployment-dir ajna-deployment
+```
+
+### Configuration
+
+Inside deployment directory, open the `config.env` file and set following env variables:
+
+```bash
+# External Filecoin (ETH RPC) endpoint to point the watcher to
+CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+```
+
+### Start the deployment
+
+```bash
+laconic-so deployment --dir ajna-deployment start
+```
+
+* To list down and monitor the running containers:
+
+  ```bash
+  # With status
+  docker ps -a
+
+  # Check logs for a container
+  docker logs -f <CONTAINER_ID>
+  ```
+
+* Open the GQL playground at <http://localhost:3008/graphql>
+
+  ```graphql
+  # Example query
+  query {
+    _meta {
+      block {
+        hash
+        number
+        timestamp
+      }
+      deployment
+      hasIndexingErrors
+    }
+
+    accounts {
+      id
+      txCount
+      tokensDelegated
+      rewardsClaimed
+    }
+  }
+  ```
+
+## Clean up
+
+Stop all the ajna services running in background:
+
+```bash
+# Only stop the docker containers
+laconic-so deployment --dir ajna-deployment stop
+
+# Run 'start' to restart the deployment
+```
+
+To stop all the ajna services and also delete data:
+
+```bash
+# Stop the docker containers
+laconic-so deployment --dir ajna-deployment stop --delete-volumes
+
+# Remove deployment directory (deployment will have to be recreated for a re-run)
+rm -r ajna-deployment
+```

stack_orchestrator/data/stacks/ajna/stack.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
+version: "1.0"
+name: ajna
+description: "Ajna watcher stack"
+repos:
+  - git.vdb.to/cerc-io/ajna-watcher-ts@v0.1.1
+containers:
+  - cerc/watcher-ajna
+pods:
+  - watcher-ajna
@@ -16,26 +16,55 @@ laconic-so --stack merkl-sushiswap-v3 build-containers

 ## Deploy

-### Configuration
+Create a spec file for the deployment:

-Create and update an env file to be used in the next step:
-
 ```bash
-# External Filecoin (ETH RPC) endpoint to point the watcher
-CERC_ETH_RPC_ENDPOINT=
+laconic-so --stack merkl-sushiswap-v3 deploy init --output merkl-sushiswap-v3-spec.yml
 ```

-### Deploy the stack
+### Ports
+
+Edit `network` in the spec file to map container ports to host ports as required:
+
+```
+...
+network:
+  ports:
+    merkl-sushiswap-v3-watcher-db:
+      - '5432'
+    merkl-sushiswap-v3-watcher-job-runner:
+      - 9002:9000
+    merkl-sushiswap-v3-watcher-server:
+      - 127.0.0.1:3007:3008
+      - 9003:9001
+```
+
+### Create a deployment
+
+Create a deployment from the spec file:
+
 ```bash
-laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env-file <PATH_TO_ENV_FILE> up
+laconic-so --stack merkl-sushiswap-v3 deploy create --spec-file merkl-sushiswap-v3-spec.yml --deployment-dir merkl-sushiswap-v3-deployment
+```
+
+### Configuration
+
+Inside deployment directory, open the `config.env` file and set following env variables:
+
+```bash
+# External Filecoin (ETH RPC) endpoint to point the watcher to
+CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+```
+
+### Start the deployment
+
+```bash
+laconic-so deployment --dir merkl-sushiswap-v3-deployment start
 ```

 * To list down and monitor the running containers:

 ```bash
-laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 ps

 # With status
 docker ps -a

@@ -46,6 +75,7 @@ laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env-
 * Open the GQL playground at http://localhost:3007/graphql

 ```graphql
+# Example query
 {
   _meta {
     block {
@@ -64,18 +94,21 @@ laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env-

 ## Clean up

-Stop all the services running in background:
+Stop all the merkl-sushiswap-v3 services running in background:

 ```bash
-laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 down
+# Only stop the docker containers
+laconic-so deployment --dir merkl-sushiswap-v3-deployment stop
+
+# Run 'start' to restart the deployment
 ```

-Clear volumes created by this stack:
+To stop all the merkl-sushiswap-v3 services and also delete data:

 ```bash
-# List all relevant volumes
-docker volume ls -q --filter "name=merkl_sushiswap_v3"
+# Stop the docker containers
+laconic-so deployment --dir merkl-sushiswap-v3-deployment stop --delete-volumes

-# Remove all the listed volumes
-docker volume rm $(docker volume ls -q --filter "name=merkl_sushiswap_v3")
+# Remove deployment directory (deployment will have to be recreated for a re-run)
+rm -r merkl-sushiswap-v3-deployment
 ```
@@ -2,7 +2,7 @@ version: "1.0"
 name: merkl-sushiswap-v3
 description: "SushiSwap v3 watcher stack"
 repos:
-  - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.6
+  - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.7
 containers:
   - cerc/watcher-merkl-sushiswap-v3
 pods:
@@ -1,7 +1,7 @@
 version: "0.1"
 name: monitoring
 repos:
-  - github.com/cerc-io/watcher-ts@v0.2.79
+  - github.com/cerc-io/watcher-ts@v0.2.81
 containers:
   - cerc/watcher-ts
 pods:
@@ -55,7 +55,7 @@ ports:
 Create deployment:

 ```bash
-laconic-so deploy create --spec-file sushiswap-subgraph-spec.yml --deployment-dir sushiswap-subgraph-deployment
+laconic-so --stack sushiswap-subgraph deploy create --spec-file sushiswap-subgraph-spec.yml --deployment-dir sushiswap-subgraph-deployment
 ```

 ## Start the stack
@@ -16,26 +16,55 @@ laconic-so --stack sushiswap-v3 build-containers

 ## Deploy

-### Configuration
+Create a spec file for the deployment:

-Create and update an env file to be used in the next step:
-
 ```bash
-# External Filecoin (ETH RPC) endpoint to point the watcher
-CERC_ETH_RPC_ENDPOINT=
+laconic-so --stack sushiswap-v3 deploy init --output sushiswap-v3-spec.yml
 ```

-### Deploy the stack
+### Ports
+
+Edit `network` in the spec file to map container ports to host ports as required:
+
+```
+...
+network:
+  ports:
+    sushiswap-v3-watcher-db:
+      - '5432'
+    sushiswap-v3-watcher-job-runner:
+      - 9000:9000
+    sushiswap-v3-watcher-server:
+      - 127.0.0.1:3008:3008
+      - 9001:9001
+```
+
+### Create a deployment
+
+Create a deployment from the spec file:
+
 ```bash
-laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 --env-file <PATH_TO_ENV_FILE> up
+laconic-so --stack sushiswap-v3 deploy create --spec-file sushiswap-v3-spec.yml --deployment-dir sushiswap-v3-deployment
+```
+
+### Configuration
+
+Inside deployment directory, open the `config.env` file and set following env variables:
+
+```bash
+# External Filecoin (ETH RPC) endpoint to point the watcher to
+CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
+```
+
+### Start the deployment
+
+```bash
+laconic-so deployment --dir sushiswap-v3-deployment start
 ```

 * To list down and monitor the running containers:

 ```bash
-laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 ps

 # With status
 docker ps -a

@@ -43,20 +72,43 @@ laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 --env-file <PATH_T
 docker logs -f <CONTAINER_ID>
 ```

+* Open the GQL playground at http://localhost:3008/graphql
+
+  ```graphql
+  # Example query
+  {
+    _meta {
+      block {
+        number
+        timestamp
+      }
+      hasIndexingErrors
+    }
+
+    factories {
+      id
+      poolCount
+    }
+  }
+  ```
+
 ## Clean up

-Stop all the services running in background:
+Stop all the sushiswap-v3 services running in background:

 ```bash
-laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 down
+# Only stop the docker containers
+laconic-so deployment --dir sushiswap-v3-deployment stop
+
+# Run 'start' to restart the deployment
 ```

-Clear volumes created by this stack:
+To stop all the sushiswap-v3 services and also delete data:

 ```bash
-# List all relevant volumes
-docker volume ls -q --filter "name=sushiswap_v3"
+# Stop the docker containers
+laconic-so deployment --dir sushiswap-v3-deployment stop --delete-volumes

-# Remove all the listed volumes
-docker volume rm $(docker volume ls -q --filter "name=sushiswap_v3")
+# Remove deployment directory (deployment will have to be recreated for a re-run)
+rm -r sushiswap-v3-deployment
 ```
@@ -2,7 +2,7 @@ version: "1.0"
 name: sushiswap-v3
 description: "SushiSwap v3 watcher stack"
 repos:
-  - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.6
+  - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.7
 containers:
   - cerc/watcher-sushiswap-v3
 pods:
@@ -101,7 +101,7 @@ class ClusterInfo:
         )
         return service

-    def get_ingress(self, use_tls=False):
+    def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"):
         # No ingress for a deployment that has no http-proxy defined, for now
         http_proxy_info_list = self.spec.get_http_proxy()
         ingress = None
@@ -114,8 +114,8 @@ class ClusterInfo:
             host_name = http_proxy_info["host-name"]
             rules = []
             tls = [client.V1IngressTLS(
-                hosts=[host_name],
-                secret_name=f"{self.app_name}-tls"
+                hosts=certificate["spec"]["dnsNames"] if certificate else [host_name],
+                secret_name=certificate["spec"]["secretName"] if certificate else f"{self.app_name}-tls"
             )] if use_tls else None
             paths = []
             for route in http_proxy_info["routes"]:
@@ -147,13 +147,17 @@ class ClusterInfo:
             tls=tls,
             rules=rules
         )

+        ingress_annotations = {
+            "kubernetes.io/ingress.class": "nginx",
+        }
+        if not certificate:
+            ingress_annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
+
         ingress = client.V1Ingress(
             metadata=client.V1ObjectMeta(
                 name=f"{self.app_name}-ingress",
-                annotations={
-                    "kubernetes.io/ingress.class": "nginx",
-                    "cert-manager.io/cluster-issuer": "letsencrypt-prod"
-                }
+                annotations=ingress_annotations
             ),
             spec=spec
         )
@@ -169,6 +169,39 @@ class K8sDeployer(Deployer):
             print("Service created:")
             print(f"{service_resp}")

+    def _find_certificate_for_host_name(self, host_name):
+        all_certificates = self.custom_obj_api.list_namespaced_custom_object(
+            group="cert-manager.io",
+            version="v1",
+            namespace=self.k8s_namespace,
+            plural="certificates"
+        )
+
+        host_parts = host_name.split(".", 1)
+        host_as_wild = None
+        if len(host_parts) == 2:
+            host_as_wild = f"*.{host_parts[1]}"
+
+        now = datetime.utcnow().replace(tzinfo=timezone.utc)
+        fmt = "%Y-%m-%dT%H:%M:%S%z"
+
+        # Walk over all the configured certificates.
+        for cert in all_certificates["items"]:
+            dns = cert["spec"]["dnsNames"]
+            # Check for an exact hostname match or a wildcard match.
+            if host_name in dns or host_as_wild in dns:
+                status = cert.get("status", {})
+                # Check the certificate date.
+                if "notAfter" in status and "notBefore" in status:
+                    before = datetime.strptime(status["notBefore"], fmt)
+                    after = datetime.strptime(status["notAfter"], fmt)
+                    if before < now < after:
+                        # Check the status is Ready
+                        for condition in status.get("conditions", []):
+                            if "True" == condition.get("status") and "Ready" == condition.get("type"):
+                                return cert
+        return None
+
     def up(self, detach, services):
         if not opts.o.dry_run:
             if self.is_kind():
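Note: the `_find_certificate_for_host_name` helper above accepts a host either when a certificate lists it verbatim or when a wildcard certificate covers its parent domain, and only within the certificate's validity window. A minimal standalone sketch of those two checks (illustrative only; the host names and timestamps below are hypothetical, not from this commit):

```python
from datetime import datetime, timezone

def matches(host_name: str, dns_names: list) -> bool:
    # "app.example.com" also matches a cert issued for "*.example.com"
    parts = host_name.split(".", 1)
    host_as_wild = f"*.{parts[1]}" if len(parts) == 2 else None
    return host_name in dns_names or host_as_wild in dns_names

def in_validity_window(not_before: str, not_after: str) -> bool:
    # cert-manager status timestamps look like "2024-01-01T00:00:00Z";
    # "%z" parses the trailing "Z" on Python 3.7+
    fmt = "%Y-%m-%dT%H:%M:%S%z"
    now = datetime.now(timezone.utc)
    return datetime.strptime(not_before, fmt) < now < datetime.strptime(not_after, fmt)

assert matches("app.example.com", ["*.example.com"])
assert not matches("example.com", ["*.example.com"])  # wildcard covers subdomains only
assert in_validity_window("2000-01-01T00:00:00Z", "2999-01-01T00:00:00Z")
```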
@@ -189,8 +222,15 @@ class K8sDeployer(Deployer):
             self._create_volume_data()
             self._create_deployment()

+            http_proxy_info = self.cluster_info.spec.get_http_proxy()
             # Note: at present we don't support tls for kind (and enabling tls causes errors)
-            ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
+            use_tls = http_proxy_info and not self.is_kind()
+            certificate = self._find_certificate_for_host_name(http_proxy_info[0]["host-name"]) if use_tls else None
+            if opts.o.debug:
+                if certificate:
+                    print(f"Using existing certificate: {certificate}")
+
+            ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=use_tls, certificate=certificate)
             if ingress:
                 if opts.o.debug:
                     print(f"Sending this ingress: {ingress}")
@@ -350,9 +390,11 @@ class K8sDeployer(Deployer):
                     name=ingress.spec.tls[0].secret_name
                 )
-                hostname = ingress.spec.tls[0].hosts[0]
+                hostname = ingress.spec.rules[0].host
                 ip = ingress.status.load_balancer.ingress[0].ip
-                tls = "notBefore: %s, notAfter: %s" % (cert["status"]["notBefore"], cert["status"]["notAfter"])
+                tls = "notBefore: %s; notAfter: %s; names: %s" % (
+                    cert["status"]["notBefore"], cert["status"]["notAfter"], ingress.spec.tls[0].hosts
+                )
             except:  # noqa: E722
                 pass
@@ -27,7 +27,9 @@ class ResourceLimits:
     memory: int = None
     storage: int = None

-    def __init__(self, obj={}):
+    def __init__(self, obj=None):
+        if obj is None:
+            obj = {}
         if "cpus" in obj:
             self.cpus = float(obj["cpus"])
         if "memory" in obj:
@@ -50,7 +52,9 @@ class Resources:
     limits: ResourceLimits = None
     reservations: ResourceLimits = None

-    def __init__(self, obj={}):
+    def __init__(self, obj=None):
+        if obj is None:
+            obj = {}
         if "reservations" in obj:
             self.reservations = ResourceLimits(obj["reservations"])
         if "limits" in obj:
@@ -72,7 +76,9 @@ class Spec:
     obj: typing.Any
     file_path: Path

-    def __init__(self, file_path: Path = None, obj={}) -> None:
+    def __init__(self, file_path: Path = None, obj=None) -> None:
+        if obj is None:
+            obj = {}
         self.file_path = file_path
         self.obj = obj
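Note: all three constructors in this file replace the mutable default `obj={}` with the `None` sentinel idiom. Python evaluates a default value once, at function definition time, so a default dict is shared by every call that omits the argument, and any mutation leaks into later calls. A minimal standalone illustration of the pitfall (not code from the repo):

```python
def bad(obj={}):
    # mutates the single dict created at definition time
    obj["n"] = obj.get("n", 0) + 1
    return obj

def good(obj=None):
    if obj is None:
        obj = {}  # a fresh dict on every call
    obj["n"] = obj.get("n", 0) + 1
    return obj

print(bad())   # {'n': 1}
print(bad())   # {'n': 2}  <- state leaked from the first call
print(good())  # {'n': 1}
print(good())  # {'n': 1}
```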
@@ -91,49 +97,41 @@ class Spec:
         self.file_path = file_path

     def get_image_registry(self):
-        return (self.obj[constants.image_registry_key]
-                if self.obj and constants.image_registry_key in self.obj
-                else None)
+        return self.obj.get(constants.image_registry_key)

     def get_volumes(self):
-        return (self.obj["volumes"]
-                if self.obj and "volumes" in self.obj
-                else {})
+        return self.obj.get(constants.volumes_key, {})

     def get_configmaps(self):
-        return (self.obj["configmaps"]
-                if self.obj and "configmaps" in self.obj
-                else {})
+        return self.obj.get(constants.configmaps_key, {})

     def get_container_resources(self):
-        return Resources(self.obj.get("resources", {}).get("containers", {}))
+        return Resources(self.obj.get(constants.resources_key, {}).get("containers", {}))

     def get_volume_resources(self):
-        return Resources(self.obj.get("resources", {}).get("volumes", {}))
+        return Resources(self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {}))

     def get_http_proxy(self):
-        return (self.obj[constants.network_key][constants.http_proxy_key]
-                if self.obj and constants.network_key in self.obj
-                and constants.http_proxy_key in self.obj[constants.network_key]
-                else None)
+        return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])

     def get_annotations(self):
-        return self.obj.get("annotations", {})
+        return self.obj.get(constants.annotations_key, {})

     def get_labels(self):
-        return self.obj.get("labels", {})
+        return self.obj.get(constants.labels_key, {})

     def get_privileged(self):
-        return "true" == str(self.obj.get("security", {}).get("privileged", "false")).lower()
+        return "true" == str(self.obj.get(constants.security_key, {}).get("privileged", "false")).lower()

     def get_capabilities(self):
-        return self.obj.get("security", {}).get("capabilities", [])
+        return self.obj.get(constants.security_key, {}).get("capabilities", [])

     def get_deployment_type(self):
-        return self.obj[constants.deploy_to_key]
+        return self.obj.get(constants.deploy_to_key)

     def is_kubernetes_deployment(self):
-        return self.get_deployment_type() in [constants.k8s_kind_deploy_type, constants.k8s_deploy_type]
+        return self.get_deployment_type() in [constants.k8s_kind_deploy_type,
+                                              constants.k8s_deploy_type]

     def is_kind_deployment(self):
         return self.get_deployment_type() in [constants.k8s_kind_deploy_type]
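Note: the getters above collapse hand-rolled `if key in obj` chains into `dict.get` with a default, so each level of nesting degrades to an empty container instead of raising `KeyError`. A standalone sketch of the pattern (the spec data here is hypothetical):

```python
spec = {"network": {"http-proxy": [{"host-name": "app.example.com"}]}}

# Each .get(key, {}) makes that level optional:
proxies = spec.get("network", {}).get("http-proxy", [])  # [{'host-name': ...}]
missing = {}.get("network", {}).get("http-proxy", [])    # []

# One behaviour change visible in the hunk: get_http_proxy() now returns []
# rather than None when the key is absent, so callers can iterate directly.
```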

tests/fixturenet-laconicd/run-cli-test.sh (new executable file, 43 lines)
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+
+echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Laconic registry CLI tests"
+env
+cat /etc/hosts
+# Bit of a hack, test the most recent package
+TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+
+export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX)
+echo "$(date +"%Y-%m-%d %T"): Cloning laconic-registry-cli repository into: $CERC_REPO_BASE_DIR"
+$TEST_TARGET_SO --stack fixturenet-laconicd setup-repositories --include git.vdb.to/cerc-io/laconic-registry-cli
+
+echo "$(date +"%Y-%m-%d %T"): Starting stack"
+TEST_AUCTION_ENABLED=true BASE_DIR=${CERC_REPO_BASE_DIR} $TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd up
+echo "$(date +"%Y-%m-%d %T"): Stack started"
+
+# Verify that the fixturenet is up and running
+$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd ps
+
+# Get the fixturenet account address
+laconicd_account_address=$(docker exec laconicd-laconicd-1 laconicd keys list | awk '/- address:/ {print $3}')
+
+# Copy over config
+docker exec laconicd-cli-1 cp config.yml laconic-registry-cli/
+
+# Wait for the laconid endpoint to come up
+echo "Waiting for the RPC endpoint to come up"
+docker exec laconicd-laconicd-1 sh -c "curl --retry 20 --retry-delay 3 --retry-connrefused http://127.0.0.1:9473/api"
+
+# Run the tests
+echo "Running the tests"
+docker exec -e TEST_ACCOUNT=$laconicd_account_address laconicd-cli-1 sh -c 'cd laconic-registry-cli && yarn && yarn test'
+
+# Clean up
+$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd down --delete-volumes
+echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories"
+rm -rf $CERC_REPO_BASE_DIR
+echo "$(date +"%Y-%m-%d %T"): Test finished"
@@ -12,6 +12,7 @@ cat /etc/hosts
 TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
 # Set a new unique repo dir
 export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX)
+
 echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO"
 echo "$(date +"%Y-%m-%d %T"): Test version command"
 reported_version_string=$( $TEST_TARGET_SO version )
@@ -32,14 +32,14 @@ set +e

 CONTAINER_ID=$(docker run -p 3000:80 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local)
 sleep 3
-wget -t 7 -O test.before -m http://localhost:3000
+wget --tries 20 --retry-connrefused --waitretry=3 -O test.before -m http://localhost:3000

 docker logs $CONTAINER_ID
 docker remove -f $CONTAINER_ID

 CONTAINER_ID=$(docker run -p 3000:80 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local)
 sleep 3
-wget -t 7 -O test.after -m http://localhost:3000
+wget --tries 20 --retry-connrefused --waitretry=3 -O test.after -m http://localhost:3000

 docker logs $CONTAINER_ID
 docker remove -f $CONTAINER_ID