Compare commits
5 Commits
main
...
pm-update-
| Author | SHA1 | Date | |
|---|---|---|---|
| 68132c3305 | |||
| cfc411bfe4 | |||
| abf4d39a22 | |||
| 23d527720f | |||
| 1746f7366c |
@ -1,20 +1,23 @@
|
|||||||
name: External Stack Test
|
name: Fixturenet-Eth-Plugeth-Arm-Test
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches: '*'
|
branches: '*'
|
||||||
paths:
|
paths:
|
||||||
- '!**'
|
- '!**'
|
||||||
- '.gitea/workflows/triggers/test-external-stack'
|
- '.gitea/workflows/triggers/fixturenet-eth-plugeth-arm-test'
|
||||||
- '.gitea/workflows/test-external-stack.yml'
|
|
||||||
- 'tests/external-stack/run-test.sh'
|
|
||||||
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
|
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
|
||||||
- cron: '8 19 * * *'
|
- cron: '2 14 * * *'
|
||||||
|
|
||||||
|
# Needed until we can incorporate docker startup into the executor container
|
||||||
|
env:
|
||||||
|
DOCKER_HOST: unix:///var/run/dind.sock
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
name: "Run external stack test suite"
|
name: "Run an Ethereum plugeth fixturenet test"
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest-arm
|
||||||
steps:
|
steps:
|
||||||
- name: "Clone project repository"
|
- name: "Clone project repository"
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@ -33,13 +36,13 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
run: ./scripts/build_shiv_package.sh
|
run: ./scripts/build_shiv_package.sh
|
||||||
- name: "Run external stack tests"
|
- name: "Run fixturenet-eth tests"
|
||||||
run: ./tests/external-stack/run-test.sh
|
run: ./tests/fixturenet-eth-plugeth/run-test.sh
|
||||||
- name: Notify Vulcanize Slack on CI failure
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
if: ${{ always() && github.ref_name == 'main' }}
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
uses: ravsamhq/notify-slack-action@v2
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
@ -1,23 +1,23 @@
|
|||||||
name: K8s Deployment Control Test
|
name: Fixturenet-Eth-Plugeth-Test
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches: '*'
|
branches: '*'
|
||||||
paths:
|
paths:
|
||||||
- '!**'
|
- '!**'
|
||||||
- '.gitea/workflows/triggers/test-k8s-deployment-control'
|
- '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
|
||||||
- '.gitea/workflows/test-k8s-deployment-control.yml'
|
|
||||||
- 'tests/k8s-deployment-control/run-test.sh'
|
|
||||||
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
|
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
|
||||||
- cron: '3 30 * * *'
|
- cron: '2 14 * * *'
|
||||||
|
|
||||||
|
# Needed until we can incorporate docker startup into the executor container
|
||||||
|
env:
|
||||||
|
DOCKER_HOST: unix:///var/run/dind.sock
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
name: "Run deployment control suite on kind/k8s"
|
name: "Run an Ethereum plugeth fixturenet test"
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: "Clone project repository"
|
- name: "Clone project repository"
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@ -36,22 +36,17 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
run: ./scripts/build_shiv_package.sh
|
run: ./scripts/build_shiv_package.sh
|
||||||
- name: "Check cgroups version"
|
- name: Start dockerd # Also needed until we can incorporate into the executor
|
||||||
run: mount | grep cgroup
|
|
||||||
- name: "Install kind"
|
|
||||||
run: ./tests/scripts/install-kind.sh
|
|
||||||
- name: "Install Kubectl"
|
|
||||||
run: ./tests/scripts/install-kubectl.sh
|
|
||||||
- name: "Run k8s deployment control test"
|
|
||||||
run: |
|
run: |
|
||||||
source /opt/bash-utils/cgroup-helper.sh
|
dockerd -H $DOCKER_HOST --userland-proxy=false &
|
||||||
join_cgroup
|
sleep 5
|
||||||
./tests/k8s-deployment-control/run-test.sh
|
- name: "Run fixturenet-eth tests"
|
||||||
|
run: ./tests/fixturenet-eth-plugeth/run-test.sh
|
||||||
- name: Notify Vulcanize Slack on CI failure
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
if: ${{ always() && github.ref_name == 'main' }}
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
uses: ravsamhq/notify-slack-action@v2
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
63
.gitea/workflows/fixturenet-eth-test.yml
Normal file
63
.gitea/workflows/fixturenet-eth-test.yml
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
name: Fixturenet-Eth-Test
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: '*'
|
||||||
|
paths:
|
||||||
|
- '!**'
|
||||||
|
- '.gitea/workflows/triggers/fixturenet-eth-test'
|
||||||
|
|
||||||
|
# Needed until we can incorporate docker startup into the executor container
|
||||||
|
env:
|
||||||
|
DOCKER_HOST: unix:///var/run/dind.sock
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
name: "Run an Ethereum fixturenet test"
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: "Clone project repository"
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
# At present the stock setup-python action fails on Linux/aarch64
|
||||||
|
# Conditional steps below workaroud this by using deadsnakes for that case only
|
||||||
|
- name: "Install Python for ARM on Linux"
|
||||||
|
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
|
||||||
|
uses: deadsnakes/action@v3.0.1
|
||||||
|
with:
|
||||||
|
python-version: '3.8'
|
||||||
|
- name: "Install Python cases other than ARM on Linux"
|
||||||
|
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
|
||||||
|
uses: actions/setup-python@v4
|
||||||
|
with:
|
||||||
|
python-version: '3.8'
|
||||||
|
- name: "Print Python version"
|
||||||
|
run: python3 --version
|
||||||
|
- name: "Install shiv"
|
||||||
|
run: pip install shiv
|
||||||
|
- name: "Generate build version file"
|
||||||
|
run: ./scripts/create_build_tag_file.sh
|
||||||
|
- name: "Build local shiv package"
|
||||||
|
run: ./scripts/build_shiv_package.sh
|
||||||
|
- name: Start dockerd # Also needed until we can incorporate into the executor
|
||||||
|
run: |
|
||||||
|
dockerd -H $DOCKER_HOST --userland-proxy=false &
|
||||||
|
sleep 5
|
||||||
|
- name: "Run fixturenet-eth tests"
|
||||||
|
run: ./tests/fixturenet-eth/run-test.sh
|
||||||
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
|
with:
|
||||||
|
status: ${{ job.status }}
|
||||||
|
notify_when: 'failure'
|
||||||
|
env:
|
||||||
|
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
|
||||||
|
- name: Notify DeepStack Slack on CI failure
|
||||||
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
|
with:
|
||||||
|
status: ${{ job.status }}
|
||||||
|
notify_when: 'failure'
|
||||||
|
env:
|
||||||
|
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
|
||||||
@ -39,7 +39,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -35,7 +35,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
id: build
|
id: build
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,8 +2,7 @@ name: Deploy Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches: '*'
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
@ -11,6 +10,9 @@ on:
|
|||||||
paths-ignore:
|
paths-ignore:
|
||||||
- '.gitea/workflows/triggers/*'
|
- '.gitea/workflows/triggers/*'
|
||||||
|
|
||||||
|
# Needed until we can incorporate docker startup into the executor container
|
||||||
|
env:
|
||||||
|
DOCKER_HOST: unix:///var/run/dind.sock
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
@ -34,11 +36,15 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
run: ./scripts/build_shiv_package.sh
|
run: ./scripts/build_shiv_package.sh
|
||||||
|
- name: Start dockerd # Also needed until we can incorporate into the executor
|
||||||
|
run: |
|
||||||
|
dockerd -H $DOCKER_HOST --userland-proxy=false &
|
||||||
|
sleep 5
|
||||||
- name: "Run deploy tests"
|
- name: "Run deploy tests"
|
||||||
run: ./tests/deploy/run-deploy-test.sh
|
run: ./tests/deploy/run-deploy-test.sh
|
||||||
- name: Notify Vulcanize Slack on CI failure
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
|
|||||||
@ -2,8 +2,7 @@ name: K8s Deploy Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches: '*'
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches: '*'
|
branches: '*'
|
||||||
paths:
|
paths:
|
||||||
@ -36,7 +35,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,8 +2,7 @@ name: Webapp Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches: '*'
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
@ -11,6 +10,10 @@ on:
|
|||||||
paths-ignore:
|
paths-ignore:
|
||||||
- '.gitea/workflows/triggers/*'
|
- '.gitea/workflows/triggers/*'
|
||||||
|
|
||||||
|
# Needed until we can incorporate docker startup into the executor container
|
||||||
|
env:
|
||||||
|
DOCKER_HOST: unix:///var/run/dind.sock
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
name: "Run webapp test suite"
|
name: "Run webapp test suite"
|
||||||
@ -33,13 +36,17 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
run: ./scripts/build_shiv_package.sh
|
run: ./scripts/build_shiv_package.sh
|
||||||
- name: "Install wget" # 20240109 - Only needed until the executors are updated.
|
- name: "Install wget" # 20240109 - Only needed until the executors are updated.
|
||||||
run: apt update && apt install -y wget
|
run: apt update && apt install -y wget
|
||||||
|
- name: Start dockerd # Also needed until we can incorporate into the executor
|
||||||
|
run: |
|
||||||
|
dockerd -H $DOCKER_HOST --userland-proxy=false &
|
||||||
|
sleep 5
|
||||||
- name: "Run webapp tests"
|
- name: "Run webapp tests"
|
||||||
run: ./tests/webapp-test/run-webapp-test.sh
|
run: ./tests/webapp-test/run-webapp-test.sh
|
||||||
- name: Notify Vulcanize Slack on CI failure
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
|
|||||||
@ -10,6 +10,9 @@ on:
|
|||||||
paths-ignore:
|
paths-ignore:
|
||||||
- '.gitea/workflows/triggers/*'
|
- '.gitea/workflows/triggers/*'
|
||||||
|
|
||||||
|
# Needed until we can incorporate docker startup into the executor container
|
||||||
|
env:
|
||||||
|
DOCKER_HOST: unix:///var/run/dind.sock
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
@ -33,11 +36,15 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
run: ./scripts/build_shiv_package.sh
|
run: ./scripts/build_shiv_package.sh
|
||||||
|
- name: Start dockerd # Also needed until we can incorporate into the executor
|
||||||
|
run: |
|
||||||
|
dockerd -H $DOCKER_HOST --userland-proxy=false &
|
||||||
|
sleep 5
|
||||||
- name: "Run smoke tests"
|
- name: "Run smoke tests"
|
||||||
run: ./tests/smoke-test/run-smoke-test.sh
|
run: ./tests/smoke-test/run-smoke-test.sh
|
||||||
- name: Notify Vulcanize Slack on CI failure
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
|
|||||||
@ -0,0 +1,2 @@
|
|||||||
|
Change this file to trigger running the fixturenet-eth-plugeth-arm-test CI job
|
||||||
|
|
||||||
3
.gitea/workflows/triggers/fixturenet-eth-plugeth-test
Normal file
3
.gitea/workflows/triggers/fixturenet-eth-plugeth-test
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
Change this file to trigger running the fixturenet-eth-plugeth-test CI job
|
||||||
|
trigger
|
||||||
|
trigger
|
||||||
2
.gitea/workflows/triggers/fixturenet-eth-test
Normal file
2
.gitea/workflows/triggers/fixturenet-eth-test
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
Change this file to trigger running the fixturenet-eth-test CI job
|
||||||
|
|
||||||
@ -4,7 +4,3 @@ Trigger
|
|||||||
Trigger
|
Trigger
|
||||||
Trigger
|
Trigger
|
||||||
Trigger
|
Trigger
|
||||||
Trigger
|
|
||||||
Trigger
|
|
||||||
Trigger
|
|
||||||
Trigger
|
|
||||||
|
|||||||
@ -1,3 +1 @@
|
|||||||
Change this file to trigger running the test-container-registry CI job
|
Change this file to trigger running the test-container-registry CI job
|
||||||
Triggered: 2026-01-21
|
|
||||||
Triggered: 2026-01-21 19:28:29
|
|
||||||
|
|||||||
@ -1,2 +0,0 @@
|
|||||||
Change this file to trigger running the external-stack CI job
|
|
||||||
trigger
|
|
||||||
@ -1 +1,2 @@
|
|||||||
Change this file to trigger running the fixturenet-eth-test CI job
|
Change this file to trigger running the fixturenet-eth-test CI job
|
||||||
|
|
||||||
|
|||||||
@ -1,34 +0,0 @@
|
|||||||
repos:
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
||||||
rev: v5.0.0
|
|
||||||
hooks:
|
|
||||||
- id: trailing-whitespace
|
|
||||||
- id: end-of-file-fixer
|
|
||||||
- id: check-yaml
|
|
||||||
args: ['--allow-multiple-documents']
|
|
||||||
- id: check-json
|
|
||||||
- id: check-merge-conflict
|
|
||||||
- id: check-added-large-files
|
|
||||||
|
|
||||||
- repo: https://github.com/psf/black
|
|
||||||
rev: 23.12.1
|
|
||||||
hooks:
|
|
||||||
- id: black
|
|
||||||
language_version: python3
|
|
||||||
|
|
||||||
- repo: https://github.com/PyCQA/flake8
|
|
||||||
rev: 7.1.1
|
|
||||||
hooks:
|
|
||||||
- id: flake8
|
|
||||||
args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']
|
|
||||||
|
|
||||||
- repo: https://github.com/RobertCraigie/pyright-python
|
|
||||||
rev: v1.1.345
|
|
||||||
hooks:
|
|
||||||
- id: pyright
|
|
||||||
|
|
||||||
- repo: https://github.com/adrienverge/yamllint
|
|
||||||
rev: v1.35.1
|
|
||||||
hooks:
|
|
||||||
- id: yamllint
|
|
||||||
args: [-d, relaxed]
|
|
||||||
@ -1,151 +0,0 @@
|
|||||||
# Plan: Make Stack-Orchestrator AI-Friendly
|
|
||||||
|
|
||||||
## Goal
|
|
||||||
|
|
||||||
Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Part 1: Documentation & Context Files
|
|
||||||
|
|
||||||
### 1.1 Add CLAUDE.md
|
|
||||||
|
|
||||||
Create a root-level context file for AI assistants.
|
|
||||||
|
|
||||||
**File:** `CLAUDE.md`
|
|
||||||
|
|
||||||
Contents:
|
|
||||||
- Project overview (what stack-orchestrator does)
|
|
||||||
- Stack creation workflow (step-by-step)
|
|
||||||
- File naming conventions
|
|
||||||
- Required vs optional fields in stack.yml
|
|
||||||
- Common patterns and anti-patterns
|
|
||||||
- Links to example stacks (simple, medium, complex)
|
|
||||||
|
|
||||||
### 1.2 Add JSON Schema for stack.yml
|
|
||||||
|
|
||||||
Create formal validation schema.
|
|
||||||
|
|
||||||
**File:** `schemas/stack-schema.json`
|
|
||||||
|
|
||||||
Benefits:
|
|
||||||
- AI tools can validate generated stacks
|
|
||||||
- IDEs provide autocomplete
|
|
||||||
- CI can catch errors early
|
|
||||||
|
|
||||||
### 1.3 Add Template Stack with Comments
|
|
||||||
|
|
||||||
Create an annotated template for reference.
|
|
||||||
|
|
||||||
**File:** `stack_orchestrator/data/stacks/_template/stack.yml`
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Stack definition template - copy this directory to create a new stack
|
|
||||||
version: "1.2" # Required: 1.0, 1.1, or 1.2
|
|
||||||
name: my-stack # Required: lowercase, hyphens only
|
|
||||||
description: "Human-readable description" # Optional
|
|
||||||
repos: # Git repositories to clone
|
|
||||||
- github.com/org/repo
|
|
||||||
containers: # Container images to build (must have matching container-build/)
|
|
||||||
- cerc/my-container
|
|
||||||
pods: # Deployment units (must have matching docker-compose-{pod}.yml)
|
|
||||||
- my-pod
|
|
||||||
```
|
|
||||||
|
|
||||||
### 1.4 Document Validation Rules
|
|
||||||
|
|
||||||
Create explicit documentation of constraints currently scattered in code.
|
|
||||||
|
|
||||||
**File:** `docs/stack-format.md`
|
|
||||||
|
|
||||||
Contents:
|
|
||||||
- Container names must start with `cerc/`
|
|
||||||
- Pod names must match compose file: `docker-compose-{pod}.yml`
|
|
||||||
- Repository format: `host/org/repo[@ref]`
|
|
||||||
- Stack directory name should match `name` field
|
|
||||||
- Version field options and differences
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Part 2: Add `create-stack` Command
|
|
||||||
|
|
||||||
### 2.1 Command Overview
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
|
|
||||||
```
|
|
||||||
|
|
||||||
**Behavior:**
|
|
||||||
1. Parse repo URL to extract app name (if --name not provided)
|
|
||||||
2. Create `stacks/{name}/stack.yml`
|
|
||||||
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
|
|
||||||
4. Create `compose/docker-compose-{name}.yml`
|
|
||||||
5. Update list files (repository-list.txt, container-image-list.txt, pod-list.txt)
|
|
||||||
|
|
||||||
### 2.2 Files to Create
|
|
||||||
|
|
||||||
| File | Purpose |
|
|
||||||
|------|---------|
|
|
||||||
| `stack_orchestrator/create/__init__.py` | Package init |
|
|
||||||
| `stack_orchestrator/create/create_stack.py` | Command implementation |
|
|
||||||
|
|
||||||
### 2.3 Files to Modify
|
|
||||||
|
|
||||||
| File | Change |
|
|
||||||
|------|--------|
|
|
||||||
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
|
|
||||||
|
|
||||||
### 2.4 Command Options
|
|
||||||
|
|
||||||
| Option | Required | Description |
|
|
||||||
|--------|----------|-------------|
|
|
||||||
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
|
|
||||||
| `--name` | No | Stack name (defaults to repo name) |
|
|
||||||
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
|
|
||||||
| `--force` | No | Overwrite existing files |
|
|
||||||
|
|
||||||
### 2.5 Template Types
|
|
||||||
|
|
||||||
| Type | Base Image | Port | Use Case |
|
|
||||||
|------|------------|------|----------|
|
|
||||||
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
|
|
||||||
| service | python:3.11-slim | 8080 | Python backend services |
|
|
||||||
| empty | none | none | Custom from scratch |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Part 3: Implementation Summary
|
|
||||||
|
|
||||||
### New Files (6)
|
|
||||||
|
|
||||||
1. `CLAUDE.md` - AI assistant context
|
|
||||||
2. `schemas/stack-schema.json` - Validation schema
|
|
||||||
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
|
|
||||||
4. `docs/stack-format.md` - Stack format documentation
|
|
||||||
5. `stack_orchestrator/create/__init__.py` - Package init
|
|
||||||
6. `stack_orchestrator/create/create_stack.py` - Command implementation
|
|
||||||
|
|
||||||
### Modified Files (1)
|
|
||||||
|
|
||||||
1. `stack_orchestrator/main.py` - Register create-stack command
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Verification
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Command appears in help
|
|
||||||
laconic-so --help | grep create-stack
|
|
||||||
|
|
||||||
# 2. Dry run works
|
|
||||||
laconic-so --dry-run create-stack --repo github.com/org/test-app
|
|
||||||
|
|
||||||
# 3. Creates all expected files
|
|
||||||
laconic-so create-stack --repo github.com/org/test-app
|
|
||||||
ls stack_orchestrator/data/stacks/test-app/
|
|
||||||
ls stack_orchestrator/data/container-build/cerc-test-app/
|
|
||||||
ls stack_orchestrator/data/compose/docker-compose-test-app.yml
|
|
||||||
|
|
||||||
# 4. Build works with generated stack
|
|
||||||
laconic-so --stack test-app build-containers
|
|
||||||
```
|
|
||||||
50
CLAUDE.md
50
CLAUDE.md
@ -1,50 +0,0 @@
|
|||||||
# CLAUDE.md
|
|
||||||
|
|
||||||
This file provides guidance to Claude Code when working with the stack-orchestrator project.
|
|
||||||
|
|
||||||
## Some rules to follow
|
|
||||||
NEVER speculate about the cause of something
|
|
||||||
NEVER assume your hypotheses are true without evidence
|
|
||||||
|
|
||||||
ALWAYS clearly state when something is a hypothesis
|
|
||||||
ALWAYS use evidence from the systems your interacting with to support your claims and hypotheses
|
|
||||||
|
|
||||||
## Key Principles
|
|
||||||
|
|
||||||
### Development Guidelines
|
|
||||||
- **Single responsibility** - Each component has one clear purpose
|
|
||||||
- **Fail fast** - Let errors propagate, don't hide failures
|
|
||||||
- **DRY/KISS** - Minimize duplication and complexity
|
|
||||||
|
|
||||||
## Development Philosophy: Conversational Literate Programming
|
|
||||||
|
|
||||||
### Approach
|
|
||||||
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.
|
|
||||||
|
|
||||||
### Core Principles
|
|
||||||
- **Documentation-First**: All changes begin with discussion of intent and reasoning
|
|
||||||
- **Narrative-Driven**: Complex systems are explained through conversational exploration
|
|
||||||
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
|
|
||||||
- **Iterative Understanding**: Architecture and implementation evolve through dialogue
|
|
||||||
|
|
||||||
### Working Method
|
|
||||||
1. **Explore and Understand**: Read existing code to understand current state
|
|
||||||
2. **Discuss Architecture**: Workshop complex design decisions through conversation
|
|
||||||
3. **Document Intent**: Update TODO.md with clear justification before coding
|
|
||||||
4. **Explain Changes**: Each modification includes reasoning and context
|
|
||||||
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution
|
|
||||||
|
|
||||||
### Implementation Guidelines
|
|
||||||
- Treat conversations as primary documentation
|
|
||||||
- Explain architectural decisions before implementing
|
|
||||||
- Use TODO.md as the "literate document" that justifies all work
|
|
||||||
- Maintain clear narrative threads across sessions
|
|
||||||
- Workshop complex ideas before coding
|
|
||||||
|
|
||||||
This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.
|
|
||||||
|
|
||||||
## Insights and Observations
|
|
||||||
|
|
||||||
### Design Principles
|
|
||||||
- **When something times out that doesn't mean it needs a longer timeout it means something that was expected never happened, not that we need to wait longer for it.**
|
|
||||||
- **NEVER change a timeout because you believe something truncated, you don't understand timeouts, don't edit them unless told to explicitly by user.**
|
|
||||||
@ -78,3 +78,5 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
|
|||||||
## Platform Support
|
## Platform Support
|
||||||
|
|
||||||
Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
|
Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@ -1,413 +0,0 @@
|
|||||||
# Implementing `laconic-so create-stack` Command
|
|
||||||
|
|
||||||
A plan for adding a new CLI command to scaffold stack files automatically.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
Add a `create-stack` command that generates all required files for a new stack:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so create-stack --name my-stack --type webapp
|
|
||||||
```
|
|
||||||
|
|
||||||
**Output:**
|
|
||||||
```
|
|
||||||
stack_orchestrator/data/
|
|
||||||
├── stacks/my-stack/stack.yml
|
|
||||||
├── container-build/cerc-my-stack/
|
|
||||||
│ ├── Dockerfile
|
|
||||||
│ └── build.sh
|
|
||||||
└── compose/docker-compose-my-stack.yml
|
|
||||||
|
|
||||||
Updated: repository-list.txt, container-image-list.txt, pod-list.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## CLI Architecture Summary
|
|
||||||
|
|
||||||
### Command Registration Pattern
|
|
||||||
|
|
||||||
Commands are Click functions registered in `main.py`:
|
|
||||||
|
|
||||||
```python
|
|
||||||
# main.py (line ~70)
|
|
||||||
from stack_orchestrator.create import create_stack
|
|
||||||
cli.add_command(create_stack.command, "create-stack")
|
|
||||||
```
|
|
||||||
|
|
||||||
### Global Options Access
|
|
||||||
|
|
||||||
```python
|
|
||||||
from stack_orchestrator.opts import opts
|
|
||||||
|
|
||||||
if not opts.o.quiet:
|
|
||||||
print("message")
|
|
||||||
if opts.o.dry_run:
|
|
||||||
print("(would create files)")
|
|
||||||
```
|
|
||||||
|
|
||||||
### Key Utilities
|
|
||||||
|
|
||||||
| Function | Location | Purpose |
|
|
||||||
|----------|----------|---------|
|
|
||||||
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
|
|
||||||
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
|
|
||||||
| `error_exit(msg)` | `util.py` | Print error and exit(1) |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Files to Create
|
|
||||||
|
|
||||||
### 1. Command Module
|
|
||||||
|
|
||||||
**`stack_orchestrator/create/__init__.py`**
|
|
||||||
```python
|
|
||||||
# Empty file to make this a package
|
|
||||||
```
|
|
||||||
|
|
||||||
**`stack_orchestrator/create/create_stack.py`**
|
|
||||||
```python
|
|
||||||
import click
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
from shutil import copy
|
|
||||||
from stack_orchestrator.opts import opts
|
|
||||||
from stack_orchestrator.util import error_exit, get_yaml
|
|
||||||
|
|
||||||
# Template types
|
|
||||||
STACK_TEMPLATES = {
|
|
||||||
"webapp": {
|
|
||||||
"description": "Web application with Node.js",
|
|
||||||
"base_image": "node:20-bullseye-slim",
|
|
||||||
"port": 3000,
|
|
||||||
},
|
|
||||||
"service": {
|
|
||||||
"description": "Backend service",
|
|
||||||
"base_image": "python:3.11-slim",
|
|
||||||
"port": 8080,
|
|
||||||
},
|
|
||||||
"empty": {
|
|
||||||
"description": "Minimal stack with no defaults",
|
|
||||||
"base_image": None,
|
|
||||||
"port": None,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_data_dir() -> Path:
|
|
||||||
"""Get path to stack_orchestrator/data directory"""
|
|
||||||
return Path(__file__).absolute().parent.parent.joinpath("data")
|
|
||||||
|
|
||||||
|
|
||||||
def validate_stack_name(name: str) -> None:
|
|
||||||
"""Validate stack name follows conventions"""
|
|
||||||
import re
|
|
||||||
if not re.match(r'^[a-z0-9][a-z0-9-]*[a-z0-9]$', name) and len(name) > 2:
|
|
||||||
error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
|
|
||||||
if name.startswith("cerc-"):
|
|
||||||
error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")
|
|
||||||
|
|
||||||
|
|
||||||
def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
    """Write a stack.yml manifest into stack_dir.

    The manifest references the source repo (when one is given), a single
    container image (cerc/<name>) and a single pod named after the stack.
    stack_dir and any missing parents are created.
    """
    repos = [repo_url] if repo_url else []
    manifest = {
        "version": "1.2",
        "name": name,
        "description": template.get("description", f"Stack: {name}"),
        "repos": repos,
        "containers": [f"cerc/{name}"],
        "pods": [name],
    }

    stack_dir.mkdir(parents=True, exist_ok=True)
    manifest_path = stack_dir / "stack.yml"
    with open(manifest_path, "w") as out_file:
        get_yaml().dump(manifest, out_file)
|
|
||||||
|
|
||||||
|
|
||||||
def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
    """Write a two-stage Dockerfile for the stack into container_dir.

    Uses the template's base_image/port when set and falls back to the
    webapp defaults otherwise. container_dir is created if missing.

    NOTE(review): the generated Dockerfile is npm-centric; for the python
    "service" template the npm steps will need manual editing afterwards.
    """
    # Bug fix: template.get(key, default) is not enough here — the "empty"
    # template stores explicit None values (the keys exist), which produced
    # "FROM None" / "EXPOSE None". `or` applies the fallback in that case too.
    base_image = template.get("base_image") or "node:20-bullseye-slim"
    port = template.get("port") or 3000

    dockerfile_content = f'''# Build stage
FROM {base_image} AS builder

WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production stage
FROM {base_image}

WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev
COPY --from=builder /app/dist ./dist

EXPOSE {port}
CMD ["npm", "run", "start"]
'''
    # Note: `--omit=dev` replaces the deprecated `--only=production` flag.

    container_dir.mkdir(parents=True, exist_ok=True)
    with open(container_dir / "Dockerfile", "w") as f:
        f.write(dockerfile_content)
|
|
||||||
|
|
||||||
|
|
||||||
def create_build_script(container_dir: Path, name: str) -> None:
    """Write an executable build.sh for the cerc/<name> container image.

    The script sources the shared build-base helper and invokes
    `docker build` against the checked-out repository directory.
    """
    script_text = f'''#!/usr/bin/env bash
# Build cerc/{name}

source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh

SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )

docker build -t cerc/{name}:local \\
-f ${{SCRIPT_DIR}}/Dockerfile \\
${{build_command_args}} \\
${{CERC_REPO_BASE_DIR}}/{name}
'''

    script_path = container_dir / "build.sh"
    script_path.write_text(script_text)
    # rwxr-xr-x so stack-orchestrator can invoke the script directly.
    script_path.chmod(0o755)
|
|
||||||
|
|
||||||
|
|
||||||
def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
    """Write docker-compose-<name>.yml into compose_dir.

    Defines a single service running cerc/<name>:local with a host-port
    mapping that can be overridden at deploy time via HOST_PORT.
    """
    # Bug fix: the "empty" template stores an explicit None port (the key
    # exists), so a .get() default never applied and the generated mapping
    # was "${HOST_PORT:-None}:None". `or` applies the fallback in that case.
    port = template.get("port") or 3000

    compose_content = {
        # NOTE(review): the top-level "version" key is obsolete in Compose v2;
        # harmless, but could be dropped once all consumers are on v2.
        "version": "3.8",
        "services": {
            name: {
                "image": f"cerc/{name}:local",
                "restart": "unless-stopped",
                "ports": [f"${{HOST_PORT:-{port}}}:{port}"],
                "environment": {
                    "NODE_ENV": "${NODE_ENV:-production}",
                },
            }
        }
    }

    with open(compose_dir / f"docker-compose-{name}.yml", "w") as f:
        get_yaml().dump(compose_content, f)
|
|
||||||
|
|
||||||
|
|
||||||
def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
    """Append entry to data_dir/filename unless it is already listed.

    The file is treated as one entry per line; surrounding whitespace is
    ignored when checking for duplicates. The file is created on first use.
    """
    list_path = data_dir / filename

    # Read existing entries (empty set when the file does not exist yet).
    existing = set()
    needs_newline = False
    if list_path.exists():
        content = list_path.read_text()
        existing = {line.strip() for line in content.splitlines() if line.strip()}
        # Bug fix: if the file lacks a trailing newline, a plain append
        # would glue the new entry onto the last existing line.
        needs_newline = bool(content) and not content.endswith("\n")

    # Append the new entry only if it is not already present.
    if entry not in existing:
        with open(list_path, "a") as f:
            if needs_newline:
                f.write("\n")
            f.write(f"{entry}\n")
|
|
||||||
|
|
||||||
|
|
||||||
@click.command()
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
@click.option("--type", "stack_type", default="webapp",
              type=click.Choice(list(STACK_TEMPLATES.keys())),
              help="Stack template type")
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
@click.option("--force", is_flag=True, help="Overwrite existing files")
@click.pass_context
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
    """Create a new stack with all required files.

    Examples:

    laconic-so create-stack --name my-app --type webapp

    laconic-so create-stack --name my-service --type service --repo github.com/org/repo
    """
    # Validate
    # validate_stack_name() terminates the process on an invalid name.
    validate_stack_name(name)

    template = STACK_TEMPLATES[stack_type]
    data_dir = get_data_dir()

    # Define paths
    # Container build dirs carry the "cerc-" prefix by convention; the stack
    # and pod use the bare name.
    stack_dir = data_dir / "stacks" / name
    container_dir = data_dir / "container-build" / f"cerc-{name}"
    compose_dir = data_dir / "compose"

    # Check for existing files
    # Refuse to clobber an existing stack unless --force was given.
    if not force:
        if stack_dir.exists():
            error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
        if container_dir.exists():
            error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")

    # Dry run check
    # With --dry-run, report what would be written and bail out early.
    if opts.o.dry_run:
        print(f"Would create stack '{name}' with template '{stack_type}':")
        print(f"  - {stack_dir}/stack.yml")
        print(f"  - {container_dir}/Dockerfile")
        print(f"  - {container_dir}/build.sh")
        print(f"  - {compose_dir}/docker-compose-{name}.yml")
        print(f"  - Update repository-list.txt")
        print(f"  - Update container-image-list.txt")
        print(f"  - Update pod-list.txt")
        return

    # Create files
    if not opts.o.quiet:
        print(f"Creating stack '{name}' with template '{stack_type}'...")

    create_stack_yml(stack_dir, name, template, repo)
    if opts.o.verbose:
        print(f"  Created {stack_dir}/stack.yml")

    create_dockerfile(container_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {container_dir}/Dockerfile")

    create_build_script(container_dir, name)
    if opts.o.verbose:
        print(f"  Created {container_dir}/build.sh")

    create_compose_file(compose_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {compose_dir}/docker-compose-{name}.yml")

    # Update list files
    # Register the new artifacts with the orchestrator's discovery lists;
    # update_list_file() is idempotent, so re-runs with --force are safe.
    if repo:
        update_list_file(data_dir, "repository-list.txt", repo)
        if opts.o.verbose:
            print(f"  Added {repo} to repository-list.txt")

    update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
    if opts.o.verbose:
        print(f"  Added cerc/{name} to container-image-list.txt")

    update_list_file(data_dir, "pod-list.txt", name)
    if opts.o.verbose:
        print(f"  Added {name} to pod-list.txt")

    # Summary
    if not opts.o.quiet:
        print(f"\nStack '{name}' created successfully!")
        print(f"\nNext steps:")
        print(f"  1. Edit {stack_dir}/stack.yml")
        print(f"  2. Customize {container_dir}/Dockerfile")
        print(f"  3. Run: laconic-so --stack {name} build-containers")
        print(f"  4. Run: laconic-so --stack {name} deploy-system up")
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Register Command in main.py
|
|
||||||
|
|
||||||
**Edit `stack_orchestrator/main.py`**
|
|
||||||
|
|
||||||
Add import:
|
|
||||||
```python
|
|
||||||
from stack_orchestrator.create import create_stack
|
|
||||||
```
|
|
||||||
|
|
||||||
Add command registration (after line ~78):
|
|
||||||
```python
|
|
||||||
cli.add_command(create_stack.command, "create-stack")
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Implementation Steps
|
|
||||||
|
|
||||||
### Step 1: Create module structure
|
|
||||||
```bash
|
|
||||||
mkdir -p stack_orchestrator/create
|
|
||||||
touch stack_orchestrator/create/__init__.py
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 2: Create the command file
|
|
||||||
Create `stack_orchestrator/create/create_stack.py` with the code above.
|
|
||||||
|
|
||||||
### Step 3: Register in main.py
|
|
||||||
Add the import and `cli.add_command()` line.
|
|
||||||
|
|
||||||
### Step 4: Test the command
|
|
||||||
```bash
|
|
||||||
# Show help
|
|
||||||
laconic-so create-stack --help
|
|
||||||
|
|
||||||
# Dry run
|
|
||||||
laconic-so --dry-run create-stack --name test-app --type webapp
|
|
||||||
|
|
||||||
# Create a stack
|
|
||||||
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app
|
|
||||||
|
|
||||||
# Verify
|
|
||||||
ls -la stack_orchestrator/data/stacks/test-app/
|
|
||||||
cat stack_orchestrator/data/stacks/test-app/stack.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Template Types
|
|
||||||
|
|
||||||
| Type | Base Image | Port | Use Case |
|
|
||||||
|------|------------|------|----------|
|
|
||||||
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
|
|
||||||
| `service` | python:3.11-slim | 8080 | Python backend services |
|
|
||||||
| `empty` | none | none | Custom from scratch |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Future Enhancements
|
|
||||||
|
|
||||||
1. **Interactive mode** - Prompt for values if not provided
|
|
||||||
2. **More templates** - Go, Rust, database stacks
|
|
||||||
3. **Template from existing** - `--from-stack existing-stack`
|
|
||||||
4. **External stack support** - Create in custom directory
|
|
||||||
5. **Validation command** - `laconic-so validate-stack --name my-stack`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Files Modified
|
|
||||||
|
|
||||||
| File | Change |
|
|
||||||
|------|--------|
|
|
||||||
| `stack_orchestrator/create/__init__.py` | New (empty) |
|
|
||||||
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
|
|
||||||
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Verification
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Command appears in help
|
|
||||||
laconic-so --help | grep create-stack
|
|
||||||
|
|
||||||
# 2. Dry run works
|
|
||||||
laconic-so --dry-run create-stack --name verify-test --type webapp
|
|
||||||
|
|
||||||
# 3. Full creation works
|
|
||||||
laconic-so create-stack --name verify-test --type webapp
|
|
||||||
ls stack_orchestrator/data/stacks/verify-test/
|
|
||||||
ls stack_orchestrator/data/container-build/cerc-verify-test/
|
|
||||||
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml
|
|
||||||
|
|
||||||
# 4. Build works
|
|
||||||
laconic-so --stack verify-test build-containers
|
|
||||||
|
|
||||||
# 5. Cleanup
|
|
||||||
rm -rf stack_orchestrator/data/stacks/verify-test
|
|
||||||
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
|
|
||||||
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
|
|
||||||
```
|
|
||||||
16
TODO.md
16
TODO.md
@ -1,16 +0,0 @@
|
|||||||
# TODO
|
|
||||||
|
|
||||||
## Features Needed
|
|
||||||
|
|
||||||
### Update Stack Command
|
|
||||||
We need an "update stack" command in stack orchestrator and cleaner documentation regarding how to do continuous deployment with and without payments.
|
|
||||||
|
|
||||||
**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.
|
|
||||||
|
|
||||||
## Architecture Refactoring
|
|
||||||
|
|
||||||
### Separate Deployer from Stack Orchestrator CLI
|
|
||||||
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.
|
|
||||||
|
|
||||||
### Separate Stacks from Stack Orchestrator Repo
|
|
||||||
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.
|
|
||||||
@ -51,7 +51,7 @@ $ laconic-so build-npms --include <package-name>
|
|||||||
```
|
```
|
||||||
e.g.
|
e.g.
|
||||||
```
|
```
|
||||||
$ laconic-so build-npms --include registry-sdk
|
$ laconic-so build-npms --include laconic-sdk
|
||||||
```
|
```
|
||||||
Build the packages for a stack:
|
Build the packages for a stack:
|
||||||
```
|
```
|
||||||
|
|||||||
@ -1,550 +0,0 @@
|
|||||||
# Docker Compose Deployment Guide
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
### What is a Deployer?
|
|
||||||
|
|
||||||
In stack-orchestrator, a **deployer** provides a uniform interface for orchestrating containerized applications. This guide focuses on Docker Compose deployments, which is the default and recommended deployment mode.
|
|
||||||
|
|
||||||
While stack-orchestrator also supports Kubernetes (`k8s`) and Kind (`k8s-kind`) deployments, those are out of scope for this guide. See the [Kubernetes Enhancements](./k8s-deployment-enhancements.md) documentation for advanced deployment options.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
To deploy stacks using Docker Compose, you need:
|
|
||||||
|
|
||||||
- Docker Engine (20.10+)
|
|
||||||
- Docker Compose plugin (v2.0+)
|
|
||||||
- Python 3.8+
|
|
||||||
- stack-orchestrator installed (`laconic-so`)
|
|
||||||
|
|
||||||
**That's it!** No additional infrastructure is required. If you have Docker installed, you're ready to deploy.
|
|
||||||
|
|
||||||
## Deployment Workflow
|
|
||||||
|
|
||||||
The typical deployment workflow consists of four main steps:
|
|
||||||
|
|
||||||
1. **Setup repositories and build containers** (first time only)
|
|
||||||
2. **Initialize deployment specification**
|
|
||||||
3. **Create deployment directory**
|
|
||||||
4. **Start and manage services**
|
|
||||||
|
|
||||||
## Quick Start Example
|
|
||||||
|
|
||||||
Here's a complete example using the built-in `test` stack:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Step 1: Setup (first time only)
|
|
||||||
laconic-so --stack test setup-repositories
|
|
||||||
laconic-so --stack test build-containers
|
|
||||||
|
|
||||||
# Step 2: Initialize deployment spec
|
|
||||||
laconic-so --stack test deploy init --output test-spec.yml
|
|
||||||
|
|
||||||
# Step 3: Create deployment directory
|
|
||||||
laconic-so --stack test deploy create \
|
|
||||||
--spec-file test-spec.yml \
|
|
||||||
--deployment-dir test-deployment
|
|
||||||
|
|
||||||
# Step 4: Start services
|
|
||||||
laconic-so deployment --dir test-deployment start
|
|
||||||
|
|
||||||
# View running services
|
|
||||||
laconic-so deployment --dir test-deployment ps
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so deployment --dir test-deployment logs
|
|
||||||
|
|
||||||
# Stop services (preserves data)
|
|
||||||
laconic-so deployment --dir test-deployment stop
|
|
||||||
```
|
|
||||||
|
|
||||||
## Deployment Workflows
|
|
||||||
|
|
||||||
Stack-orchestrator supports two deployment workflows:
|
|
||||||
|
|
||||||
### 1. Deployment Directory Workflow (Recommended)
|
|
||||||
|
|
||||||
This workflow creates a persistent deployment directory that contains all configuration and data.
|
|
||||||
|
|
||||||
**When to use:**
|
|
||||||
- Production deployments
|
|
||||||
- When you need to preserve configuration
|
|
||||||
- When you want to manage multiple deployments
|
|
||||||
- When you need persistent volume data
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Initialize deployment spec
|
|
||||||
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml
|
|
||||||
|
|
||||||
# Optionally edit eth-spec.yml to customize configuration
|
|
||||||
|
|
||||||
# Create deployment directory
|
|
||||||
laconic-so --stack fixturenet-eth deploy create \
|
|
||||||
--spec-file eth-spec.yml \
|
|
||||||
--deployment-dir my-eth-deployment
|
|
||||||
|
|
||||||
# Start the deployment
|
|
||||||
laconic-so deployment --dir my-eth-deployment start
|
|
||||||
|
|
||||||
# Manage the deployment
|
|
||||||
laconic-so deployment --dir my-eth-deployment ps
|
|
||||||
laconic-so deployment --dir my-eth-deployment logs
|
|
||||||
laconic-so deployment --dir my-eth-deployment stop
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Quick Deploy Workflow
|
|
||||||
|
|
||||||
This workflow deploys directly without creating a persistent deployment directory.
|
|
||||||
|
|
||||||
**When to use:**
|
|
||||||
- Quick testing
|
|
||||||
- Temporary deployments
|
|
||||||
- Simple stacks that don't require customization
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start the stack directly
|
|
||||||
laconic-so --stack test deploy up
|
|
||||||
|
|
||||||
# Check service status
|
|
||||||
laconic-so --stack test deploy port test 80
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so --stack test deploy logs
|
|
||||||
|
|
||||||
# Stop (preserves volumes)
|
|
||||||
laconic-so --stack test deploy down
|
|
||||||
|
|
||||||
# Stop and remove volumes
|
|
||||||
laconic-so --stack test deploy down --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
## Real-World Example: Ethereum Fixturenet
|
|
||||||
|
|
||||||
Deploy a local Ethereum testnet with Geth and Lighthouse:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Setup (first time only)
|
|
||||||
laconic-so --stack fixturenet-eth setup-repositories
|
|
||||||
laconic-so --stack fixturenet-eth build-containers
|
|
||||||
|
|
||||||
# Initialize with default configuration
|
|
||||||
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml
|
|
||||||
|
|
||||||
# Create deployment
|
|
||||||
laconic-so --stack fixturenet-eth deploy create \
|
|
||||||
--spec-file eth-spec.yml \
|
|
||||||
--deployment-dir fixturenet-eth-deployment
|
|
||||||
|
|
||||||
# Start the network
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment start
|
|
||||||
|
|
||||||
# Check status
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment ps
|
|
||||||
|
|
||||||
# Access logs from specific service
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment logs fixturenet-eth-geth-1
|
|
||||||
|
|
||||||
# Stop the network (preserves blockchain data)
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment stop
|
|
||||||
|
|
||||||
# Start again - blockchain data is preserved
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment start
|
|
||||||
|
|
||||||
# Clean up everything including data
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment stop --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
### Passing Configuration Parameters
|
|
||||||
|
|
||||||
Configuration can be passed in three ways:
|
|
||||||
|
|
||||||
**1. At init time via `--config` flag:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack test deploy init --output spec.yml \
|
|
||||||
--config PARAM1=value1,PARAM2=value2
|
|
||||||
```
|
|
||||||
|
|
||||||
**2. Edit the spec file after init:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Initialize
|
|
||||||
laconic-so --stack test deploy init --output spec.yml
|
|
||||||
|
|
||||||
# Edit spec.yml
|
|
||||||
vim spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
Example spec.yml:
|
|
||||||
```yaml
|
|
||||||
stack: test
|
|
||||||
config:
|
|
||||||
PARAM1: value1
|
|
||||||
PARAM2: value2
|
|
||||||
```
|
|
||||||
|
|
||||||
**3. Docker Compose defaults:**
|
|
||||||
|
|
||||||
Environment variables defined in the stack's `docker-compose-*.yml` files are used as defaults. Configuration from the spec file overrides these defaults.
|
|
||||||
|
|
||||||
### Port Mapping
|
|
||||||
|
|
||||||
By default, services are accessible on randomly assigned host ports. To find the mapped port:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Find the host port for container port 80 on service 'webapp'
|
|
||||||
laconic-so deployment --dir my-deployment port webapp 80
|
|
||||||
|
|
||||||
# Output example: 0.0.0.0:32768
|
|
||||||
```
|
|
||||||
|
|
||||||
To configure fixed ports, edit the spec file before creating the deployment:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
network:
|
|
||||||
ports:
|
|
||||||
webapp:
|
|
||||||
- '8080:80' # Maps host port 8080 to container port 80
|
|
||||||
api:
|
|
||||||
- '3000:3000'
|
|
||||||
```
|
|
||||||
|
|
||||||
Then create the deployment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack my-stack deploy create \
|
|
||||||
--spec-file spec.yml \
|
|
||||||
--deployment-dir my-deployment
|
|
||||||
```
|
|
||||||
|
|
||||||
### Volume Persistence
|
|
||||||
|
|
||||||
Volumes are preserved between stop/start cycles by default:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Stop but keep data
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
|
|
||||||
# Start again - data is still there
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
To completely remove all data:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Stop and delete all volumes
|
|
||||||
laconic-so deployment --dir my-deployment stop --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
Volume data is stored in `<deployment-dir>/data/`.
|
|
||||||
|
|
||||||
## Common Operations
|
|
||||||
|
|
||||||
### Viewing Logs
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# All services, continuous follow
|
|
||||||
laconic-so deployment --dir my-deployment logs --follow
|
|
||||||
|
|
||||||
# Last 100 lines from all services
|
|
||||||
laconic-so deployment --dir my-deployment logs --tail 100
|
|
||||||
|
|
||||||
# Specific service only
|
|
||||||
laconic-so deployment --dir my-deployment logs webapp
|
|
||||||
|
|
||||||
# Combine options
|
|
||||||
laconic-so deployment --dir my-deployment logs --tail 50 --follow webapp
|
|
||||||
```
|
|
||||||
|
|
||||||
### Executing Commands in Containers
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Execute a command in a running service
|
|
||||||
laconic-so deployment --dir my-deployment exec webapp ls -la
|
|
||||||
|
|
||||||
# Interactive shell
|
|
||||||
laconic-so deployment --dir my-deployment exec webapp /bin/bash
|
|
||||||
|
|
||||||
# Run command with specific environment variables
|
|
||||||
laconic-so deployment --dir my-deployment exec webapp env VAR=value command
|
|
||||||
```
|
|
||||||
|
|
||||||
### Checking Service Status
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# List all running services
|
|
||||||
laconic-so deployment --dir my-deployment ps
|
|
||||||
|
|
||||||
# Check using Docker directly
|
|
||||||
docker ps
|
|
||||||
```
|
|
||||||
|
|
||||||
### Updating a Running Deployment
|
|
||||||
|
|
||||||
If you need to change configuration after deployment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Edit the spec file
|
|
||||||
vim my-deployment/spec.yml
|
|
||||||
|
|
||||||
# 2. Regenerate configuration
|
|
||||||
laconic-so deployment --dir my-deployment update
|
|
||||||
|
|
||||||
# 3. Restart services to apply changes
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
## Multi-Service Deployments
|
|
||||||
|
|
||||||
Many stacks deploy multiple services that work together:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Deploy a stack with multiple services
|
|
||||||
laconic-so --stack laconicd-with-console deploy init --output spec.yml
|
|
||||||
laconic-so --stack laconicd-with-console deploy create \
|
|
||||||
--spec-file spec.yml \
|
|
||||||
--deployment-dir laconicd-deployment
|
|
||||||
|
|
||||||
laconic-so deployment --dir laconicd-deployment start
|
|
||||||
|
|
||||||
# View all services
|
|
||||||
laconic-so deployment --dir laconicd-deployment ps
|
|
||||||
|
|
||||||
# View logs from specific services
|
|
||||||
laconic-so deployment --dir laconicd-deployment logs laconicd
|
|
||||||
laconic-so deployment --dir laconicd-deployment logs console
|
|
||||||
```
|
|
||||||
|
|
||||||
## ConfigMaps
|
|
||||||
|
|
||||||
ConfigMaps allow you to mount configuration files into containers:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Create the config directory in your deployment
|
|
||||||
mkdir -p my-deployment/data/my-config
|
|
||||||
echo "database_url=postgres://localhost" > my-deployment/data/my-config/app.conf
|
|
||||||
|
|
||||||
# 2. Reference in spec file
|
|
||||||
vim my-deployment/spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
Add to spec.yml:
|
|
||||||
```yaml
|
|
||||||
configmaps:
|
|
||||||
my-config: ./data/my-config
|
|
||||||
```
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 3. Restart to apply
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
The files will be mounted in the container at `/config/` (or as specified by the stack).
|
|
||||||
|
|
||||||
## Deployment Directory Structure
|
|
||||||
|
|
||||||
A typical deployment directory contains:
|
|
||||||
|
|
||||||
```
|
|
||||||
my-deployment/
|
|
||||||
├── compose/
|
|
||||||
│ └── docker-compose-*.yml # Generated compose files
|
|
||||||
├── config.env # Environment variables
|
|
||||||
├── deployment.yml # Deployment metadata
|
|
||||||
├── spec.yml # Deployment specification
|
|
||||||
└── data/ # Volume mounts and configs
|
|
||||||
├── service-data/ # Persistent service data
|
|
||||||
└── config-maps/ # ConfigMap files
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Common Issues
|
|
||||||
|
|
||||||
**Problem: "Cannot connect to Docker daemon"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Ensure Docker is running
|
|
||||||
docker ps
|
|
||||||
|
|
||||||
# Start Docker if needed (macOS)
|
|
||||||
open -a Docker
|
|
||||||
|
|
||||||
# Start Docker (Linux)
|
|
||||||
sudo systemctl start docker
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: "Port already in use"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Either stop the conflicting service or use different ports
|
|
||||||
# Edit spec.yml before creating deployment:
|
|
||||||
|
|
||||||
network:
|
|
||||||
ports:
|
|
||||||
webapp:
|
|
||||||
- '8081:80' # Use 8081 instead of 8080
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: "Image not found"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Build containers first
|
|
||||||
laconic-so --stack your-stack build-containers
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: Volumes not persisting**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check if you used --delete-volumes when stopping
|
|
||||||
# Volume data is in: <deployment-dir>/data/
|
|
||||||
|
|
||||||
# Don't use --delete-volumes if you want to keep data:
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
|
|
||||||
# Only use --delete-volumes when you want to reset completely:
|
|
||||||
laconic-so deployment --dir my-deployment stop --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: Services not starting**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check logs for errors
|
|
||||||
laconic-so deployment --dir my-deployment logs
|
|
||||||
|
|
||||||
# Check Docker container status
|
|
||||||
docker ps -a
|
|
||||||
|
|
||||||
# Try stopping and starting again
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
### Inspecting Deployment State
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check deployment directory structure
|
|
||||||
ls -la my-deployment/
|
|
||||||
|
|
||||||
# Check running containers
|
|
||||||
docker ps
|
|
||||||
|
|
||||||
# Check container details
|
|
||||||
docker inspect <container-name>
|
|
||||||
|
|
||||||
# Check networks
|
|
||||||
docker network ls
|
|
||||||
|
|
||||||
# Check volumes
|
|
||||||
docker volume ls
|
|
||||||
```
|
|
||||||
|
|
||||||
## CLI Commands Reference
|
|
||||||
|
|
||||||
### Stack Operations
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Clone required repositories
|
|
||||||
laconic-so --stack <name> setup-repositories
|
|
||||||
|
|
||||||
# Build container images
|
|
||||||
laconic-so --stack <name> build-containers
|
|
||||||
```
|
|
||||||
|
|
||||||
### Deployment Initialization
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Initialize deployment spec with defaults
|
|
||||||
laconic-so --stack <name> deploy init --output <spec-file>
|
|
||||||
|
|
||||||
# Initialize with configuration
|
|
||||||
laconic-so --stack <name> deploy init --output <spec-file> \
|
|
||||||
--config PARAM1=value1,PARAM2=value2
|
|
||||||
```
|
|
||||||
|
|
||||||
### Deployment Creation
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Create deployment directory from spec
|
|
||||||
laconic-so --stack <name> deploy create \
|
|
||||||
--spec-file <spec-file> \
|
|
||||||
--deployment-dir <dir>
|
|
||||||
```
|
|
||||||
|
|
||||||
### Deployment Management
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start all services
|
|
||||||
laconic-so deployment --dir <dir> start
|
|
||||||
|
|
||||||
# Stop services (preserves volumes)
|
|
||||||
laconic-so deployment --dir <dir> stop
|
|
||||||
|
|
||||||
# Stop and remove volumes
|
|
||||||
laconic-so deployment --dir <dir> stop --delete-volumes
|
|
||||||
|
|
||||||
# List running services
|
|
||||||
laconic-so deployment --dir <dir> ps
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so deployment --dir <dir> logs [--tail N] [--follow] [service]
|
|
||||||
|
|
||||||
# Show mapped port
|
|
||||||
laconic-so deployment --dir <dir> port <service> <private-port>
|
|
||||||
|
|
||||||
# Execute command in service
|
|
||||||
laconic-so deployment --dir <dir> exec <service> <command>
|
|
||||||
|
|
||||||
# Update configuration
|
|
||||||
laconic-so deployment --dir <dir> update
|
|
||||||
```
|
|
||||||
|
|
||||||
### Quick Deploy Commands
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start stack directly
|
|
||||||
laconic-so --stack <name> deploy up
|
|
||||||
|
|
||||||
# Stop stack
|
|
||||||
laconic-so --stack <name> deploy down [--delete-volumes]
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so --stack <name> deploy logs
|
|
||||||
|
|
||||||
# Show port mapping
|
|
||||||
laconic-so --stack <name> deploy port <service> <port>
|
|
||||||
```
|
|
||||||
|
|
||||||
## Related Documentation
|
|
||||||
|
|
||||||
- [CLI Reference](./cli.md) - Complete CLI command documentation
|
|
||||||
- [Adding a New Stack](./adding-a-new-stack.md) - Creating custom stacks
|
|
||||||
- [Specification](./spec.md) - Internal structure and design
|
|
||||||
- [Kubernetes Enhancements](./k8s-deployment-enhancements.md) - Advanced K8s deployment options
|
|
||||||
- [Web App Deployment](./webapp.md) - Deploying web applications
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
For more examples, see the test scripts:
|
|
||||||
- `scripts/quick-deploy-test.sh` - Quick deployment example
|
|
||||||
- `tests/deploy/run-deploy-test.sh` - Comprehensive test showing all features
|
|
||||||
|
|
||||||
## Summary
|
|
||||||
|
|
||||||
- Docker Compose is the default and recommended deployment mode
|
|
||||||
- Two workflows: deployment directory (recommended) or quick deploy
|
|
||||||
- The standard workflow is: setup → build → init → create → start
|
|
||||||
- Configuration is flexible with multiple override layers
|
|
||||||
- Volume persistence is automatic unless explicitly deleted
|
|
||||||
- All deployment state is contained in the deployment directory
|
|
||||||
- For Kubernetes deployments, see separate K8s documentation
|
|
||||||
|
|
||||||
You're now ready to deploy stacks using stack-orchestrator with Docker Compose!
|
|
||||||
@ -56,7 +56,7 @@ laconic-so --stack fixturenet-laconicd build-npms
|
|||||||
Navigate to the Gitea console and switch to the `cerc-io` user then find the `Packages` tab to confirm that these two npm packages have been published:
|
Navigate to the Gitea console and switch to the `cerc-io` user then find the `Packages` tab to confirm that these two npm packages have been published:
|
||||||
|
|
||||||
- `@cerc-io/laconic-registry-cli`
|
- `@cerc-io/laconic-registry-cli`
|
||||||
- `@cerc-io/registry-sdk`
|
- `@cerc-io/laconic-sdk`
|
||||||
|
|
||||||
### Build and deploy fixturenet containers
|
### Build and deploy fixturenet containers
|
||||||
|
|
||||||
@ -74,7 +74,7 @@ laconic-so --stack fixturenet-laconicd deploy logs
|
|||||||
### Test with the registry CLI
|
### Test with the registry CLI
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
laconic-so --stack fixturenet-laconicd deploy exec cli "laconic registry status"
|
laconic-so --stack fixturenet-laconicd deploy exec cli "laconic cns status"
|
||||||
```
|
```
|
||||||
|
|
||||||
Try additional CLI commands, documented [here](https://github.com/cerc-io/laconic-registry-cli#operations).
|
Try additional CLI commands, documented [here](https://github.com/cerc-io/laconic-registry-cli#operations).
|
||||||
|
|||||||
@ -1,113 +0,0 @@
|
|||||||
# Helm Chart Generation
|
|
||||||
|
|
||||||
Generate Kubernetes Helm charts from stack compose files using Kompose.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
Install Kompose:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Linux
|
|
||||||
curl -L https://github.com/kubernetes/kompose/releases/download/v1.34.0/kompose-linux-amd64 -o kompose
|
|
||||||
chmod +x kompose
|
|
||||||
sudo mv kompose /usr/local/bin/
|
|
||||||
|
|
||||||
# macOS
|
|
||||||
brew install kompose
|
|
||||||
|
|
||||||
# Verify
|
|
||||||
kompose version
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
### 1. Create spec file
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack <stack-name> deploy --deploy-to k8s init \
|
|
||||||
--kube-config ~/.kube/config \
|
|
||||||
--output spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Generate Helm chart
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack <stack-name> deploy create \
|
|
||||||
--spec-file spec.yml \
|
|
||||||
--deployment-dir my-deployment \
|
|
||||||
--helm-chart
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Deploy to Kubernetes
|
|
||||||
|
|
||||||
```bash
|
|
||||||
helm install my-release my-deployment/chart
|
|
||||||
kubectl get pods -n zenith
|
|
||||||
```
|
|
||||||
|
|
||||||
## Output Structure
|
|
||||||
|
|
||||||
```bash
|
|
||||||
my-deployment/
|
|
||||||
├── spec.yml # Reference
|
|
||||||
├── stack.yml # Reference
|
|
||||||
└── chart/ # Helm chart
|
|
||||||
├── Chart.yaml
|
|
||||||
├── README.md
|
|
||||||
└── templates/
|
|
||||||
└── *.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Generate chart for stage1-zenithd
|
|
||||||
laconic-so --stack stage1-zenithd deploy --deploy-to k8s init \
|
|
||||||
--kube-config ~/.kube/config \
|
|
||||||
--output stage1-spec.yml
|
|
||||||
|
|
||||||
laconic-so --stack stage1-zenithd deploy create \
|
|
||||||
--spec-file stage1-spec.yml \
|
|
||||||
--deployment-dir stage1-deployment \
|
|
||||||
--helm-chart
|
|
||||||
|
|
||||||
# Deploy
|
|
||||||
helm install stage1-zenithd stage1-deployment/chart
|
|
||||||
```
|
|
||||||
|
|
||||||
## Production Deployment (TODO)
|
|
||||||
|
|
||||||
### Local Development
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Access services using port-forward
|
|
||||||
kubectl port-forward service/zenithd 26657:26657
|
|
||||||
kubectl port-forward service/nginx-api-proxy 1317:80
|
|
||||||
kubectl port-forward service/cosmos-explorer 4173:4173
|
|
||||||
```
|
|
||||||
|
|
||||||
### Production Access Options
|
|
||||||
|
|
||||||
- Option 1: Ingress + cert-manager (Recommended)
|
|
||||||
- Install ingress-nginx + cert-manager
|
|
||||||
- Point DNS to cluster LoadBalancer IP
|
|
||||||
- Auto-provisions Let's Encrypt TLS certs
|
|
||||||
- Access: `https://api.zenith.example.com`
|
|
||||||
- Option 2: Cloud LoadBalancer
|
|
||||||
- Use cloud provider's LoadBalancer service type
|
|
||||||
- Point DNS to assigned external IP
|
|
||||||
- Manual TLS cert management
|
|
||||||
- Option 3: Bare Metal (MetalLB + Ingress)
|
|
||||||
- MetalLB provides LoadBalancer IPs from local network
|
|
||||||
- Same Ingress setup as cloud
|
|
||||||
- Option 4: NodePort + External Proxy
|
|
||||||
- Expose services on 30000-32767 range
|
|
||||||
- External nginx/Caddy proxies 80/443 → NodePort
|
|
||||||
- Manual cert management
|
|
||||||
|
|
||||||
### Changes Needed
|
|
||||||
|
|
||||||
- Add Ingress template to charts
|
|
||||||
- Add TLS configuration to values.yaml
|
|
||||||
- Document cert-manager setup
|
|
||||||
- Add production deployment guide
|
|
||||||
@ -1,26 +0,0 @@
|
|||||||
# K8S Deployment Enhancements
|
|
||||||
## Controlling pod placement
|
|
||||||
The placement of pods created as part of a stack deployment can be controlled to either avoid certain nodes, or require certain nodes.
|
|
||||||
### Pod/Node Affinity
|
|
||||||
Node affinity rules applied to pods target node labels. The effect is that a pod can only be placed on a node having the specified label value. Note that other pods that do not have any node affinity rules can also be placed on those same nodes. Thus node affinity for a pod controls where that pod can be placed, but does not control where other pods are placed.
|
|
||||||
|
|
||||||
Node affinity for stack pods is specified in the deployment's `spec.yml` file as follows:
|
|
||||||
```
|
|
||||||
node-affinities:
|
|
||||||
- label: nodetype
|
|
||||||
value: typeb
|
|
||||||
```
|
|
||||||
This example denotes that the stack's pods should only be placed on nodes that have the label `nodetype` with value `typeb`.
|
|
||||||
### Node Taint Toleration
|
|
||||||
K8s nodes can be given one or more "taints". These are special fields (distinct from labels) with a name (key) and optional value.
|
|
||||||
When placing pods, the k8s scheduler will only assign a pod to a tainted node if the pod posesses a corresponding "toleration".
|
|
||||||
This is metadata associated with the pod that specifies that the pod "tolerates" a given taint.
|
|
||||||
Therefore taint toleration provides a mechanism by which only certain pods can be placed on specific nodes, and provides a complementary mechanism to node affinity.
|
|
||||||
|
|
||||||
Taint toleration for stack pods is specified in the deployment's `spec.yml` file as follows:
|
|
||||||
```
|
|
||||||
node-tolerations:
|
|
||||||
- key: nodetype
|
|
||||||
value: typeb
|
|
||||||
```
|
|
||||||
This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
|
|
||||||
@ -1,8 +1,9 @@
|
|||||||
# Running a laconicd fixturenet with console
|
# Running a laconicd fixturenet with console
|
||||||
|
|
||||||
The following tutorial explains the steps to run a laconicd fixturenet with CLI and web console that displays records in the registry. It is designed as an introduction to Stack Orchestrator and to showcase one component of the Laconic Stack. Prior to Stack Orchestrator, the following repositories had to be cloned and setup manually:
|
The following tutorial explains the steps to run a laconicd fixturenet with CLI and web console that displays records in the registry. It is designed as an introduction to Stack Orchestrator and to showcase one component of the Laconic Stack. Prior to Stack Orchestrator, the following 4 repositories had to be cloned and setup manually:
|
||||||
|
|
||||||
- https://git.vdb.to/cerc-io/laconicd
|
- https://git.vdb.to/cerc-io/laconicd
|
||||||
|
- https://git.vdb.to/cerc-io/laconic-sdk
|
||||||
- https://git.vdb.to/cerc-io/laconic-registry-cli
|
- https://git.vdb.to/cerc-io/laconic-registry-cli
|
||||||
- https://git.vdb.to/cerc-io/laconic-console
|
- https://git.vdb.to/cerc-io/laconic-console
|
||||||
|
|
||||||
@ -50,7 +51,7 @@ To avoid hiccups on Mac M1/M2 and any local machine nuances that may affect the
|
|||||||
1. Get the repositories
|
1. Get the repositories
|
||||||
|
|
||||||
```
|
```
|
||||||
laconic-so --stack fixturenet-laconic-loaded setup-repositories --include git.vdb.to/cerc-io/laconicd
|
laconic-so --stack fixturenet-laconic-loaded setup-repositories --include git.vdb.to/cerc-io/laconicd,git.vdb.to/cerc-io/laconic-sdk,git.vdb.to/cerc-io/laconic-registry-cli,git.vdb.to/cerc-io/laconic-console
|
||||||
```
|
```
|
||||||
|
|
||||||
1. Build the containers:
|
1. Build the containers:
|
||||||
@ -75,8 +76,6 @@ To avoid hiccups on Mac M1/M2 and any local machine nuances that may affect the
|
|||||||
1. Create a deployment directory for the stack:
|
1. Create a deployment directory for the stack:
|
||||||
```
|
```
|
||||||
laconic-so --stack fixturenet-laconic-loaded deploy init --output laconic-loaded.spec --map-ports-to-host any-same --config LACONIC_HOSTED_ENDPOINT=$BACKEND_ENDPOINT
|
laconic-so --stack fixturenet-laconic-loaded deploy init --output laconic-loaded.spec --map-ports-to-host any-same --config LACONIC_HOSTED_ENDPOINT=$BACKEND_ENDPOINT
|
||||||
|
|
||||||
# Update port mapping in the laconic-loaded.spec file to resolve port conflicts on host if any
|
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
laconic-so --stack fixturenet-laconic-loaded deploy create --deployment-dir laconic-loaded-deployment --spec-file laconic-loaded.spec
|
laconic-so --stack fixturenet-laconic-loaded deploy create --deployment-dir laconic-loaded-deployment --spec-file laconic-loaded.spec
|
||||||
@ -96,51 +95,52 @@ To avoid hiccups on Mac M1/M2 and any local machine nuances that may affect the
|
|||||||
You'll see output from `laconicd` and the block height should be >1 to confirm it is running:
|
You'll see output from `laconicd` and the block height should be >1 to confirm it is running:
|
||||||
|
|
||||||
```
|
```
|
||||||
laconicd-1 | 6:12AM INF indexed block events height=16 module=txindex
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:29PM INF indexed block exents height=12 module=txindex server=node
|
||||||
laconicd-1 | 6:12AM INF Timed out dur=2993.893332 height=17 module=consensus round=0 step=RoundStepNewHeight
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF Timed out dur=4976.960115 height=13 module=consensus round=0 server=node step=1
|
||||||
laconicd-1 | 6:12AM INF received proposal module=consensus proposal="Proposal{17/0 (E15D03C180CE607AE8340A1325A0C134DFB4E1ADD992E173C701EBD362523267:1:DF138772FEF0, -1) 6A6F3B0A42B3 @ 2024-07-25T06:12:31.952967053Z}" proposer=86970D950BC9C16F3991A52D9C6DC55BA478A7C6
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received proposal module=consensus proposal={"Type":32,"block_id":{"hash":"D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4","parts":{"hash":"22411A20B7F14CDA33244420FBDDAF24450C0628C7A06034FF22DAC3699DDCC8","total":1}},"height":13,"pol_round":-1,"round":0,"signature":"DEuqnaQmvyYbUwckttJmgKdpRu6eVm9i+9rQ1pIrV2PidkMNdWRZBLdmNghkIrUzGbW8Xd7UVJxtLRmwRASgBg==","timestamp":"2023-04-18T21:30:01.49450663Z"} server=node
|
||||||
laconicd-1 | 6:12AM INF received complete proposal block hash=E15D03C180CE607AE8340A1325A0C134DFB4E1ADD992E173C701EBD362523267 height=17 module=consensus
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received complete proposal block hash=D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4 height=13 module=consensus server=node
|
||||||
laconicd-1 | 6:12AM INF finalizing commit of block hash=E15D03C180CE607AE8340A1325A0C134DFB4E1ADD992E173C701EBD362523267 height=17 module=consensus num_txs=0 root=AF4941107DC718ED1425E77A3DC7F1154FB780B7A7DE20288DC43442203527E3
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF finalizing commit of block hash={} height=13 module=consensus num_txs=0 root=1A8CA1AF139CCC80EC007C6321D8A63A46A793386EE2EDF9A5CA0AB2C90728B7 server=node
|
||||||
laconicd-1 | 6:12AM INF finalized block block_app_hash=26A665360BB1EE64E54F97F2A5AB7F621B33A86D9896574000C05DE63F43F788 height=17 module=state num_txs_res=0 num_val_updates=0
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF minted coins from module account amount=2059730459416582643aphoton from=mint module=x/bank
|
||||||
laconicd-1 | 6:12AM INF executed block app_hash=26A665360BB1EE64E54F97F2A5AB7F621B33A86D9896574000C05DE63F43F788 height=17 module=state
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF executed block height=13 module=state num_invalid_txs=0 num_valid_txs=0 server=node
|
||||||
laconicd-1 | 6:12AM INF committed state block_app_hash=AF4941107DC718ED1425E77A3DC7F1154FB780B7A7DE20288DC43442203527E3 height=17 module=state
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF commit synced commit=436F6D6D697449447B5B363520313037203630203232372039352038352032303820313334203231392032303520313433203130372031343920313431203139203139322038362031323720362031383520323533203137362031333820313735203135392031383620323334203135382031323120313431203230342037335D3A447D
|
||||||
laconicd-1 | 6:12AM INF indexed block events height=17 module=txindex
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF committed state app_hash=416B3CE35F55D086DBCD8F6B958D13C0567F06B9FDB08AAF9FBAEA9E798DCC49 height=13 module=state num_txs=0 server=node
|
||||||
|
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF indexed block exents height=13 module=txindex server=node
|
||||||
```
|
```
|
||||||
|
|
||||||
4. Confirm operation of the registry CLI:
|
4. Confirm operation of the registry CLI:
|
||||||
|
|
||||||
```
|
```
|
||||||
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
|
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic cns status"
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
{
|
{
|
||||||
"version": "0.3.0",
|
"version": "0.3.0",
|
||||||
"node": {
|
"node": {
|
||||||
"id": "6e072894aa1f5d9535a1127a0d7a7f8e65100a2c",
|
"id": "4216af2ac9f68bda33a38803fc1b5c9559312c1d",
|
||||||
"network": "laconic_9000-1",
|
"network": "laconic_9000-1",
|
||||||
"moniker": "localtestnet"
|
"moniker": "localtestnet"
|
||||||
},
|
},
|
||||||
"sync": {
|
"sync": {
|
||||||
"latestBlockHash": "260102C283D0411CFBA0270F7DC182650FFCA737A2F6F652A985F6065696F590",
|
"latest_block_hash": "1BDF4CB9AE2390DA65BCF997C83133C18014FCDDCAE03708488F0B56FCEEA429",
|
||||||
"latestBlockHeight": "49",
|
"latest_block_height": "5",
|
||||||
"latestBlockTime": "2024-07-25 06:14:05.626744215 +0000 UTC",
|
"latest_block_time": "2023-08-09 16:00:30.386903172 +0000 UTC",
|
||||||
"catchingUp": false
|
"catching_up": false
|
||||||
},
|
},
|
||||||
"validator": {
|
"validator": {
|
||||||
"address": "86970D950BC9C16F3991A52D9C6DC55BA478A7C6",
|
"address": "651FBC700B747C76E90ACFC18CC9508C3D0905B9",
|
||||||
"votingPower": "1000000000000000"
|
"voting_power": "1000000000000000"
|
||||||
},
|
},
|
||||||
"validators": [
|
"validators": [
|
||||||
{
|
{
|
||||||
"address": "86970D950BC9C16F3991A52D9C6DC55BA478A7C6",
|
"address": "651FBC700B747C76E90ACFC18CC9508C3D0905B9",
|
||||||
"votingPower": "1000000000000000",
|
"voting_power": "1000000000000000",
|
||||||
"proposerPriority": "0"
|
"proposer_priority": "0"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"numPeers": "0",
|
"num_peers": "0",
|
||||||
"peers": [],
|
"peers": [],
|
||||||
"diskUsage": "688K"
|
"disk_usage": "292.0K"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -186,13 +186,13 @@ wns
|
|||||||
1. The following command will create a bond and publish a record:
|
1. The following command will create a bond and publish a record:
|
||||||
|
|
||||||
```
|
```
|
||||||
laconic-so deployment --dir laconic-loaded-deployment exec cli ./scripts/create-demo-records.sh
|
laconic-so --stack fixturenet-laconic-loaded deploy exec cli ./scripts/create-demo-records.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
You'll get an output like:
|
You'll get an output like:
|
||||||
|
|
||||||
```
|
```
|
||||||
Balance is: 9.9999e+25
|
Balance is: 99998999999999998999600000
|
||||||
Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
|
Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
|
||||||
Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
|
Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
|
||||||
```
|
```
|
||||||
@ -223,5 +223,5 @@ record:
|
|||||||
- e.g,:
|
- e.g,:
|
||||||
|
|
||||||
```
|
```
|
||||||
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry record list"
|
laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns record list"
|
||||||
```
|
```
|
||||||
|
|||||||
@ -26,3 +26,4 @@ $ ./scripts/tag_new_release.sh 1 0 17
|
|||||||
$ ./scripts/build_shiv_package.sh
|
$ ./scripts/build_shiv_package.sh
|
||||||
$ ./scripts/publish_shiv_package_github.sh 1 0 17
|
$ ./scripts/publish_shiv_package_github.sh 1 0 17
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@ -1,128 +0,0 @@
|
|||||||
# Deploying to the Laconic Network
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
The Laconic network uses a **registry-based deployment model** where everything is published as blockchain records.
|
|
||||||
|
|
||||||
## Key Documentation in stack-orchestrator
|
|
||||||
|
|
||||||
- `docs/laconicd-with-console.md` - Setting up a laconicd network
|
|
||||||
- `docs/webapp.md` - Webapp building/running
|
|
||||||
- `stack_orchestrator/deploy/webapp/` - Implementation (14 modules)
|
|
||||||
|
|
||||||
## Core Concepts
|
|
||||||
|
|
||||||
### LRN (Laconic Resource Name)
|
|
||||||
Format: `lrn://laconic/[namespace]/[name]`
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
- `lrn://laconic/deployers/my-deployer-name`
|
|
||||||
- `lrn://laconic/dns/example.com`
|
|
||||||
- `lrn://laconic/deployments/example.com`
|
|
||||||
|
|
||||||
### Registry Record Types
|
|
||||||
|
|
||||||
| Record Type | Purpose |
|
|
||||||
|-------------|---------|
|
|
||||||
| `ApplicationRecord` | Published app metadata |
|
|
||||||
| `WebappDeployer` | Deployment service offering |
|
|
||||||
| `ApplicationDeploymentRequest` | User's request to deploy |
|
|
||||||
| `ApplicationDeploymentAuction` | Optional bidding for deployers |
|
|
||||||
| `ApplicationDeploymentRecord` | Completed deployment result |
|
|
||||||
|
|
||||||
## Deployment Workflows
|
|
||||||
|
|
||||||
### 1. Direct Deployment
|
|
||||||
|
|
||||||
```
|
|
||||||
User publishes ApplicationDeploymentRequest
|
|
||||||
→ targets specific WebappDeployer (by LRN)
|
|
||||||
→ includes payment TX hash
|
|
||||||
→ Deployer picks up request, builds, deploys, publishes result
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Auction-Based Deployment
|
|
||||||
|
|
||||||
```
|
|
||||||
User publishes ApplicationDeploymentAuction
|
|
||||||
→ Deployers bid (commit/reveal phases)
|
|
||||||
→ Winner selected
|
|
||||||
→ User publishes request targeting winner
|
|
||||||
```
|
|
||||||
|
|
||||||
## Key CLI Commands
|
|
||||||
|
|
||||||
### Publish a Deployer Service
|
|
||||||
```bash
|
|
||||||
laconic-so publish-webapp-deployer --laconic-config config.yml \
|
|
||||||
--api-url https://deployer-api.example.com \
|
|
||||||
--name my-deployer \
|
|
||||||
--payment-address laconic1... \
|
|
||||||
--minimum-payment 1000alnt
|
|
||||||
```
|
|
||||||
|
|
||||||
### Request Deployment (User Side)
|
|
||||||
```bash
|
|
||||||
laconic-so request-webapp-deployment --laconic-config config.yml \
|
|
||||||
--app lrn://laconic/apps/my-app \
|
|
||||||
--deployer lrn://laconic/deployers/xyz \
|
|
||||||
--make-payment auto
|
|
||||||
```
|
|
||||||
|
|
||||||
### Run Deployer Service (Deployer Side)
|
|
||||||
```bash
|
|
||||||
laconic-so deploy-webapp-from-registry --laconic-config config.yml --discover
|
|
||||||
```
|
|
||||||
|
|
||||||
## Laconic Config File
|
|
||||||
|
|
||||||
All tools require a laconic config file (`laconic.toml`):
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[cosmos]
|
|
||||||
address_prefix = "laconic"
|
|
||||||
chain_id = "laconic_9000-1"
|
|
||||||
endpoint = "http://localhost:26657"
|
|
||||||
key = "<account-name>"
|
|
||||||
password = "<account-password>"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Setting Up a Local Laconicd Network
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Clone and build
|
|
||||||
laconic-so --stack fixturenet-laconic-loaded setup-repositories
|
|
||||||
laconic-so --stack fixturenet-laconic-loaded build-containers
|
|
||||||
laconic-so --stack fixturenet-laconic-loaded deploy create
|
|
||||||
laconic-so deployment --dir laconic-loaded-deployment start
|
|
||||||
|
|
||||||
# Check status
|
|
||||||
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Key Implementation Files
|
|
||||||
|
|
||||||
| File | Purpose |
|
|
||||||
|------|---------|
|
|
||||||
| `publish_webapp_deployer.py` | Register deployment service on network |
|
|
||||||
| `publish_deployment_auction.py` | Create auction for deployers to bid on |
|
|
||||||
| `handle_deployment_auction.py` | Monitor and bid on auctions (deployer-side) |
|
|
||||||
| `request_webapp_deployment.py` | Create deployment request (user-side) |
|
|
||||||
| `deploy_webapp_from_registry.py` | Process requests and deploy (deployer-side) |
|
|
||||||
| `request_webapp_undeployment.py` | Request app removal |
|
|
||||||
| `undeploy_webapp_from_registry.py` | Process removal requests |
|
|
||||||
| `util.py` | LaconicRegistryClient - all registry interactions |
|
|
||||||
|
|
||||||
## Payment System
|
|
||||||
|
|
||||||
- **Token Denom**: `alnt` (Laconic network tokens)
|
|
||||||
- **Payment Options**:
|
|
||||||
- `--make-payment`: Create new payment with amount (or "auto" for deployer's minimum)
|
|
||||||
- `--use-payment`: Reference existing payment TX
|
|
||||||
|
|
||||||
## What's NOT Well-Documented
|
|
||||||
|
|
||||||
1. No end-to-end tutorial for full deployment workflow
|
|
||||||
2. Stack publishing (vs webapp) process unclear
|
|
||||||
3. LRN naming conventions not formally specified
|
|
||||||
4. Payment economics and token mechanics
|
|
||||||
110
pyproject.toml
110
pyproject.toml
@ -1,110 +0,0 @@
|
|||||||
[build-system]
|
|
||||||
requires = ["setuptools>=61.0", "wheel"]
|
|
||||||
build-backend = "setuptools.build_meta"
|
|
||||||
|
|
||||||
[project]
|
|
||||||
name = "laconic-stack-orchestrator"
|
|
||||||
version = "1.1.0"
|
|
||||||
description = "Orchestrates deployment of the Laconic stack"
|
|
||||||
readme = "README.md"
|
|
||||||
license = {text = "GNU Affero General Public License"}
|
|
||||||
authors = [
|
|
||||||
{name = "Cerc", email = "info@cerc.io"}
|
|
||||||
]
|
|
||||||
requires-python = ">=3.8"
|
|
||||||
classifiers = [
|
|
||||||
"Programming Language :: Python :: 3.8",
|
|
||||||
"Operating System :: OS Independent",
|
|
||||||
]
|
|
||||||
dependencies = [
|
|
||||||
"python-decouple>=3.8",
|
|
||||||
"python-dotenv==1.0.0",
|
|
||||||
"GitPython>=3.1.32",
|
|
||||||
"tqdm>=4.65.0",
|
|
||||||
"python-on-whales>=0.64.0",
|
|
||||||
"click>=8.1.6",
|
|
||||||
"PyYAML>=6.0.1",
|
|
||||||
"ruamel.yaml>=0.17.32",
|
|
||||||
"pydantic==1.10.9",
|
|
||||||
"tomli==2.0.1",
|
|
||||||
"validators==0.22.0",
|
|
||||||
"kubernetes>=28.1.0",
|
|
||||||
"humanfriendly>=10.0",
|
|
||||||
"python-gnupg>=0.5.2",
|
|
||||||
"requests>=2.3.2",
|
|
||||||
]
|
|
||||||
|
|
||||||
[project.optional-dependencies]
|
|
||||||
dev = [
|
|
||||||
"pytest>=7.0.0",
|
|
||||||
"pytest-cov>=4.0.0",
|
|
||||||
"black>=22.0.0",
|
|
||||||
"flake8>=5.0.0",
|
|
||||||
"pyright>=1.1.0",
|
|
||||||
"yamllint>=1.28.0",
|
|
||||||
"pre-commit>=3.0.0",
|
|
||||||
]
|
|
||||||
|
|
||||||
[project.scripts]
|
|
||||||
laconic-so = "stack_orchestrator.main:cli"
|
|
||||||
|
|
||||||
[project.urls]
|
|
||||||
Homepage = "https://git.vdb.to/cerc-io/stack-orchestrator"
|
|
||||||
|
|
||||||
[tool.setuptools.packages.find]
|
|
||||||
where = ["."]
|
|
||||||
|
|
||||||
[tool.setuptools.package-data]
|
|
||||||
"*" = ["data/**"]
|
|
||||||
|
|
||||||
[tool.black]
|
|
||||||
line-length = 88
|
|
||||||
target-version = ['py38']
|
|
||||||
|
|
||||||
[tool.flake8]
|
|
||||||
max-line-length = 88
|
|
||||||
extend-ignore = ["E203", "W503", "E402"]
|
|
||||||
|
|
||||||
[tool.pyright]
|
|
||||||
pythonVersion = "3.9"
|
|
||||||
typeCheckingMode = "basic"
|
|
||||||
reportMissingImports = "none"
|
|
||||||
reportMissingModuleSource = "none"
|
|
||||||
reportUnusedImport = "error"
|
|
||||||
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
|
|
||||||
exclude = ["**/build/**", "**/__pycache__/**"]
|
|
||||||
|
|
||||||
[tool.mypy]
|
|
||||||
python_version = "3.8"
|
|
||||||
warn_return_any = true
|
|
||||||
warn_unused_configs = true
|
|
||||||
disallow_untyped_defs = true
|
|
||||||
|
|
||||||
[tool.pytest.ini_options]
|
|
||||||
testpaths = ["tests"]
|
|
||||||
python_files = ["test_*.py"]
|
|
||||||
python_classes = ["Test*"]
|
|
||||||
python_functions = ["test_*"]
|
|
||||||
markers = [
|
|
||||||
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
|
|
||||||
"e2e: marks tests as end-to-end (requires real infrastructure)",
|
|
||||||
]
|
|
||||||
addopts = [
|
|
||||||
"--cov",
|
|
||||||
"--cov-report=term-missing",
|
|
||||||
"--cov-report=html",
|
|
||||||
"--strict-markers",
|
|
||||||
]
|
|
||||||
asyncio_default_fixture_loop_scope = "function"
|
|
||||||
|
|
||||||
[tool.coverage.run]
|
|
||||||
source = ["stack_orchestrator"]
|
|
||||||
disable_warnings = ["couldnt-parse"]
|
|
||||||
|
|
||||||
[tool.coverage.report]
|
|
||||||
exclude_lines = [
|
|
||||||
"pragma: no cover",
|
|
||||||
"def __repr__",
|
|
||||||
"raise AssertionError",
|
|
||||||
"raise NotImplementedError",
|
|
||||||
]
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
{
|
|
||||||
"pythonVersion": "3.9",
|
|
||||||
"typeCheckingMode": "basic",
|
|
||||||
"reportMissingImports": "none",
|
|
||||||
"reportMissingModuleSource": "none",
|
|
||||||
"reportUnusedImport": "error",
|
|
||||||
"include": ["stack_orchestrator/**/*.py", "tests/**/*.py"],
|
|
||||||
"exclude": ["**/build/**", "**/__pycache__/**"]
|
|
||||||
}
|
|
||||||
@ -11,5 +11,3 @@ tomli==2.0.1
|
|||||||
validators==0.22.0
|
validators==0.22.0
|
||||||
kubernetes>=28.1.0
|
kubernetes>=28.1.0
|
||||||
humanfriendly>=10.0
|
humanfriendly>=10.0
|
||||||
python-gnupg>=0.5.2
|
|
||||||
requests>=2.3.2
|
|
||||||
|
|||||||
30
setup.py
30
setup.py
@ -1,34 +1,30 @@
|
|||||||
# See
|
# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
||||||
# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
|
||||||
from setuptools import setup, find_packages
|
from setuptools import setup, find_packages
|
||||||
|
|
||||||
with open("README.md", "r", encoding="utf-8") as fh:
|
with open("README.md", "r", encoding="utf-8") as fh:
|
||||||
long_description = fh.read()
|
long_description = fh.read()
|
||||||
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
||||||
requirements = fh.read()
|
requirements = fh.read()
|
||||||
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
|
||||||
version = fh.readlines()[-1].strip(" \n")
|
|
||||||
setup(
|
setup(
|
||||||
name="laconic-stack-orchestrator",
|
name='laconic-stack-orchestrator',
|
||||||
version=version,
|
version='1.0.12',
|
||||||
author="Cerc",
|
author='Cerc',
|
||||||
author_email="info@cerc.io",
|
author_email='info@cerc.io',
|
||||||
license="GNU Affero General Public License",
|
license='GNU Affero General Public License',
|
||||||
description="Orchestrates deployment of the Laconic stack",
|
description='Orchestrates deployment of the Laconic stack',
|
||||||
long_description=long_description,
|
long_description=long_description,
|
||||||
long_description_content_type="text/markdown",
|
long_description_content_type="text/markdown",
|
||||||
url="https://git.vdb.to/cerc-io/stack-orchestrator",
|
url='https://git.vdb.to/cerc-io/stack-orchestrator',
|
||||||
py_modules=["stack_orchestrator"],
|
py_modules=['stack_orchestrator'],
|
||||||
packages=find_packages(),
|
packages=find_packages(),
|
||||||
install_requires=[requirements],
|
install_requires=[requirements],
|
||||||
python_requires=">=3.7",
|
python_requires='>=3.7',
|
||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
package_data={"": ["data/**"]},
|
package_data={'': ['data/**']},
|
||||||
classifiers=[
|
classifiers=[
|
||||||
"Programming Language :: Python :: 3.8",
|
"Programming Language :: Python :: 3.8",
|
||||||
"Operating System :: OS Independent",
|
"Operating System :: OS Independent",
|
||||||
],
|
],
|
||||||
entry_points={
|
entry_points={
|
||||||
"console_scripts": ["laconic-so=stack_orchestrator.main:cli"],
|
'console_scripts': ['laconic-so=stack_orchestrator.main:cli'],
|
||||||
},
|
}
|
||||||
)
|
)
|
||||||
|
|||||||
@ -23,10 +23,11 @@ def get_stack(config, stack):
|
|||||||
if stack == "package-registry":
|
if stack == "package-registry":
|
||||||
return package_registry_stack(config, stack)
|
return package_registry_stack(config, stack)
|
||||||
else:
|
else:
|
||||||
return default_stack(config, stack)
|
return base_stack(config, stack)
|
||||||
|
|
||||||
|
|
||||||
class base_stack(ABC):
|
class base_stack(ABC):
|
||||||
|
|
||||||
def __init__(self, config, stack):
|
def __init__(self, config, stack):
|
||||||
self.config = config
|
self.config = config
|
||||||
self.stack = stack
|
self.stack = stack
|
||||||
@ -40,27 +41,15 @@ class base_stack(ABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class default_stack(base_stack):
|
|
||||||
"""Default stack implementation for stacks without specific handling."""
|
|
||||||
|
|
||||||
def ensure_available(self):
|
|
||||||
return True
|
|
||||||
|
|
||||||
def get_url(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
class package_registry_stack(base_stack):
|
class package_registry_stack(base_stack):
|
||||||
|
|
||||||
def ensure_available(self):
|
def ensure_available(self):
|
||||||
self.url = "<no registry url set>"
|
self.url = "<no registry url set>"
|
||||||
# Check if we were given an external registry URL
|
# Check if we were given an external registry URL
|
||||||
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
||||||
if url_from_environment:
|
if url_from_environment:
|
||||||
if self.config.verbose:
|
if self.config.verbose:
|
||||||
print(
|
print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
|
||||||
f"Using package registry url from CERC_NPM_REGISTRY_URL: "
|
|
||||||
f"{url_from_environment}"
|
|
||||||
)
|
|
||||||
self.url = url_from_environment
|
self.url = url_from_environment
|
||||||
else:
|
else:
|
||||||
# Otherwise we expect to use the local package-registry stack
|
# Otherwise we expect to use the local package-registry stack
|
||||||
@ -73,16 +62,10 @@ class package_registry_stack(base_stack):
|
|||||||
# TODO: get url from deploy-stack
|
# TODO: get url from deploy-stack
|
||||||
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
||||||
else:
|
else:
|
||||||
# If not, print a message about how to start it and return fail to the
|
# If not, print a message about how to start it and return fail to the caller
|
||||||
# caller
|
print("ERROR: The package-registry stack is not running, and no external registry "
|
||||||
print(
|
"specified with CERC_NPM_REGISTRY_URL")
|
||||||
"ERROR: The package-registry stack is not running, "
|
print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
|
||||||
"and no external registry specified with CERC_NPM_REGISTRY_URL"
|
|
||||||
)
|
|
||||||
print(
|
|
||||||
"ERROR: Start the local package registry with: "
|
|
||||||
"laconic-so --stack package-registry deploy-system up"
|
|
||||||
)
|
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@ -93,9 +76,7 @@ class package_registry_stack(base_stack):
|
|||||||
def get_npm_registry_url():
|
def get_npm_registry_url():
|
||||||
# If an auth token is not defined, we assume the default should be the cerc registry
|
# If an auth token is not defined, we assume the default should be the cerc registry
|
||||||
# If an auth token is defined, we assume the local gitea should be used.
|
# If an auth token is defined, we assume the local gitea should be used.
|
||||||
default_npm_registry_url = (
|
default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config(
|
||||||
"http://gitea.local:3000/api/packages/cerc-io/npm/"
|
"CERC_NPM_AUTH_TOKEN", default=None
|
||||||
if config("CERC_NPM_AUTH_TOKEN", default=None)
|
) else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
||||||
else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
|
||||||
)
|
|
||||||
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
||||||
|
|||||||
@ -18,8 +18,7 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers;
|
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
||||||
# allow re-build of either all or specific containers
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -35,17 +34,14 @@ from stack_orchestrator.build.publish import publish_image
|
|||||||
from stack_orchestrator.build.build_util import get_containers_in_scope
|
from stack_orchestrator.build.build_util import get_containers_in_scope
|
||||||
|
|
||||||
# TODO: find a place for this
|
# TODO: find a place for this
|
||||||
# epilog="Config provided either in .env or settings.ini or env vars:
|
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
||||||
# CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
|
||||||
|
|
||||||
|
|
||||||
def make_container_build_env(
|
def make_container_build_env(dev_root_path: str,
|
||||||
dev_root_path: str,
|
|
||||||
container_build_dir: str,
|
container_build_dir: str,
|
||||||
debug: bool,
|
debug: bool,
|
||||||
force_rebuild: bool,
|
force_rebuild: bool,
|
||||||
extra_build_args: str,
|
extra_build_args: str):
|
||||||
):
|
|
||||||
container_build_env = {
|
container_build_env = {
|
||||||
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
||||||
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
||||||
@ -54,15 +50,11 @@ def make_container_build_env(
|
|||||||
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
||||||
"CERC_HOST_UID": f"{os.getuid()}",
|
"CERC_HOST_UID": f"{os.getuid()}",
|
||||||
"CERC_HOST_GID": f"{os.getgid()}",
|
"CERC_HOST_GID": f"{os.getgid()}",
|
||||||
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"),
|
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
|
||||||
}
|
}
|
||||||
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
container_build_env.update(
|
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
||||||
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
|
||||||
if extra_build_args
|
|
||||||
else {}
|
|
||||||
)
|
|
||||||
docker_host_env = os.getenv("DOCKER_HOST")
|
docker_host_env = os.getenv("DOCKER_HOST")
|
||||||
if docker_host_env:
|
if docker_host_env:
|
||||||
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
||||||
@ -75,18 +67,12 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
print(f"Building: {build_context.container}")
|
print(f"Building: {build_context.container}")
|
||||||
|
|
||||||
default_container_tag = f"{build_context.container}:local"
|
default_container_tag = f"{build_context.container}:local"
|
||||||
build_context.container_build_env.update(
|
build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
|
||||||
{"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}
|
|
||||||
)
|
|
||||||
|
|
||||||
# Check if this is in an external stack
|
# Check if this is in an external stack
|
||||||
if stack_is_external(build_context.stack):
|
if stack_is_external(build_context.stack):
|
||||||
container_parent_dir = Path(build_context.stack).parent.parent.joinpath(
|
container_parent_dir = Path(build_context.stack).joinpath("container-build")
|
||||||
"container-build"
|
temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
|
||||||
)
|
|
||||||
temp_build_dir = container_parent_dir.joinpath(
|
|
||||||
build_context.container.replace("/", "-")
|
|
||||||
)
|
|
||||||
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
||||||
# Now check if the container exists in the external stack.
|
# Now check if the container exists in the external stack.
|
||||||
if not temp_build_script_filename.exists():
|
if not temp_build_script_filename.exists():
|
||||||
@ -104,34 +90,21 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
build_command = build_script_filename.as_posix()
|
build_command = build_script_filename.as_posix()
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(
|
print(f"No script file found: {build_script_filename}, using default build script")
|
||||||
f"No script file found: {build_script_filename}, "
|
repo_dir = build_context.container.split('/')[1]
|
||||||
"using default build script"
|
# TODO: make this less of a hack -- should be specified in some metadata somewhere
|
||||||
)
|
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
|
||||||
repo_dir = build_context.container.split("/")[1]
|
|
||||||
# TODO: make this less of a hack -- should be specified in
|
|
||||||
# some metadata somewhere. Check if we have a repo for this
|
|
||||||
# container. If not, set the context dir to container-build subdir
|
|
||||||
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
||||||
repo_dir_or_build_dir = (
|
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
|
||||||
repo_full_path if os.path.exists(repo_full_path) else build_dir
|
build_command = os.path.join(build_context.container_build_dir,
|
||||||
)
|
"default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
|
||||||
build_command = (
|
|
||||||
os.path.join(build_context.container_build_dir, "default-build.sh")
|
|
||||||
+ f" {default_container_tag} {repo_dir_or_build_dir}"
|
|
||||||
)
|
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
# No PATH at all causes failures with podman.
|
# No PATH at all causes failures with podman.
|
||||||
if "PATH" not in build_context.container_build_env:
|
if "PATH" not in build_context.container_build_env:
|
||||||
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(
|
print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
|
||||||
f"Executing: {build_command} with environment: "
|
build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
|
||||||
f"{build_context.container_build_env}"
|
|
||||||
)
|
|
||||||
build_result = subprocess.run(
|
|
||||||
build_command, shell=True, env=build_context.container_build_env
|
|
||||||
)
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Return code is: {build_result.returncode}")
|
print(f"Return code is: {build_result.returncode}")
|
||||||
if build_result.returncode != 0:
|
if build_result.returncode != 0:
|
||||||
@ -144,61 +117,33 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only build these containers")
|
@click.option('--include', help="only build these containers")
|
||||||
@click.option("--exclude", help="don't build these containers")
|
@click.option('--exclude', help="don\'t build these containers")
|
||||||
@click.option(
|
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
||||||
"--force-rebuild",
|
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Override dependency checking -- always rebuild",
|
|
||||||
)
|
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option(
|
@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
|
||||||
"--publish-images",
|
@click.option("--image-registry", help="Specify the image registry for --publish-images")
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Publish the built images in the specified image registry",
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--image-registry", help="Specify the image registry for --publish-images"
|
|
||||||
)
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(
|
def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
|
||||||
ctx,
|
'''build the set of containers required for a complete stack'''
|
||||||
include,
|
|
||||||
exclude,
|
|
||||||
force_rebuild,
|
|
||||||
extra_build_args,
|
|
||||||
publish_images,
|
|
||||||
image_registry,
|
|
||||||
):
|
|
||||||
"""build the set of containers required for a complete stack"""
|
|
||||||
|
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/
|
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
# python-get-path-of-root-project-structure
|
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
container_build_dir = (
|
|
||||||
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
|
||||||
)
|
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(
|
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
|
||||||
f"{dev_root_path}"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
|
||||||
)
|
|
||||||
|
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Dev Root is: {dev_root_path}")
|
print(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Dev root directory doesn't exist, creating")
|
print('Dev root directory doesn\'t exist, creating')
|
||||||
|
|
||||||
if publish_images:
|
if publish_images:
|
||||||
if not image_registry:
|
if not image_registry:
|
||||||
@ -206,22 +151,21 @@ def command(
|
|||||||
|
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
|
|
||||||
container_build_env = make_container_build_env(
|
container_build_env = make_container_build_env(dev_root_path,
|
||||||
dev_root_path,
|
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
opts.o.debug,
|
opts.o.debug,
|
||||||
force_rebuild,
|
force_rebuild,
|
||||||
extra_build_args,
|
extra_build_args)
|
||||||
)
|
|
||||||
|
|
||||||
for container in containers_in_scope:
|
for container in containers_in_scope:
|
||||||
if include_exclude_check(container, include, exclude):
|
if include_exclude_check(container, include, exclude):
|
||||||
|
|
||||||
build_context = BuildContext(
|
build_context = BuildContext(
|
||||||
stack,
|
stack,
|
||||||
container,
|
container,
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
container_build_env,
|
container_build_env,
|
||||||
dev_root_path,
|
dev_root_path
|
||||||
)
|
)
|
||||||
result = process_container(build_context)
|
result = process_container(build_context)
|
||||||
if result:
|
if result:
|
||||||
@ -230,16 +174,10 @@ def command(
|
|||||||
else:
|
else:
|
||||||
print(f"Error running build for {build_context.container}")
|
print(f"Error running build for {build_context.container}")
|
||||||
if not opts.o.continue_on_error:
|
if not opts.o.continue_on_error:
|
||||||
error_exit(
|
error_exit("container build failed and --continue-on-error not set, exiting")
|
||||||
"container build failed and --continue-on-error "
|
|
||||||
"not set, exiting"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print(
|
print("****** Container Build Error, continuing because --continue-on-error is set")
|
||||||
"****** Container Build Error, continuing because "
|
|
||||||
"--continue-on-error is set"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
|
|||||||
@ -32,18 +32,14 @@ builder_js_image_name = "cerc/builder-js:local"
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only build these packages")
|
@click.option('--include', help="only build these packages")
|
||||||
@click.option("--exclude", help="don't build these packages")
|
@click.option('--exclude', help="don\'t build these packages")
|
||||||
@click.option(
|
@click.option("--force-rebuild", is_flag=True, default=False,
|
||||||
"--force-rebuild",
|
help="Override existing target package version check -- force rebuild")
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Override existing target package version check -- force rebuild",
|
|
||||||
)
|
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
"""build the set of npm packages required for a complete stack"""
|
'''build the set of npm packages required for a complete stack'''
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
@ -69,54 +65,45 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(
|
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
|
||||||
f"{dev_root_path}"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
|
||||||
)
|
|
||||||
|
|
||||||
build_root_path = os.path.join(dev_root_path, "build-trees")
|
build_root_path = os.path.join(dev_root_path, "build-trees")
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Dev Root is: {dev_root_path}")
|
print(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Dev root directory doesn't exist, creating")
|
print('Dev root directory doesn\'t exist, creating')
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Build root directory doesn't exist, creating")
|
print('Build root directory doesn\'t exist, creating')
|
||||||
os.makedirs(build_root_path)
|
os.makedirs(build_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
|
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
|
||||||
with importlib.resources.open_text(
|
|
||||||
data, "npm-package-list.txt"
|
|
||||||
) as package_list_file:
|
|
||||||
all_packages = package_list_file.read().splitlines()
|
all_packages = package_list_file.read().splitlines()
|
||||||
|
|
||||||
packages_in_scope = []
|
packages_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
packages_in_scope = stack_config["npms"]
|
packages_in_scope = stack_config['npms']
|
||||||
else:
|
else:
|
||||||
packages_in_scope = all_packages
|
packages_in_scope = all_packages
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Packages: {packages_in_scope}")
|
print(f'Packages: {packages_in_scope}')
|
||||||
|
|
||||||
def build_package(package):
|
def build_package(package):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Building npm package: {package}")
|
print(f"Building npm package: {package}")
|
||||||
repo_dir = package
|
repo_dir = package
|
||||||
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
# Copy the repo and build that to avoid propagating
|
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
|
||||||
# JS tooling file changes back into the cloned repo
|
|
||||||
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
||||||
# First delete any old build tree
|
# First delete any old build tree
|
||||||
if os.path.isdir(repo_copy_path):
|
if os.path.isdir(repo_copy_path):
|
||||||
@ -129,63 +116,41 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
copytree(repo_full_path, repo_copy_path)
|
copytree(repo_full_path, repo_copy_path)
|
||||||
build_command = [
|
build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
|
||||||
"sh",
|
|
||||||
"-c",
|
|
||||||
"cd /workspace && "
|
|
||||||
f"build-npm-package-local-dependencies.sh {npm_registry_url}",
|
|
||||||
]
|
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Executing: {build_command}")
|
print(f"Executing: {build_command}")
|
||||||
# Originally we used the PEP 584 merge operator:
|
# Originally we used the PEP 584 merge operator:
|
||||||
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
|
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
# ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
|
||||||
# but that isn't available in Python 3.8 (default in Ubuntu 20)
|
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
||||||
# so for now we use dict.update:
|
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
|
||||||
envs = {
|
|
||||||
"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
|
||||||
# Convention used by our web app packages
|
|
||||||
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
|
|
||||||
}
|
}
|
||||||
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
envs.update(
|
envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
||||||
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
|
||||||
if extra_build_args
|
|
||||||
else {}
|
|
||||||
)
|
|
||||||
try:
|
try:
|
||||||
docker.run(
|
docker.run(builder_js_image_name,
|
||||||
builder_js_image_name,
|
|
||||||
remove=True,
|
remove=True,
|
||||||
interactive=True,
|
interactive=True,
|
||||||
tty=True,
|
tty=True,
|
||||||
user=f"{os.getuid()}:{os.getgid()}",
|
user=f"{os.getuid()}:{os.getgid()}",
|
||||||
envs=envs,
|
envs=envs,
|
||||||
# TODO: detect this host name in npm_registry_url
|
# TODO: detect this host name in npm_registry_url rather than hard-wiring it
|
||||||
# rather than hard-wiring it
|
|
||||||
add_hosts=[("gitea.local", "host-gateway")],
|
add_hosts=[("gitea.local", "host-gateway")],
|
||||||
volumes=[(repo_copy_path, "/workspace")],
|
volumes=[(repo_copy_path, "/workspace")],
|
||||||
command=build_command,
|
command=build_command
|
||||||
)
|
)
|
||||||
# Note that although the docs say that build_result should
|
# Note that although the docs say that build_result should contain
|
||||||
# contain the command output as a string, in reality it is
|
# the command output as a string, in reality it is always the empty string.
|
||||||
# always the empty string. Since we detect errors via catching
|
# Since we detect errors via catching exceptions below, we can safely ignore it here.
|
||||||
# exceptions below, we can safely ignore it here.
|
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
print(f"Error executing build for {package} in container:\n {e}")
|
print(f"Error executing build for {package} in container:\n {e}")
|
||||||
if not continue_on_error:
|
if not continue_on_error:
|
||||||
print(
|
print("FATAL Error: build failed and --continue-on-error not set, exiting")
|
||||||
"FATAL Error: build failed and --continue-on-error "
|
|
||||||
"not set, exiting"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print(
|
print("****** Build Error, continuing because --continue-on-error is set")
|
||||||
"****** Build Error, continuing because "
|
|
||||||
"--continue-on-error is set"
|
|
||||||
)
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("Skipped")
|
print("Skipped")
|
||||||
@ -203,12 +168,6 @@ def _ensure_prerequisites():
|
|||||||
# Tell the user how to build it if not
|
# Tell the user how to build it if not
|
||||||
images = docker.image.list(builder_js_image_name)
|
images = docker.image.list(builder_js_image_name)
|
||||||
if len(images) == 0:
|
if len(images) == 0:
|
||||||
print(
|
print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
|
||||||
f"FATAL: builder image: {builder_js_image_name} is required "
|
print("Please run this command to create it: laconic-so --stack build-support build-containers")
|
||||||
"but was not found"
|
|
||||||
)
|
|
||||||
print(
|
|
||||||
"Please run this command to create it: "
|
|
||||||
"laconic-so --stack build-support build-containers"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|||||||
@ -24,5 +24,6 @@ class BuildContext:
|
|||||||
stack: str
|
stack: str
|
||||||
container: str
|
container: str
|
||||||
container_build_dir: Path
|
container_build_dir: Path
|
||||||
container_build_env: Mapping[str, str]
|
container_build_env: Mapping[str,str]
|
||||||
dev_root_path: str
|
dev_root_path: str
|
||||||
|
|
||||||
|
|||||||
@ -20,23 +20,23 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit
|
|||||||
|
|
||||||
|
|
||||||
def get_containers_in_scope(stack: str):
|
def get_containers_in_scope(stack: str):
|
||||||
|
|
||||||
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
|
from stack_orchestrator import data
|
||||||
|
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
|
||||||
|
all_containers = container_list_file.read().splitlines()
|
||||||
|
|
||||||
containers_in_scope = []
|
containers_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
if "containers" not in stack_config or stack_config["containers"] is None:
|
if "containers" not in stack_config or stack_config["containers"] is None:
|
||||||
warn_exit(f"stack {stack} does not define any containers")
|
warn_exit(f"stack {stack} does not define any containers")
|
||||||
containers_in_scope = stack_config["containers"]
|
containers_in_scope = stack_config['containers']
|
||||||
else:
|
else:
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
containers_in_scope = all_containers
|
||||||
from stack_orchestrator import data
|
|
||||||
|
|
||||||
with importlib.resources.open_text(
|
|
||||||
data, "container-image-list.txt"
|
|
||||||
) as container_list_file:
|
|
||||||
containers_in_scope = container_list_file.read().splitlines()
|
|
||||||
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Containers: {containers_in_scope}")
|
print(f'Containers: {containers_in_scope}')
|
||||||
if stack:
|
if stack:
|
||||||
print(f"Stack: {stack}")
|
print(f"Stack: {stack}")
|
||||||
|
|
||||||
|
|||||||
@ -18,8 +18,7 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers;
|
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
||||||
# allow re-build of either all or specific containers
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -33,55 +32,40 @@ from stack_orchestrator.build.build_types import BuildContext
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--base-container")
|
@click.option('--base-container')
|
||||||
@click.option(
|
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
||||||
"--source-repo", help="directory containing the webapp to build", required=True
|
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--force-rebuild",
|
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Override dependency checking -- always rebuild",
|
|
||||||
)
|
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
||||||
"""build the specified webapp container"""
|
'''build the specified webapp container'''
|
||||||
logger = TimedLogger()
|
logger = TimedLogger()
|
||||||
|
|
||||||
|
quiet = ctx.obj.quiet
|
||||||
debug = ctx.obj.debug
|
debug = ctx.obj.debug
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/
|
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
# python-get-path-of-root-project-structure
|
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
container_build_dir = (
|
|
||||||
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
|
||||||
)
|
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
logger.log(
|
logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
|
||||||
f"{dev_root_path}"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
|
||||||
)
|
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Dev Root is: {dev_root_path}")
|
logger.log(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not base_container:
|
if not base_container:
|
||||||
base_container = determine_base_container(source_repo)
|
base_container = determine_base_container(source_repo)
|
||||||
|
|
||||||
# First build the base container.
|
# First build the base container.
|
||||||
container_build_env = build_containers.make_container_build_env(
|
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
|
||||||
dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args
|
force_rebuild, extra_build_args)
|
||||||
)
|
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Building base container: {base_container}")
|
logger.log(f"Building base container: {base_container}")
|
||||||
@ -101,13 +85,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
|
|||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Base container {base_container} build finished.")
|
logger.log(f"Base container {base_container} build finished.")
|
||||||
|
|
||||||
# Now build the target webapp. We use the same build script,
|
# Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
|
||||||
# but with a different Dockerfile and work dir.
|
|
||||||
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
||||||
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(
|
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
|
||||||
container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp"
|
base_container.replace("/", "-"),
|
||||||
)
|
"Dockerfile.webapp")
|
||||||
if not tag:
|
if not tag:
|
||||||
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
||||||
tag = f"cerc/{webapp_name}:local"
|
tag = f"cerc/{webapp_name}:local"
|
||||||
|
|||||||
@ -52,8 +52,7 @@ def _local_tag_for(container: str):
|
|||||||
|
|
||||||
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
||||||
# Emulate this:
|
# Emulate this:
|
||||||
# $ curl -u "my-username:my-token" -X GET \
|
# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
||||||
# "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
|
||||||
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
||||||
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
||||||
# registry looks like: git.vdb.to/cerc-io
|
# registry looks like: git.vdb.to/cerc-io
|
||||||
@ -61,9 +60,7 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Fetching tags from: {url}")
|
print(f"Fetching tags from: {url}")
|
||||||
response = requests.get(
|
response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
|
||||||
url, auth=(registry_info.registry_username, registry_info.registry_token)
|
|
||||||
)
|
|
||||||
if response.status_code == 200:
|
if response.status_code == 200:
|
||||||
tag_info = response.json()
|
tag_info = response.json()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
@ -71,10 +68,7 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
tags_array = tag_info["tags"]
|
tags_array = tag_info["tags"]
|
||||||
return tags_array
|
return tags_array
|
||||||
else:
|
else:
|
||||||
error_exit(
|
error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
|
||||||
f"failed to fetch tags from image registry, "
|
|
||||||
f"status code: {response.status_code}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _find_latest(candidate_tags: List[str]):
|
def _find_latest(candidate_tags: List[str]):
|
||||||
@ -85,9 +79,9 @@ def _find_latest(candidate_tags: List[str]):
|
|||||||
return sorted_candidates[-1]
|
return sorted_candidates[-1]
|
||||||
|
|
||||||
|
|
||||||
def _filter_for_platform(
|
def _filter_for_platform(container: str,
|
||||||
container: str, registry_info: RegistryInfo, tag_list: List[str]
|
registry_info: RegistryInfo,
|
||||||
) -> List[str]:
|
tag_list: List[str]) -> List[str] :
|
||||||
filtered_tags = []
|
filtered_tags = []
|
||||||
this_machine = platform.machine()
|
this_machine = platform.machine()
|
||||||
# Translate between Python and docker platform names
|
# Translate between Python and docker platform names
|
||||||
@ -143,44 +137,21 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only fetch these containers")
|
@click.option('--include', help="only fetch these containers")
|
||||||
@click.option("--exclude", help="don't fetch these containers")
|
@click.option('--exclude', help="don\'t fetch these containers")
|
||||||
@click.option(
|
@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
|
||||||
"--force-local-overwrite",
|
@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
|
||||||
is_flag=True,
|
@click.option("--registry-username", required=True, help="Specify the image registry username")
|
||||||
default=False,
|
@click.option("--registry-token", required=True, help="Specify the image registry access token")
|
||||||
help="Overwrite a locally built image, if present",
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--image-registry", required=True, help="Specify the image registry to fetch from"
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--registry-username", required=True, help="Specify the image registry username"
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--registry-token", required=True, help="Specify the image registry access token"
|
|
||||||
)
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(
|
def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
|
||||||
ctx,
|
'''EXPERIMENTAL: fetch the images for a stack from remote registry'''
|
||||||
include,
|
|
||||||
exclude,
|
|
||||||
force_local_overwrite,
|
|
||||||
image_registry,
|
|
||||||
registry_username,
|
|
||||||
registry_token,
|
|
||||||
):
|
|
||||||
"""EXPERIMENTAL: fetch the images for a stack from remote registry"""
|
|
||||||
|
|
||||||
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print("Logging into container registry:")
|
print("Logging into container registry:")
|
||||||
docker.login(
|
docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
|
||||||
registry_info.registry,
|
|
||||||
registry_info.registry_username,
|
|
||||||
registry_info.registry_token,
|
|
||||||
)
|
|
||||||
# Generate list of target containers
|
# Generate list of target containers
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
@ -201,24 +172,19 @@ def command(
|
|||||||
print(f"Fetching: {image_to_fetch}")
|
print(f"Fetching: {image_to_fetch}")
|
||||||
_fetch_image(image_to_fetch, registry_info)
|
_fetch_image(image_to_fetch, registry_info)
|
||||||
# Now check if the target container already exists exists locally already
|
# Now check if the target container already exists exists locally already
|
||||||
if _exists_locally(container):
|
if (_exists_locally(container)):
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Container image {container} already exists locally")
|
print(f"Container image {container} already exists locally")
|
||||||
# if so, fail unless the user specified force-local-overwrite
|
# if so, fail unless the user specified force-local-overwrite
|
||||||
if force_local_overwrite:
|
if (force_local_overwrite):
|
||||||
# In that case remove the existing :local tag
|
# In that case remove the existing :local tag
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(
|
print(f"Warning: overwriting local tag from this image: {container} because "
|
||||||
f"Warning: overwriting local tag from this image: "
|
"--force-local-overwrite was specified")
|
||||||
f"{container} because --force-local-overwrite was specified"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(
|
print(f"Skipping local tagging for this image: {container} because that would "
|
||||||
f"Skipping local tagging for this image: {container} "
|
"overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
|
||||||
"because that would overwrite an existing :local tagged "
|
|
||||||
"image, use --force-local-overwrite to do so."
|
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
# Tag the fetched image with the :local tag
|
# Tag the fetched image with the :local tag
|
||||||
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
||||||
@ -226,7 +192,4 @@ def command(
|
|||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
if not all_containers_found:
|
if not all_containers_found:
|
||||||
print(
|
print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
|
||||||
"Warning: couldn't find usable images for one or more containers, "
|
|
||||||
"this stack will not deploy"
|
|
||||||
)
|
|
||||||
|
|||||||
@ -34,13 +34,5 @@ volumes_key = "volumes"
|
|||||||
security_key = "security"
|
security_key = "security"
|
||||||
annotations_key = "annotations"
|
annotations_key = "annotations"
|
||||||
labels_key = "labels"
|
labels_key = "labels"
|
||||||
replicas_key = "replicas"
|
|
||||||
node_affinities_key = "node-affinities"
|
|
||||||
node_tolerations_key = "node-tolerations"
|
|
||||||
kind_config_filename = "kind-config.yml"
|
kind_config_filename = "kind-config.yml"
|
||||||
kube_config_filename = "kubeconfig.yml"
|
kube_config_filename = "kubeconfig.yml"
|
||||||
cri_base_filename = "cri-base.json"
|
|
||||||
unlimited_memlock_key = "unlimited-memlock"
|
|
||||||
runtime_class_key = "runtime-class"
|
|
||||||
high_memlock_runtime = "high-memlock"
|
|
||||||
high_memlock_spec_filename = "high-memlock-spec.json"
|
|
||||||
|
|||||||
@ -2,11 +2,10 @@ services:
|
|||||||
laconicd:
|
laconicd:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
image: cerc/laconicd:local
|
image: cerc/laconicd:local
|
||||||
command: ["bash", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
|
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
|
||||||
environment:
|
environment:
|
||||||
TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED:-false}
|
TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED}
|
||||||
TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY:-false}
|
TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY}
|
||||||
ONBOARDING_ENABLED: ${ONBOARDING_ENABLED:-false}
|
|
||||||
volumes:
|
volumes:
|
||||||
# The cosmos-sdk node's database directory:
|
# The cosmos-sdk node's database directory:
|
||||||
- laconicd-data:/root/.laconicd
|
- laconicd-data:/root/.laconicd
|
||||||
@ -20,9 +19,11 @@ services:
|
|||||||
- "26657"
|
- "26657"
|
||||||
- "26656"
|
- "26656"
|
||||||
- "9473"
|
- "9473"
|
||||||
|
- "8545"
|
||||||
|
- "8546"
|
||||||
- "9090"
|
- "9090"
|
||||||
|
- "9091"
|
||||||
- "1317"
|
- "1317"
|
||||||
|
|
||||||
cli:
|
cli:
|
||||||
image: cerc/laconic-registry-cli:local
|
image: cerc/laconic-registry-cli:local
|
||||||
volumes:
|
volumes:
|
||||||
|
|||||||
@ -6,20 +6,12 @@ services:
|
|||||||
restart: always
|
restart: always
|
||||||
environment:
|
environment:
|
||||||
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL}
|
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL}
|
||||||
CERC_GRAFANA_ALERTS_SUBGRAPH_IDS: ${CERC_GRAFANA_ALERTS_SUBGRAPH_IDS}
|
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/monitoring/grafana/provisioning:/etc/grafana/provisioning
|
- ../config/monitoring/grafana/provisioning:/etc/grafana/provisioning
|
||||||
- ../config/monitoring/grafana/dashboards:/etc/grafana/dashboards
|
- ../config/monitoring/grafana/dashboards:/etc/grafana/dashboards
|
||||||
- ../config/monitoring/update-grafana-alerts-config.sh:/update-grafana-alerts-config.sh
|
|
||||||
- grafana_storage:/var/lib/grafana
|
- grafana_storage:/var/lib/grafana
|
||||||
user: root
|
|
||||||
entrypoint: ["bash", "-c"]
|
|
||||||
command: |
|
|
||||||
"/update-grafana-alerts-config.sh && /run.sh"
|
|
||||||
ports:
|
ports:
|
||||||
- "3000"
|
- "3000"
|
||||||
extra_hosts:
|
|
||||||
- "host.docker.internal:host-gateway"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "localhost", "3000"]
|
test: ["CMD", "nc", "-vz", "localhost", "3000"]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
|
|||||||
@ -16,13 +16,8 @@ services:
|
|||||||
postgres_pass: password
|
postgres_pass: password
|
||||||
postgres_db: graph-node
|
postgres_db: graph-node
|
||||||
ethereum: ${ETH_NETWORKS:-lotus-fixturenet:http://lotus-node-1:1234/rpc/v1}
|
ethereum: ${ETH_NETWORKS:-lotus-fixturenet:http://lotus-node-1:1234/rpc/v1}
|
||||||
# Env varaibles reference: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
|
|
||||||
GRAPH_LOG: debug
|
GRAPH_LOG: debug
|
||||||
ETHEREUM_REORG_THRESHOLD: 3
|
ETHEREUM_REORG_THRESHOLD: 3
|
||||||
GRAPH_ETHEREUM_JSON_RPC_TIMEOUT: ${GRAPH_ETHEREUM_JSON_RPC_TIMEOUT:-180}
|
|
||||||
GRAPH_ETHEREUM_REQUEST_RETRIES: ${GRAPH_ETHEREUM_REQUEST_RETRIES:-10}
|
|
||||||
GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE: ${GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE:-2000}
|
|
||||||
GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS: ${GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS:-1000}
|
|
||||||
entrypoint: ["bash", "-c"]
|
entrypoint: ["bash", "-c"]
|
||||||
# Wait for ETH RPC endpoint to be up when running with fixturenet-lotus
|
# Wait for ETH RPC endpoint to be up when running with fixturenet-lotus
|
||||||
command: |
|
command: |
|
||||||
@ -32,7 +27,6 @@ services:
|
|||||||
- "8001"
|
- "8001"
|
||||||
- "8020"
|
- "8020"
|
||||||
- "8030"
|
- "8030"
|
||||||
- "8040"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "localhost", "8020"]
|
test: ["CMD", "nc", "-vz", "localhost", "8020"]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
|
|||||||
@ -1,10 +0,0 @@
|
|||||||
services:
|
|
||||||
laconic-explorer:
|
|
||||||
restart: unless-stopped
|
|
||||||
image: cerc/ping-pub:local
|
|
||||||
environment:
|
|
||||||
- LACONIC_LACONICD_API_URL=${LACONIC_LACONICD_API_URL:-http://localhost:1317}
|
|
||||||
- LACONIC_LACONICD_RPC_URL=${LACONIC_LACONICD_RPC_URL:-http://localhost:26657}
|
|
||||||
- LACONIC_LACONICD_CHAIN_ID=${LACONIC_LACONICD_CHAIN_ID:-chain-id-not-set}
|
|
||||||
ports:
|
|
||||||
- "5173"
|
|
||||||
@ -14,3 +14,4 @@ services:
|
|||||||
- "9090"
|
- "9090"
|
||||||
- "9091"
|
- "9091"
|
||||||
- "1317"
|
- "1317"
|
||||||
|
|
||||||
|
|||||||
@ -17,3 +17,4 @@ services:
|
|||||||
- URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
|
- URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
|
||||||
- URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
|
- URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
|
||||||
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
|
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
|
||||||
|
|
||||||
|
|||||||
@ -28,37 +28,15 @@ services:
|
|||||||
extra_hosts:
|
extra_hosts:
|
||||||
- "host.docker.internal:host-gateway"
|
- "host.docker.internal:host-gateway"
|
||||||
|
|
||||||
ethereum-chain-head-exporter:
|
chain-head-exporter:
|
||||||
image: cerc/watcher-ts:local
|
image: cerc/watcher-ts:local
|
||||||
restart: always
|
restart: always
|
||||||
working_dir: /app/packages/cli
|
working_dir: /app/packages/cli
|
||||||
environment:
|
environment:
|
||||||
ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT:-https://mainnet.infura.io/v3}
|
ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
|
FIL_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT}
|
||||||
ETH_RPC_API_KEY: ${CERC_INFURA_KEY}
|
ETH_RPC_API_KEY: ${CERC_INFURA_KEY}
|
||||||
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
|
PORT: ${CERC_METRICS_PORT}
|
||||||
ports:
|
|
||||||
- '5000'
|
|
||||||
extra_hosts:
|
|
||||||
- "host.docker.internal:host-gateway"
|
|
||||||
|
|
||||||
filecoin-chain-head-exporter:
|
|
||||||
image: cerc/watcher-ts:local
|
|
||||||
restart: always
|
|
||||||
working_dir: /app/packages/cli
|
|
||||||
environment:
|
|
||||||
ETH_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT:-https://api.node.glif.io/rpc/v1}
|
|
||||||
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
|
|
||||||
ports:
|
|
||||||
- '5000'
|
|
||||||
extra_hosts:
|
|
||||||
- "host.docker.internal:host-gateway"
|
|
||||||
|
|
||||||
graph-node-upstream-head-exporter:
|
|
||||||
image: cerc/watcher-ts:local
|
|
||||||
restart: always
|
|
||||||
working_dir: /app/packages/cli
|
|
||||||
environment:
|
|
||||||
ETH_RPC_ENDPOINT: ${GRAPH_NODE_RPC_ENDPOINT}
|
|
||||||
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
|
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
|
||||||
ports:
|
ports:
|
||||||
- '5000'
|
- '5000'
|
||||||
|
|||||||
@ -0,0 +1,13 @@
|
|||||||
|
services:
|
||||||
|
snowballtools-base-backend:
|
||||||
|
image: cerc/snowballtools-base-backend:local
|
||||||
|
restart: always
|
||||||
|
volumes:
|
||||||
|
- data:/data
|
||||||
|
- config:/config:ro
|
||||||
|
ports:
|
||||||
|
- 8000
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
data:
|
||||||
|
config:
|
||||||
@ -29,7 +29,7 @@ services:
|
|||||||
image: cerc/watcher-ajna:local
|
image: cerc/watcher-ajna:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
command: ["bash", "./start-job-runner.sh"]
|
command: ["bash", "./start-job-runner.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
@ -37,7 +37,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9000"
|
- "9000"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
|
test: ["CMD", "nc", "-v", "localhost", "9000"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -55,17 +55,16 @@ services:
|
|||||||
image: cerc/watcher-ajna:local
|
image: cerc/watcher-ajna:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
command: ["bash", "./start-server.sh"]
|
command: ["bash", "./start-server.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-ajna/start-server.sh:/app/start-server.sh
|
- ../config/watcher-ajna/start-server.sh:/app/start-server.sh
|
||||||
- ajna_watcher_gql_logs_data:/app/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3008"
|
- "3008"
|
||||||
- "9001"
|
- "9001"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
|
test: ["CMD", "nc", "-v", "localhost", "3008"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -75,4 +74,3 @@ services:
|
|||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
ajna_watcher_db_data:
|
ajna_watcher_db_data:
|
||||||
ajna_watcher_gql_logs_data:
|
|
||||||
|
|||||||
@ -32,8 +32,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CERC_HISTORICAL_BLOCK_RANGE: 500
|
CERC_HISTORICAL_BLOCK_RANGE: 500
|
||||||
CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB
|
CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB
|
||||||
CONTRACT_NAME: Azimuth
|
CONTRACT_NAME: Azimuth
|
||||||
@ -47,7 +47,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9000"
|
- "9000"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
|
test: ["CMD", "nc", "-vz", "localhost", "9000"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -66,20 +66,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/azimuth-watcher
|
working_dir: /app/packages/azimuth-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/azimuth-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/azimuth-watcher/start-server.sh
|
||||||
- azimuth_watcher_gql_logs_data:/app/packages/azimuth-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3001"
|
- "3001"
|
||||||
- "9001"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3001"]
|
test: ["CMD", "nc", "-vz", "localhost", "3001"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -96,8 +94,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189
|
CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189
|
||||||
CONTRACT_NAME: Censures
|
CONTRACT_NAME: Censures
|
||||||
STARTING_BLOCK: 6784954
|
STARTING_BLOCK: 6784954
|
||||||
@ -110,7 +108,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9002"
|
- "9002"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9002"]
|
test: ["CMD", "nc", "-vz", "localhost", "9002"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -129,20 +127,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/censures-watcher
|
working_dir: /app/packages/censures-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/censures-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/censures-watcher/start-server.sh
|
||||||
- censures_watcher_gql_logs_data:/app/packages/censures-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3002"
|
- "3002"
|
||||||
- "9003"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3002"]
|
test: ["CMD", "nc", "-vz", "localhost", "3002"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -159,8 +155,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A
|
CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A
|
||||||
CONTRACT_NAME: Claims
|
CONTRACT_NAME: Claims
|
||||||
STARTING_BLOCK: 6784941
|
STARTING_BLOCK: 6784941
|
||||||
@ -173,7 +169,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9004"
|
- "9004"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9004"]
|
test: ["CMD", "nc", "-vz", "localhost", "9004"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -192,20 +188,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/claims-watcher
|
working_dir: /app/packages/claims-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/claims-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/claims-watcher/start-server.sh
|
||||||
- claims_watcher_gql_logs_data:/app/packages/claims-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3003"
|
- "3003"
|
||||||
- "9005"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3003"]
|
test: ["CMD", "nc", "-vz", "localhost", "3003"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -222,8 +216,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA
|
CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA
|
||||||
CONTRACT_NAME: ConditionalStarRelease
|
CONTRACT_NAME: ConditionalStarRelease
|
||||||
STARTING_BLOCK: 6828004
|
STARTING_BLOCK: 6828004
|
||||||
@ -236,7 +230,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9006"
|
- "9006"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9006"]
|
test: ["CMD", "nc", "-vz", "localhost", "9006"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -255,20 +249,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/conditional-star-release-watcher
|
working_dir: /app/packages/conditional-star-release-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/conditional-star-release-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/conditional-star-release-watcher/start-server.sh
|
||||||
- conditional_star_release_watcher_gql_logs_data:/app/packages/conditional-star-release-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3004"
|
- "3004"
|
||||||
- "9007"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3004"]
|
test: ["CMD", "nc", "-vz", "localhost", "3004"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -285,8 +277,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76
|
CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76
|
||||||
CONTRACT_NAME: DelegatedSending
|
CONTRACT_NAME: DelegatedSending
|
||||||
STARTING_BLOCK: 6784956
|
STARTING_BLOCK: 6784956
|
||||||
@ -299,7 +291,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9008"
|
- "9008"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9008"]
|
test: ["CMD", "nc", "-vz", "localhost", "9008"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -318,20 +310,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/delegated-sending-watcher
|
working_dir: /app/packages/delegated-sending-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/delegated-sending-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/delegated-sending-watcher/start-server.sh
|
||||||
- delegated_sending_watcher_gql_logs_data:/app/packages/delegated-sending-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3005"
|
- "3005"
|
||||||
- "9009"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3005"]
|
test: ["CMD", "nc", "-vz", "localhost", "3005"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -348,8 +338,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73
|
CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73
|
||||||
CONTRACT_NAME: Ecliptic
|
CONTRACT_NAME: Ecliptic
|
||||||
STARTING_BLOCK: 13692129
|
STARTING_BLOCK: 13692129
|
||||||
@ -362,7 +352,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9010"
|
- "9010"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9010"]
|
test: ["CMD", "nc", "-vz", "localhost", "9010"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -381,20 +371,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/ecliptic-watcher
|
working_dir: /app/packages/ecliptic-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/ecliptic-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/ecliptic-watcher/start-server.sh
|
||||||
- ecliptic_watcher_gql_logs_data:/app/packages/ecliptic-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3006"
|
- "3006"
|
||||||
- "9011"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3006"]
|
test: ["CMD", "nc", "-vz", "localhost", "3006"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -411,8 +399,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801
|
CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801
|
||||||
CONTRACT_NAME: LinearStarRelease
|
CONTRACT_NAME: LinearStarRelease
|
||||||
STARTING_BLOCK: 6784943
|
STARTING_BLOCK: 6784943
|
||||||
@ -425,7 +413,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9012"
|
- "9012"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9012"]
|
test: ["CMD", "nc", "-vz", "localhost", "9012"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -444,20 +432,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/linear-star-release-watcher
|
working_dir: /app/packages/linear-star-release-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/linear-star-release-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/linear-star-release-watcher/start-server.sh
|
||||||
- linear_star_release_watcher_gql_logs_data:/app/packages/linear-star-release-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3007"
|
- "3007"
|
||||||
- "9013"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3007"]
|
test: ["CMD", "nc", "-vz", "localhost", "3007"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -474,8 +460,8 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4
|
CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4
|
||||||
CONTRACT_NAME: Polls
|
CONTRACT_NAME: Polls
|
||||||
STARTING_BLOCK: 6784912
|
STARTING_BLOCK: 6784912
|
||||||
@ -488,7 +474,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9014"
|
- "9014"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9014"]
|
test: ["CMD", "nc", "-vz", "localhost", "9014"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -507,20 +493,18 @@ services:
|
|||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
|
||||||
CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
|
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
|
||||||
working_dir: /app/packages/polls-watcher
|
working_dir: /app/packages/polls-watcher
|
||||||
command: "./start-server.sh"
|
command: "./start-server.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
|
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
|
- ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
|
||||||
- ../config/watcher-azimuth/start-server.sh:/app/packages/polls-watcher/start-server.sh
|
- ../config/watcher-azimuth/start-server.sh:/app/packages/polls-watcher/start-server.sh
|
||||||
- polls_watcher_gql_logs_data:/app/packages/polls-watcher/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "3008"
|
- "3008"
|
||||||
- "9015"
|
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
|
test: ["CMD", "nc", "-vz", "localhost", "3008"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -558,7 +542,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "0.0.0.0:4000:4000"
|
- "0.0.0.0:4000:4000"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "4000"]
|
test: ["CMD", "nc", "-vz", "localhost", "4000"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -568,11 +552,3 @@ services:
|
|||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
watcher_db_data:
|
watcher_db_data:
|
||||||
azimuth_watcher_gql_logs_data:
|
|
||||||
censures_watcher_gql_logs_data:
|
|
||||||
claims_watcher_gql_logs_data:
|
|
||||||
conditional_star_release_watcher_gql_logs_data:
|
|
||||||
delegated_sending_watcher_gql_logs_data:
|
|
||||||
ecliptic_watcher_gql_logs_data:
|
|
||||||
linear_star_release_watcher_gql_logs_data:
|
|
||||||
polls_watcher_gql_logs_data:
|
|
||||||
|
|||||||
@ -29,7 +29,7 @@ services:
|
|||||||
image: cerc/watcher-merkl-sushiswap-v3:local
|
image: cerc/watcher-merkl-sushiswap-v3:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
command: ["bash", "./start-job-runner.sh"]
|
command: ["bash", "./start-job-runner.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
@ -37,7 +37,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9002:9000"
|
- "9002:9000"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
|
test: ["CMD", "nc", "-v", "localhost", "9000"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -55,17 +55,16 @@ services:
|
|||||||
image: cerc/watcher-merkl-sushiswap-v3:local
|
image: cerc/watcher-merkl-sushiswap-v3:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
command: ["bash", "./start-server.sh"]
|
command: ["bash", "./start-server.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh
|
- ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh
|
||||||
- merkl_sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "127.0.0.1:3007:3008"
|
- "127.0.0.1:3007:3008"
|
||||||
- "9003:9001"
|
- "9003:9001"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
|
test: ["CMD", "nc", "-v", "localhost", "3008"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -75,4 +74,3 @@ services:
|
|||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
merkl_sushiswap_v3_watcher_db_data:
|
merkl_sushiswap_v3_watcher_db_data:
|
||||||
merkl_sushiswap_v3_watcher_gql_logs_data:
|
|
||||||
|
|||||||
@ -29,7 +29,7 @@ services:
|
|||||||
image: cerc/watcher-sushiswap-v3:local
|
image: cerc/watcher-sushiswap-v3:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
command: ["bash", "./start-job-runner.sh"]
|
command: ["bash", "./start-job-runner.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
@ -37,7 +37,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "9000:9000"
|
- "9000:9000"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
|
test: ["CMD", "nc", "-v", "localhost", "9000"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -55,17 +55,16 @@ services:
|
|||||||
image: cerc/watcher-sushiswap-v3:local
|
image: cerc/watcher-sushiswap-v3:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
command: ["bash", "./start-server.sh"]
|
command: ["bash", "./start-server.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh
|
- ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh
|
||||||
- sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "127.0.0.1:3008:3008"
|
- "127.0.0.1:3008:3008"
|
||||||
- "9001:9001"
|
- "9001:9001"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
|
test: ["CMD", "nc", "-v", "localhost", "3008"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -75,4 +74,3 @@ services:
|
|||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
sushiswap_v3_watcher_db_data:
|
sushiswap_v3_watcher_db_data:
|
||||||
sushiswap_v3_watcher_gql_logs_data:
|
|
||||||
|
|||||||
@ -8,68 +8,68 @@ KEY="mykey"
|
|||||||
CHAINID="laconic_9000-1"
|
CHAINID="laconic_9000-1"
|
||||||
MONIKER="localtestnet"
|
MONIKER="localtestnet"
|
||||||
KEYRING="test"
|
KEYRING="test"
|
||||||
KEYALGO="secp256k1"
|
KEYALGO="eth_secp256k1"
|
||||||
LOGLEVEL="${LOGLEVEL:-info}"
|
LOGLEVEL="info"
|
||||||
DENOM="alnt"
|
# trace evm
|
||||||
|
TRACE="--trace"
|
||||||
|
# TRACE=""
|
||||||
|
|
||||||
if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
||||||
# validate dependencies are installed
|
# validate dependencies are installed
|
||||||
command -v jq > /dev/null 2>&1 || {
|
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
|
||||||
echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# remove existing daemon and client
|
# remove existing daemon and client
|
||||||
rm -rf $HOME/.laconicd/*
|
rm -rf $HOME/.laconicd/*
|
||||||
|
rm -rf $HOME/.laconic/*
|
||||||
|
|
||||||
if [ -n "`which make`" ]; then
|
if [ -n "`which make`" ]; then
|
||||||
make install
|
make install
|
||||||
fi
|
fi
|
||||||
|
|
||||||
laconicd config set client chain-id $CHAINID
|
laconicd config keyring-backend $KEYRING
|
||||||
laconicd config set client keyring-backend $KEYRING
|
laconicd config chain-id $CHAINID
|
||||||
|
|
||||||
# if $KEY exists it should be deleted
|
# if $KEY exists it should be deleted
|
||||||
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
|
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
|
||||||
|
|
||||||
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
|
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
|
||||||
laconicd init $MONIKER --chain-id $CHAINID --default-denom $DENOM
|
laconicd init $MONIKER --chain-id $CHAINID
|
||||||
|
|
||||||
update_genesis() {
|
# Change parameter token denominations to aphoton
|
||||||
jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
}
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
# Custom modules
|
||||||
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
|
||||||
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
|
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
|
||||||
echo "Setting timers for expiry tests."
|
echo "Setting timers for expiry tests."
|
||||||
|
|
||||||
update_genesis '.app_state["registry"]["params"]["record_rent_duration"]="60s"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
update_genesis '.app_state["registry"]["params"]["authority_grace_period"]="60s"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
update_genesis '.app_state["registry"]["params"]["authority_rent_duration"]="60s"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
|
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
|
||||||
echo "Enabling auction and setting timers."
|
echo "Enabling auction and setting timers."
|
||||||
|
|
||||||
update_genesis '.app_state["registry"]["params"]["authority_auction_enabled"]=true'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
update_genesis '.app_state["registry"]["params"]["authority_rent_duration"]="60s"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
update_genesis '.app_state["registry"]["params"]["authority_grace_period"]="300s"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
update_genesis '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
update_genesis '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "$ONBOARDING_ENABLED" == "true" ]]; then
|
|
||||||
echo "Enabling validator onboarding."
|
|
||||||
|
|
||||||
update_genesis '.app_state["onboarding"]["params"]["onboarding_enabled"]=true'
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# increase block time (?)
|
# increase block time (?)
|
||||||
update_genesis '.consensus["params"]["block"]["time_iota_ms"]="1000"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
|
||||||
# Set gas limit in genesis
|
# Set gas limit in genesis
|
||||||
update_genesis '.consensus["params"]["block"]["max_gas"]="10000000"'
|
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
|
||||||
|
|
||||||
# disable produce empty block
|
# disable produce empty block
|
||||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||||
@ -78,6 +78,30 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
|||||||
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
|
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [[ $1 == "pending" ]]; then
|
||||||
|
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||||
|
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
else
|
||||||
|
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
# Enable telemetry (prometheus metrics: http://localhost:1317/metrics?format=prometheus)
|
# Enable telemetry (prometheus metrics: http://localhost:1317/metrics?format=prometheus)
|
||||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||||
sed -i '' 's/enabled = false/enabled = true/g' $HOME/.laconicd/config/app.toml
|
sed -i '' 's/enabled = false/enabled = true/g' $HOME/.laconicd/config/app.toml
|
||||||
@ -90,27 +114,23 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Allocate genesis accounts (cosmos formatted addresses)
|
# Allocate genesis accounts (cosmos formatted addresses)
|
||||||
# 10^30 alnt | 10^12 lnt
|
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
|
||||||
laconicd genesis add-genesis-account $KEY 1000000000000000000000000000000$DENOM --keyring-backend $KEYRING
|
|
||||||
|
|
||||||
# Sign genesis transaction
|
# Sign genesis transaction
|
||||||
# 10^24 alnt | 10^6 lnt
|
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
|
||||||
laconicd genesis gentx $KEY 1000000000000000000000000$DENOM --keyring-backend $KEYRING --chain-id $CHAINID
|
|
||||||
|
|
||||||
# Collect genesis tx
|
# Collect genesis tx
|
||||||
laconicd genesis collect-gentxs
|
laconicd collect-gentxs
|
||||||
|
|
||||||
# Run this to ensure everything worked and that the genesis file is setup correctly
|
# Run this to ensure everything worked and that the genesis file is setup correctly
|
||||||
laconicd genesis validate
|
laconicd validate-genesis
|
||||||
|
|
||||||
|
if [[ $1 == "pending" ]]; then
|
||||||
|
echo "pending mode is on, please wait for the first block committed."
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo "Using existing database at $HOME/.laconicd. To replace, run '`basename $0` clean'"
|
echo "Using existing database at $HOME/.laconicd. To replace, run '`basename $0` clean'"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
|
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
|
||||||
laconicd start \
|
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground
|
||||||
--pruning=nothing \
|
|
||||||
--log_level $LOGLEVEL \
|
|
||||||
--minimum-gas-prices=1$DENOM \
|
|
||||||
--api.enable \
|
|
||||||
--rpc.laddr="tcp://0.0.0.0:26657" \
|
|
||||||
--gql-server --gql-playground
|
|
||||||
|
|||||||
@ -1,9 +1,9 @@
|
|||||||
services:
|
services:
|
||||||
registry:
|
cns:
|
||||||
rpcEndpoint: 'http://laconicd:26657'
|
restEndpoint: 'http://laconicd:1317'
|
||||||
gqlEndpoint: 'http://laconicd:9473/api'
|
gqlEndpoint: 'http://laconicd:9473/api'
|
||||||
userKey: REPLACE_WITH_MYKEY
|
userKey: REPLACE_WITH_MYKEY
|
||||||
bondId:
|
bondId:
|
||||||
chainId: laconic_9000-1
|
chainId: laconic_9000-1
|
||||||
gas: 350000
|
gas: 350000
|
||||||
fees: 2000000alnt
|
fees: 200000aphoton
|
||||||
|
|||||||
@ -29,3 +29,4 @@
|
|||||||
"l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
|
"l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
|
||||||
"protocol_versions_address": "0x0000000000000000000000000000000000000000"
|
"protocol_versions_address": "0x0000000000000000000000000000000000000000"
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12,10 +12,7 @@ from fabric import Connection
|
|||||||
|
|
||||||
|
|
||||||
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
||||||
command = (
|
command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
|
||||||
f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
|
|
||||||
f"-d {db_name} -c --inserts -f {file_name}"
|
|
||||||
)
|
|
||||||
my_env = os.environ.copy()
|
my_env = os.environ.copy()
|
||||||
my_env["PGPASSWORD"] = db_password
|
my_env["PGPASSWORD"] = db_password
|
||||||
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
||||||
|
|||||||
@ -1,9 +1,9 @@
|
|||||||
services:
|
services:
|
||||||
registry:
|
cns:
|
||||||
rpcEndpoint: 'http://laconicd:26657'
|
restEndpoint: 'http://laconicd:1317'
|
||||||
gqlEndpoint: 'http://laconicd:9473/api'
|
gqlEndpoint: 'http://laconicd:9473/api'
|
||||||
userKey: REPLACE_WITH_MYKEY
|
userKey: REPLACE_WITH_MYKEY
|
||||||
bondId:
|
bondId:
|
||||||
chainId: laconic_9000-1
|
chainId: laconic_9000-1
|
||||||
gas: 250000
|
gas: 250000
|
||||||
fees: 2000000alnt
|
fees: 200000aphoton
|
||||||
|
|||||||
@ -1,15 +1,18 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
|
||||||
set -x
|
set -x
|
||||||
fi
|
fi
|
||||||
|
|
||||||
#TODO: pass these in from the caller
|
#TODO: pass these in from the caller
|
||||||
|
TRACE="--trace"
|
||||||
LOGLEVEL="info"
|
LOGLEVEL="info"
|
||||||
|
|
||||||
laconicd start \
|
laconicd start \
|
||||||
--pruning=nothing \
|
--pruning=nothing \
|
||||||
|
--evm.tracer=json $TRACE \
|
||||||
--log_level $LOGLEVEL \
|
--log_level $LOGLEVEL \
|
||||||
--minimum-gas-prices=1alnt \
|
--minimum-gas-prices=0.0001aphoton \
|
||||||
|
--json-rpc.api eth,txpool,personal,net,debug,web3,miner \
|
||||||
--api.enable \
|
--api.enable \
|
||||||
--gql-server \
|
--gql-server \
|
||||||
--gql-playground
|
--gql-playground
|
||||||
|
|||||||
@ -0,0 +1,147 @@
|
|||||||
|
apiVersion: 1
|
||||||
|
groups:
|
||||||
|
- orgId: 1
|
||||||
|
name: blackbox
|
||||||
|
folder: BlackboxAlerts
|
||||||
|
interval: 30s
|
||||||
|
rules:
|
||||||
|
# Azimuth Gateway endpoint
|
||||||
|
- uid: azimuth_gateway
|
||||||
|
title: azimuth_gateway_endpoint_tracking
|
||||||
|
condition: condition
|
||||||
|
data:
|
||||||
|
- refId: probe
|
||||||
|
relativeTimeRange:
|
||||||
|
from: 600
|
||||||
|
to: 0
|
||||||
|
datasourceUid: PBFA97CFB590B2093
|
||||||
|
model:
|
||||||
|
editorMode: code
|
||||||
|
expr: probe_success{destination="azimuth_gateway"}
|
||||||
|
instant: true
|
||||||
|
intervalMs: 1000
|
||||||
|
legendFormat: __auto
|
||||||
|
maxDataPoints: 43200
|
||||||
|
range: false
|
||||||
|
refId: probe
|
||||||
|
- refId: http_status_code
|
||||||
|
relativeTimeRange:
|
||||||
|
from: 600
|
||||||
|
to: 0
|
||||||
|
datasourceUid: PBFA97CFB590B2093
|
||||||
|
model:
|
||||||
|
editorMode: code
|
||||||
|
expr: probe_http_status_code{destination="azimuth_gateway"}
|
||||||
|
instant: true
|
||||||
|
intervalMs: 1000
|
||||||
|
legendFormat: __auto
|
||||||
|
maxDataPoints: 43200
|
||||||
|
range: false
|
||||||
|
refId: http_status_code
|
||||||
|
- refId: condition
|
||||||
|
relativeTimeRange:
|
||||||
|
from: 600
|
||||||
|
to: 0
|
||||||
|
datasourceUid: __expr__
|
||||||
|
model:
|
||||||
|
conditions:
|
||||||
|
- evaluator:
|
||||||
|
params:
|
||||||
|
- 0
|
||||||
|
- 0
|
||||||
|
type: gt
|
||||||
|
operator:
|
||||||
|
type: and
|
||||||
|
query:
|
||||||
|
params: []
|
||||||
|
reducer:
|
||||||
|
params: []
|
||||||
|
type: avg
|
||||||
|
type: query
|
||||||
|
datasource:
|
||||||
|
name: Expression
|
||||||
|
type: __expr__
|
||||||
|
uid: __expr__
|
||||||
|
expression: ${probe} != 1 || ${http_status_code} != 200
|
||||||
|
intervalMs: 1000
|
||||||
|
maxDataPoints: 43200
|
||||||
|
refId: condition
|
||||||
|
type: math
|
||||||
|
noDataState: Alerting
|
||||||
|
execErrState: Alerting
|
||||||
|
for: 5m
|
||||||
|
annotations:
|
||||||
|
summary: Probe failed for Azimuth gateway endpoint
|
||||||
|
labels:
|
||||||
|
probe_success: '{{ index $values "probe" }}'
|
||||||
|
isPaused: false
|
||||||
|
# Laconicd GQL endpoint
|
||||||
|
- uid: laconicd_gql
|
||||||
|
title: laconicd_gql_endpoint_tracking
|
||||||
|
condition: condition
|
||||||
|
data:
|
||||||
|
- refId: probe
|
||||||
|
relativeTimeRange:
|
||||||
|
from: 600
|
||||||
|
to: 0
|
||||||
|
datasourceUid: PBFA97CFB590B2093
|
||||||
|
model:
|
||||||
|
editorMode: code
|
||||||
|
expr: probe_success{destination="laconicd_gql"}
|
||||||
|
instant: true
|
||||||
|
intervalMs: 1000
|
||||||
|
legendFormat: __auto
|
||||||
|
maxDataPoints: 43200
|
||||||
|
range: false
|
||||||
|
refId: probe
|
||||||
|
- refId: http_status_code
|
||||||
|
relativeTimeRange:
|
||||||
|
from: 600
|
||||||
|
to: 0
|
||||||
|
datasourceUid: PBFA97CFB590B2093
|
||||||
|
model:
|
||||||
|
editorMode: code
|
||||||
|
expr: probe_http_status_code{destination="laconicd_gql"}
|
||||||
|
instant: true
|
||||||
|
intervalMs: 1000
|
||||||
|
legendFormat: __auto
|
||||||
|
maxDataPoints: 43200
|
||||||
|
range: false
|
||||||
|
refId: http_status_code
|
||||||
|
- refId: condition
|
||||||
|
relativeTimeRange:
|
||||||
|
from: 600
|
||||||
|
to: 0
|
||||||
|
datasourceUid: __expr__
|
||||||
|
model:
|
||||||
|
conditions:
|
||||||
|
- evaluator:
|
||||||
|
params:
|
||||||
|
- 0
|
||||||
|
- 0
|
||||||
|
type: gt
|
||||||
|
operator:
|
||||||
|
type: and
|
||||||
|
query:
|
||||||
|
params: []
|
||||||
|
reducer:
|
||||||
|
params: []
|
||||||
|
type: avg
|
||||||
|
type: query
|
||||||
|
datasource:
|
||||||
|
name: Expression
|
||||||
|
type: __expr__
|
||||||
|
uid: __expr__
|
||||||
|
expression: ${probe} != 1 || ${http_status_code} != 200
|
||||||
|
intervalMs: 1000
|
||||||
|
maxDataPoints: 43200
|
||||||
|
refId: condition
|
||||||
|
type: math
|
||||||
|
noDataState: Alerting
|
||||||
|
execErrState: Alerting
|
||||||
|
for: 5m
|
||||||
|
annotations:
|
||||||
|
summary: Probe failed for Laconicd GQL endpoint
|
||||||
|
labels:
|
||||||
|
probe_success: '{{ index $values "probe" }}'
|
||||||
|
isPaused: false
|
||||||
@ -49,7 +49,7 @@
|
|||||||
},
|
},
|
||||||
"gridPos": {
|
"gridPos": {
|
||||||
"h": 3,
|
"h": 3,
|
||||||
"w": 3,
|
"w": 4,
|
||||||
"x": 0,
|
"x": 0,
|
||||||
"y": 0
|
"y": 0
|
||||||
},
|
},
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,20 +0,0 @@
|
|||||||
apiVersion: 1
|
|
||||||
|
|
||||||
datasources:
|
|
||||||
- name: Graph Node Postgres
|
|
||||||
type: postgres
|
|
||||||
jsonData:
|
|
||||||
database: graph-node
|
|
||||||
sslmode: 'disable'
|
|
||||||
maxOpenConns: 100
|
|
||||||
maxIdleConns: 100
|
|
||||||
maxIdleConnsAuto: true
|
|
||||||
connMaxLifetime: 14400
|
|
||||||
postgresVersion: 1411 # 903=9.3, 1000=10, 1411=14.11
|
|
||||||
timescaledb: false
|
|
||||||
user: graph-node
|
|
||||||
# # Add URL for graph-node database
|
|
||||||
# url: graph-node-db:5432
|
|
||||||
# # Set password for graph-node database
|
|
||||||
# secureJsonData:
|
|
||||||
# password: 'password'
|
|
||||||
@ -24,9 +24,10 @@ scrape_configs:
|
|||||||
params:
|
params:
|
||||||
module: [http_2xx]
|
module: [http_2xx]
|
||||||
static_configs:
|
static_configs:
|
||||||
# Add URLs to be monitored below
|
# Add URLs for targets to be monitored below
|
||||||
- targets:
|
# - targets: [https://github.com]
|
||||||
# - https://github.com
|
# labels:
|
||||||
|
# destination: 'github'
|
||||||
relabel_configs:
|
relabel_configs:
|
||||||
- source_labels: [__address__]
|
- source_labels: [__address__]
|
||||||
regex: (.*)(:80)?
|
regex: (.*)(:80)?
|
||||||
@ -45,18 +46,7 @@ scrape_configs:
|
|||||||
metrics_path: /metrics
|
metrics_path: /metrics
|
||||||
scheme: http
|
scheme: http
|
||||||
static_configs:
|
static_configs:
|
||||||
- targets: ['ethereum-chain-head-exporter:5000']
|
- targets: ['chain-head-exporter:5000']
|
||||||
labels:
|
|
||||||
instance: 'external'
|
|
||||||
chain: 'ethereum'
|
|
||||||
- targets: ['filecoin-chain-head-exporter:5000']
|
|
||||||
labels:
|
|
||||||
instance: 'external'
|
|
||||||
chain: 'filecoin'
|
|
||||||
- targets: ['graph-node-upstream-head-exporter:5000']
|
|
||||||
labels:
|
|
||||||
instance: 'graph-node'
|
|
||||||
chain: 'filecoin'
|
|
||||||
|
|
||||||
- job_name: 'postgres'
|
- job_name: 'postgres'
|
||||||
scrape_interval: 30s
|
scrape_interval: 30s
|
||||||
@ -85,11 +75,3 @@ scrape_configs:
|
|||||||
# - targets: ['example-host:1317']
|
# - targets: ['example-host:1317']
|
||||||
params:
|
params:
|
||||||
format: ['prometheus']
|
format: ['prometheus']
|
||||||
|
|
||||||
- job_name: graph-node
|
|
||||||
metrics_path: /metrics
|
|
||||||
scrape_interval: 30s
|
|
||||||
scheme: http
|
|
||||||
static_configs:
|
|
||||||
# Add graph-node targets to be monitored below
|
|
||||||
# - targets: ['graph-node:8040']
|
|
||||||
|
|||||||
@ -1,64 +0,0 @@
|
|||||||
apiVersion: 1
|
|
||||||
groups:
|
|
||||||
- orgId: 1
|
|
||||||
name: subgraph
|
|
||||||
folder: SubgraphAlerts
|
|
||||||
interval: 30s
|
|
||||||
rules:
|
|
||||||
- uid: b2a9144b-6104-46fc-92b5-352f4e643c4c
|
|
||||||
title: subgraph_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: ethereum_chain_head_number - on(network) group_right deployment_head{deployment=~"REPLACE_WITH_SUBGRAPH_IDS"}
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
|
||||||
- 15
|
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: diff
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: threshold
|
|
||||||
noDataState: OK
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 5m
|
|
||||||
annotations:
|
|
||||||
summary: Subgraph deployment {{ index $labels "deployment" }} is falling behind head by {{ index $values "diff" }}
|
|
||||||
labels: {}
|
|
||||||
isPaused: false
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
echo Using CERC_GRAFANA_ALERTS_SUBGRAPH_IDS ${CERC_GRAFANA_ALERTS_SUBGRAPH_IDS}
|
|
||||||
|
|
||||||
# Replace subgraph ids in subgraph alerting config
|
|
||||||
# Note: Requires the grafana container to be run with user root
|
|
||||||
if [ -n "$CERC_GRAFANA_ALERTS_SUBGRAPH_IDS" ]; then
|
|
||||||
sed -i "s/REPLACE_WITH_SUBGRAPH_IDS/$CERC_GRAFANA_ALERTS_SUBGRAPH_IDS/g" /etc/grafana/provisioning/alerting/subgraph-alert-rules.yml
|
|
||||||
fi
|
|
||||||
@ -24,7 +24,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -59,29 +59,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -100,7 +100,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -135,29 +135,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -176,7 +176,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -211,29 +211,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -252,7 +252,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -287,29 +287,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -328,7 +328,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -363,29 +363,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -404,7 +404,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -439,29 +439,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -480,7 +480,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -515,29 +515,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -556,7 +556,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -591,29 +591,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -634,7 +634,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -669,29 +669,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -710,7 +710,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -745,29 +745,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
params:
|
||||||
|
- diff
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
@ -788,7 +788,7 @@ groups:
|
|||||||
uid: PBFA97CFB590B2093
|
uid: PBFA97CFB590B2093
|
||||||
disableTextWrap: false
|
disableTextWrap: false
|
||||||
editorMode: code
|
editorMode: code
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="ajna", instance="ajna", kind="latest_indexed"}
|
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="ajna", instance="ajna", kind="latest_indexed"}
|
||||||
fullMetaSearch: false
|
fullMetaSearch: false
|
||||||
includeNullMetadata: true
|
includeNullMetadata: true
|
||||||
instant: true
|
instant: true
|
||||||
@ -823,107 +823,29 @@ groups:
|
|||||||
conditions:
|
conditions:
|
||||||
- evaluator:
|
- evaluator:
|
||||||
params:
|
params:
|
||||||
- 0
|
- 15
|
||||||
- 0
|
- 0
|
||||||
type: gt
|
type: gt
|
||||||
operator:
|
operator:
|
||||||
type: and
|
type: when
|
||||||
query:
|
query:
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: ${diff} >= 16
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: math
|
|
||||||
noDataState: Alerting
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 15m
|
|
||||||
annotations:
|
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
|
||||||
isPaused: false
|
|
||||||
|
|
||||||
# Secured Finance
|
|
||||||
- uid: secured_finance_diff_external
|
|
||||||
title: secured_finance_watcher_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
disableTextWrap: false
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{instance="external"} - on(chain) group_right sync_status_block_number{job="secured-finance", instance="secured-finance", kind="latest_indexed"}
|
|
||||||
fullMetaSearch: false
|
|
||||||
includeNullMetadata: true
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
useBackend: false
|
|
||||||
- refId: latest_external
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{chain="filecoin"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_external
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
params:
|
||||||
- 0
|
- diff
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
reducer:
|
||||||
params: []
|
params: []
|
||||||
type: avg
|
type: last
|
||||||
type: query
|
type: query
|
||||||
datasource:
|
datasource:
|
||||||
name: Expression
|
name: Expression
|
||||||
type: __expr__
|
type: __expr__
|
||||||
uid: __expr__
|
uid: __expr__
|
||||||
expression: ${diff} >= 16
|
expression: ""
|
||||||
intervalMs: 1000
|
hide: false
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
refId: condition
|
||||||
type: math
|
type: classic_conditions
|
||||||
noDataState: Alerting
|
noDataState: Alerting
|
||||||
execErrState: Alerting
|
execErrState: Alerting
|
||||||
for: 15m
|
for: 5m
|
||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
|
|||||||
@ -6,16 +6,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
fi
|
fi
|
||||||
set -u
|
set -u
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
|
||||||
|
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
|
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
echo "$WATCHER_CONFIG" > environments/local.toml
|
echo "$WATCHER_CONFIG" > environments/local.toml
|
||||||
|
|||||||
@ -6,16 +6,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
fi
|
fi
|
||||||
set -u
|
set -u
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
|
||||||
|
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
|
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
echo "$WATCHER_CONFIG" > environments/local.toml
|
echo "$WATCHER_CONFIG" > environments/local.toml
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
host = "0.0.0.0"
|
host = "0.0.0.0"
|
||||||
port = 3008
|
port = 3008
|
||||||
kind = "active"
|
kind = "active"
|
||||||
|
gqlPath = "/"
|
||||||
|
|
||||||
# Checkpointing state.
|
# Checkpointing state.
|
||||||
checkpointing = true
|
checkpointing = true
|
||||||
@ -21,22 +22,15 @@
|
|||||||
# Interval in number of blocks at which to clear entities cache.
|
# Interval in number of blocks at which to clear entities cache.
|
||||||
clearEntitiesCacheInterval = 1000
|
clearEntitiesCacheInterval = 1000
|
||||||
|
|
||||||
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
|
|
||||||
rpcSupportsBlockHashParam = false
|
|
||||||
|
|
||||||
# Server GQL config
|
|
||||||
[server.gql]
|
|
||||||
path = "/"
|
|
||||||
|
|
||||||
# Max block range for which to return events in eventsInRange GQL query.
|
# Max block range for which to return events in eventsInRange GQL query.
|
||||||
# Use -1 for skipping check on block range.
|
# Use -1 for skipping check on block range.
|
||||||
maxEventsBlockRange = 1000
|
maxEventsBlockRange = 1000
|
||||||
|
|
||||||
# Log directory for GQL requests
|
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
|
||||||
logDir = "./gql-logs"
|
rpcSupportsBlockHashParam = false
|
||||||
|
|
||||||
# GQL cache settings
|
# GQL cache settings
|
||||||
[server.gql.cache]
|
[server.gqlCache]
|
||||||
enabled = true
|
enabled = true
|
||||||
|
|
||||||
# Max in-memory cache size (in bytes) (default 8 MB)
|
# Max in-memory cache size (in bytes) (default 8 MB)
|
||||||
@ -64,7 +58,7 @@
|
|||||||
|
|
||||||
[upstream]
|
[upstream]
|
||||||
[upstream.ethServer]
|
[upstream.ethServer]
|
||||||
rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
|
rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
|
||||||
|
|
||||||
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
|
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
|
||||||
rpcClient = true
|
rpcClient = true
|
||||||
@ -91,9 +85,6 @@
|
|||||||
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
|
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
|
||||||
blockDelayInMilliSecs = 30000
|
blockDelayInMilliSecs = 30000
|
||||||
|
|
||||||
# Number of blocks by which block processing lags behind head
|
|
||||||
blockProcessingOffset = 0
|
|
||||||
|
|
||||||
# Boolean to switch between modes of processing events when starting the server.
|
# Boolean to switch between modes of processing events when starting the server.
|
||||||
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
|
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
|
||||||
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
|
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
|
||||||
@ -105,6 +96,3 @@
|
|||||||
# Max block range of historical processing after which it waits for completion of events processing
|
# Max block range of historical processing after which it waits for completion of events processing
|
||||||
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
|
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
|
||||||
historicalMaxFetchAhead = 10000
|
historicalMaxFetchAhead = 10000
|
||||||
|
|
||||||
# Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
|
|
||||||
maxNewBlockRetries = 3
|
|
||||||
|
|||||||
@ -4,19 +4,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
set -x
|
set -x
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
|
||||||
echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL_ENDPOINT}"
|
echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
|
||||||
echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
|
echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
# Replace env variables in template TOML file
|
# Replace env variables in template TOML file
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}|g; \
|
sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
|
||||||
s|REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT|${CERC_IPLD_ETH_GQL_ENDPOINT}|g; \
|
s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
|
||||||
s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
|
s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
|
|||||||
@ -4,19 +4,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
set -x
|
set -x
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
|
||||||
echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL_ENDPOINT}"
|
echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
|
||||||
echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
|
echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
# Replace env variables in template TOML file
|
# Replace env variables in template TOML file
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}|g; \
|
sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
|
||||||
s|REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT|${CERC_IPLD_ETH_GQL_ENDPOINT}|g; \
|
s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
|
||||||
s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
|
s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
|
|||||||
@ -1,6 +1,5 @@
|
|||||||
[server]
|
[server]
|
||||||
host = "0.0.0.0"
|
host = "0.0.0.0"
|
||||||
[server.gql]
|
|
||||||
maxSimultaneousRequests = -1
|
maxSimultaneousRequests = -1
|
||||||
|
|
||||||
[metrics]
|
[metrics]
|
||||||
@ -14,8 +13,8 @@
|
|||||||
|
|
||||||
[upstream]
|
[upstream]
|
||||||
[upstream.ethServer]
|
[upstream.ethServer]
|
||||||
gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL_ENDPOINT"
|
gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL"
|
||||||
rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
|
rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"
|
||||||
|
|
||||||
[jobQueue]
|
[jobQueue]
|
||||||
historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE
|
historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE
|
||||||
|
|||||||
@ -6,16 +6,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
fi
|
fi
|
||||||
set -u
|
set -u
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
|
||||||
|
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
|
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
echo "$WATCHER_CONFIG" > environments/local.toml
|
echo "$WATCHER_CONFIG" > environments/local.toml
|
||||||
|
|||||||
@ -6,16 +6,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
fi
|
fi
|
||||||
set -u
|
set -u
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
|
||||||
|
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
|
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
echo "$WATCHER_CONFIG" > environments/local.toml
|
echo "$WATCHER_CONFIG" > environments/local.toml
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
host = "0.0.0.0"
|
host = "0.0.0.0"
|
||||||
port = 3008
|
port = 3008
|
||||||
kind = "active"
|
kind = "active"
|
||||||
|
gqlPath = '/'
|
||||||
|
|
||||||
# Checkpointing state.
|
# Checkpointing state.
|
||||||
checkpointing = true
|
checkpointing = true
|
||||||
@ -21,22 +22,15 @@
|
|||||||
# Interval in number of blocks at which to clear entities cache.
|
# Interval in number of blocks at which to clear entities cache.
|
||||||
clearEntitiesCacheInterval = 1000
|
clearEntitiesCacheInterval = 1000
|
||||||
|
|
||||||
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
|
|
||||||
rpcSupportsBlockHashParam = false
|
|
||||||
|
|
||||||
# Server GQL config
|
|
||||||
[server.gql]
|
|
||||||
path = "/"
|
|
||||||
|
|
||||||
# Max block range for which to return events in eventsInRange GQL query.
|
# Max block range for which to return events in eventsInRange GQL query.
|
||||||
# Use -1 for skipping check on block range.
|
# Use -1 for skipping check on block range.
|
||||||
maxEventsBlockRange = 1000
|
maxEventsBlockRange = 1000
|
||||||
|
|
||||||
# Log directory for GQL requests
|
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
|
||||||
logDir = "./gql-logs"
|
rpcSupportsBlockHashParam = false
|
||||||
|
|
||||||
# GQL cache settings
|
# GQL cache settings
|
||||||
[server.gql.cache]
|
[server.gqlCache]
|
||||||
enabled = true
|
enabled = true
|
||||||
|
|
||||||
# Max in-memory cache size (in bytes) (default 8 MB)
|
# Max in-memory cache size (in bytes) (default 8 MB)
|
||||||
@ -64,7 +58,7 @@
|
|||||||
|
|
||||||
[upstream]
|
[upstream]
|
||||||
[upstream.ethServer]
|
[upstream.ethServer]
|
||||||
rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
|
rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
|
||||||
|
|
||||||
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
|
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
|
||||||
rpcClient = true
|
rpcClient = true
|
||||||
@ -75,7 +69,7 @@
|
|||||||
# Boolean flag to filter event logs by contracts
|
# Boolean flag to filter event logs by contracts
|
||||||
filterLogsByAddresses = true
|
filterLogsByAddresses = true
|
||||||
# Boolean flag to filter event logs by topics
|
# Boolean flag to filter event logs by topics
|
||||||
filterLogsByTopics = true
|
filterLogsByTopics = false
|
||||||
|
|
||||||
[upstream.cache]
|
[upstream.cache]
|
||||||
name = "requests"
|
name = "requests"
|
||||||
@ -91,9 +85,6 @@
|
|||||||
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
|
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
|
||||||
blockDelayInMilliSecs = 30000
|
blockDelayInMilliSecs = 30000
|
||||||
|
|
||||||
# Number of blocks by which block processing lags behind head
|
|
||||||
blockProcessingOffset = 0
|
|
||||||
|
|
||||||
# Boolean to switch between modes of processing events when starting the server.
|
# Boolean to switch between modes of processing events when starting the server.
|
||||||
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
|
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
|
||||||
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
|
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
|
||||||
@ -105,6 +96,3 @@
|
|||||||
# Max block range of historical processing after which it waits for completion of events processing
|
# Max block range of historical processing after which it waits for completion of events processing
|
||||||
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
|
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
|
||||||
historicalMaxFetchAhead = 10000
|
historicalMaxFetchAhead = 10000
|
||||||
|
|
||||||
# Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
|
|
||||||
maxNewBlockRetries = 3
|
|
||||||
|
|||||||
@ -940,3 +940,4 @@ ALTER TABLE ONLY public.state
|
|||||||
--
|
--
|
||||||
-- PostgreSQL database dump complete
|
-- PostgreSQL database dump complete
|
||||||
--
|
--
|
||||||
|
|
||||||
|
|||||||
@ -6,16 +6,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
fi
|
fi
|
||||||
set -u
|
set -u
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
|
||||||
|
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
|
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
echo "$WATCHER_CONFIG" > environments/local.toml
|
echo "$WATCHER_CONFIG" > environments/local.toml
|
||||||
|
|||||||
@ -6,16 +6,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
fi
|
fi
|
||||||
set -u
|
set -u
|
||||||
|
|
||||||
echo "Using ETH RPC endpoints ${CERC_ETH_RPC_ENDPOINTS}"
|
echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"
|
||||||
|
|
||||||
# Read in the config template TOML file and modify it
|
# Read in the config template TOML file and modify it
|
||||||
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
|
||||||
|
|
||||||
# Convert the comma-separated list in CERC_ETH_RPC_ENDPOINTS to a JSON array
|
|
||||||
RPC_ENDPOINTS_ARRAY=$(echo "$CERC_ETH_RPC_ENDPOINTS" | tr ',' '\n' | awk '{print "\"" $0 "\""}' | paste -sd, - | sed 's/^/[/; s/$/]/')
|
|
||||||
|
|
||||||
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
|
||||||
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS|${RPC_ENDPOINTS_ARRAY}| ")
|
sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")
|
||||||
|
|
||||||
# Write the modified content to a new file
|
# Write the modified content to a new file
|
||||||
echo "$WATCHER_CONFIG" > environments/local.toml
|
echo "$WATCHER_CONFIG" > environments/local.toml
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
host = "0.0.0.0"
|
host = "0.0.0.0"
|
||||||
port = 3008
|
port = 3008
|
||||||
kind = "active"
|
kind = "active"
|
||||||
|
gqlPath = "/"
|
||||||
|
|
||||||
# Checkpointing state.
|
# Checkpointing state.
|
||||||
checkpointing = true
|
checkpointing = true
|
||||||
@ -21,22 +22,15 @@
|
|||||||
# Interval in number of blocks at which to clear entities cache.
|
# Interval in number of blocks at which to clear entities cache.
|
||||||
clearEntitiesCacheInterval = 1000
|
clearEntitiesCacheInterval = 1000
|
||||||
|
|
||||||
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
|
|
||||||
rpcSupportsBlockHashParam = false
|
|
||||||
|
|
||||||
# Server GQL config
|
|
||||||
[server.gql]
|
|
||||||
path = "/"
|
|
||||||
|
|
||||||
# Max block range for which to return events in eventsInRange GQL query.
|
# Max block range for which to return events in eventsInRange GQL query.
|
||||||
# Use -1 for skipping check on block range.
|
# Use -1 for skipping check on block range.
|
||||||
maxEventsBlockRange = 1000
|
maxEventsBlockRange = 1000
|
||||||
|
|
||||||
# Log directory for GQL requests
|
# Flag to specify whether RPC endpoint supports block hash as block tag parameter
|
||||||
logDir = "./gql-logs"
|
rpcSupportsBlockHashParam = false
|
||||||
|
|
||||||
# GQL cache settings
|
# GQL cache settings
|
||||||
[server.gql.cache]
|
[server.gqlCache]
|
||||||
enabled = true
|
enabled = true
|
||||||
|
|
||||||
# Max in-memory cache size (in bytes) (default 8 MB)
|
# Max in-memory cache size (in bytes) (default 8 MB)
|
||||||
@ -64,7 +58,7 @@
|
|||||||
|
|
||||||
[upstream]
|
[upstream]
|
||||||
[upstream.ethServer]
|
[upstream.ethServer]
|
||||||
rpcProviderEndpoints = REPLACE_WITH_CERC_ETH_RPC_ENDPOINTS
|
rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"
|
||||||
|
|
||||||
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
|
# Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
|
||||||
rpcClient = true
|
rpcClient = true
|
||||||
@ -91,9 +85,6 @@
|
|||||||
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
|
# Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
|
||||||
blockDelayInMilliSecs = 30000
|
blockDelayInMilliSecs = 30000
|
||||||
|
|
||||||
# Number of blocks by which block processing lags behind head
|
|
||||||
blockProcessingOffset = 0
|
|
||||||
|
|
||||||
# Boolean to switch between modes of processing events when starting the server.
|
# Boolean to switch between modes of processing events when starting the server.
|
||||||
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
|
# Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
|
||||||
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
|
# Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
|
||||||
@ -105,6 +96,3 @@
|
|||||||
# Max block range of historical processing after which it waits for completion of events processing
|
# Max block range of historical processing after which it waits for completion of events processing
|
||||||
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
|
# If set to -1 historical processing does not wait for events processing and completes till latest canonical block
|
||||||
historicalMaxFetchAhead = 10000
|
historicalMaxFetchAhead = 10000
|
||||||
|
|
||||||
# Max number of retries to fetch new block after which watcher will failover to other RPC endpoints
|
|
||||||
maxNewBlockRetries = 3
|
|
||||||
|
|||||||
@ -18,3 +18,4 @@ root@7c4124bb09e3:/src#
|
|||||||
```
|
```
|
||||||
|
|
||||||
Now gerbil commands can be run.
|
Now gerbil commands can be run.
|
||||||
|
|
||||||
|
|||||||
@ -68,5 +68,5 @@ ENV PATH="${PATH}:/scripts"
|
|||||||
COPY entrypoint.sh .
|
COPY entrypoint.sh .
|
||||||
ENTRYPOINT ["./entrypoint.sh"]
|
ENTRYPOINT ["./entrypoint.sh"]
|
||||||
# Placeholder CMD : generally this will be overridden at run time like :
|
# Placeholder CMD : generally this will be overridden at run time like :
|
||||||
# docker run -it -v /home/builder/cerc/registry-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build'
|
# docker run -it -v /home/builder/cerc/laconic-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build'
|
||||||
CMD node --version
|
CMD node --version
|
||||||
|
|||||||
@ -11,8 +11,6 @@ if len(sys.argv) > 1:
|
|||||||
with open(testnet_config_path) as stream:
|
with open(testnet_config_path) as stream:
|
||||||
data = yaml.safe_load(stream)
|
data = yaml.safe_load(stream)
|
||||||
|
|
||||||
for key, value in data["el_premine"].items():
|
for key, value in data['el_premine'].items():
|
||||||
acct = w3.eth.account.from_mnemonic(
|
acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='')
|
||||||
data["mnemonic"], account_path=key, passphrase=""
|
|
||||||
)
|
|
||||||
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
||||||
|
|||||||
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# Build cerc/laconic-console-host
|
# Build cerc/laconic-registry-cli
|
||||||
|
|
||||||
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
||||||
|
|
||||||
|
|||||||
@ -5,16 +5,16 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
set -x
|
set -x
|
||||||
fi
|
fi
|
||||||
|
|
||||||
registry_command="laconic registry"
|
registry_command="laconic cns"
|
||||||
demo_records_dir="scripts/demo-records"
|
demo_records_dir="scripts/demo-records"
|
||||||
|
|
||||||
# Check we have funds
|
# Check we have funds
|
||||||
funds_response=$(${registry_command} account get --address $(cat my-address.txt))
|
funds_response=$(${registry_command} account get --address $(cat my-address.txt))
|
||||||
funds_balance=$(echo ${funds_response} | jq -r ".[0].balance[0].quantity")
|
funds_balance=$(echo ${funds_response} | jq -r .[0].balance[0].quantity)
|
||||||
echo "Balance is: ${funds_balance}"
|
echo "Balance is: ${funds_balance}"
|
||||||
|
|
||||||
# Create a bond
|
# Create a bond
|
||||||
bond_create_result=$(${registry_command} bond create --type alnt --quantity 1000000000)
|
bond_create_result=$(${registry_command} bond create --type aphoton --quantity 1000000000)
|
||||||
bond_id=$(echo ${bond_create_result} | jq -r .bondId)
|
bond_id=$(echo ${bond_create_result} | jq -r .bondId)
|
||||||
echo "Created bond with id: ${bond_id}"
|
echo "Created bond with id: ${bond_id}"
|
||||||
|
|
||||||
|
|||||||
@ -7,9 +7,9 @@ record:
|
|||||||
env:
|
env:
|
||||||
ENV_VAR_A: A
|
ENV_VAR_A: A
|
||||||
ENV_VAR_B: B
|
ENV_VAR_B: B
|
||||||
lrn:
|
crn:
|
||||||
- lrn://foo.bar
|
- crn://foo.bar
|
||||||
- lrn://bar.baz
|
- crn://bar.baz
|
||||||
meta:
|
meta:
|
||||||
foo: bar
|
foo: bar
|
||||||
tags:
|
tags:
|
||||||
|
|||||||
@ -28,8 +28,6 @@ RUN \
|
|||||||
&& su ${USERNAME} -c "umask 0002 && npm install -g semver" \
|
&& su ${USERNAME} -c "umask 0002 && npm install -g semver" \
|
||||||
# Install pnpm
|
# Install pnpm
|
||||||
&& su ${USERNAME} -c "umask 0002 && npm install -g pnpm" \
|
&& su ${USERNAME} -c "umask 0002 && npm install -g pnpm" \
|
||||||
# Install bun
|
|
||||||
&& su ${USERNAME} -c "umask 0002 && npm install -g bun@1.1.x" \
|
|
||||||
&& npm cache clean --force > /dev/null 2>&1
|
&& npm cache clean --force > /dev/null 2>&1
|
||||||
|
|
||||||
# [Optional] Uncomment this section to install additional OS packages.
|
# [Optional] Uncomment this section to install additional OS packages.
|
||||||
|
|||||||
@ -14,8 +14,6 @@ if [ -z "$CERC_BUILD_TOOL" ]; then
|
|||||||
CERC_BUILD_TOOL=pnpm
|
CERC_BUILD_TOOL=pnpm
|
||||||
elif [ -f "yarn.lock" ]; then
|
elif [ -f "yarn.lock" ]; then
|
||||||
CERC_BUILD_TOOL=yarn
|
CERC_BUILD_TOOL=yarn
|
||||||
elif [ -f "bun.lockb" ]; then
|
|
||||||
CERC_BUILD_TOOL=bun
|
|
||||||
else
|
else
|
||||||
CERC_BUILD_TOOL=npm
|
CERC_BUILD_TOOL=npm
|
||||||
fi
|
fi
|
||||||
|
|||||||
@ -5,19 +5,14 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
CERC_MIN_NEXTVER=13.4.2
|
CERC_MIN_NEXTVER=13.4.2
|
||||||
CERC_DEFAULT_WEBPACK_VER="5.93.0"
|
|
||||||
|
|
||||||
CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-keep}"
|
CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-keep}"
|
||||||
CERC_WEBPACK_VERSION="${CERC_WEBPACK_VERSION:-keep}"
|
|
||||||
|
|
||||||
CERC_BUILD_TOOL="${CERC_BUILD_TOOL}"
|
CERC_BUILD_TOOL="${CERC_BUILD_TOOL}"
|
||||||
if [ -z "$CERC_BUILD_TOOL" ]; then
|
if [ -z "$CERC_BUILD_TOOL" ]; then
|
||||||
if [ -f "pnpm-lock.yaml" ]; then
|
if [ -f "pnpm-lock.yaml" ]; then
|
||||||
CERC_BUILD_TOOL=pnpm
|
CERC_BUILD_TOOL=pnpm
|
||||||
elif [ -f "yarn.lock" ]; then
|
elif [ -f "yarn.lock" ]; then
|
||||||
CERC_BUILD_TOOL=yarn
|
CERC_BUILD_TOOL=yarn
|
||||||
elif [ -f "bun.lockb" ]; then
|
|
||||||
CERC_BUILD_TOOL=bun
|
|
||||||
else
|
else
|
||||||
CERC_BUILD_TOOL=npm
|
CERC_BUILD_TOOL=npm
|
||||||
fi
|
fi
|
||||||
@ -26,29 +21,15 @@ fi
|
|||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
WORK_DIR="${1:-/app}"
|
WORK_DIR="${1:-/app}"
|
||||||
|
|
||||||
if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
|
|
||||||
echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
|
|
||||||
cd "${WORK_DIR}" || exit 1
|
cd "${WORK_DIR}" || exit 1
|
||||||
|
|
||||||
./build-webapp.sh || exit 1
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -f "next.config.mjs" ]; then
|
|
||||||
NEXT_CONFIG_JS="next.config.mjs"
|
|
||||||
IMPORT_OR_REQUIRE="import"
|
|
||||||
else
|
|
||||||
NEXT_CONFIG_JS="next.config.js"
|
|
||||||
IMPORT_OR_REQUIRE="require"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If this file doesn't exist at all, we'll get errors below.
|
# If this file doesn't exist at all, we'll get errors below.
|
||||||
if [ ! -f "${NEXT_CONFIG_JS}" ]; then
|
if [ ! -f "next.config.js" ]; then
|
||||||
touch ${NEXT_CONFIG_JS}
|
touch next.config.js
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f "next.config.dist" ]; then
|
if [ ! -f "next.config.dist" ]; then
|
||||||
cp $NEXT_CONFIG_JS next.config.dist
|
cp next.config.js next.config.dist
|
||||||
fi
|
fi
|
||||||
|
|
||||||
which js-beautify >/dev/null
|
which js-beautify >/dev/null
|
||||||
@ -56,34 +37,17 @@ if [ $? -ne 0 ]; then
|
|||||||
npm i -g js-beautify
|
npm i -g js-beautify
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# js-beautify formats NEXTJS_CONFIG_FILE (ie next.config.js / next.config.mjs) so we can reliably transformable later
|
js-beautify next.config.dist > next.config.js
|
||||||
js-beautify next.config.dist > ${NEXT_CONFIG_JS}
|
echo "" >> next.config.js
|
||||||
echo "" >> ${NEXT_CONFIG_JS}
|
|
||||||
|
|
||||||
if [ "${IMPORT_OR_REQUIRE}" == "require" ]; then
|
WEBPACK_REQ_LINE=$(grep -n "require([\'\"]webpack[\'\"])" next.config.js | cut -d':' -f1)
|
||||||
WEBPACK_REQ_LINE=$(grep -n "require([\'\"]webpack[\'\"])" ${NEXT_CONFIG_JS} | cut -d':' -f1)
|
if [ -z "$WEBPACK_REQ_LINE" ]; then
|
||||||
if [ -z "$WEBPACK_REQ_LINE" ]; then
|
cat > next.config.js.0 <<EOF
|
||||||
cat > ${NEXT_CONFIG_JS}.0 <<EOF
|
|
||||||
const webpack = require('webpack');
|
const webpack = require('webpack');
|
||||||
EOF
|
EOF
|
||||||
fi
|
|
||||||
else
|
|
||||||
WEBPACK_IMPORT_LINE=$(grep -n "^import .*[\'\"]webpack[\'\"];?$" ${NEXT_CONFIG_JS} | cut -d':' -f1)
|
|
||||||
if [ -z "$WEBPACK_IMPORT_LINE" ]; then
|
|
||||||
cat > ${NEXT_CONFIG_JS}.0 <<EOF
|
|
||||||
import webpack from 'webpack';
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
CREATE_REQUIRE_LINE=$(grep -n "require = createRequire" ${NEXT_CONFIG_JS} | cut -d':' -f1)
|
|
||||||
if [ -z "$CREATE_REQUIRE_LINE" ]; then
|
|
||||||
cat >> ${NEXT_CONFIG_JS}.0 <<EOF
|
|
||||||
import { createRequire } from "module";
|
|
||||||
const require = createRequire(import.meta.url);
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cat > ${NEXT_CONFIG_JS}.1 <<EOF
|
cat > next.config.js.1 <<EOF
|
||||||
let envMap;
|
let envMap;
|
||||||
try {
|
try {
|
||||||
// .env-list.json provides us a list of identifiers which should be replaced at runtime.
|
// .env-list.json provides us a list of identifiers which should be replaced at runtime.
|
||||||
@ -91,8 +55,7 @@ try {
|
|||||||
a[v] = \`"CERC_RUNTIME_ENV_\${v.split(/\./).pop()}"\`;
|
a[v] = \`"CERC_RUNTIME_ENV_\${v.split(/\./).pop()}"\`;
|
||||||
return a;
|
return a;
|
||||||
}, {});
|
}, {});
|
||||||
} catch (e) {
|
} catch {
|
||||||
console.error(e);
|
|
||||||
// If .env-list.json cannot be loaded, we are probably running in dev mode, so use process.env instead.
|
// If .env-list.json cannot be loaded, we are probably running in dev mode, so use process.env instead.
|
||||||
envMap = Object.keys(process.env).reduce((a, v) => {
|
envMap = Object.keys(process.env).reduce((a, v) => {
|
||||||
if (v.startsWith('CERC_')) {
|
if (v.startsWith('CERC_')) {
|
||||||
@ -101,80 +64,40 @@ try {
|
|||||||
return a;
|
return a;
|
||||||
}, {});
|
}, {});
|
||||||
}
|
}
|
||||||
console.log(envMap);
|
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
grep 'withPWA' ${NEXT_CONFIG_JS} >/dev/null && HAS_WITHPWA=true || HAS_WITHPWA=false
|
CONFIG_LINES=$(wc -l next.config.js | awk '{ print $1 }')
|
||||||
|
ENV_LINE=$(grep -n 'env:' next.config.js | cut -d':' -f1)
|
||||||
|
WEBPACK_CONF_LINE=$(egrep -n 'webpack:\s+\([^,]+,' next.config.js | cut -d':' -f1)
|
||||||
|
NEXT_SECTION_ADJUSTMENT=0
|
||||||
|
|
||||||
if [ "$HAS_WITHPWA" == "true" ]; then
|
if [ -n "$WEBPACK_CONF_LINE" ]; then
|
||||||
if [ "$IMPORT_OR_REQUIRE" == "import" ]; then
|
WEBPACK_CONF_VAR=$(egrep -n 'webpack:\s+\([^,]+,' next.config.js | cut -d',' -f1 | cut -d'(' -f2)
|
||||||
cat > ${NEXT_CONFIG_JS}.2 <<EOF
|
head -$(( ${WEBPACK_CONF_LINE} )) next.config.js > next.config.js.2
|
||||||
const __xPWA__ = (p) => {
|
cat > next.config.js.3 <<EOF
|
||||||
const realPWA = withPWA(p);
|
$WEBPACK_CONF_VAR.plugins.push(new webpack.DefinePlugin(envMap));
|
||||||
return (nextConfig) => {
|
EOF
|
||||||
const modConfig = {...nextConfig};
|
NEXT_SECTION_LINE=$((WEBPACK_CONF_LINE))
|
||||||
|
elif [ -n "$ENV_LINE" ]; then
|
||||||
modConfig.webpack = (config) => {
|
head -$(( ${ENV_LINE} - 1 )) next.config.js > next.config.js.2
|
||||||
|
cat > next.config.js.3 <<EOF
|
||||||
|
webpack: (config) => {
|
||||||
config.plugins.push(new webpack.DefinePlugin(envMap));
|
config.plugins.push(new webpack.DefinePlugin(envMap));
|
||||||
return nextConfig.webpack ? nextConfig.webpack(config) : config;
|
return config;
|
||||||
};
|
},
|
||||||
|
|
||||||
return realPWA(modConfig);
|
|
||||||
};
|
|
||||||
};
|
|
||||||
EOF
|
EOF
|
||||||
else
|
NEXT_SECTION_ADJUSTMENT=1
|
||||||
cat > ${NEXT_CONFIG_JS}.3 <<EOF
|
NEXT_SECTION_LINE=$ENV_LINE
|
||||||
const __xPWA__ = (nextConfig) => {
|
|
||||||
const modConfig = {...nextConfig};
|
|
||||||
|
|
||||||
modConfig.webpack = (config) => {
|
|
||||||
config.plugins.push(new webpack.DefinePlugin(envMap));
|
|
||||||
return nextConfig.webpack ? nextConfig.webpack(config) : config;
|
|
||||||
};
|
|
||||||
|
|
||||||
return withPWA(modConfig);
|
|
||||||
};
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
cat ${NEXT_CONFIG_JS} | js-beautify | sed 's/withPWA(/__xPWA__(/g' > ${NEXT_CONFIG_JS}.4
|
|
||||||
else
|
else
|
||||||
cat > ${NEXT_CONFIG_JS}.3 <<EOF
|
echo "WARNING: Cannot find location to insert environment variable map in next.config.js" 1>&2
|
||||||
const __xCfg__ = (nextConfig) => {
|
rm -f next.config.js.*
|
||||||
const modConfig = {...nextConfig};
|
NEXT_SECTION_LINE=0
|
||||||
|
|
||||||
modConfig.webpack = (config) => {
|
|
||||||
config.plugins.push(new webpack.DefinePlugin(envMap));
|
|
||||||
return nextConfig.webpack ? nextConfig.webpack(config) : config;
|
|
||||||
};
|
|
||||||
|
|
||||||
return modConfig;
|
|
||||||
};
|
|
||||||
EOF
|
|
||||||
if [ "$IMPORT_OR_REQUIRE" == "import" ]; then
|
|
||||||
cat ${NEXT_CONFIG_JS} | js-beautify | sed 's/export\s\+default\s\+/const __orig_cfg__ = /g' > ${NEXT_CONFIG_JS}.4
|
|
||||||
echo "export default __xCfg__(__orig_cfg__);" > ${NEXT_CONFIG_JS}.5
|
|
||||||
else
|
|
||||||
cat ${NEXT_CONFIG_JS} | js-beautify | sed 's/module.exports\s\+=\s\+/const __orig_cfg__ = /g' > ${NEXT_CONFIG_JS}.4
|
|
||||||
echo "module.exports = __xCfg__(__orig_cfg__);" > ${NEXT_CONFIG_JS}.5
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
tail -$(( ${CONFIG_LINES} - ${NEXT_SECTION_LINE} + ${NEXT_SECTION_ADJUSTMENT} )) next.config.js > next.config.js.5
|
||||||
|
|
||||||
rm -f ${NEXT_CONFIG_JS}
|
cat next.config.js.* | sed 's/^ *//g' | js-beautify | grep -v 'process\.\env\.' | js-beautify > next.config.js
|
||||||
for ((i=0; i <= 10; i++)); do
|
rm next.config.js.*
|
||||||
if [ -s "${NEXT_CONFIG_JS}.${i}" ]; then
|
|
||||||
if [ $i -le 2 ] ; then
|
|
||||||
cat ${NEXT_CONFIG_JS}.${i} >> ${NEXT_CONFIG_JS}
|
|
||||||
else
|
|
||||||
cat ${NEXT_CONFIG_JS}.${i} | sed 's/^ *//g' | js-beautify | grep -v 'process\.\env\.' | js-beautify >> ${NEXT_CONFIG_JS}
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
rm ${NEXT_CONFIG_JS}.*
|
|
||||||
cat ${NEXT_CONFIG_JS} | js-beautify > ${NEXT_CONFIG_JS}.pretty
|
|
||||||
mv ${NEXT_CONFIG_JS}.pretty ${NEXT_CONFIG_JS}
|
|
||||||
|
|
||||||
"${SCRIPT_DIR}/find-env.sh" "$(pwd)" > .env-list.json
|
"${SCRIPT_DIR}/find-env.sh" "$(pwd)" > .env-list.json
|
||||||
|
|
||||||
@ -182,6 +105,8 @@ if [ ! -f "package.dist" ]; then
|
|||||||
cp package.json package.dist
|
cp package.json package.dist
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
cat package.dist | jq '.scripts.cerc_compile = "next experimental-compile"' | jq '.scripts.cerc_generate = "next experimental-generate"' > package.json
|
||||||
|
|
||||||
CUR_NEXT_VERSION="`jq -r '.dependencies.next' package.json`"
|
CUR_NEXT_VERSION="`jq -r '.dependencies.next' package.json`"
|
||||||
|
|
||||||
if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VERSION" ]; then
|
if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VERSION" ]; then
|
||||||
@ -190,38 +115,10 @@ if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VE
|
|||||||
mv package.json.$$ package.json
|
mv package.json.$$ package.json
|
||||||
fi
|
fi
|
||||||
|
|
||||||
CUR_WEBPACK_VERSION="`jq -r '.dependencies.webpack' package.json`"
|
|
||||||
if [ -z "$CUR_WEBPACK_VERSION" ]; then
|
|
||||||
CUR_WEBPACK_VERSION="`jq -r '.devDependencies.webpack' package.json`"
|
|
||||||
fi
|
|
||||||
if [ "${CERC_WEBPACK_VERSION}" != "keep" ] || [ "${CUR_WEBPACK_VERSION}" == "null" ]; then
|
|
||||||
if [ -z "$CERC_WEBPACK_VERSION" ] || [ "$CERC_WEBPACK_VERSION" == "keep" ]; then
|
|
||||||
CERC_WEBPACK_VERSION="${CERC_DEFAULT_WEBPACK_VER}"
|
|
||||||
fi
|
|
||||||
echo "Webpack is required for env variable substitution. Adding to webpack@$CERC_WEBPACK_VERSION to dependencies..." 1>&2
|
|
||||||
cat package.json | jq ".dependencies.webpack = \"$CERC_WEBPACK_VERSION\"" > package.json.$$
|
|
||||||
mv package.json.$$ package.json
|
|
||||||
fi
|
|
||||||
|
|
||||||
time $CERC_BUILD_TOOL install || exit 1
|
time $CERC_BUILD_TOOL install || exit 1
|
||||||
|
|
||||||
CUR_NEXT_VERSION=`jq -r '.version' node_modules/next/package.json`
|
CUR_NEXT_VERSION=`jq -r '.version' node_modules/next/package.json`
|
||||||
|
|
||||||
# See https://github.com/vercel/next.js/discussions/46544
|
|
||||||
semver -p -r ">=14.2.0" "$CUR_NEXT_VERSION"
|
|
||||||
if [ $? -eq 0 ]; then
|
|
||||||
# For >= 14.2.0
|
|
||||||
CERC_NEXT_COMPILE_COMMAND="next build --experimental-build-mode compile"
|
|
||||||
CERC_NEXT_GENERATE_COMMAND="next build --experimental-build-mode generate"
|
|
||||||
else
|
|
||||||
# For 13.4.2 to 14.1.x
|
|
||||||
CERC_NEXT_COMPILE_COMMAND="next experimental-compile"
|
|
||||||
CERC_NEXT_GENERATE_COMMAND="next experimental-generate"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cat package.json | jq ".scripts.cerc_compile = \"$CERC_NEXT_COMPILE_COMMAND\"" | jq ".scripts.cerc_generate = \"$CERC_NEXT_GENERATE_COMMAND\"" > package.json.$$
|
|
||||||
mv package.json.$$ package.json
|
|
||||||
|
|
||||||
semver -p -r ">=$CERC_MIN_NEXTVER" $CUR_NEXT_VERSION
|
semver -p -r ">=$CERC_MIN_NEXTVER" $CUR_NEXT_VERSION
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
|
|||||||
@ -20,11 +20,9 @@ for d in $(find . -maxdepth 1 -type d | grep -v '\./\.' | grep '/' | cut -d'/' -
|
|||||||
done
|
done
|
||||||
done
|
done
|
||||||
|
|
||||||
NEXT_CONF="next.config.mjs next.config.js next.config.dist"
|
NEXT_CONF="next.config.js next.config.dist"
|
||||||
for f in $NEXT_CONF; do
|
for f in $NEXT_CONF; do
|
||||||
if [ -f "$f" ]; then
|
|
||||||
cat "$f" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o 'process.env.[A-Za-z0-9_]+' >> $TMPF
|
cat "$f" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o 'process.env.[A-Za-z0-9_]+' >> $TMPF
|
||||||
fi
|
|
||||||
done
|
done
|
||||||
|
|
||||||
cat $TMPF | sort -u | jq --raw-input . | jq --slurp .
|
cat $TMPF | sort -u | jq --raw-input . | jq --slurp .
|
||||||
|
|||||||
@ -5,7 +5,7 @@ fi
|
|||||||
|
|
||||||
|
|
||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
CERC_MAX_GENERATE_TIME=${CERC_MAX_GENERATE_TIME:-120}
|
CERC_MAX_GENERATE_TIME=${CERC_MAX_GENERATE_TIME:-60}
|
||||||
tpid=""
|
tpid=""
|
||||||
|
|
||||||
ctrl_c() {
|
ctrl_c() {
|
||||||
@ -20,8 +20,6 @@ if [ -z "$CERC_BUILD_TOOL" ]; then
|
|||||||
CERC_BUILD_TOOL=pnpm
|
CERC_BUILD_TOOL=pnpm
|
||||||
elif [ -f "yarn.lock" ]; then
|
elif [ -f "yarn.lock" ]; then
|
||||||
CERC_BUILD_TOOL=yarn
|
CERC_BUILD_TOOL=yarn
|
||||||
elif [ -f "bun.lockb" ]; then
|
|
||||||
CERC_BUILD_TOOL=bun
|
|
||||||
else
|
else
|
||||||
CERC_BUILD_TOOL=npm
|
CERC_BUILD_TOOL=npm
|
||||||
fi
|
fi
|
||||||
@ -30,18 +28,11 @@ fi
|
|||||||
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
||||||
cd "$CERC_WEBAPP_FILES_DIR"
|
cd "$CERC_WEBAPP_FILES_DIR"
|
||||||
|
|
||||||
if [ -f "./run-webapp.sh" ]; then
|
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
||||||
echo "Running webapp with run-webapp.sh ..."
|
mv .next .next.old
|
||||||
cd "${WORK_DIR}" || exit 1
|
mv .next-r/.next .
|
||||||
./run-webapp.sh &
|
|
||||||
tpid=$!
|
|
||||||
wait $tpid
|
|
||||||
else
|
|
||||||
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
|
||||||
mv .next .next.old
|
|
||||||
mv .next-r/.next .
|
|
||||||
|
|
||||||
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
|
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
|
||||||
jq -e '.scripts.cerc_generate' package.json >/dev/null
|
jq -e '.scripts.cerc_generate' package.json >/dev/null
|
||||||
if [ $? -eq 0 ]; then
|
if [ $? -eq 0 ]; then
|
||||||
npm run cerc_generate > gen.out 2>&1 &
|
npm run cerc_generate > gen.out 2>&1 &
|
||||||
@ -67,7 +58,6 @@ else
|
|||||||
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
|
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
|
||||||
tpid=""
|
tpid=""
|
||||||
fi
|
fi
|
||||||
fi
|
|
||||||
|
|
||||||
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
||||||
|
|||||||
@ -1,10 +0,0 @@
|
|||||||
FROM cerc/ping-pub-base:local
|
|
||||||
|
|
||||||
COPY ./scripts/update-explorer-config.sh /scripts
|
|
||||||
COPY ./scripts/start-serving-explorer.sh /scripts
|
|
||||||
COPY ./config/laconic-chaindata-template.json /config/chains/laconic-chaindata-template.json
|
|
||||||
|
|
||||||
EXPOSE 5173
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
CMD ["/scripts/start-serving-explorer.sh"]
|
|
||||||
@ -1,7 +0,0 @@
|
|||||||
FROM cerc/webapp-base:local
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
RUN yarn
|
|
||||||
@ -1,12 +1,5 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# Build the ping pub image
|
# Build the ping pub image
|
||||||
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
|
||||||
|
|
||||||
# Two-stage build is to allow us to pick up both the upstream repo's files, and local files here for config
|
docker build -t cerc/ping-pub:local ${build_command_args} -f $CERC_REPO_BASE_DIR/explorer/Dockerfile $CERC_REPO_BASE_DIR/explorer
|
||||||
docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/cosmos-explorer
|
|
||||||
if [[ $? -ne 0 ]]; then
|
|
||||||
echo "FATAL: Base container build failed, exiting"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
docker build -t cerc/ping-pub:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile $SCRIPT_DIR
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user