Compare commits

..

5 Commits

SHA1        Date                        Message
dc3103ca84  2023-05-05 13:28:57 +05:30  Change delimiter while replacing deny multiaddrs list (Former-commit-id: db27b07d99)
c7cae95114  2023-05-05 10:27:26 +05:30  Use provided domain for relay multiaddr in peer config (Former-commit-id: 164a019d69)
cc9a0a5c36  2023-05-04 20:01:38 +05:30  Update package versions (Former-commit-id: ff51e47dfb)
74db89c285  2023-05-04 20:00:58 +05:30  Add watcher config option for blacklisted multiaddrs (Former-commit-id: 379332d5ec)
46de299032  2023-05-04 18:42:45 +05:30  Add env variable for web apps config denyMultiaddrs (Former-commit-id: 859a0f684f)
774 changed files with 4213 additions and 103947 deletions

View File

@@ -1,21 +1,18 @@
-name: Fixturenet-Eth Test
+name: Fixturenet-Eth-Test
 on:
   push:
-    branches: '*'
-    paths:
-      - '!**'
-      - '.github/workflows/triggers/fixturenet-eth-test'
+    branches: 'ci-test'
 jobs:
   test:
-    name: "Run fixturenet-eth test suite"
+    name: "Run an Ethereum fixturenet test"
     runs-on: ubuntu-latest
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3
       - name: "Install Python"
-        uses: actions/setup-python@v4
+        uses: cerc-io/setup-python@v4
         with:
           python-version: '3.8'
       - name: "Print Python version"

View File

@@ -1,66 +0,0 @@
name: Fixturenet-Laconicd-Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/fixturenet-laconicd-test'
schedule:
- cron: '1 13 * * *'
jobs:
test:
name: "Run Laconicd fixturenet and Laconic CLI tests"
runs-on: ubuntu-latest
steps:
- name: 'Update'
run: apt-get update
- name: 'Setup jq'
run: apt-get install jq -y
- name: 'Check jq'
run: |
which jq
jq --version
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-laconicd tests"
run: ./tests/fixturenet-laconicd/run-test.sh
- name: "Run laconic CLI tests"
run: ./tests/fixturenet-laconicd/run-cli-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,37 +0,0 @@
name: Lint Checks
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run linter"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name : "Run flake8"
uses: py-actions/flake8@v2
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -5,8 +5,6 @@ on:
   branches:
     - main
     - publish-test
-  paths-ignore:
-    - '.gitea/workflows/triggers/*'
 jobs:
   publish:
@@ -20,22 +18,14 @@ jobs:
         run: |
           build_tag=$(./scripts/create_build_tag_file.sh)
           echo "build-tag=v${build_tag}" >> $GITHUB_OUTPUT
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below workaround this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
+      - name: "Install Python"
+        uses: cerc-io/setup-python@v4
         with:
           python-version: '3.8'
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Build local shiv package"
         id: build
         run: |
@@ -47,26 +37,10 @@ jobs:
         run: |
           cp ${{ steps.build.outputs.package-file }} ./laconic-so
       - name: "Create release"
-        uses: https://gitea.com/cerc-io/action-gh-release@gitea-v2
+        uses: cerc-io/action-gh-release@gitea-v1
         with:
           tag_name: ${{ steps.build-info.outputs.build-tag }}
           # On the publish test branch, mark our release as a draft
           # Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
           draft: ${{ endsWith('publish-test', github.ref ) }}
           files: ./laconic-so
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,69 +0,0 @@
name: Container Registry Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-container-registry'
- '.gitea/workflows/test-container-registry.yml'
- 'tests/container-registry/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '6 19 * * *'
jobs:
test:
name: "Run contaier registry hosting test on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Install ed" # Only needed until we remove the need to edit the spec file
run: apt update && apt install -y ed
- name: "Run container registry deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/container-registry/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,67 +0,0 @@
name: Database Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-database'
- '.gitea/workflows/test-database.yml'
- 'tests/database/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '5 18 * * *'
jobs:
test:
name: "Run database hosting test on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run database deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/database/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,59 +0,0 @@
name: Deploy Test
on:
pull_request:
branches:
- main
push:
branches:
- main
- ci-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,58 +0,0 @@
name: External Stack Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-external-stack'
- '.gitea/workflows/test-external-stack.yml'
- 'tests/external-stack/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '8 19 * * *'
jobs:
test:
name: "Run external stack test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run external stack tests"
run: ./tests/external-stack/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,70 +0,0 @@
name: K8s Deploy Test
on:
pull_request:
branches:
- main
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-k8s-deploy'
- '.gitea/workflows/test-k8s-deploy.yml'
- 'tests/k8s-deploy/run-deploy-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '3 15 * * *'
jobs:
test:
name: "Run deploy test suite on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run k8s deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/k8s-deploy/run-deploy-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,70 +0,0 @@
name: K8s Deployment Control Test
on:
pull_request:
branches:
- main
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-k8s-deployment-control'
- '.gitea/workflows/test-k8s-deployment-control.yml'
- 'tests/k8s-deployment-control/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '3 30 * * *'
jobs:
test:
name: "Run deployment control suite on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run k8s deployment control test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/k8s-deployment-control/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,60 +0,0 @@
name: Webapp Test
on:
pull_request:
branches:
- main
push:
branches:
- main
- ci-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
test:
name: "Run webapp test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaround this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Install wget" # 20240109 - Only needed until the executors are updated.
run: apt update && apt install -y wget
- name: "Run webapp tests"
run: ./tests/webapp-test/run-webapp-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,4 +1,4 @@
-name: Smoke Test
+name: Integration Test
 on:
   pull_request:
@@ -7,9 +7,10 @@ on:
     branches:
       - main
       - ci-test
-  paths-ignore:
-    - '.gitea/workflows/triggers/*'
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
 jobs:
   test:
@@ -18,41 +19,21 @@ jobs:
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below workaround this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
+      - name: "Install Python"
+        uses: cerc-io/setup-python@v4
         with:
           python-version: '3.8'
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run smoke tests"
         run: ./tests/smoke-test/run-smoke-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@@ -1,10 +0,0 @@
Change this file to trigger running the fixturenet-laconicd-test CI job
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger

View File

@@ -1,3 +0,0 @@
Change this file to trigger running the test-container-registry CI job
Triggered: 2026-01-21
Triggered: 2026-01-21 19:28:29

View File

@@ -1,2 +0,0 @@
Change this file to trigger running the test-database CI job
Trigger test run

View File

@@ -1,2 +0,0 @@
Change this file to trigger running the external-stack CI job
trigger

View File

@@ -1,2 +0,0 @@
Change this file to trigger running the test-k8s-deploy CI job
Trigger test on PR branch

View File

@@ -1,30 +0,0 @@
name: Fixturenet-Laconicd Test
on:
push:
branches: '*'
paths:
- '!**'
- '.github/workflows/triggers/fixturenet-laconicd-test'
jobs:
test:
name: "Run fixturenet-laconicd test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-laconicd tests"
run: ./tests/fixturenet-laconicd/run-test.sh

View File

@@ -1,21 +0,0 @@
name: Lint Checks
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run linter"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name : "Run flake8"
uses: py-actions/flake8@v2

View File

@@ -1,29 +0,0 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh

View File

@@ -1,29 +0,0 @@
name: Webapp Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run webapp test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run webapp tests"
run: ./tests/webapp-test/run-webapp-test.sh

View File

@@ -1,4 +1,4 @@
-name: Smoke Test
+name: Test
 on:
   pull_request:

View File

@@ -1 +0,0 @@
Change this file to trigger running the fixturenet-eth-test CI job

View File

@@ -1,3 +0,0 @@
Change this file to trigger running the fixturenet-laconicd-test CI job
trigger

.gitignore
View File

@@ -6,5 +6,4 @@ laconic_stack_orchestrator.egg-info
 __pycache__
 *~
 package
-stack_orchestrator/data/build_tag.txt
-/build
+app/data/build_tag.txt

View File

@@ -1,34 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
args: ['--allow-multiple-documents']
- id: check-json
- id: check-merge-conflict
- id: check-added-large-files
- repo: https://github.com/psf/black
rev: 23.12.1
hooks:
- id: black
language_version: python3
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: flake8
args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']
- repo: https://github.com/RobertCraigie/pyright-python
rev: v1.1.345
hooks:
- id: pyright
- repo: https://github.com/adrienverge/yamllint
rev: v1.35.1
hooks:
- id: yamllint
args: [-d, relaxed]

View File

@@ -1,151 +0,0 @@
# Plan: Make Stack-Orchestrator AI-Friendly
## Goal
Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.
---
## Part 1: Documentation & Context Files
### 1.1 Add CLAUDE.md
Create a root-level context file for AI assistants.
**File:** `CLAUDE.md`
Contents:
- Project overview (what stack-orchestrator does)
- Stack creation workflow (step-by-step)
- File naming conventions
- Required vs optional fields in stack.yml
- Common patterns and anti-patterns
- Links to example stacks (simple, medium, complex)
### 1.2 Add JSON Schema for stack.yml
Create formal validation schema.
**File:** `schemas/stack-schema.json`
Benefits:
- AI tools can validate generated stacks
- IDEs provide autocomplete
- CI can catch errors early
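A minimal sketch of how such a schema might be consumed once it exists. This assumes the proposed `schemas/stack-schema.json` path from above and the `jsonschema` and `pyyaml` packages; the helper name is illustrative:
```python
# Minimal sketch of schema validation, assuming the proposed
# schemas/stack-schema.json exists and jsonschema + pyyaml are installed.
import json
from pathlib import Path

import yaml
from jsonschema import ValidationError, validate


def validate_stack_file(stack_yml: Path,
                        schema_path: Path = Path("schemas/stack-schema.json")) -> bool:
    """Return True if stack_yml conforms to the stack schema."""
    schema = json.loads(schema_path.read_text())
    stack = yaml.safe_load(stack_yml.read_text())
    try:
        validate(instance=stack, schema=schema)
        return True
    except ValidationError as err:
        print(f"{stack_yml}: {err.message}")
        return False
```
The same function could back both a CI check and an IDE plugin, which is what makes the schema worth maintaining.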
### 1.3 Add Template Stack with Comments
Create an annotated template for reference.
**File:** `stack_orchestrator/data/stacks/_template/stack.yml`
```yaml
# Stack definition template - copy this directory to create a new stack
version: "1.2" # Required: 1.0, 1.1, or 1.2
name: my-stack # Required: lowercase, hyphens only
description: "Human-readable description" # Optional
repos: # Git repositories to clone
- github.com/org/repo
containers: # Container images to build (must have matching container-build/)
- cerc/my-container
pods: # Deployment units (must have matching docker-compose-{pod}.yml)
- my-pod
```
### 1.4 Document Validation Rules
Create explicit documentation of constraints currently scattered in code.
**File:** `docs/stack-format.md`
Contents:
- Container names must start with `cerc/`
- Pod names must match compose file: `docker-compose-{pod}.yml`
- Repository format: `host/org/repo[@ref]`
- Stack directory name should match `name` field
- Version field options and differences
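To make these constraints concrete, here is a hypothetical checker encoding the rules above. The function and its signature are illustrative only, not part of the current codebase:
```python
# Hypothetical validation helper encoding the documented rules; illustrative only.
import re
from pathlib import Path
from typing import List

# host/org/repo with an optional @ref suffix
REPO_RE = re.compile(r"^[\w.-]+/[\w.-]+/[\w.-]+(@[\w./-]+)?$")


def check_stack(stack: dict, stack_dir: Path, compose_dir: Path) -> List[str]:
    """Return a list of rule violations for a parsed stack.yml."""
    errors = []
    for container in stack.get("containers", []):
        if not container.startswith("cerc/"):
            errors.append(f"container name must start with 'cerc/': {container}")
    for pod in stack.get("pods", []):
        if not (compose_dir / f"docker-compose-{pod}.yml").exists():
            errors.append(f"missing compose file docker-compose-{pod}.yml for pod: {pod}")
    for repo in stack.get("repos", []):
        if not REPO_RE.match(repo):
            errors.append(f"repository must be host/org/repo[@ref]: {repo}")
    if stack.get("name") != stack_dir.name:
        errors.append(f"stack directory '{stack_dir.name}' does not match name field")
    return errors
```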
---
## Part 2: Add `create-stack` Command
### 2.1 Command Overview
```bash
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
```
**Behavior:**
1. Parse repo URL to extract app name (if --name not provided)
2. Create `stacks/{name}/stack.yml`
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
4. Create `compose/docker-compose-{name}.yml`
5. Update list files (repository-list.txt, container-image-list.txt, pod-list.txt)
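As an illustration of step 1, a small helper along these lines could derive the default name (the helper is hypothetical, shown only to make the behavior concrete):
```python
# Illustrative helper for step 1: derive the default stack name from a repo URL.
def default_stack_name(repo: str) -> str:
    tail = repo.rstrip("/").split("/")[-1]  # e.g. "my-app" or "my-app@v1.2"
    return tail.split("@")[0]               # strip an optional @ref

assert default_stack_name("github.com/org/my-app") == "my-app"
assert default_stack_name("github.com/org/my-app@v1.2") == "my-app"
```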
### 2.2 Files to Create
| File | Purpose |
|------|---------|
| `stack_orchestrator/create/__init__.py` | Package init |
| `stack_orchestrator/create/create_stack.py` | Command implementation |
### 2.3 Files to Modify
| File | Change |
|------|--------|
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
### 2.4 Command Options
| Option | Required | Description |
|--------|----------|-------------|
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
| `--name` | No | Stack name (defaults to repo name) |
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
| `--force` | No | Overwrite existing files |
### 2.5 Template Types
| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| service | python:3.11-slim | 8080 | Python backend services |
| empty | none | none | Custom from scratch |
---
## Part 3: Implementation Summary
### New Files (6)
1. `CLAUDE.md` - AI assistant context
2. `schemas/stack-schema.json` - Validation schema
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
4. `docs/stack-format.md` - Stack format documentation
5. `stack_orchestrator/create/__init__.py` - Package init
6. `stack_orchestrator/create/create_stack.py` - Command implementation
### Modified Files (1)
1. `stack_orchestrator/main.py` - Register create-stack command
---
## Verification
```bash
# 1. Command appears in help
laconic-so --help | grep create-stack
# 2. Dry run works
laconic-so --dry-run create-stack --repo github.com/org/test-app
# 3. Creates all expected files
laconic-so create-stack --repo github.com/org/test-app
ls stack_orchestrator/data/stacks/test-app/
ls stack_orchestrator/data/container-build/cerc-test-app/
ls stack_orchestrator/data/compose/docker-compose-test-app.yml
# 4. Build works with generated stack
laconic-so --stack test-app build-containers
```

View File

@@ -1,50 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code when working with the stack-orchestrator project.
## Some rules to follow
NEVER speculate about the cause of something
NEVER assume your hypotheses are true without evidence
ALWAYS clearly state when something is a hypothesis
ALWAYS use evidence from the systems you're interacting with to support your claims and hypotheses
## Key Principles
### Development Guidelines
- **Single responsibility** - Each component has one clear purpose
- **Fail fast** - Let errors propagate, don't hide failures
- **DRY/KISS** - Minimize duplication and complexity
## Development Philosophy: Conversational Literate Programming
### Approach
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.
### Core Principles
- **Documentation-First**: All changes begin with discussion of intent and reasoning
- **Narrative-Driven**: Complex systems are explained through conversational exploration
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
- **Iterative Understanding**: Architecture and implementation evolve through dialogue
### Working Method
1. **Explore and Understand**: Read existing code to understand current state
2. **Discuss Architecture**: Workshop complex design decisions through conversation
3. **Document Intent**: Update TODO.md with clear justification before coding
4. **Explain Changes**: Each modification includes reasoning and context
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution
### Implementation Guidelines
- Treat conversations as primary documentation
- Explain architectural decisions before implementing
- Use TODO.md as the "literate document" that justifies all work
- Maintain clear narrative threads across sessions
- Workshop complex ideas before coding
This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.
## Insights and Observations
### Design Principles
- **When something times out, that doesn't mean it needs a longer timeout; it means something that was expected never happened, not that we need to wait longer for it.**
- **NEVER change a timeout because you believe something was truncated; you don't understand timeouts, so don't edit them unless told to explicitly by the user.**

View File

@@ -6,7 +6,7 @@ Stack Orchestrator allows building and deployment of a Laconic Stack on a single
 ## Install
-**To get started quickly** on a fresh Ubuntu instance (e.g., Digital Ocean); [try this script](./scripts/quick-install-linux.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
+**To get started quickly** on a fresh Ubuntu instance (e.g., Digital Ocean); [try this script](./scripts/quick-install-ubuntu.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
 For any other installation, follow along below and **adapt these instructions based on the specifics of your system.**
@@ -16,7 +16,6 @@ Ensure that the following are already installed:
 - [Python3](https://wiki.python.org/moin/BeginnersGuide/Download): `python3 --version` >= `3.8.10` (the Python3 shipped in Ubuntu 20+ is good to go)
 - [Docker](https://docs.docker.com/get-docker/): `docker --version` >= `20.10.21`
 - [jq](https://stedolan.github.io/jq/download/): `jq --version` >= `1.5`
-- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git): `git --version` >= `2.10.3`
 Note: if installing docker-compose via package manager on Linux (as opposed to Docker Desktop), you must [install the plugin](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually), e.g. :
@@ -29,10 +28,10 @@ chmod +x ~/.docker/cli-plugins/docker-compose
 Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
 a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
-Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
+Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
 ```bash
-curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
 ```
 Give it execute permissions:
@@ -49,27 +48,15 @@ Verify operation (your version will probably be different, just check here that
 laconic-so version
 Version: 1.1.0-7a607c2-202304260513
 ```
-Save the distribution url to `~/.laconic-so/config.yml`:
-```bash
-mkdir ~/.laconic-so
-echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml
-```
-### Update
-If Stack Orchestrator was installed using the process described above, it is able to subsequently self-update to the current latest version by running:
-```bash
-laconic-so update
-```
 ## Usage
-The various [stacks](/stack_orchestrator/data/stacks) each contain instructions for running different stacks based on your use case. For example:
+The various [stacks](/app/data/stacks) each contain instructions for running different stacks based on your use case. For example:
-- [self-hosted Gitea](/stack_orchestrator/data/stacks/build-support)
-- [an Optimism Fixturenet](/stack_orchestrator/data/stacks/fixturenet-optimism)
-- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded)
-- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo)
+- [self-hosted Gitea](/app/data/stacks/build-support)
+- [an Optimism Fixturenet](/app/data/stacks/fixturenet-optimism)
+- [laconicd with console and CLI](app/data/stacks/fixturenet-laconic-loaded)
+- [kubo (IPFS)](app/data/stacks/kubo)
 ## Contributing
@@ -78,3 +65,5 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
 ## Platform Support
 Native aarch64 is _not_ currently supported. x64 emulation on ARM64 macOS should work (not yet tested).
View File

@@ -1,413 +0,0 @@
# Implementing `laconic-so create-stack` Command
A plan for adding a new CLI command to scaffold stack files automatically.
---
## Overview
Add a `create-stack` command that generates all required files for a new stack:
```bash
laconic-so create-stack --name my-stack --type webapp
```
**Output:**
```
stack_orchestrator/data/
├── stacks/my-stack/stack.yml
├── container-build/cerc-my-stack/
│ ├── Dockerfile
│ └── build.sh
└── compose/docker-compose-my-stack.yml
Updated: repository-list.txt, container-image-list.txt, pod-list.txt
```
---
## CLI Architecture Summary
### Command Registration Pattern
Commands are Click functions registered in `main.py`:
```python
# main.py (line ~70)
from stack_orchestrator.create import create_stack
cli.add_command(create_stack.command, "create-stack")
```
### Global Options Access
```python
from stack_orchestrator.opts import opts
if not opts.o.quiet:
print("message")
if opts.o.dry_run:
print("(would create files)")
```
### Key Utilities
| Function | Location | Purpose |
|----------|----------|---------|
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
| `error_exit(msg)` | `util.py` | Print error and exit(1) |
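For orientation, here is a sketch of how these utilities might be combined. The signatures are taken from the table above; confirm the exact behavior against `stack_orchestrator/util.py` before relying on it:
```python
# Orientation sketch only: combining the utilities listed in the table above.
from stack_orchestrator.util import error_exit, get_stack_path, get_yaml


def load_stack_config(stack: str):
    stack_file = get_stack_path(stack).joinpath("stack.yml")  # assumed to return a Path
    if not stack_file.exists():
        error_exit(f"stack.yml not found for stack: {stack}")  # prints and exits(1)
    with open(stack_file) as config_file:
        return get_yaml().load(config_file)  # ruamel.yaml parser per the table
```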
---
## Files to Create
### 1. Command Module
**`stack_orchestrator/create/__init__.py`**
```python
# Empty file to make this a package
```
**`stack_orchestrator/create/create_stack.py`**
```python
import click
import os
from pathlib import Path
from shutil import copy
from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, get_yaml
# Template types
STACK_TEMPLATES = {
"webapp": {
"description": "Web application with Node.js",
"base_image": "node:20-bullseye-slim",
"port": 3000,
},
"service": {
"description": "Backend service",
"base_image": "python:3.11-slim",
"port": 8080,
},
"empty": {
"description": "Minimal stack with no defaults",
"base_image": None,
"port": None,
},
}
def get_data_dir() -> Path:
"""Get path to stack_orchestrator/data directory"""
return Path(__file__).absolute().parent.parent.joinpath("data")
def validate_stack_name(name: str) -> None:
"""Validate stack name follows conventions"""
import re
    # Require lowercase alphanumerics/hyphens with no leading or trailing hyphen
    if not re.match(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', name):
error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
if name.startswith("cerc-"):
error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")
def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
"""Create stack.yml file"""
config = {
"version": "1.2",
"name": name,
"description": template.get("description", f"Stack: {name}"),
"repos": [repo_url] if repo_url else [],
"containers": [f"cerc/{name}"],
"pods": [name],
}
stack_dir.mkdir(parents=True, exist_ok=True)
with open(stack_dir / "stack.yml", "w") as f:
get_yaml().dump(config, f)
def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
"""Create Dockerfile"""
base_image = template.get("base_image", "node:20-bullseye-slim")
port = template.get("port", 3000)
dockerfile_content = f'''# Build stage
FROM {base_image} AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build
# Production stage
FROM {base_image}
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY --from=builder /app/dist ./dist
EXPOSE {port}
CMD ["npm", "run", "start"]
'''
container_dir.mkdir(parents=True, exist_ok=True)
with open(container_dir / "Dockerfile", "w") as f:
f.write(dockerfile_content)
def create_build_script(container_dir: Path, name: str) -> None:
"""Create build.sh script"""
build_script = f'''#!/usr/bin/env bash
# Build cerc/{name}
source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )
docker build -t cerc/{name}:local \\
-f ${{SCRIPT_DIR}}/Dockerfile \\
${{build_command_args}} \\
${{CERC_REPO_BASE_DIR}}/{name}
'''
build_path = container_dir / "build.sh"
with open(build_path, "w") as f:
f.write(build_script)
# Make executable
os.chmod(build_path, 0o755)
def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
"""Create docker-compose file"""
port = template.get("port", 3000)
compose_content = {
"version": "3.8",
"services": {
name: {
"image": f"cerc/{name}:local",
"restart": "unless-stopped",
"ports": [f"${{HOST_PORT:-{port}}}:{port}"],
"environment": {
"NODE_ENV": "${NODE_ENV:-production}",
},
}
}
}
with open(compose_dir / f"docker-compose-{name}.yml", "w") as f:
get_yaml().dump(compose_content, f)
def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
"""Add entry to a list file if not already present"""
list_path = data_dir / filename
# Read existing entries
existing = set()
if list_path.exists():
with open(list_path, "r") as f:
existing = set(line.strip() for line in f if line.strip())
# Add new entry
if entry not in existing:
with open(list_path, "a") as f:
f.write(f"{entry}\n")
@click.command()
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
@click.option("--type", "stack_type", default="webapp",
type=click.Choice(list(STACK_TEMPLATES.keys())),
help="Stack template type")
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
@click.option("--force", is_flag=True, help="Overwrite existing files")
@click.pass_context
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
"""Create a new stack with all required files.
Examples:
laconic-so create-stack --name my-app --type webapp
laconic-so create-stack --name my-service --type service --repo github.com/org/repo
"""
# Validate
validate_stack_name(name)
template = STACK_TEMPLATES[stack_type]
data_dir = get_data_dir()
# Define paths
stack_dir = data_dir / "stacks" / name
container_dir = data_dir / "container-build" / f"cerc-{name}"
compose_dir = data_dir / "compose"
# Check for existing files
if not force:
if stack_dir.exists():
error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
if container_dir.exists():
error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")
# Dry run check
if opts.o.dry_run:
print(f"Would create stack '{name}' with template '{stack_type}':")
print(f" - {stack_dir}/stack.yml")
print(f" - {container_dir}/Dockerfile")
print(f" - {container_dir}/build.sh")
print(f" - {compose_dir}/docker-compose-{name}.yml")
print(f" - Update repository-list.txt")
print(f" - Update container-image-list.txt")
print(f" - Update pod-list.txt")
return
# Create files
if not opts.o.quiet:
print(f"Creating stack '{name}' with template '{stack_type}'...")
create_stack_yml(stack_dir, name, template, repo)
if opts.o.verbose:
print(f" Created {stack_dir}/stack.yml")
create_dockerfile(container_dir, name, template)
if opts.o.verbose:
print(f" Created {container_dir}/Dockerfile")
create_build_script(container_dir, name)
if opts.o.verbose:
print(f" Created {container_dir}/build.sh")
create_compose_file(compose_dir, name, template)
if opts.o.verbose:
print(f" Created {compose_dir}/docker-compose-{name}.yml")
# Update list files
if repo:
update_list_file(data_dir, "repository-list.txt", repo)
if opts.o.verbose:
print(f" Added {repo} to repository-list.txt")
update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
if opts.o.verbose:
print(f" Added cerc/{name} to container-image-list.txt")
update_list_file(data_dir, "pod-list.txt", name)
if opts.o.verbose:
print(f" Added {name} to pod-list.txt")
# Summary
if not opts.o.quiet:
print(f"\nStack '{name}' created successfully!")
print(f"\nNext steps:")
print(f" 1. Edit {stack_dir}/stack.yml")
print(f" 2. Customize {container_dir}/Dockerfile")
print(f" 3. Run: laconic-so --stack {name} build-containers")
print(f" 4. Run: laconic-so --stack {name} deploy-system up")
```
### 2. Register Command in main.py
**Edit `stack_orchestrator/main.py`**
Add import:
```python
from stack_orchestrator.create import create_stack
```
Add command registration (after line ~78):
```python
cli.add_command(create_stack.command, "create-stack")
```
---
## Implementation Steps
### Step 1: Create module structure
```bash
mkdir -p stack_orchestrator/create
touch stack_orchestrator/create/__init__.py
```
### Step 2: Create the command file
Create `stack_orchestrator/create/create_stack.py` with the code above.
### Step 3: Register in main.py
Add the import and `cli.add_command()` line.
### Step 4: Test the command
```bash
# Show help
laconic-so create-stack --help
# Dry run
laconic-so --dry-run create-stack --name test-app --type webapp
# Create a stack
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app
# Verify
ls -la stack_orchestrator/data/stacks/test-app/
cat stack_orchestrator/data/stacks/test-app/stack.yml
```
---
## Template Types
| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| `service` | python:3.11-slim | 8080 | Python backend services |
| `empty` | none | none | Custom from scratch |
---
## Future Enhancements
1. **Interactive mode** - Prompt for values if not provided
2. **More templates** - Go, Rust, database stacks
3. **Template from existing** - `--from-stack existing-stack`
4. **External stack support** - Create in custom directory
5. **Validation command** - `laconic-so validate-stack --name my-stack`
---
## Files Modified
| File | Change |
|------|--------|
| `stack_orchestrator/create/__init__.py` | New (empty) |
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
---
## Verification
```bash
# 1. Command appears in help
laconic-so --help | grep create-stack
# 2. Dry run works
laconic-so --dry-run create-stack --name verify-test --type webapp
# 3. Full creation works
laconic-so create-stack --name verify-test --type webapp
ls stack_orchestrator/data/stacks/verify-test/
ls stack_orchestrator/data/container-build/cerc-verify-test/
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml
# 4. Build works
laconic-so --stack verify-test build-containers
# 5. Cleanup
rm -rf stack_orchestrator/data/stacks/verify-test
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
```

TODO.md
View File

@@ -1,16 +0,0 @@
# TODO
## Features Needed
### Update Stack Command
We need an "update stack" command in stack orchestrator and cleaner documentation regarding how to do continuous deployment with and without payments.
**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.
## Architecture Refactoring
### Separate Deployer from Stack Orchestrator CLI
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.
### Separate Stacks from Stack Orchestrator Repo
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.

View File

@@ -1,4 +1,4 @@
-# Copyright © 2022, 2023 Vulcanize
+# Copyright © 2022, 2023 Cerc

 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
@@ -15,18 +15,18 @@

 import os
 from abc import ABC, abstractmethod
-from stack_orchestrator.deploy.deploy import get_stack_status
-from decouple import config
+from .deploy_system import get_stack_status


 def get_stack(config, stack):
     if stack == "package-registry":
         return package_registry_stack(config, stack)
     else:
-        return default_stack(config, stack)
+        return base_stack(config, stack)


 class base_stack(ABC):

     def __init__(self, config, stack):
         self.config = config
         self.stack = stack
@@ -40,27 +40,15 @@ class base_stack(ABC):
         pass


-class default_stack(base_stack):
-    """Default stack implementation for stacks without specific handling."""
-
-    def ensure_available(self):
-        return True
-
-    def get_url(self):
-        return None
-
-
 class package_registry_stack(base_stack):

     def ensure_available(self):
         self.url = "<no registry url set>"
         # Check if we were given an external registry URL
         url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
         if url_from_environment:
             if self.config.verbose:
-                print(
-                    f"Using package registry url from CERC_NPM_REGISTRY_URL: "
-                    f"{url_from_environment}"
-                )
+                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
             self.url = url_from_environment
         else:
             # Otherwise we expect to use the local package-registry stack
@@ -73,29 +61,11 @@ class package_registry_stack(base_stack):
             # TODO: get url from deploy-stack
             self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
         else:
-            # If not, print a message about how to start it and return fail to the
-            # caller
-            print(
-                "ERROR: The package-registry stack is not running, "
-                "and no external registry specified with CERC_NPM_REGISTRY_URL"
-            )
-            print(
-                "ERROR: Start the local package registry with: "
-                "laconic-so --stack package-registry deploy-system up"
-            )
+            # If not, print a message about how to start it and return fail to the caller
+            print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
+            print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
             return False
         return True

     def get_url(self):
         return self.url
-
-
-def get_npm_registry_url():
-    # If an auth token is not defined, we assume the default should be the cerc registry
-    # If an auth token is defined, we assume the local gitea should be used.
-    default_npm_registry_url = (
-        "http://gitea.local:3000/api/packages/cerc-io/npm/"
-        if config("CERC_NPM_AUTH_TOKEN", default=None)
-        else "https://git.vdb.to/api/packages/cerc-io/npm/"
-    )
-    return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)

app/build_containers.py
View File

@@ -0,0 +1,141 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Builds or pulls containers for the system components
# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc
# TODO: display the available list of containers; allow re-build of either all or specific containers
import os
import sys
from decouple import config
import subprocess
import click
import importlib.resources
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config
# TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
@click.command()
@click.option('--include', help="only build these containers")
@click.option('--exclude', help="don\'t build these containers")
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.pass_context
def command(ctx, include, exclude, force_rebuild, extra_build_args):
'''build the set of containers required for a complete stack'''
quiet = ctx.obj.quiet
verbose = ctx.obj.verbose
dry_run = ctx.obj.dry_run
debug = ctx.obj.debug
local_stack = ctx.obj.local_stack
stack = ctx.obj.stack
continue_on_error = ctx.obj.continue_on_error
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
container_build_dir = Path(__file__).absolute().parent.joinpath("data", "container-build")
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
if not quiet:
print(f'Dev Root is: {dev_root_path}')
if not os.path.isdir(dev_root_path):
print('Dev root directory doesn\'t exist, creating')
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
all_containers = container_list_file.read().splitlines()
containers_in_scope = []
if stack:
stack_config = get_parsed_stack_config(stack)
containers_in_scope = stack_config['containers']
else:
containers_in_scope = all_containers
if verbose:
print(f'Containers: {containers_in_scope}')
if stack:
print(f"Stack: {stack}")
# TODO: make this configurable
container_build_env = {
"CERC_NPM_REGISTRY_URL": config("CERC_NPM_REGISTRY_URL", default="http://gitea.local:3000/api/packages/cerc-io/npm/"),
"CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
"CERC_REPO_BASE_DIR": dev_root_path,
"CERC_CONTAINER_BASE_DIR": container_build_dir,
"CERC_HOST_UID": f"{os.getuid()}",
"CERC_HOST_GID": f"{os.getgid()}",
"DOCKER_BUILDKIT": "0"
}
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
docker_host_env = os.getenv("DOCKER_HOST")
if docker_host_env:
container_build_env.update({"DOCKER_HOST": docker_host_env})
def process_container(container):
if not quiet:
print(f"Building: {container}")
build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
build_script_filename = os.path.join(build_dir, "build.sh")
if verbose:
print(f"Build script filename: {build_script_filename}")
if os.path.exists(build_script_filename):
build_command = build_script_filename
else:
if verbose:
print(f"No script file found: {build_script_filename}, using default build script")
repo_dir = container.split('/')[1]
# TODO: make this less of a hack -- should be specified in some metadata somewhere
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
repo_full_path = os.path.join(dev_root_path, repo_dir)
repo_dir_or_build_dir = repo_dir if os.path.exists(repo_full_path) else build_dir
build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
if not dry_run:
if verbose:
print(f"Executing: {build_command} with environment: {container_build_env}")
build_result = subprocess.run(build_command, shell=True, env=container_build_env)
if verbose:
print(f"Return code is: {build_result.returncode}")
if build_result.returncode != 0:
print(f"Error running build for {container}")
if not continue_on_error:
print("FATAL Error: container build failed and --continue-on-error not set, exiting")
sys.exit(1)
else:
print("****** Container Build Error, continuing because --continue-on-error is set")
else:
print("Skipped")
for container in containers_in_scope:
if include_exclude_check(container, include, exclude):
process_container(container)
else:
if verbose:
print(f"Excluding: {container}")

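As a usage sketch (flag names per the click options above; the laconic-so entrypoint and the stack name are assumptions based on commands quoted elsewhere in this changeset):

    # Build all containers for a stack, bypassing dependency checks
    laconic-so --stack fixturenet-eth build-containers --force-rebuild
    # Build one container and pass extra args through to the build
    laconic-so build-containers --include cerc/test-container --extra-build-args "--no-cache"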
View File

@@ -1,4 +1,4 @@
-# Copyright © 2022, 2023 Vulcanize
+# Copyright © 2022, 2023 Cerc
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
@@ -25,25 +25,19 @@ from decouple import config
 import click
 import importlib.resources
 from python_on_whales import docker, DockerException
-from stack_orchestrator.base import get_stack
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config
+from .base import get_stack
+from .util import include_exclude_check, get_parsed_stack_config

 builder_js_image_name = "cerc/builder-js:local"

 @click.command()
-@click.option("--include", help="only build these packages")
-@click.option("--exclude", help="don't build these packages")
-@click.option(
-    "--force-rebuild",
-    is_flag=True,
-    default=False,
-    help="Override existing target package version check -- force rebuild",
-)
+@click.option('--include', help="only build these packages")
+@click.option('--exclude', help="don\'t build these packages")
+@click.option("--force-rebuild", is_flag=True, default=False, help="Override existing target package version check -- force rebuild")
 @click.option("--extra-build-args", help="Supply extra arguments to build")
 @click.pass_context
 def command(ctx, include, exclude, force_rebuild, extra_build_args):
-    """build the set of npm packages required for a complete stack"""
+    '''build the set of npm packages required for a complete stack'''
     quiet = ctx.obj.quiet
     verbose = ctx.obj.verbose
@@ -69,54 +63,45 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
         sys.exit(1)
     if local_stack:
-        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
-        print(
-            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
-            f"{dev_root_path}"
-        )
+        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
+        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
     else:
-        dev_root_path = os.path.expanduser(
-            config("CERC_REPO_BASE_DIR", default="~/cerc")
-        )
+        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
     build_root_path = os.path.join(dev_root_path, "build-trees")
     if verbose:
-        print(f"Dev Root is: {dev_root_path}")
+        print(f'Dev Root is: {dev_root_path}')
     if not os.path.isdir(dev_root_path):
-        print("Dev root directory doesn't exist, creating")
+        print('Dev root directory doesn\'t exist, creating')
         os.makedirs(dev_root_path)
     if not os.path.isdir(dev_root_path):
-        print("Build root directory doesn't exist, creating")
+        print('Build root directory doesn\'t exist, creating')
         os.makedirs(build_root_path)
     # See: https://stackoverflow.com/a/20885799/1701505
-    from stack_orchestrator import data
-    with importlib.resources.open_text(
-        data, "npm-package-list.txt"
-    ) as package_list_file:
+    from . import data
+    with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
         all_packages = package_list_file.read().splitlines()

     packages_in_scope = []
     if stack:
         stack_config = get_parsed_stack_config(stack)
         # TODO: syntax check the input here
-        packages_in_scope = stack_config["npms"]
+        packages_in_scope = stack_config['npms']
     else:
         packages_in_scope = all_packages
     if verbose:
-        print(f"Packages: {packages_in_scope}")
+        print(f'Packages: {packages_in_scope}')

     def build_package(package):
         if not quiet:
             print(f"Building npm package: {package}")
         repo_dir = package
         repo_full_path = os.path.join(dev_root_path, repo_dir)
-        # Copy the repo and build that to avoid propagating
-        # JS tooling file changes back into the cloned repo
+        # Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
         repo_copy_path = os.path.join(build_root_path, repo_dir)
         # First delete any old build tree
         if os.path.isdir(repo_copy_path):
@@ -129,63 +114,41 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
             print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
             if not dry_run:
                 copytree(repo_full_path, repo_copy_path)
-        build_command = [
-            "sh",
-            "-c",
-            "cd /workspace && "
-            f"build-npm-package-local-dependencies.sh {npm_registry_url}",
-        ]
+        build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
         if not dry_run:
             if verbose:
                 print(f"Executing: {build_command}")
             # Originally we used the PEP 584 merge operator:
-            # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
-            #     ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
-            # but that isn't available in Python 3.8 (default in Ubuntu 20)
-            # so for now we use dict.update:
-            envs = {
-                "CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
-                # Convention used by our web app packages
-                "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
-            }
+            # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
+            # but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
+            envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
+                    "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml"  # Convention used by our web app packages
+                    }
             envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
             envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
-            envs.update(
-                {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
-                if extra_build_args
-                else {}
-            )
+            envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
             try:
-                docker.run(
-                    builder_js_image_name,
+                docker.run(builder_js_image_name,
                            remove=True,
                            interactive=True,
                            tty=True,
                            user=f"{os.getuid()}:{os.getgid()}",
                            envs=envs,
-                           # TODO: detect this host name in npm_registry_url
-                           # rather than hard-wiring it
+                           # TODO: detect this host name in npm_registry_url rather than hard-wiring it
                            add_hosts=[("gitea.local", "host-gateway")],
                            volumes=[(repo_copy_path, "/workspace")],
-                           command=build_command,
-                )
+                           command=build_command
+                           )
-                # Note that although the docs say that build_result should
-                # contain the command output as a string, in reality it is
-                # always the empty string. Since we detect errors via catching
-                # exceptions below, we can safely ignore it here.
+                # Note that although the docs say that build_result should contain
+                # the command output as a string, in reality it is always the empty string.
+                # Since we detect errors via catching exceptions below, we can safely ignore it here.
             except DockerException as e:
                 print(f"Error executing build for {package} in container:\n {e}")
                 if not continue_on_error:
-                    print(
-                        "FATAL Error: build failed and --continue-on-error "
-                        "not set, exiting"
-                    )
+                    print("FATAL Error: build failed and --continue-on-error not set, exiting")
                     sys.exit(1)
                 else:
-                    print(
-                        "****** Build Error, continuing because "
-                        "--continue-on-error is set"
-                    )
+                    print("****** Build Error, continuing because --continue-on-error is set")
         else:
             print("Skipped")
@@ -203,12 +166,6 @@ def _ensure_prerequisites():
     # Tell the user how to build it if not
     images = docker.image.list(builder_js_image_name)
     if len(images) == 0:
-        print(
-            f"FATAL: builder image: {builder_js_image_name} is required "
-            "but was not found"
-        )
-        print(
-            "Please run this command to create it: "
-            "laconic-so --stack build-support build-containers"
-        )
+        print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
+        print("Please run this command to create it: laconic-so --stack build-support build-containers")
         sys.exit(1)

View File

@@ -2,7 +2,6 @@ version: '3.7'
 services:
   fixturenet-eth-bootnode-geth:
-    restart: always
     hostname: fixturenet-eth-bootnode-geth
     env_file:
       - ../config/fixturenet-eth/fixturenet-eth.env
@@ -16,13 +15,12 @@ services:
       - "30303"
   fixturenet-eth-geth-1:
-    restart: always
     hostname: fixturenet-eth-geth-1
     cap_add:
       - SYS_PTRACE
     environment:
-      CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
-      CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect}
+      CERC_REMOTE_DEBUG: "true"
+      CERC_RUN_STATEDIFF: "detect"
       CERC_STATEDIFF_DB_NODE_ID: 1
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
     env_file:
@@ -40,12 +38,10 @@ services:
       - fixturenet-eth-bootnode-geth
     ports:
       - "8545"
-      - "8546"
       - "40000"
       - "6060"
   fixturenet-eth-geth-2:
-    restart: always
     hostname: fixturenet-eth-geth-2
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "8545"]
@@ -62,19 +58,14 @@ services:
       - fixturenet-eth-bootnode-geth
     volumes:
       - fixturenet_eth_geth_2_data:/root/ethdata
-    ports:
-      - "8545"
-      - "8546"
   fixturenet-eth-bootnode-lighthouse:
-    restart: always
     hostname: fixturenet-eth-bootnode-lighthouse
     environment:
       RUN_BOOTNODE: "true"
     image: cerc/fixturenet-eth-lighthouse:local
   fixturenet-eth-lighthouse-1:
-    restart: always
     hostname: fixturenet-eth-lighthouse-1
     healthcheck:
       test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@@ -100,7 +91,6 @@ services:
       - "8001"
   fixturenet-eth-lighthouse-2:
-    restart: always
     hostname: fixturenet-eth-lighthouse-2
     healthcheck:
       test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]

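Note that geth-1's RPC port is published ephemerally (plain "8545"), so reaching it from the host means looking the mapping up first; a sketch, assuming default compose container naming:

    docker port $(docker ps -qf "name=fixturenet-eth-geth-1") 8545
    # then query the reported host port, e.g.:
    curl -s -X POST -H "Content-Type: application/json" \
      --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
      http://127.0.0.1:<host-port>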
View File

@@ -3,7 +3,6 @@ services:
     restart: unless-stopped
     image: cerc/laconic-console-host:local
     environment:
-      - CERC_WEBAPP_FILES_DIR=${CERC_WEBAPP_FILES_DIR:-/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production}
-      - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost:9473}
+      - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost}
     ports:
       - "80"

View File

@@ -1,15 +1,10 @@
+version: "3.2"
 services:
   laconicd:
     restart: unless-stopped
     image: cerc/laconicd:local
-    command: ["bash", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
-    environment:
-      TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED:-false}
-      TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY:-false}
-      ONBOARDING_ENABLED: ${ONBOARDING_ENABLED:-false}
+    command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
     volumes:
-      # The cosmos-sdk node's database directory:
-      - laconicd-data:/root/.laconicd
       # TODO: look at folding these scripts into the container
       - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
       - ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
@@ -19,15 +14,13 @@ services:
       - "6060"
       - "26657"
       - "26656"
-      - "9473"
-      - "8545"
-      - "8546"
+      - "9473:9473"
       - "9090"
-      - "9091"
       - "1317"
   cli:
     image: cerc/laconic-registry-cli:local
     volumes:
       - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
+      - ${BASE_DIR:-~/cerc}/laconic-registry-cli:/laconic-registry-cli
-volumes:
-  laconicd-data:

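With 9473 now explicitly mapped to the host, the laconicd GQL endpoint can be probed directly (the /api path matches the registry CLI config later in this changeset; the introspection query is schema-agnostic):

    curl -s http://localhost:9473/api -H 'Content-Type: application/json' \
      --data '{"query": "{ __typename }"}'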
View File

@@ -5,9 +5,8 @@ services:
   # Creates / updates the configuration for L1 contracts deployment
   # Deploys the L1 smart contracts (outputs to volume l1_deployment)
   fixturenet-optimism-contracts:
-    restart: on-failure
-    image: cerc/optimism-contracts:local
     hostname: fixturenet-optimism-contracts
+    image: cerc/optimism-contracts:local
     env_file:
       - ../config/fixturenet-optimism/l1-params.env
     environment:
@@ -17,49 +16,26 @@ services:
       CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
       CERC_L1_ADDRESS: ${CERC_L1_ADDRESS}
       CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY}
-    volumes:
-      - ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
-      - ../config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh:/app/packages/contracts-bedrock/deploy-contracts.sh
-      - l2_accounts:/l2-accounts
-      - l1_deployment:/l1-deployment
-      - l2_config:/l2-config
-    # Waits for L1 endpoint to be up before running the contract deploy script
+      CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2}
+      CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2}
+    # Waits for L1 endpoint to be up before running the script
     command: |
-      "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./deploy-contracts.sh"
-  # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
-  op-geth:
-    restart: always
-    image: cerc/optimism-l2geth:local
-    hostname: op-geth
-    depends_on:
-      op-node:
-        condition: service_started
+      "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh"
     volumes:
-      - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
-      - l2_config:/l2-config:ro
-      - l2_accounts:/l2-accounts:ro
-      - l2_geth_data:/datadir
-    entrypoint: "sh"
-    command: "/run-op-geth.sh"
-    ports:
-      - "8545"
-      - "8546"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost:8545"]
-      interval: 30s
-      timeout: 10s
-      retries: 100
-      start_period: 10s
+      - ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
+      - ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
+      - ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
+      - ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
+      - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
+      - ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
+      - l2_accounts:/l2-accounts
+      - l1_deployment:/app/packages/contracts-bedrock
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Runs the L2 consensus client (Sequencer node)
-  # Generates the L2 config files if not already present (outputs to volume l2_config)
-  op-node:
-    restart: always
+  # Generates the config files required for L2 (outputs to volume l2_config)
+  op-node-l2-config-gen:
     image: cerc/optimism-op-node:local
-    hostname: op-node
     depends_on:
       fixturenet-optimism-contracts:
         condition: service_completed_successfully
@@ -69,28 +45,65 @@ services:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_L1_RPC: ${CERC_L1_RPC}
     volumes:
-      - ../config/fixturenet-optimism/run-op-node.sh:/run-op-node.sh
-      - l1_deployment:/l1-deployment:ro
-      - l2_config:/l2-config
+      - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
+      - l1_deployment:/contracts-bedrock:ro
+      - l2_config:/app
+    command: ["sh", "/app/generate-l2-config.sh"]
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+  # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
+  op-geth:
+    image: cerc/optimism-l2geth:local
+    depends_on:
+      op-node-l2-config-gen:
+        condition: service_started
+    volumes:
+      - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
+      - l2_config:/op-node:ro
       - l2_accounts:/l2-accounts:ro
+      - l2_geth_data:/datadir
     entrypoint: "sh"
-    command: "/run-op-node.sh"
+    command: "/run-op-geth.sh"
     ports:
-      - "8547"
+      - "0.0.0.0:8545:8545"
+    healthcheck:
+      test: ["CMD", "nc", "-vz", "localhost:8545"]
+      interval: 30s
+      timeout: 10s
+      retries: 10
+      start_period: 10s
+  # Runs the L2 consensus client (Sequencer node)
+  op-node:
+    image: cerc/optimism-op-node:local
+    depends_on:
+      op-geth:
+        condition: service_healthy
+    env_file:
+      - ../config/fixturenet-optimism/l1-params.env
+    environment:
+      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
+      CERC_L1_RPC: ${CERC_L1_RPC}
+    volumes:
+      - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
+      - l2_config:/op-node-data:ro
+      - l2_accounts:/l2-accounts:ro
+    command: ["sh", "/app/run-op-node.sh"]
+    ports:
+      - "0.0.0.0:8547:8547"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost:8547"]
       interval: 30s
       timeout: 10s
-      retries: 100
+      retries: 10
       start_period: 10s
     extra_hosts:
       - "host.docker.internal:host-gateway"
   # Runs the batcher (takes transactions from the Sequencer and publishes them to L1)
   op-batcher:
-    restart: always
     image: cerc/optimism-op-batcher:local
-    hostname: op-batcher
     depends_on:
       op-node:
         condition: service_healthy
@@ -102,7 +115,7 @@ services:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_L1_RPC: ${CERC_L1_RPC}
     volumes:
-      - ../config/network/wait-for-it.sh:/wait-for-it.sh
+      - ../config/wait-for-it.sh:/wait-for-it.sh
       - ../config/fixturenet-optimism/run-op-batcher.sh:/run-op-batcher.sh
       - l2_accounts:/l2-accounts:ro
     entrypoint: ["sh", "-c"]
@@ -110,37 +123,32 @@ services:
     command: |
       "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
     ports:
-      - "8548"
+      - "127.0.0.1:8548:8548"
     extra_hosts:
       - "host.docker.internal:host-gateway"
   # Runs the proposer (periodically submits new state roots to L1)
   op-proposer:
-    restart: always
     image: cerc/optimism-op-proposer:local
-    hostname: op-proposer
     depends_on:
       op-node:
         condition: service_healthy
-      op-geth:
-        condition: service_healthy
     env_file:
       - ../config/fixturenet-optimism/l1-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_L1_RPC: ${CERC_L1_RPC}
-      CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID}
     volumes:
-      - ../config/network/wait-for-it.sh:/wait-for-it.sh
+      - ../config/wait-for-it.sh:/wait-for-it.sh
       - ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh
-      - l1_deployment:/l1-deployment:ro
+      - l1_deployment:/contracts-bedrock:ro
       - l2_accounts:/l2-accounts:ro
     entrypoint: ["sh", "-c"]
     # Waits for L1 endpoint to be up before running the proposer
     command: |
       "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh"
     ports:
-      - "8560"
+      - "127.0.0.1:8560:8560"
     extra_hosts:
       - "host.docker.internal:host-gateway"

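With the new version mapping op-geth's 8545 and op-node's 8547 to the host, a quick smoke test looks like this (eth_chainId is standard JSON-RPC; optimism_syncStatus is the op-node rollup RPC method, assumed enabled on that port):

    curl -s -X POST -H "Content-Type: application/json" \
      --data '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' http://localhost:8545
    curl -s -X POST -H "Content-Type: application/json" \
      --data '{"jsonrpc":"2.0","method":"optimism_syncStatus","params":[],"id":1}' http://localhost:8547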
View File

@@ -1,7 +1,6 @@
 # Add-on pod to include foundry tooling within a fixturenet
 services:
   foundry:
-    restart: always
     image: cerc/foundry:local
     command: ["while :; do sleep 600; done"]
     volumes:

View File

@@ -7,9 +7,11 @@ services:
         condition: service_healthy
     image: cerc/ipld-eth-server:local
     environment:
-      SERVER_HTTP_PATH: 0.0.0.0:8081
-      SERVER_GRAPHQL: "true"
-      SERVER_GRAPHQLPATH: 0.0.0.0:8082
+      IPLD_SERVER_GRAPHQL: "true"
+      IPLD_POSTGRAPHILEPATH: http://graphql:5000
+      ETH_SERVER_HTTPPATH: 0.0.0.0:8081
+      ETH_SERVER_GRAPHQL: "true"
+      ETH_SERVER_GRAPHQLPATH: 0.0.0.0:8082
       VDB_COMMAND: "serve"
       ETH_CHAIN_CONFIG: "/tmp/chain.json"
       DATABASE_NAME: cerc_testing
@@ -25,8 +27,8 @@ services:
       PROM_HTTP: "true"
       PROM_HTTP_ADDR: "0.0.0.0"
       PROM_HTTP_PORT: "8090"
-      LOG_LEVEL: "debug"
-      CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
+      LOGRUS_LEVEL: "debug"
+      CERC_REMOTE_DEBUG: "true"
     volumes:
       - type: bind
         source: ../config/ipld-eth-server/chain.json

View File

@@ -0,0 +1,13 @@
version: "3.2"
# See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up
services:
  ipfs:
    image: ipfs/kubo:master-2023-02-20-714a968
    restart: always
    volumes:
      - ./ipfs/import:/import
      - ./ipfs/data:/data/ipfs
    ports:
      - "0.0.0.0:8080:8080"
      - "0.0.0.0:4001:4001"
      - "0.0.0.0:5001:5001"

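With the kubo RPC API published on 5001, the node can be exercised directly from the host (standard kubo HTTP API endpoints; the file name is illustrative):

    curl -s -X POST http://127.0.0.1:5001/api/v0/id
    HASH=$(curl -s -X POST -F file=@README.md http://127.0.0.1:5001/api/v0/add | jq -r .Hash)
    curl -s http://127.0.0.1:8080/ipfs/$HASH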
View File

@@ -14,3 +14,4 @@ services:
       - "9090"
       - "9091"
       - "1317"

View File

@@ -14,19 +14,16 @@ services:
       CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
       CERC_RELAY_NODES: ${CERC_RELAY_NODES}
       CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
-      CERC_PUBSUB: ${CERC_PUBSUB}
-      CERC_RELEASE: "v0.1.7"
-      CERC_USE_NPM: true
-      CERC_CONFIG_FILE: "src/config.json"
+      CERC_BUILD_DIR: "@cerc-io/mobymask-ui/build"
     working_dir: /scripts
     command: ["sh", "mobymask-app-start.sh"]
     volumes:
+      - ../config/wait-for-it.sh:/scripts/wait-for-it.sh
       - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
+      - ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json
       - peers_ids:/peers
       - mobymask_deployment:/server
     ports:
-      - "127.0.0.1:3002:80"
+      - "0.0.0.0:3002:80"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost", "80"]
       interval: 20s
@@ -49,19 +46,16 @@ services:
       CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
       CERC_RELAY_NODES: ${CERC_RELAY_NODES}
       CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
-      CERC_PUBSUB: ${CERC_PUBSUB}
-      CERC_RELEASE: "v0.1.7-lxdao-0.1.1"
-      CERC_USE_NPM: false
-      CERC_CONFIG_FILE: "src/utils/config.json"
+      CERC_BUILD_DIR: "@cerc-io/mobymask-ui-lxdao/build"
     working_dir: /scripts
     command: ["sh", "mobymask-app-start.sh"]
     volumes:
+      - ../config/wait-for-it.sh:/scripts/wait-for-it.sh
       - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
+      - ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json
       - peers_ids:/peers
       - mobymask_deployment:/server
     ports:
-      - "127.0.0.1:3004:80"
+      - "0.0.0.0:3004:80"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost", "80"]
       interval: 20s

View File

@@ -1,9 +1,8 @@
 version: '3.2'
 services:
-  # Builds and serves the peer-test react-app
   peer-test-app:
-    restart: unless-stopped
+    # Builds and serves the peer-test react-app
     image: cerc/react-peer:local
     working_dir: /scripts
     env_file:
@@ -14,11 +13,11 @@ services:
       CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
     command: ["sh", "test-app-start.sh"]
     volumes:
-      - ../config/network/wait-for-it.sh:/scripts/wait-for-it.sh
+      - ../config/wait-for-it.sh:/scripts/wait-for-it.sh
       - ../config/watcher-mobymask-v2/test-app-start.sh:/scripts/test-app-start.sh
       - peers_ids:/peers
     ports:
-      - "127.0.0.1:3003:80"
+      - "0.0.0.0:3003:80"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "80"]
       interval: 20s

View File

@@ -0,0 +1,7 @@
version: "3.2"
services:
  test:
    image: cerc/test-container:local
    restart: always
    ports:
      - "80"

View File

@@ -0,0 +1,304 @@
version: '3.2'

services:
  # Starts the PostgreSQL database for watchers
  watcher-db:
    restart: unless-stopped
    image: postgres:14-alpine
    environment:
      - POSTGRES_USER=vdbm
      - POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-sending-watcher,delegated-sending-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue
      - POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-sending-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto,
      - POSTGRES_PASSWORD=password
    volumes:
      - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
      - watcher_db_data:/var/lib/postgresql/data
    ports:
      - "0.0.0.0:15432:5432"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "5432"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s

  # Starts the azimuth-watcher server
  azimuth-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/azimuth-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/azimuth-watcher/start-server.sh
    ports:
      - "3001"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3001"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the censures-watcher server
  censures-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/censures-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/censures-watcher/start-server.sh
    ports:
      - "3002"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3002"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the claims-watcher server
  claims-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/claims-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/claims-watcher/start-server.sh
    ports:
      - "3003"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3003"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the conditional-star-release-watcher server
  conditional-star-release-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/conditional-star-release-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/conditional-star-release-watcher/start-server.sh
    ports:
      - "3004"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3004"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the delegated-sending-watcher server
  delegated-sending-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/delegated-sending-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/delegated-sending-watcher/start-server.sh
    ports:
      - "3005"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3005"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the ecliptic-watcher server
  ecliptic-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/ecliptic-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/ecliptic-watcher/start-server.sh
    ports:
      - "3006"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3006"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the linear-star-release-watcher server
  linear-star-release-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/linear-star-release-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/linear-star-release-watcher/start-server.sh
    ports:
      - "3007"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3007"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the polls-watcher server
  polls-watcher-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      watcher-db:
        condition: service_healthy
    env_file:
      - ../config/watcher-azimuth/watcher-params.env
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
    working_dir: /app/packages/polls-watcher
    command: "./start-server.sh"
    volumes:
      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
      - ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
      - ../config/watcher-azimuth/start-server.sh:/app/packages/polls-watcher/start-server.sh
    ports:
      - "3008"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "3008"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Starts the gateway-server for proxying queries
  gateway-server:
    image: cerc/watcher-azimuth:local
    restart: unless-stopped
    depends_on:
      azimuth-watcher-server:
        condition: service_healthy
      censures-watcher-server:
        condition: service_healthy
      claims-watcher-server:
        condition: service_healthy
      conditional-star-release-watcher-server:
        condition: service_healthy
      delegated-sending-watcher-server:
        condition: service_healthy
      ecliptic-watcher-server:
        condition: service_healthy
      linear-star-release-watcher-server:
        condition: service_healthy
      polls-watcher-server:
        condition: service_healthy
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    working_dir: /app/packages/gateway-server
    command: "yarn server"
    volumes:
      - ../config/watcher-azimuth/gateway-watchers.json:/app/packages/gateway-server/dist/watchers.json
    ports:
      - "0.0.0.0:4000:4000"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "4000"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

volumes:
  watcher_db_data:

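Once up, the gateway-server fronts all eight watcher GQL endpoints on port 4000; a hedged smoke test (the /graphql path and the use of schema introspection are assumptions about the gateway, not taken from this compose file):

    curl -s http://localhost:4000/graphql -H 'Content-Type: application/json' \
      --data '{"query": "{ __schema { queryType { name } } }"}'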
View File

@@ -34,7 +34,7 @@ services:
       - ETH_RPC_URL=http://go-ethereum:8545
     command: ["sh", "-c", "yarn server"]
     volumes:
-      - ../config/watcher-erc20/erc20-watcher.toml:/app/environments/local.toml
+      - ../config/watcher-erc20/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml
     ports:
       - "0.0.0.0:3002:3001"
       - "0.0.0.0:9002:9001"

View File

@@ -14,7 +14,7 @@ services:
       - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
       - mobymask_watcher_db_data:/var/lib/postgresql/data
     ports:
-      - "127.0.0.1:15432:5432"
+      - "0.0.0.0:15432:5432"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "5432"]
       interval: 20s
@@ -44,7 +44,7 @@ services:
       CERC_L2_NODE_PORT: ${CERC_L2_NODE_PORT}
     command: ["sh", "deploy-and-generate-invite.sh"]
     volumes:
-      - ../config/network/wait-for-it.sh:/app/packages/server/wait-for-it.sh
+      - ../config/wait-for-it.sh:/app/packages/server/wait-for-it.sh
       - ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json
       - ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh
       - mobymask_deployment:/app/packages/server
@@ -84,7 +84,6 @@ services:
       CERC_PRIVATE_KEY_PEER: ${CERC_PRIVATE_KEY_PEER}
       CERC_RELAY_PEERS: ${CERC_RELAY_PEERS}
       CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
-      CERC_PUBSUB: ${CERC_PUBSUB}
       CERC_RELAY_ANNOUNCE_DOMAIN: ${CERC_RELAY_ANNOUNCE_DOMAIN}
       CERC_ENABLE_PEER_L2_TXS: ${CERC_ENABLE_PEER_L2_TXS}
       CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
@@ -96,9 +95,9 @@ services:
       - mobymask_deployment:/server
     # Expose GQL, metrics and relay node ports
     ports:
-      - "127.0.0.1:3001:3001"
-      - "127.0.0.1:9001:9001"
-      - "127.0.0.1:9090:9090"
+      - "0.0.0.0:3001:3001"
+      - "0.0.0.0:9001:9001"
+      - "0.0.0.0:9090:9090"
     healthcheck:
       test: ["CMD", "busybox", "nc", "localhost", "9090"]
       interval: 20s

View File

@@ -24,37 +24,15 @@ services:
       retries: 15
       start_period: 10s
-  mobymask-watcher-job-runner:
-    restart: unless-stopped
-    depends_on:
-      mobymask-watcher-db:
-        condition: service_healthy
-    image: cerc/watcher-mobymask:local
-    command: ["sh", "-c", "yarn job-runner"]
-    volumes:
-      - ../config/watcher-mobymask/mobymask-watcher.toml:/app/environments/local.toml
-    ports:
-      - "0.0.0.0:9000:9000"
-    extra_hosts:
-      - "ipld-eth-server:host-gateway"
-    healthcheck:
-      test: ["CMD", "nc", "-v", "localhost", "9000"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
   mobymask-watcher-server:
     restart: unless-stopped
     depends_on:
       mobymask-watcher-db:
         condition: service_healthy
-      mobymask-watcher-job-runner:
-        condition: service_healthy
     image: cerc/watcher-mobymask:local
     command: ["sh", "-c", "yarn server"]
     volumes:
-      - ../config/watcher-mobymask/mobymask-watcher.toml:/app/environments/local.toml
+      - ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
     ports:
       - "0.0.0.0:3001:3001"
       - "0.0.0.0:9001:9001"
@@ -67,5 +45,21 @@ services:
       retries: 15
       start_period: 5s
+  mobymask-watcher-job-runner:
+    restart: unless-stopped
+    depends_on:
+      mobymask-watcher-server:
+        condition: service_healthy
+      mobymask-watcher-db:
+        condition: service_healthy
+    image: cerc/watcher-mobymask:local
+    command: ["sh", "-c", "yarn job-runner"]
+    volumes:
+      - ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
+    ports:
+      - "0.0.0.0:9000:9000"
+    extra_hosts:
+      - "ipld-eth-server:host-gateway"
 volumes:
   mobymask_watcher_db_data:

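A quick liveness pass over the re-ordered services, mirroring the healthcheck commands and published ports above:

    nc -vz localhost 3001   # watcher GQL server
    nc -vz localhost 9001   # watcher metrics
    nc -vz localhost 9000   # job-runner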
View File

@@ -17,12 +17,7 @@ CERC_STATEDIFF_DB_PORT=5432
 CERC_STATEDIFF_DB_NAME="cerc_testing"
 CERC_STATEDIFF_DB_USER="vdbm"
 CERC_STATEDIFF_DB_PASSWORD="password"
-CERC_STATEDIFF_DB_GOOSE_MIN_VER=${CERC_STATEDIFF_DB_GOOSE_MIN_VER:-18}
-CERC_STATEDIFF_DB_LOG_STATEMENTS="${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false}"
-CERC_STATEDIFF_WORKERS=2
+CERC_STATEDIFF_DB_GOOSE_MIN_VER=23
+CERC_STATEDIFF_DB_LOG_STATEMENTS="false"
 CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"
-CERC_GETH_VERBOSITY=${CERC_GETH_VERBOSITY:-3}
-# Used by Lighthouse
-SECONDS_PER_ETH1_BLOCK=${SECONDS_PER_ETH1_BLOCK:-3}

View File

@@ -0,0 +1,118 @@
#!/bin/bash
# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
# so we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# trace evm
TRACE="--trace"
# TRACE=""
# validate dependencies are installed
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
# remove existing daemon and client
rm -rf ~/.laconic*
make install
laconicd config keyring-backend $KEYRING
laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Set gas limit in genesis
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# disable produce empty block
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi
if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
fi
# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
# Sign genesis transaction
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
# Collect genesis tx
laconicd collect-gentxs
# Run this to ensure everything worked and that the genesis file is setup correctly
laconicd validate-genesis
if [[ $1 == "pending" ]]; then
  echo "pending mode is on, please wait for the first block committed."
fi
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground

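Once the script reaches laconicd start, a couple of hedged sanity checks (standard Tendermint RPC and Cosmos REST endpoints; run them inside the container, or map 26657/1317 to the host first):

    # Consensus status
    curl -s http://localhost:26657/status | jq .result.sync_info
    # Balance of the genesis account
    curl -s http://localhost:1317/cosmos/bank/v1beta1/balances/$(laconicd keys show mykey -a --keyring-backend test)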
View File

@@ -1,9 +1,9 @@
 services:
-  registry:
-    rpcEndpoint: 'http://laconicd:26657'
+  cns:
+    restEndpoint: 'http://laconicd:1317'
     gqlEndpoint: 'http://laconicd:9473/api'
     userKey: REPLACE_WITH_MYKEY
     bondId:
     chainId: laconic_9000-1
     gas: 250000
-    fees: 2000000alnt
+    fees: 200000aphoton

View File

@ -0,0 +1,37 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

# Check existing config if it exists
if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
  echo "Found existing L2 config, cross-checking with L1 deployment config"

  SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
  EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
  EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')

  GEN_L2_CONF=$(cat /app/rollup.json)
  GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
  GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')

  if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
    echo "Config cross-checked, exiting"
    exit 0
  fi

  echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting"
  exit 1
fi

op-node genesis l2 \
  --deploy-config /contracts-bedrock/deploy-config/getting-started.json \
  --deployment-dir /contracts-bedrock/deployments/getting-started/ \
  --outfile.l2 /app/genesis.json \
  --outfile.rollup /app/rollup.json \
  --l1-rpc $CERC_L1_RPC

openssl rand -hex 32 > /app/jwt.txt
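# For reference, a successful first run leaves three artifacts for the other
# L2 services (assuming this stack's conventional volume mounts):
#   /app/genesis.json - L2 genesis, consumed by `geth init` in op-geth
#   /app/rollup.json  - rollup config, passed to op-node
#   /app/jwt.txt      - JWT secret shared by op-geth's authrpc and op-node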

View File

@ -0,0 +1,131 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
echo "Using L1 RPC endpoint ${CERC_L1_RPC}"
IMPORT_1="import './verify-contract-deployment'"
IMPORT_2="import './rekey-json'"
IMPORT_3="import './send-balance'"
# Append mounted tasks to tasks/index.ts file if not present
if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
  echo "$IMPORT_1" >> tasks/index.ts
  echo "$IMPORT_2" >> tasks/index.ts
  echo "$IMPORT_3" >> tasks/index.ts
fi
# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts
# Exit if a deployment already exists (on restarts)
# Note: fixturenet-eth-geth currently starts fresh on a restart
if [ -d "deployments/getting-started" ]; then
echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"
# Read JSON file into variable
SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)
# Parse JSON into variables
SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')
if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
echo "Deployment verfication successful, exiting"
exit 0
else
echo "Deployment verfication failed, please clear L1 deployment volume before starting"
exit 1
fi
fi
# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json
# Read JSON file into variable
KEYS_JSON=$(cat /l2-accounts/keys.json)
# Parse JSON into variables
ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address')
ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey')
PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address')
BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')
# Get the private keys of L1 accounts
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Couldn't fetch L1 account credentials, using them from env"
fi
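# Note: the parsing above assumes a CSV with one account per line, the address
# in the second-to-last field and the private key in the last, e.g.
# (hypothetical values):
#   1,0x67d269191c92caf3cd7723f116c85e6e9bf55933,0x8888f1...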
# Send balances to the above L2 addresses
yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
echo "Balances sent to L2 accounts"
# Select a finalized L1 block as the starting point for rollups
until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do
  echo "Waiting for a finalized L1 block to exist, retrying after 10s"
  sleep 10
done
L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')
echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"
# Update the deployment config
sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"
echo "Updated the deployment config"
# Create a .env file
echo "L1_RPC=$CERC_L1_RPC" > .env
echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env
echo "Deploying the L1 smart contracts, this will take a while..."
# Deploy the L1 smart contracts
yarn hardhat deploy --network getting-started --tags l1
echo "Deployed the L1 smart contracts"
# Read Proxy contract's JSON and get the address
PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json)
PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address')
# Send balance to the above Proxy contract on L1 so that it is reflected on L2
# First account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
# Second account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started
echo "Balances sent to the L1 Proxy contract"
echo "Use following accounts for transactions in L2:"
echo "${CERC_L1_ADDRESS}"
echo "${CERC_L1_ADDRESS_2}"
echo "Done"

View File

@ -0,0 +1,36 @@
const fs = require('fs')

// Get the command-line arguments
const configFile = process.argv[2]
const adminAddress = process.argv[3]
const proposerAddress = process.argv[4]
const batcherAddress = process.argv[5]
const sequencerAddress = process.argv[6]
const blockHash = process.argv[7]

// Read the JSON file
const configData = fs.readFileSync(configFile)
const configObj = JSON.parse(configData)

// Set all of the owner/admin-type properties to the ADMIN_ADDRESS value
configObj.finalSystemOwner =
  configObj.portalGuardian =
  configObj.controller =
  configObj.l2OutputOracleChallenger =
  configObj.proxyAdminOwner =
  configObj.baseFeeVaultRecipient =
  configObj.l1FeeVaultRecipient =
  configObj.sequencerFeeVaultRecipient =
  configObj.governanceTokenOwner =
    adminAddress

configObj.l2OutputOracleProposer = proposerAddress
configObj.batchSenderAddress = batcherAddress
configObj.p2pSequencerAddress = sequencerAddress
configObj.l1StartingBlockTag = blockHash

// Write the updated JSON object back to the file
fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2))
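// Invoked from the L1 contracts deployment script as:
//   node update-config.js deploy-config/getting-started.json \
//     "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" \
//     "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"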

View File

@ -6,14 +6,22 @@ fi
 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
-# Start op-batcher
-L2_RPC="http://op-geth:8545"
-ROLLUP_RPC="http://op-node:8547"
-BATCHER_KEY=$(cat /l2-accounts/accounts.json | jq -r .BatcherKey)
+# Get Batcher key from keys.json
+BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')
+
+cleanup() {
+  echo "Signal received, cleaning up..."
+  kill ${batcher_pid}
+  wait
+  echo "Done"
+}
+trap 'cleanup' INT TERM
+
+# Run op-batcher
 op-batcher \
-  --l2-eth-rpc=$L2_RPC \
-  --rollup-rpc=$ROLLUP_RPC \
+  --l2-eth-rpc=http://op-geth:8545 \
+  --rollup-rpc=http://op-node:8547 \
   --poll-interval=1s \
   --sub-safety-margin=6 \
   --num-confirmations=1 \
@ -24,4 +32,8 @@ op-batcher \
   --rpc.enable-admin \
   --max-channel-duration=1 \
   --l1-eth-rpc=$CERC_L1_RPC \
-  --private-key="${BATCHER_KEY#0x}"
+  --private-key=$BATCHER_KEY \
+  &
+batcher_pid=$!
+wait $batcher_pid
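A note on the pattern introduced here: running op-batcher in the background and `wait`-ing on its PID is what makes the trap effective, since the shell's `wait` builtin is interruptible by signals, whereas a trap would not run until a foreground child exited.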

View File

@ -0,0 +1,90 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

# TODO: Add in container build or use other tool
echo "Installing jq"
apk update && apk add jq

# Get Sequencer key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')

# Initialize op-geth if datadir/geth not found
if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then
  echo "Found existing datadir, checking block signer key"
  BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key)
  if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then
    echo "Sequencer and block signer keys match, skipping initialization"
  else
    echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting"
    exit 1
  fi
else
  echo "Initializing op-geth"
  mkdir -p datadir
  echo "pwd" > datadir/password
  echo "$SEQUENCER_KEY" > datadir/block-signer-key
  geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key

  while [ ! -f "/op-node/jwt.txt" ]; do
    echo "Config files not created. Checking again in 5 seconds."
    sleep 5
  done
  echo "Config files created by op-node, proceeding with the initialization..."

  geth init --datadir=datadir /op-node/genesis.json
  echo "Node Initialized"
fi

SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"')
echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}"

cleanup() {
  echo "Signal received, cleaning up..."
  kill ${geth_pid}
  wait
  echo "Done"
}
trap 'cleanup' INT TERM

# Run op-geth
geth \
  --datadir ./datadir \
  --http \
  --http.corsdomain="*" \
  --http.vhosts="*" \
  --http.addr=0.0.0.0 \
  --http.api=web3,debug,eth,txpool,net,engine \
  --ws \
  --ws.addr=0.0.0.0 \
  --ws.port=8546 \
  --ws.origins="*" \
  --ws.api=debug,eth,txpool,net,engine \
  --syncmode=full \
  --gcmode=archive \
  --nodiscover \
  --maxpeers=0 \
  --networkid=42069 \
  --authrpc.vhosts="*" \
  --authrpc.addr=0.0.0.0 \
  --authrpc.port=8551 \
  --authrpc.jwtsecret=/op-node/jwt.txt \
  --rollup.disabletxpoolgossip=true \
  --password=./datadir/password \
  --allow-insecure-unlock \
  --mine \
  --miner.etherbase=$SEQUENCER_ADDRESS \
  --unlock=$SEQUENCER_ADDRESS \
  &
geth_pid=$!
wait $geth_pid
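# Once up, the L2 chain can be smoke-tested from another container on the same
# network, e.g. (service name op-geth as used elsewhere in this stack):
#   curl -s -X POST -H 'Content-Type: application/json' \
#     --data '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' \
#     http://op-geth:8545
# This should return 0xa455, i.e. 42069, the --networkid set above.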

View File

@ -0,0 +1,26 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

# Get Sequencer key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')

# Run op-node
op-node \
  --l2=http://op-geth:8551 \
  --l2.jwt-secret=/op-node-data/jwt.txt \
  --sequencer.enabled \
  --sequencer.l1-confs=3 \
  --verifier.l1-confs=3 \
  --rollup.config=/op-node-data/rollup.json \
  --rpc.addr=0.0.0.0 \
  --rpc.port=8547 \
  --p2p.disable \
  --rpc.enable-admin \
  --p2p.sequencer.key=$SEQUENCER_KEY \
  --l1=$CERC_L1_RPC \
  --l1.rpckind=any
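# Sync progress can be checked against the RPC port opened above, e.g.
# (optimism_syncStatus is part of op-node's standard RPC namespace):
#   curl -s -X POST -H 'Content-Type: application/json' \
#     --data '{"jsonrpc":"2.0","method":"optimism_syncStatus","params":[],"id":1}' \
#     http://op-node:8547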

View File

@ -0,0 +1,36 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

# Read the L2OutputOracle contract address from the deployment
L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json)
L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address')

# Get Proposer key from keys.json
PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"')

cleanup() {
  echo "Signal received, cleaning up..."
  kill ${proposer_pid}
  wait
  echo "Done"
}
trap 'cleanup' INT TERM

# Run op-proposer
op-proposer \
  --poll-interval 12s \
  --rpc.port 8560 \
  --rollup-rpc http://op-node:8547 \
  --l2oo-address $L2OO_ADDR \
  --private-key $PROPOSER_KEY \
  --l1-eth-rpc $CERC_L1_RPC \
  &
proposer_pid=$!
wait $proposer_pid

View File

@ -0,0 +1,27 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

CERC_IPLD_ETH_RPC="${CERC_IPLD_ETH_RPC:-${DEFAULT_CERC_IPLD_ETH_RPC}}"
CERC_IPLD_ETH_GQL="${CERC_IPLD_ETH_GQL:-${DEFAULT_CERC_IPLD_ETH_GQL}}"

echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"

# Read in the config template TOML file and replace the env variable placeholders
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
  sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
          s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|")

# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/watcher-config.toml

# Merge SO watcher config with existing config file
node merge-toml.js

echo 'yarn server'
yarn server
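# Illustrative result: with CERC_IPLD_ETH_RPC=http://ipld-eth-server:8081 and
# CERC_IPLD_ETH_GQL=http://ipld-eth-server:8082/graphql (hypothetical values),
# the generated watcher-config.toml upstream section would read:
#   gqlApiEndpoint = "http://ipld-eth-server:8082/graphql"
#   rpcProviderEndpoint = "http://ipld-eth-server:8081"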

View File

@ -0,0 +1,14 @@
[server]
host = "0.0.0.0"
maxSimultaneousRequests = -1
[database]
host = "watcher-db"
port = 5432
username = "vdbm"
password = "password"
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL"
rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"

Some files were not shown because too many files have changed in this diff.