Compare commits


15 Commits

Author SHA1 Message Date
6396e43c68 Update for demo
Former-commit-id: 5fed0cdacace4eb7c490dd9df9a53ffc166ff4ad
2023-04-10 14:52:23 -06:00
6cb7bbf1d0 Remove test file
Former-commit-id: 0b6578a182
2023-04-10 11:46:52 -06:00
acbadc83fc Workaround for ref being different in production
Former-commit-id: f5c2e28065
2023-04-10 11:43:04 -06:00
728bf653cc Merge branch 'main' into publish-test
Former-commit-id: 9a657e05a1
2023-04-10 11:29:29 -06:00
a5b5054af5 Merge branch 'main' into publish-test
Former-commit-id: b2070dfaa9
2023-04-10 11:20:33 -06:00
3cdad5b2d6 Merge main
Former-commit-id: 4d1f339188
2023-04-10 09:20:26 -06:00
bb33cdffec Force build
Former-commit-id: df37e03287
2023-04-10 07:41:02 -06:00
79891b9128 Merge master
Former-commit-id: 00b7ddf1f0
2023-04-10 07:40:13 -06:00
1abde44a31 Try quotes
Former-commit-id: 008f42ad38
2023-04-10 07:35:55 -06:00
19eb67ed08 Force build
Former-commit-id: 32198aba12
2023-04-10 07:29:34 -06:00
c69ce45190 Merge branch 'main' into publish-test
Former-commit-id: 9a5fa577f2
2023-04-10 07:28:18 -06:00
9cd3ac2567 Force build
Former-commit-id: 77d5c8e9ea
2023-04-10 07:20:51 -06:00
0c1170e51c Merge branch 'publish-test' of github.com:cerc-io/stack-orchestrator into publish-test
Former-commit-id: c5a42b7ddf
2023-04-10 07:10:36 -06:00
3b9fe60e59 Add file to force a job
Former-commit-id: d01b2215c4
2023-04-10 07:09:56 -06:00
6bd84e5b2f Add test code
Former-commit-id: 355c3e179e
2023-04-10 07:02:05 -06:00
809 changed files with 4040 additions and 108312 deletions

View File

@ -1,66 +0,0 @@
name: Fixturenet-Laconicd-Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/fixturenet-laconicd-test'
schedule:
- cron: '1 13 * * *'
jobs:
test:
name: "Run Laconicd fixturenet and Laconic CLI tests"
runs-on: ubuntu-latest
steps:
- name: 'Update'
run: apt-get update
- name: 'Setup jq'
run: apt-get install jq -y
- name: 'Check jq'
run: |
which jq
jq --version
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-laconicd tests"
run: ./tests/fixturenet-laconicd/run-test.sh
- name: "Run laconic CLI tests"
run: ./tests/fixturenet-laconicd/run-cli-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,37 +0,0 @@
name: Lint Checks
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run linter"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name : "Run flake8"
uses: py-actions/flake8@v2
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -5,8 +5,6 @@ on:
     branches:
       - main
       - publish-test
-    paths-ignore:
-      - '.gitea/workflows/triggers/*'
 jobs:
   publish:
@ -20,22 +18,14 @@ jobs:
         run: |
           build_tag=$(./scripts/create_build_tag_file.sh)
           echo "build-tag=v${build_tag}" >> $GITHUB_OUTPUT
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below work around this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
+      - name: "Install Python"
+        uses: cerc-io/setup-python@v4
         with:
           python-version: '3.8'
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Build local shiv package"
         id: build
         run: |
@ -47,26 +37,11 @@ jobs:
         run: |
           cp ${{ steps.build.outputs.package-file }} ./laconic-so
       - name: "Create release"
-        uses: https://gitea.com/cerc-io/action-gh-release@gitea-v2
+        uses: cerc-io/action-gh-release@gitea-v1
         with:
           tag_name: ${{ steps.build-info.outputs.build-tag }}
           # On the publish test branch, mark our release as a draft
           # Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
           draft: ${{ endsWith('publish-test', github.ref ) }}
           files: ./laconic-so
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,69 +0,0 @@
name: Container Registry Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-container-registry'
- '.gitea/workflows/test-container-registry.yml'
- 'tests/container-registry/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '6 19 * * *'
jobs:
test:
name: "Run contaier registry hosting test on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Install ed" # Only needed until we remove the need to edit the spec file
run: apt update && apt install -y ed
- name: "Run container registry deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/container-registry/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,67 +0,0 @@
name: Database Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-database'
- '.gitea/workflows/test-database.yml'
- 'tests/database/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '5 18 * * *'
jobs:
test:
name: "Run database hosting test on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run database deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/database/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,59 +0,0 @@
name: Deploy Test
on:
pull_request:
branches:
- main
push:
branches:
- main
- ci-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,58 +0,0 @@
name: External Stack Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-external-stack'
- '.gitea/workflows/test-external-stack.yml'
- 'tests/external-stack/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '8 19 * * *'
jobs:
test:
name: "Run external stack test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run external stack tests"
run: ./tests/external-stack/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,70 +0,0 @@
name: K8s Deploy Test
on:
pull_request:
branches:
- main
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-k8s-deploy'
- '.gitea/workflows/test-k8s-deploy.yml'
- 'tests/k8s-deploy/run-deploy-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '3 15 * * *'
jobs:
test:
name: "Run deploy test suite on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run k8s deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/k8s-deploy/run-deploy-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,70 +0,0 @@
name: K8s Deployment Control Test
on:
pull_request:
branches:
- main
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-k8s-deployment-control'
- '.gitea/workflows/test-k8s-deployment-control.yml'
- 'tests/k8s-deployment-control/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '3 30 * * *'
jobs:
test:
name: "Run deployment control suite on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run k8s deployment control test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/k8s-deployment-control/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,60 +0,0 @@
name: Webapp Test
on:
pull_request:
branches:
- main
push:
branches:
- main
- ci-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
test:
name: "Run webapp test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv==1.0.6
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Install wget" # 20240109 - Only needed until the executors are updated.
run: apt update && apt install -y wget
- name: "Run webapp tests"
run: ./tests/webapp-test/run-webapp-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,15 +1,10 @@
-name: Smoke Test
+name: Test
 on:
   pull_request:
     branches: '*'
   push:
-    branches:
-      - main
-      - ci-test
-    paths-ignore:
-      - '.gitea/workflows/triggers/*'
+    branches: '*'
 jobs:
   test:
@ -18,41 +13,15 @@ jobs:
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below work around this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
+      - name: "Install Python"
+        uses: cerc-io/setup-python@v4
         with:
           python-version: '3.8'
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
-      - name: "Generate build version file"
-        run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
       - name: "Run smoke tests"
         run: ./tests/smoke-test/run-smoke-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}

View File

@ -1,10 +0,0 @@
Change this file to trigger running the fixturenet-laconicd-test CI job
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger

View File

@ -1,3 +0,0 @@
Change this file to trigger running the test-container-registry CI job
Triggered: 2026-01-21
Triggered: 2026-01-21 19:28:29

View File

@ -1,2 +0,0 @@
Change this file to trigger running the test-database CI job
Trigger test run

View File

@ -1,2 +0,0 @@
Change this file to trigger running the external-stack CI job
trigger

View File

@ -1,2 +0,0 @@
Change this file to trigger running the test-k8s-deploy CI job
Trigger test on PR branch

View File

@ -1,30 +0,0 @@
name: Fixturenet-Eth Test
on:
push:
branches: '*'
paths:
- '!**'
- '.github/workflows/triggers/fixturenet-eth-test'
jobs:
test:
name: "Run fixturenet-eth test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-eth tests"
run: ./tests/fixturenet-eth/run-test.sh

View File

@ -1,30 +0,0 @@
name: Fixturenet-Laconicd Test
on:
push:
branches: '*'
paths:
- '!**'
- '.github/workflows/triggers/fixturenet-laconicd-test'
jobs:
test:
name: "Run fixturenet-laconicd test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-laconicd tests"
run: ./tests/fixturenet-laconicd/run-test.sh

View File

@ -1,21 +0,0 @@
name: Lint Checks
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run linter"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name : "Run flake8"
uses: py-actions/flake8@v2

View File

@ -1,46 +0,0 @@
name: Publish
on:
push:
branches:
- main
- publish-test
jobs:
publish:
name: "Build and publish"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Get build info"
id: build-info
run: |
build_tag=$(./scripts/create_build_tag_file.sh)
echo "build-tag=v${build_tag}" >> $GITHUB_OUTPUT
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Build local shiv package"
id: build
run: |
./scripts/build_shiv_package.sh
result_code=$?
echo "package-file=$(ls ./package/*)" >> $GITHUB_OUTPUT
exit $result_code
- name: "Stage artifact file"
run: |
cp ${{ steps.build.outputs.package-file }} ./laconic-so
- name: "Create release"
uses: softprops/action-gh-release@v1
with:
tag_name: ${{ steps.build-info.outputs.build-tag }}
# On the publish test branch, mark our release as a draft
# Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
draft: ${{ endsWith('publish-test', github.ref ) }}
files: ./laconic-so

View File

@ -1,29 +0,0 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh

View File

@ -1,29 +0,0 @@
name: Webapp Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run webapp test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run webapp tests"
run: ./tests/webapp-test/run-webapp-test.sh

View File

@ -1,29 +0,0 @@
name: Smoke Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run basic test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run smoke tests"
run: ./tests/smoke-test/run-smoke-test.sh

View File

@ -1 +0,0 @@
Change this file to trigger running the fixturenet-eth-test CI job

View File

@ -1,3 +0,0 @@
Change this file to trigger running the fixturenet-laconicd-test CI job
trigger

4
.gitignore vendored
View File

@ -5,6 +5,4 @@ laconic-so
 laconic_stack_orchestrator.egg-info
 __pycache__
 *~
-package
-stack_orchestrator/data/build_tag.txt
-/build

View File

@ -1,34 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
args: ['--allow-multiple-documents']
- id: check-json
- id: check-merge-conflict
- id: check-added-large-files
- repo: https://github.com/psf/black
rev: 23.12.1
hooks:
- id: black
language_version: python3
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: flake8
args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']
- repo: https://github.com/RobertCraigie/pyright-python
rev: v1.1.345
hooks:
- id: pyright
- repo: https://github.com/adrienverge/yamllint
rev: v1.35.1
hooks:
- id: yamllint
args: [-d, relaxed]

View File

@ -1,151 +0,0 @@
# Plan: Make Stack-Orchestrator AI-Friendly
## Goal
Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.
---
## Part 1: Documentation & Context Files
### 1.1 Add CLAUDE.md
Create a root-level context file for AI assistants.
**File:** `CLAUDE.md`
Contents:
- Project overview (what stack-orchestrator does)
- Stack creation workflow (step-by-step)
- File naming conventions
- Required vs optional fields in stack.yml
- Common patterns and anti-patterns
- Links to example stacks (simple, medium, complex)
### 1.2 Add JSON Schema for stack.yml
Create formal validation schema.
**File:** `schemas/stack-schema.json`
Benefits:
- AI tools can validate generated stacks
- IDEs provide autocomplete
- CI can catch errors early
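As a rough illustration of how such a schema could be consumed, here is a minimal sketch that validates a `stack.yml` against a hand-written subset of the rules in this plan. The schema literal below is an assumption based on the annotated template in section 1.3; it is not the proposed `schemas/stack-schema.json` itself.
```python
# Hypothetical sketch: validate a stack.yml against a minimal schema.
# The STACK_SCHEMA fields are assumptions drawn from the template in 1.3.
from ruamel.yaml import YAML
from jsonschema import validate, ValidationError

STACK_SCHEMA = {
    "type": "object",
    "required": ["version", "name"],
    "properties": {
        "version": {"enum": ["1.0", "1.1", "1.2"]},
        "name": {"type": "string", "pattern": "^[a-z0-9][a-z0-9-]*$"},
        "description": {"type": "string"},
        "repos": {"type": "array", "items": {"type": "string"}},
        "containers": {"type": "array", "items": {"type": "string", "pattern": "^cerc/"}},
        "pods": {"type": "array", "items": {"type": "string"}},
    },
}


def check_stack_file(path: str) -> None:
    with open(path) as f:
        stack = YAML(typ="safe").load(f)
    try:
        validate(instance=stack, schema=STACK_SCHEMA)
        print(f"{path}: OK")
    except ValidationError as e:
        print(f"{path}: invalid - {e.message}")


if __name__ == "__main__":
    check_stack_file("stack_orchestrator/data/stacks/_template/stack.yml")
```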
### 1.3 Add Template Stack with Comments
Create an annotated template for reference.
**File:** `stack_orchestrator/data/stacks/_template/stack.yml`
```yaml
# Stack definition template - copy this directory to create a new stack
version: "1.2" # Required: 1.0, 1.1, or 1.2
name: my-stack # Required: lowercase, hyphens only
description: "Human-readable description" # Optional
repos: # Git repositories to clone
- github.com/org/repo
containers: # Container images to build (must have matching container-build/)
- cerc/my-container
pods: # Deployment units (must have matching docker-compose-{pod}.yml)
- my-pod
```
### 1.4 Document Validation Rules
Create explicit documentation of constraints currently scattered in code.
**File:** `docs/stack-format.md`
Contents:
- Container names must start with `cerc/`
- Pod names must match compose file: `docker-compose-{pod}.yml`
- Repository format: `host/org/repo[@ref]`
- Stack directory name should match `name` field
- Version field options and differences
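To make the constraints above concrete, the sketch below encodes them as simple lint checks. The `cerc/` prefix, the `docker-compose-{pod}.yml` pairing, the directory-name rule, and the `host/org/repo[@ref]` format come from the list; the function itself is illustrative and not part of the current codebase.
```python
# Illustrative only: the documented stack.yml constraints as lint checks.
import re
from pathlib import Path
from typing import List

REPO_RE = re.compile(r"^[\w.-]+/[\w.-]+/[\w.-]+(@[\w./-]+)?$")  # host/org/repo[@ref]


def lint_stack(stack: dict, data_dir: Path, stack_dir_name: str) -> List[str]:
    problems = []
    if stack.get("name") != stack_dir_name:
        problems.append("stack directory name should match the 'name' field")
    for container in stack.get("containers", []):
        if not container.startswith("cerc/"):
            problems.append(f"container name must start with 'cerc/': {container}")
    for pod in stack.get("pods", []):
        compose_file = data_dir / "compose" / f"docker-compose-{pod}.yml"
        if not compose_file.exists():
            problems.append(f"missing compose file for pod '{pod}': {compose_file}")
    for repo in stack.get("repos", []):
        if not REPO_RE.match(repo):
            problems.append(f"repository not in host/org/repo[@ref] form: {repo}")
    return problems
```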
---
## Part 2: Add `create-stack` Command
### 2.1 Command Overview
```bash
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
```
**Behavior:**
1. Parse repo URL to extract app name (if --name not provided)
2. Create `stacks/{name}/stack.yml`
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
4. Create `compose/docker-compose-{name}.yml`
5. Update list files (repository-list.txt, container-image-list.txt, pod-list.txt)
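To make step 1 concrete, here is a small sketch of deriving the default stack name from the `--repo` value; the exact parsing the command would ship with is not specified in this plan, so treat this as an assumption.
```python
# Sketch: derive a default stack name from a repo URL such as
# "github.com/org/my-app" or "github.com/org/my-app@v1" (behavior step 1).
def default_stack_name(repo: str) -> str:
    repo_without_ref = repo.split("@")[0]           # drop an optional @ref suffix
    name = repo_without_ref.rstrip("/").split("/")[-1]
    if name.endswith(".git"):                       # tolerate a trailing .git
        name = name[: -len(".git")]
    return name.lower()


assert default_stack_name("github.com/org/my-app") == "my-app"
assert default_stack_name("github.com/org/My-App.git@v1") == "my-app"
```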
### 2.2 Files to Create
| File | Purpose |
|------|---------|
| `stack_orchestrator/create/__init__.py` | Package init |
| `stack_orchestrator/create/create_stack.py` | Command implementation |
### 2.3 Files to Modify
| File | Change |
|------|--------|
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
### 2.4 Command Options
| Option | Required | Description |
|--------|----------|-------------|
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
| `--name` | No | Stack name (defaults to repo name) |
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
| `--force` | No | Overwrite existing files |
### 2.5 Template Types
| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| service | python:3.11-slim | 8080 | Python backend services |
| empty | none | none | Custom from scratch |
---
## Part 3: Implementation Summary
### New Files (6)
1. `CLAUDE.md` - AI assistant context
2. `schemas/stack-schema.json` - Validation schema
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
4. `docs/stack-format.md` - Stack format documentation
5. `stack_orchestrator/create/__init__.py` - Package init
6. `stack_orchestrator/create/create_stack.py` - Command implementation
### Modified Files (1)
1. `stack_orchestrator/main.py` - Register create-stack command
---
## Verification
```bash
# 1. Command appears in help
laconic-so --help | grep create-stack
# 2. Dry run works
laconic-so --dry-run create-stack --repo github.com/org/test-app
# 3. Creates all expected files
laconic-so create-stack --repo github.com/org/test-app
ls stack_orchestrator/data/stacks/test-app/
ls stack_orchestrator/data/container-build/cerc-test-app/
ls stack_orchestrator/data/compose/docker-compose-test-app.yml
# 4. Build works with generated stack
laconic-so --stack test-app build-containers
```

121
CLAUDE.md
View File

@ -1,121 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code when working with the stack-orchestrator project.
## Some rules to follow
NEVER speculate about the cause of something
NEVER assume your hypotheses are true without evidence
ALWAYS clearly state when something is a hypothesis
ALWAYS use evidence from the systems you're interacting with to support your claims and hypotheses
ALWAYS run `pre-commit run --all-files` before committing changes
## Key Principles
### Development Guidelines
- **Single responsibility** - Each component has one clear purpose
- **Fail fast** - Let errors propagate, don't hide failures
- **DRY/KISS** - Minimize duplication and complexity
## Development Philosophy: Conversational Literate Programming
### Approach
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.
### Core Principles
- **Documentation-First**: All changes begin with discussion of intent and reasoning
- **Narrative-Driven**: Complex systems are explained through conversational exploration
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
- **Iterative Understanding**: Architecture and implementation evolve through dialogue
### Working Method
1. **Explore and Understand**: Read existing code to understand current state
2. **Discuss Architecture**: Workshop complex design decisions through conversation
3. **Document Intent**: Update TODO.md with clear justification before coding
4. **Explain Changes**: Each modification includes reasoning and context
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution
### Implementation Guidelines
- Treat conversations as primary documentation
- Explain architectural decisions before implementing
- Use TODO.md as the "literate document" that justifies all work
- Maintain clear narrative threads across sessions
- Workshop complex ideas before coding
This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.
## External Stacks Preferred
When creating new stacks for any reason, **use the external stack pattern** rather than adding stacks directly to this repository.
External stacks follow this structure:
```
my-stack/
└── stack-orchestrator/
├── stacks/
│ └── my-stack/
│ ├── stack.yml
│ └── README.md
├── compose/
│ └── docker-compose-my-stack.yml
└── config/
└── my-stack/
└── (config files)
```
### Usage
```bash
# Fetch external stack
laconic-so fetch-stack github.com/org/my-stack
# Use external stack
STACK_PATH=~/cerc/my-stack/stack-orchestrator/stacks/my-stack
laconic-so --stack $STACK_PATH deploy init --output spec.yml
laconic-so --stack $STACK_PATH deploy create --spec-file spec.yml --deployment-dir deployment
laconic-so deployment --dir deployment start
```
### Examples
- `zenith-karma-stack` - Karma watcher deployment
- `urbit-stack` - Fake Urbit ship for testing
- `zenith-desk-stack` - Desk deployment stack
## Architecture: k8s-kind Deployments
### One Cluster Per Host
One Kind cluster per host by design. Never request or expect separate clusters.
- `create_cluster()` in `helpers.py` reuses any existing cluster
- `cluster-id` in deployment.yml is an identifier, not a cluster request
- All deployments share: ingress controller, etcd, certificates
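A minimal sketch of what "reuse any existing cluster" can look like; this is not the real `create_cluster()` from `helpers.py`, just generic `kind` CLI calls illustrating the idempotent, one-cluster-per-host behavior.
```python
# Sketch (not helpers.py): create a kind cluster only if none exists yet.
import subprocess


def ensure_single_kind_cluster(cluster_name: str, kind_config_file: str) -> None:
    existing = subprocess.run(
        ["kind", "get", "clusters"], capture_output=True, text=True, check=True
    ).stdout.split()
    if existing:
        # One cluster per host: reuse whatever is already there.
        print(f"Reusing existing kind cluster: {existing[0]}")
        return
    subprocess.run(
        ["kind", "create", "cluster", "--name", cluster_name,
         "--config", kind_config_file],
        check=True,
    )
```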
### Stack Resolution
- External stacks detected via `Path(stack).exists()` in `util.py`
- Config/compose resolution: external path first, then internal fallback
- External path structure: `stack_orchestrator/data/stacks/<name>/stack.yml`
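A sketch of the resolution order described above (external path first, internal fallback); the helper and the `INTERNAL_STACKS_DIR` constant are illustrative, not the actual `util.py` code.
```python
# Illustrative resolution order: an external stack is a path that exists on
# disk; otherwise fall back to a stack bundled inside the package.
from pathlib import Path

INTERNAL_STACKS_DIR = Path(__file__).parent / "data" / "stacks"  # assumed layout


def resolve_stack_dir(stack: str) -> Path:
    external = Path(stack)
    if external.exists():                  # external stack given as a filesystem path
        return external
    return INTERNAL_STACKS_DIR / stack     # internal stack given by name
```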
### Secret Generation Implementation
- `GENERATE_TOKEN_PATTERN` in `deployment_create.py` matches `$generate:type:length$`
- `_generate_and_store_secrets()` creates K8s Secret
- `cluster_info.py` adds `envFrom` with `secretRef` to containers
- Non-secret config written to `config.env`
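A rough sketch of handling `$generate:type:length$` values; the regex and token helper here are assumptions for illustration, not the actual `GENERATE_TOKEN_PATTERN` or `_generate_and_store_secrets()` implementation.
```python
# Sketch: recognize "$generate:type:length$" config values and mint a token.
import re
import secrets
import string

GENERATE_PATTERN = re.compile(r"^\$generate:(?P<type>\w+):(?P<length>\d+)\$$")  # assumed shape


def maybe_generate(value: str) -> str:
    m = GENERATE_PATTERN.match(value)
    if not m:
        return value  # plain config value, written to config.env as-is
    length = int(m.group("length"))
    if m.group("type") == "hex":
        return secrets.token_hex(length // 2)  # token_hex(n) yields 2n hex chars
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))


print(maybe_generate("$generate:hex:32$"))  # e.g. a 32-character hex secret
```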
### Repository Cloning
`setup-repositories --git-ssh` clones repos defined in stack.yml's `repos:` field. Requires SSH agent.
### Key Files (for codebase navigation)
- `repos/setup_repositories.py`: `setup-repositories` command (git clone)
- `deployment_create.py`: `deploy create` command, secret generation
- `deployment.py`: `deployment start/stop/restart` commands
- `deploy_k8s.py`: K8s deployer, cluster management calls
- `helpers.py`: `create_cluster()`, etcd cleanup, kind operations
- `cluster_info.py`: K8s resource generation (Deployment, Service, Ingress)
## Insights and Observations
### Design Principles
- **When something times out that doesn't mean it needs a longer timeout it means something that was expected never happened, not that we need to wait longer for it.**
- **NEVER change a timeout because you believe something truncated, you don't understand timeouts, don't edit them unless told to explicitly by user.**

105
README.md
View File

@ -6,7 +6,7 @@ Stack Orchestrator allows building and deployment of a Laconic Stack on a single
 ## Install
-**To get started quickly** on a fresh Ubuntu instance (e.g, Digital Ocean); [try this script](./scripts/quick-install-linux.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
+**To get started quickly** on a fresh Ubuntu instance (e.g, Digital Ocean); [try this script](./scripts/quick-install-ubuntu.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
 For any other installation, follow along below and **adapt these instructions based on the specifics of your system.**
@ -16,7 +16,6 @@ Ensure that the following are already installed:
 - [Python3](https://wiki.python.org/moin/BeginnersGuide/Download): `python3 --version` >= `3.8.10` (the Python3 shipped in Ubuntu 20+ is good to go)
 - [Docker](https://docs.docker.com/get-docker/): `docker --version` >= `20.10.21`
 - [jq](https://stedolan.github.io/jq/download/): `jq --version` >= `1.5`
-- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git): `git --version` >= `2.10.3`
 Note: if installing docker-compose via package manager on Linux (as opposed to Docker Desktop), you must [install the plugin](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually), e.g. :
@ -29,99 +28,64 @@ chmod +x ~/.docker/cli-plugins/docker-compose
 Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
 a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
-Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
+Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
 ```bash
-curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
 ```
 Give it execute permissions:
 ```bash
 chmod +x ~/bin/laconic-so
 ```
 Ensure `laconic-so` is on the [`PATH`](https://unix.stackexchange.com/a/26059)
-Verify operation (your version will probably be different, just check here that you see some version output and not an error):
+Verify operation (your version will probably be different, just check here that you see some version outut and not an error):
 ```
 laconic-so version
-Version: 1.1.0-7a607c2-202304260513
-```
-Save the distribution url to `~/.laconic-so/config.yml`:
-```bash
-mkdir ~/.laconic-so
-echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml
-```
-### Update
-If Stack Orchestrator was installed using the process described above, it is able to subsequently self-update to the current latest version by running:
-```bash
-laconic-so update
+Version: v1.0.27-7831078
 ```
 ## Usage
-The various [stacks](/stack_orchestrator/data/stacks) each contain instructions for running different stacks based on your use case. For example:
-- [self-hosted Gitea](/stack_orchestrator/data/stacks/build-support)
-- [an Optimism Fixturenet](/stack_orchestrator/data/stacks/fixturenet-optimism)
-- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded)
-- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo)
-## Deployment Types
-- **compose**: Docker Compose on local machine
-- **k8s**: External Kubernetes cluster (requires kubeconfig)
-- **k8s-kind**: Local Kubernetes via Kind - one cluster per host, shared by all deployments
-## External Stacks
-Stacks can live in external git repositories. Required structure:
-```
-<repo>/
-  stack_orchestrator/data/
-    stacks/<stack-name>/stack.yml
-    compose/docker-compose-<pod-name>.yml
-    deployment/spec.yml
-```
-## Deployment Commands
+Three sub-commands: `setup-repositories`, `build-containers` and `deploy-system` are generally run in order. The following is a slim example for standing up the `erc20-watcher`. Go further with the [erc20 watcher demo](/app/data/stacks/erc20) and other pieces of the stack, within the [`stacks` directory](/app/data/stacks).
+### Setup Repositories
+Clone the set of git repositories necessary to build a system:
 ```bash
-# Create deployment from spec
-laconic-so --stack <path> deploy create --spec-file <spec.yml> --deployment-dir <dir>
-# Start (creates cluster on first run)
-laconic-so deployment --dir <dir> start
-# GitOps restart (git pull + redeploy, preserves data)
-laconic-so deployment --dir <dir> restart
-# Stop
-laconic-so deployment --dir <dir> stop
+laconic-so --stack erc20 setup-repositories
 ```
-## spec.yml Reference
-```yaml
-stack: stack-name-or-path
-deploy-to: k8s-kind
-network:
-  http-proxy:
-    - host-name: app.example.com
-      routes:
-        - path: /
-          proxy-to: service-name:port
-  acme-email: admin@example.com
-config:
-  ENV_VAR: value
-  SECRET_VAR: $generate:hex:32$ # Auto-generated, stored in K8s Secret
-volumes:
-  volume-name:
+This will default to cloning git reposiories into: `~/cerc` or - if set - the environment variable `CERC_REPO_BASE_DIR`
+### Build Containers
+Build the set of docker container images required to run a system. It takes around 10 minutes to build all the containers from scratch.
+```bash
+laconic-so --stack erc20 build-containers
+```
+### Deploy System
+Uses `docker compose` to deploy a system (with most recently built container images).
+```bash
+laconic-so --stack erc20 deploy-system up
+```
+Check out he GraphQL playground here: [http://localhost:3002/graphql](http://localhost:3002/graphql)
+See the [erc20 watcher demo](/app/data/stacks/erc20) to continue further.
+### Cleanup
+```bash
+laconic-so --stack erc20 deploy-system down
 ```
 ## Contributing
@ -131,3 +95,4 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
 ## Platform Support
 Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).

View File

@ -1,413 +0,0 @@
# Implementing `laconic-so create-stack` Command
A plan for adding a new CLI command to scaffold stack files automatically.
---
## Overview
Add a `create-stack` command that generates all required files for a new stack:
```bash
laconic-so create-stack --name my-stack --type webapp
```
**Output:**
```
stack_orchestrator/data/
├── stacks/my-stack/stack.yml
├── container-build/cerc-my-stack/
│ ├── Dockerfile
│ └── build.sh
└── compose/docker-compose-my-stack.yml
Updated: repository-list.txt, container-image-list.txt, pod-list.txt
```
---
## CLI Architecture Summary
### Command Registration Pattern
Commands are Click functions registered in `main.py`:
```python
# main.py (line ~70)
from stack_orchestrator.create import create_stack
cli.add_command(create_stack.command, "create-stack")
```
### Global Options Access
```python
from stack_orchestrator.opts import opts
if not opts.o.quiet:
print("message")
if opts.o.dry_run:
print("(would create files)")
```
### Key Utilities
| Function | Location | Purpose |
|----------|----------|---------|
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
| `error_exit(msg)` | `util.py` | Print error and exit(1) |
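A hedged sketch of how a new command might lean on these helpers, assuming only the behavior listed in the table; option plumbing and error handling are elided.
```python
# Hypothetical command using the utilities above (signatures assumed from the table).
import click
from stack_orchestrator.util import get_yaml, get_stack_path, error_exit


@click.command()
@click.option("--stack", required=True, help="Stack name or external stack path")
def show_stack(stack: str):
    stack_file = get_stack_path(stack).joinpath("stack.yml")
    if not stack_file.exists():
        error_exit(f"No stack.yml found at {stack_file}")
    with open(stack_file, "r") as f:
        parsed = get_yaml().load(f)
    click.echo(f"{parsed['name']}: pods={parsed.get('pods', [])}")
```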
---
## Files to Create
### 1. Command Module
**`stack_orchestrator/create/__init__.py`**
```python
# Empty file to make this a package
```
**`stack_orchestrator/create/create_stack.py`**
```python
import click
import os
from pathlib import Path
from shutil import copy
from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, get_yaml
# Template types
STACK_TEMPLATES = {
"webapp": {
"description": "Web application with Node.js",
"base_image": "node:20-bullseye-slim",
"port": 3000,
},
"service": {
"description": "Backend service",
"base_image": "python:3.11-slim",
"port": 8080,
},
"empty": {
"description": "Minimal stack with no defaults",
"base_image": None,
"port": None,
},
}
def get_data_dir() -> Path:
"""Get path to stack_orchestrator/data directory"""
return Path(__file__).absolute().parent.parent.joinpath("data")
def validate_stack_name(name: str) -> None:
"""Validate stack name follows conventions"""
import re
if not re.match(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', name):
error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
if name.startswith("cerc-"):
error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")
def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
"""Create stack.yml file"""
config = {
"version": "1.2",
"name": name,
"description": template.get("description", f"Stack: {name}"),
"repos": [repo_url] if repo_url else [],
"containers": [f"cerc/{name}"],
"pods": [name],
}
stack_dir.mkdir(parents=True, exist_ok=True)
with open(stack_dir / "stack.yml", "w") as f:
get_yaml().dump(config, f)
def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
"""Create Dockerfile"""
base_image = template.get("base_image", "node:20-bullseye-slim")
port = template.get("port", 3000)
dockerfile_content = f'''# Build stage
FROM {base_image} AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build
# Production stage
FROM {base_image}
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY --from=builder /app/dist ./dist
EXPOSE {port}
CMD ["npm", "run", "start"]
'''
container_dir.mkdir(parents=True, exist_ok=True)
with open(container_dir / "Dockerfile", "w") as f:
f.write(dockerfile_content)
def create_build_script(container_dir: Path, name: str) -> None:
"""Create build.sh script"""
build_script = f'''#!/usr/bin/env bash
# Build cerc/{name}
source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )
docker build -t cerc/{name}:local \\
-f ${{SCRIPT_DIR}}/Dockerfile \\
${{build_command_args}} \\
${{CERC_REPO_BASE_DIR}}/{name}
'''
build_path = container_dir / "build.sh"
with open(build_path, "w") as f:
f.write(build_script)
# Make executable
os.chmod(build_path, 0o755)
def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
"""Create docker-compose file"""
port = template.get("port", 3000)
compose_content = {
"version": "3.8",
"services": {
name: {
"image": f"cerc/{name}:local",
"restart": "unless-stopped",
"ports": [f"${{HOST_PORT:-{port}}}:{port}"],
"environment": {
"NODE_ENV": "${NODE_ENV:-production}",
},
}
}
}
with open(compose_dir / f"docker-compose-{name}.yml", "w") as f:
get_yaml().dump(compose_content, f)
def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
"""Add entry to a list file if not already present"""
list_path = data_dir / filename
# Read existing entries
existing = set()
if list_path.exists():
with open(list_path, "r") as f:
existing = set(line.strip() for line in f if line.strip())
# Add new entry
if entry not in existing:
with open(list_path, "a") as f:
f.write(f"{entry}\n")
@click.command()
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
@click.option("--type", "stack_type", default="webapp",
type=click.Choice(list(STACK_TEMPLATES.keys())),
help="Stack template type")
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
@click.option("--force", is_flag=True, help="Overwrite existing files")
@click.pass_context
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
"""Create a new stack with all required files.
Examples:
laconic-so create-stack --name my-app --type webapp
laconic-so create-stack --name my-service --type service --repo github.com/org/repo
"""
# Validate
validate_stack_name(name)
template = STACK_TEMPLATES[stack_type]
data_dir = get_data_dir()
# Define paths
stack_dir = data_dir / "stacks" / name
container_dir = data_dir / "container-build" / f"cerc-{name}"
compose_dir = data_dir / "compose"
# Check for existing files
if not force:
if stack_dir.exists():
error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
if container_dir.exists():
error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")
# Dry run check
if opts.o.dry_run:
print(f"Would create stack '{name}' with template '{stack_type}':")
print(f" - {stack_dir}/stack.yml")
print(f" - {container_dir}/Dockerfile")
print(f" - {container_dir}/build.sh")
print(f" - {compose_dir}/docker-compose-{name}.yml")
print(f" - Update repository-list.txt")
print(f" - Update container-image-list.txt")
print(f" - Update pod-list.txt")
return
# Create files
if not opts.o.quiet:
print(f"Creating stack '{name}' with template '{stack_type}'...")
create_stack_yml(stack_dir, name, template, repo)
if opts.o.verbose:
print(f" Created {stack_dir}/stack.yml")
create_dockerfile(container_dir, name, template)
if opts.o.verbose:
print(f" Created {container_dir}/Dockerfile")
create_build_script(container_dir, name)
if opts.o.verbose:
print(f" Created {container_dir}/build.sh")
create_compose_file(compose_dir, name, template)
if opts.o.verbose:
print(f" Created {compose_dir}/docker-compose-{name}.yml")
# Update list files
if repo:
update_list_file(data_dir, "repository-list.txt", repo)
if opts.o.verbose:
print(f" Added {repo} to repository-list.txt")
update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
if opts.o.verbose:
print(f" Added cerc/{name} to container-image-list.txt")
update_list_file(data_dir, "pod-list.txt", name)
if opts.o.verbose:
print(f" Added {name} to pod-list.txt")
# Summary
if not opts.o.quiet:
print(f"\nStack '{name}' created successfully!")
print(f"\nNext steps:")
print(f" 1. Edit {stack_dir}/stack.yml")
print(f" 2. Customize {container_dir}/Dockerfile")
print(f" 3. Run: laconic-so --stack {name} build-containers")
print(f" 4. Run: laconic-so --stack {name} deploy-system up")
```
### 2. Register Command in main.py
**Edit `stack_orchestrator/main.py`**
Add import:
```python
from stack_orchestrator.create import create_stack
```
Add command registration (after line ~78):
```python
cli.add_command(create_stack.command, "create-stack")
```
---
## Implementation Steps
### Step 1: Create module structure
```bash
mkdir -p stack_orchestrator/create
touch stack_orchestrator/create/__init__.py
```
### Step 2: Create the command file
Create `stack_orchestrator/create/create_stack.py` with the code above.
### Step 3: Register in main.py
Add the import and `cli.add_command()` line.
### Step 4: Test the command
```bash
# Show help
laconic-so create-stack --help
# Dry run
laconic-so --dry-run create-stack --name test-app --type webapp
# Create a stack
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app
# Verify
ls -la stack_orchestrator/data/stacks/test-app/
cat stack_orchestrator/data/stacks/test-app/stack.yml
```
---
## Template Types
| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| `service` | python:3.11-slim | 8080 | Python backend services |
| `empty` | none | none | Custom from scratch |
---
## Future Enhancements
1. **Interactive mode** - Prompt for values if not provided
2. **More templates** - Go, Rust, database stacks
3. **Template from existing** - `--from-stack existing-stack`
4. **External stack support** - Create in custom directory
5. **Validation command** - `laconic-so validate-stack --name my-stack`
---
## Files Modified
| File | Change |
|------|--------|
| `stack_orchestrator/create/__init__.py` | New (empty) |
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
---
## Verification
```bash
# 1. Command appears in help
laconic-so --help | grep create-stack
# 2. Dry run works
laconic-so --dry-run create-stack --name verify-test --type webapp
# 3. Full creation works
laconic-so create-stack --name verify-test --type webapp
ls stack_orchestrator/data/stacks/verify-test/
ls stack_orchestrator/data/container-build/cerc-verify-test/
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml
# 4. Build works
laconic-so --stack verify-test build-containers
# 5. Cleanup
rm -rf stack_orchestrator/data/stacks/verify-test
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
```

16
TODO.md
View File

@ -1,16 +0,0 @@
# TODO
## Features Needed
### Update Stack Command
We need an "update stack" command in stack orchestrator and cleaner documentation regarding how to do continuous deployment with and without payments.
**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.
## Architecture Refactoring
### Separate Deployer from Stack Orchestrator CLI
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.
### Separate Stacks from Stack Orchestrator Repo
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.

View File

@ -1,4 +1,4 @@
# Copyright © 2022, 2023 Vulcanize # Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by # it under the terms of the GNU Affero General Public License as published by
@ -15,18 +15,18 @@
import os import os
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from stack_orchestrator.deploy.deploy import get_stack_status from .deploy_system import get_stack_status
from decouple import config
def get_stack(config, stack): def get_stack(config, stack):
if stack == "package-registry": if stack == "package-registry":
return package_registry_stack(config, stack) return package_registry_stack(config, stack)
else: else:
return default_stack(config, stack) return base_stack(config, stack)
class base_stack(ABC): class base_stack(ABC):
def __init__(self, config, stack): def __init__(self, config, stack):
self.config = config self.config = config
self.stack = stack self.stack = stack
@ -40,27 +40,15 @@ class base_stack(ABC):
pass pass
class default_stack(base_stack):
"""Default stack implementation for stacks without specific handling."""
def ensure_available(self):
return True
def get_url(self):
return None
class package_registry_stack(base_stack): class package_registry_stack(base_stack):
def ensure_available(self): def ensure_available(self):
self.url = "<no registry url set>" self.url = "<no registry url set>"
# Check if we were given an external registry URL # Check if we were given an external registry URL
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL") url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
if url_from_environment: if url_from_environment:
if self.config.verbose: if self.config.verbose:
print( print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
f"Using package registry url from CERC_NPM_REGISTRY_URL: "
f"{url_from_environment}"
)
self.url = url_from_environment self.url = url_from_environment
else: else:
# Otherwise we expect to use the local package-registry stack # Otherwise we expect to use the local package-registry stack
@ -73,29 +61,11 @@ class package_registry_stack(base_stack):
# TODO: get url from deploy-stack # TODO: get url from deploy-stack
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/" self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
else: else:
# If not, print a message about how to start it and return fail to the # If not, print a message about how to start it and return fail to the caller
# caller print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
print( print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
"ERROR: The package-registry stack is not running, "
"and no external registry specified with CERC_NPM_REGISTRY_URL"
)
print(
"ERROR: Start the local package registry with: "
"laconic-so --stack package-registry deploy-system up"
)
return False return False
return True return True
def get_url(self): def get_url(self):
return self.url return self.url
def get_npm_registry_url():
# If an auth token is not defined, we assume the default should be the cerc registry
# If an auth token is defined, we assume the local gitea should be used.
default_npm_registry_url = (
"http://gitea.local:3000/api/packages/cerc-io/npm/"
if config("CERC_NPM_AUTH_TOKEN", default=None)
else "https://git.vdb.to/api/packages/cerc-io/npm/"
)
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
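Based on the registry-resolution logic above, one way to bypass the local package-registry stack is to point builds at an external registry via the environment. This is a sketch only; the package name is illustrative and the subcommand name is assumed from the CLI:
```bash
# Sketch: use an external npm registry instead of the local package-registry stack
export CERC_NPM_REGISTRY_URL="https://git.vdb.to/api/packages/cerc-io/npm/"
export CERC_NPM_AUTH_TOKEN="<your-token>"
laconic-so build-npms --include laconic-sdk  # package name is illustrative
```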

131
app/build_containers.py Normal file
View File

@ -0,0 +1,131 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
# Builds or pulls containers for the system components
# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc
# TODO: display the available list of containers; allow re-build of either all or specific containers
import os
import sys
from decouple import config
import subprocess
import click
import importlib.resources
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config
# TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
@click.command()
@click.option('--include', help="only build these containers")
@click.option('--exclude', help="don\'t build these containers")
@click.pass_context
def command(ctx, include, exclude):
'''build the set of containers required for a complete stack'''
quiet = ctx.obj.quiet
verbose = ctx.obj.verbose
dry_run = ctx.obj.dry_run
local_stack = ctx.obj.local_stack
stack = ctx.obj.stack
continue_on_error = ctx.obj.continue_on_error
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
container_build_dir = Path(__file__).absolute().parent.joinpath("data", "container-build")
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
if not quiet:
print(f'Dev Root is: {dev_root_path}')
if not os.path.isdir(dev_root_path):
print('Dev root directory doesn\'t exist, creating')
os.makedirs(dev_root_path)
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
all_containers = container_list_file.read().splitlines()
containers_in_scope = []
if stack:
stack_config = get_parsed_stack_config(stack)
containers_in_scope = stack_config['containers']
else:
containers_in_scope = all_containers
if verbose:
print(f'Containers: {containers_in_scope}')
if stack:
print(f"Stack: {stack}")
# TODO: make this configurable
container_build_env = {
"CERC_NPM_URL": "http://gitea.local:3000/api/packages/cerc-io/npm/",
"CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default="<token-not-supplied>"),
"CERC_REPO_BASE_DIR": dev_root_path,
"CERC_HOST_UID": f"{os.getuid()}",
"CERC_HOST_GID": f"{os.getgid()}",
"DOCKER_BUILDKIT": "0"
}
def process_container(container):
if not quiet:
print(f"Building: {container}")
build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
build_script_filename = os.path.join(build_dir, "build.sh")
if verbose:
print(f"Build script filename: {build_script_filename}")
if os.path.exists(build_script_filename):
build_command = build_script_filename
else:
if verbose:
print(f"No script file found: {build_script_filename}, using default build script")
repo_dir = container.split('/')[1]
# TODO: make this less of a hack -- should be specified in some metadata somewhere
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
repo_full_path = os.path.join(dev_root_path, repo_dir)
repo_dir_or_build_dir = repo_dir if os.path.exists(repo_full_path) else build_dir
build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
if not dry_run:
if verbose:
print(f"Executing: {build_command} with environment: {container_build_env}")
build_result = subprocess.run(build_command, shell=True, env=container_build_env)
if verbose:
print(f"Return code is: {build_result.returncode}")
if build_result.returncode != 0:
print(f"Error running build for {container}")
if not continue_on_error:
print("FATAL Error: container build failed and --continue-on-error not set, exiting")
sys.exit(1)
else:
print("****** Container Build Error, continuing because --continue-on-error is set")
else:
print("Skipped")
for container in containers_in_scope:
if include_exclude_check(container, include, exclude):
process_container(container)
else:
if verbose:
print(f"Excluding: {container}")
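Given the stack scoping and the include/exclude options in the command above, typical invocations would look something like this (stack and container names are illustrative, and the global `--dry-run` flag is assumed from the shared CLI context):
```bash
# Sketch: build all containers for a stack, or narrow the set
laconic-so --stack fixturenet-eth build-containers
laconic-so build-containers --include cerc/test-container
laconic-so --dry-run build-containers --exclude cerc/foundry
```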

View File

@ -1,4 +1,4 @@
# Copyright © 2022, 2023 Vulcanize # Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by # it under the terms of the GNU Affero General Public License as published by
@ -25,25 +25,17 @@ from decouple import config
import click import click
import importlib.resources import importlib.resources
from python_on_whales import docker, DockerException from python_on_whales import docker, DockerException
from stack_orchestrator.base import get_stack from .base import get_stack
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config from .util import include_exclude_check, get_parsed_stack_config
builder_js_image_name = "cerc/builder-js:local" builder_js_image_name = "cerc/builder-js:local"
@click.command() @click.command()
@click.option("--include", help="only build these packages") @click.option('--include', help="only build these packages")
@click.option("--exclude", help="don't build these packages") @click.option('--exclude', help="don\'t build these packages")
@click.option(
"--force-rebuild",
is_flag=True,
default=False,
help="Override existing target package version check -- force rebuild",
)
@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.pass_context @click.pass_context
def command(ctx, include, exclude, force_rebuild, extra_build_args): def command(ctx, include, exclude):
"""build the set of npm packages required for a complete stack""" '''build the set of npm packages required for a complete stack'''
quiet = ctx.obj.quiet quiet = ctx.obj.quiet
verbose = ctx.obj.verbose verbose = ctx.obj.verbose
@ -70,53 +62,44 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
if local_stack: if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print( print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
f"{dev_root_path}"
)
else: else:
dev_root_path = os.path.expanduser( dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
config("CERC_REPO_BASE_DIR", default="~/cerc")
)
build_root_path = os.path.join(dev_root_path, "build-trees") build_root_path = os.path.join(dev_root_path, "build-trees")
if verbose: if verbose:
print(f"Dev Root is: {dev_root_path}") print(f'Dev Root is: {dev_root_path}')
if not os.path.isdir(dev_root_path): if not os.path.isdir(dev_root_path):
print("Dev root directory doesn't exist, creating") print('Dev root directory doesn\'t exist, creating')
os.makedirs(dev_root_path) os.makedirs(dev_root_path)
if not os.path.isdir(dev_root_path): if not os.path.isdir(dev_root_path):
print("Build root directory doesn't exist, creating") print('Build root directory doesn\'t exist, creating')
os.makedirs(build_root_path) os.makedirs(build_root_path)
# See: https://stackoverflow.com/a/20885799/1701505 # See: https://stackoverflow.com/a/20885799/1701505
from stack_orchestrator import data from . import data
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
with importlib.resources.open_text(
data, "npm-package-list.txt"
) as package_list_file:
all_packages = package_list_file.read().splitlines() all_packages = package_list_file.read().splitlines()
packages_in_scope = [] packages_in_scope = []
if stack: if stack:
stack_config = get_parsed_stack_config(stack) stack_config = get_parsed_stack_config(stack)
# TODO: syntax check the input here # TODO: syntax check the input here
packages_in_scope = stack_config["npms"] packages_in_scope = stack_config['npms']
else: else:
packages_in_scope = all_packages packages_in_scope = all_packages
if verbose: if verbose:
print(f"Packages: {packages_in_scope}") print(f'Packages: {packages_in_scope}')
def build_package(package): def build_package(package):
if not quiet: if not quiet:
print(f"Building npm package: {package}") print(f"Building npm package: {package}")
repo_dir = package repo_dir = package
repo_full_path = os.path.join(dev_root_path, repo_dir) repo_full_path = os.path.join(dev_root_path, repo_dir)
# Copy the repo and build that to avoid propagating # Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
# JS tooling file changes back into the cloned repo
repo_copy_path = os.path.join(build_root_path, repo_dir) repo_copy_path = os.path.join(build_root_path, repo_dir)
# First delete any old build tree # First delete any old build tree
if os.path.isdir(repo_copy_path): if os.path.isdir(repo_copy_path):
@ -129,63 +112,37 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}") print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
if not dry_run: if not dry_run:
copytree(repo_full_path, repo_copy_path) copytree(repo_full_path, repo_copy_path)
build_command = [ build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
"sh",
"-c",
"cd /workspace && "
f"build-npm-package-local-dependencies.sh {npm_registry_url}",
]
if not dry_run: if not dry_run:
if verbose: if verbose:
print(f"Executing: {build_command}") print(f"Executing: {build_command}")
# Originally we used the PEP 584 merge operator: # Originally we used the PEP 584 merge operator:
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
# ({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) # but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
# but that isn't available in Python 3.8 (default in Ubuntu 20) envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token}
# so for now we use dict.update:
envs = {
"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
# Convention used by our web app packages
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
}
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
envs.update(
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
if extra_build_args
else {}
)
try: try:
docker.run( docker.run(builder_js_image_name,
builder_js_image_name,
remove=True, remove=True,
interactive=True, interactive=True,
tty=True, tty=True,
user=f"{os.getuid()}:{os.getgid()}", user=f"{os.getuid()}:{os.getgid()}",
envs=envs, envs=envs,
# TODO: detect this host name in npm_registry_url # TODO: detect this host name in npm_registry_url rather than hard-wiring it
# rather than hard-wiring it
add_hosts=[("gitea.local", "host-gateway")], add_hosts=[("gitea.local", "host-gateway")],
volumes=[(repo_copy_path, "/workspace")], volumes=[(repo_copy_path, "/workspace")],
command=build_command, command=build_command
) )
# Note that although the docs say that build_result should # Note that although the docs say that build_result should contain
# contain the command output as a string, in reality it is # the command output as a string, in reality it is always the empty string.
# always the empty string. Since we detect errors via catching # Since we detect errors via catching exceptions below, we can safely ignore it here.
# exceptions below, we can safely ignore it here.
except DockerException as e: except DockerException as e:
print(f"Error executing build for {package} in container:\n {e}") print(f"Error executing build for {package} in container:\n {e}")
if not continue_on_error: if not continue_on_error:
print( print("FATAL Error: build failed and --continue-on-error not set, exiting")
"FATAL Error: build failed and --continue-on-error "
"not set, exiting"
)
sys.exit(1) sys.exit(1)
else: else:
print( print("****** Build Error, continuing because --continue-on-error is set")
"****** Build Error, continuing because "
"--continue-on-error is set"
)
else: else:
print("Skipped") print("Skipped")
@ -203,12 +160,6 @@ def _ensure_prerequisites():
# Tell the user how to build it if not # Tell the user how to build it if not
images = docker.image.list(builder_js_image_name) images = docker.image.list(builder_js_image_name)
if len(images) == 0: if len(images) == 0:
print( print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
f"FATAL: builder image: {builder_js_image_name} is required " print("Please run this command to create it: laconic-so --stack build-support build-containers")
"but was not found"
)
print(
"Please run this command to create it: "
"laconic-so --stack build-support build-containers"
)
sys.exit(1) sys.exit(1)

View File

@ -2,34 +2,30 @@ version: '3.7'
services: services:
fixturenet-eth-bootnode-geth: fixturenet-eth-bootnode-geth:
restart: always
hostname: fixturenet-eth-bootnode-geth hostname: fixturenet-eth-bootnode-geth
env_file: env_file:
- ../config/fixturenet-eth/fixturenet-eth.env - ../config/fixturenet-eth/fixturenet-eth.env
environment: environment:
RUN_BOOTNODE: "true" RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-geth:local image: cerc/fixturenet-eth-geth:local
volumes:
- fixturenet_eth_bootnode_geth_data:/root/ethdata
ports: ports:
- "9898" - "9898"
- "30303" - "30303"
fixturenet-eth-geth-1: fixturenet-eth-geth-1:
restart: always
hostname: fixturenet-eth-geth-1 hostname: fixturenet-eth-geth-1
cap_add: cap_add:
- SYS_PTRACE - SYS_PTRACE
environment: environment:
CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true} CERC_REMOTE_DEBUG: "true"
CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect} CERC_RUN_STATEDIFF: "detect"
CERC_STATEDIFF_DB_NODE_ID: 1 CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
env_file: env_file:
- ../config/fixturenet-eth/fixturenet-eth.env - ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local image: cerc/fixturenet-eth-geth:local
volumes: volumes:
- fixturenet_eth_geth_1_data:/root/ethdata - fixturenet_geth_accounts:/opt/testnet/build/el
healthcheck: healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"] test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s interval: 30s
@ -40,12 +36,10 @@ services:
- fixturenet-eth-bootnode-geth - fixturenet-eth-bootnode-geth
ports: ports:
- "8545" - "8545"
- "8546"
- "40000" - "40000"
- "6060" - "6060"
fixturenet-eth-geth-2: fixturenet-eth-geth-2:
restart: always
hostname: fixturenet-eth-geth-2 hostname: fixturenet-eth-geth-2
healthcheck: healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"] test: ["CMD", "nc", "-v", "localhost", "8545"]
@ -53,28 +47,19 @@ services:
timeout: 10s timeout: 10s
retries: 10 retries: 10
start_period: 3s start_period: 3s
environment:
CERC_KEEP_RUNNING_AFTER_GETH_EXIT: "true"
env_file: env_file:
- ../config/fixturenet-eth/fixturenet-eth.env - ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local image: cerc/fixturenet-eth-geth:local
depends_on: depends_on:
- fixturenet-eth-bootnode-geth - fixturenet-eth-bootnode-geth
volumes:
- fixturenet_eth_geth_2_data:/root/ethdata
ports:
- "8545"
- "8546"
fixturenet-eth-bootnode-lighthouse: fixturenet-eth-bootnode-lighthouse:
restart: always
hostname: fixturenet-eth-bootnode-lighthouse hostname: fixturenet-eth-bootnode-lighthouse
environment: environment:
RUN_BOOTNODE: "true" RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-lighthouse:local image: cerc/fixturenet-eth-lighthouse:local
fixturenet-eth-lighthouse-1: fixturenet-eth-lighthouse-1:
restart: always
hostname: fixturenet-eth-lighthouse-1 hostname: fixturenet-eth-lighthouse-1
healthcheck: healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"] test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@ -89,8 +74,6 @@ services:
ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545" ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551" EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551"
image: cerc/fixturenet-eth-lighthouse:local image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_eth_lighthouse_1_data:/opt/testnet/build/cl
depends_on: depends_on:
fixturenet-eth-bootnode-lighthouse: fixturenet-eth-bootnode-lighthouse:
condition: service_started condition: service_started
@ -100,7 +83,6 @@ services:
- "8001" - "8001"
fixturenet-eth-lighthouse-2: fixturenet-eth-lighthouse-2:
restart: always
hostname: fixturenet-eth-lighthouse-2 hostname: fixturenet-eth-lighthouse-2
healthcheck: healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"] test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@ -116,8 +98,6 @@ services:
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551" EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551"
LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0" LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0"
image: cerc/fixturenet-eth-lighthouse:local image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_eth_lighthouse_2_data:/opt/testnet/build/cl
depends_on: depends_on:
fixturenet-eth-bootnode-lighthouse: fixturenet-eth-bootnode-lighthouse:
condition: service_started condition: service_started
@ -125,8 +105,4 @@ services:
condition: service_healthy condition: service_healthy
volumes: volumes:
fixturenet_eth_bootnode_geth_data: fixturenet_geth_accounts:
fixturenet_eth_geth_1_data:
fixturenet_eth_geth_2_data:
fixturenet_eth_lighthouse_1_data:
fixturenet_eth_lighthouse_2_data:

View File

@ -0,0 +1,6 @@
services:
laconic-console:
restart: unless-stopped
image: cerc/laconic-console-host:local
ports:
- "80"

View File

@ -1,33 +1,25 @@
version: "3.2"
services: services:
laconicd: laconicd:
restart: unless-stopped restart: unless-stopped
image: cerc/laconicd:local image: cerc/laconicd:local
command: ["bash", "/docker-entrypoint-scripts.d/create-fixturenet.sh"] command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
environment:
TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED:-false}
TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY:-false}
ONBOARDING_ENABLED: ${ONBOARDING_ENABLED:-false}
volumes: volumes:
# The cosmos-sdk node's database directory:
- laconicd-data:/root/.laconicd
# TODO: look at folding these scripts into the container # TODO: look at folding these scripts into the container
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh - ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
- ../config/fixturenet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
# TODO: determine which of the ports below is really needed # TODO: determine which of the ports below is really needed
ports: ports:
- "6060" - "6060"
- "26657" - "26657"
- "26656" - "26656"
- "9473" - "9473:9473"
- "8545"
- "8546"
- "9090" - "9090"
- "9091"
- "1317" - "1317"
cli: cli:
image: cerc/laconic-registry-cli:local image: cerc/laconic-registry-cli:local
volumes: volumes:
- ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
- ${BASE_DIR:-~/cerc}/laconic-registry-cli:/laconic-registry-cli
volumes:
laconicd-data:

View File

@ -0,0 +1,116 @@
version: '3.7'
services:
# Generates and funds the accounts required when setting up the L2 chain (outputs to volume l2_accounts)
# Creates / updates the configuration for L1 contracts deployment
# Deploys the L1 smart contracts (outputs to volume l1_deployment)
fixturenet-optimism-contracts:
hostname: fixturenet-optimism-contracts
image: cerc/optimism-contracts:local
env_file:
- ../config/fixturenet-optimism/l1-params.env
# Waits for L1 endpoint to be up before running the script
command: |
"./wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- ./run.sh"
volumes:
- ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
- ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
- ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
- ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
- fixturenet_geth_accounts:/geth-accounts:ro
- l2_accounts:/l2-accounts
- l1_deployment:/app/packages/contracts-bedrock
extra_hosts:
- "host.docker.internal:host-gateway"
# Generates the config files required for L2 (outputs to volume l2_config)
op-node-l2-config-gen:
image: cerc/optimism-op-node:local
depends_on:
fixturenet-optimism-contracts:
condition: service_completed_successfully
env_file:
- ../config/fixturenet-optimism/l1-params.env
volumes:
- ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
- l1_deployment:/contracts-bedrock:ro
- l2_config:/app
command: ["sh", "/app/generate-l2-config.sh"]
extra_hosts:
- "host.docker.internal:host-gateway"
# Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
op-geth:
image: cerc/optimism-l2geth:local
depends_on:
op-node-l2-config-gen:
condition: service_started
volumes:
- ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
- l2_config:/op-node:ro
- l2_accounts:/l2-accounts:ro
- l2_geth_data:/datadir
entrypoint: "sh"
command: "/run-op-geth.sh"
ports:
- "0.0.0.0:8545:8545"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost:8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 10s
# Runs the L2 consensus client (Sequencer node)
op-node:
env_file:
- ../config/fixturenet-optimism/l1-params.env
depends_on:
op-geth:
condition: service_healthy
image: cerc/optimism-op-node:local
volumes:
- ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
- l2_config:/op-node-data:ro
- l2_accounts:/l2-accounts:ro
command: ["sh", "/app/run-op-node.sh"]
ports:
- "0.0.0.0:8547:8547"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost:8547"]
interval: 30s
timeout: 10s
retries: 10
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
# Runs the batcher (takes transactions from the Sequencer and publishes them to L1)
op-batcher:
env_file:
- ../config/fixturenet-optimism/l1-params.env
depends_on:
op-node:
condition: service_healthy
op-geth:
condition: service_healthy
image: cerc/optimism-op-batcher:local
volumes:
- ../config/wait-for-it.sh:/wait-for-it.sh
- ../config/fixturenet-optimism/run-op-batcher.sh:/run-op-batcher.sh
- l2_accounts:/l2-accounts:ro
entrypoint: ["sh", "-c"]
# Waits for L1 endpoint to be up before running the batcher
command: |
"/wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- /run-op-batcher.sh"
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
fixturenet_geth_accounts:
l1_deployment:
l2_accounts:
l2_config:
l2_geth_data:

View File

@ -1,7 +1,6 @@
# Add-on pod to include foundry tooling within a fixturenet # Add-on pod to include foundry tooling within a fixturenet
services: services:
foundry: foundry:
restart: always
image: cerc/foundry:local image: cerc/foundry:local
command: ["while :; do sleep 600; done"] command: ["while :; do sleep 600; done"]
volumes: volumes:

View File

@ -7,9 +7,11 @@ services:
condition: service_healthy condition: service_healthy
image: cerc/ipld-eth-server:local image: cerc/ipld-eth-server:local
environment: environment:
SERVER_HTTP_PATH: 0.0.0.0:8081 IPLD_SERVER_GRAPHQL: "true"
SERVER_GRAPHQL: "true" IPLD_POSTGRAPHILEPATH: http://graphql:5000
SERVER_GRAPHQLPATH: 0.0.0.0:8082 ETH_SERVER_HTTPPATH: 0.0.0.0:8081
ETH_SERVER_GRAPHQL: "true"
ETH_SERVER_GRAPHQLPATH: 0.0.0.0:8082
VDB_COMMAND: "serve" VDB_COMMAND: "serve"
ETH_CHAIN_CONFIG: "/tmp/chain.json" ETH_CHAIN_CONFIG: "/tmp/chain.json"
DATABASE_NAME: cerc_testing DATABASE_NAME: cerc_testing
@ -25,8 +27,8 @@ services:
PROM_HTTP: "true" PROM_HTTP: "true"
PROM_HTTP_ADDR: "0.0.0.0" PROM_HTTP_ADDR: "0.0.0.0"
PROM_HTTP_PORT: "8090" PROM_HTTP_PORT: "8090"
LOG_LEVEL: "debug" LOGRUS_LEVEL: "debug"
CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true} CERC_REMOTE_DEBUG: "true"
volumes: volumes:
- type: bind - type: bind
source: ../config/ipld-eth-server/chain.json source: ../config/ipld-eth-server/chain.json

View File

@ -0,0 +1,13 @@
version: "3.2"
# See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up
services:
ipfs:
image: ipfs/kubo:master-2023-02-20-714a968
restart: always
volumes:
- ./ipfs/import:/import
- ./ipfs/data:/data/ipfs
ports:
- "8080"
- "4001"
- "5001"

View File

@ -14,3 +14,4 @@ services:
- "9090" - "9090"
- "9091" - "9091"
- "1317" - "1317"

View File

@ -0,0 +1,31 @@
version: '3.2'
services:
# Builds and serves the MobyMask react-app
mobymask-app:
image: cerc/mobymask-ui:local
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
# Waits for watcher server to be up before app build
# Required when running with watcher stack to get deployed contract address
command:
- sh
- -c
- ./wait-for-it.sh -h $${WATCHER_HOST} -p $${WATCHER_PORT} -s -t 0 -- ./mobymask-app-start.sh
volumes:
- ../config/wait-for-it.sh:/app/wait-for-it.sh
- ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json
- ../config/watcher-mobymask-v2/mobymask-app-start.sh:/app/mobymask-app-start.sh
- mobymask_deployment:/server
ports:
- "0.0.0.0:3002:3000"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
shm_size: '1GB'
volumes:
mobymask_deployment:

View File

@ -0,0 +1,21 @@
version: '3.2'
services:
peer-test-app:
# Builds and serves the peer-test react-app
image: cerc/react-peer:local
working_dir: /app/packages/test-app
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
command: ["sh", "./test-app-start.sh"]
volumes:
- ../config/watcher-mobymask-v2/test-app-config.json:/app/packages/test-app/src/test-app-config.json
- ../config/watcher-mobymask-v2/test-app-start.sh:/app/packages/test-app/test-app-start.sh
ports:
- "0.0.0.0:3003:3000"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s

View File

@ -0,0 +1,7 @@
version: "3.2"
services:
test:
image: cerc/test-container:local
restart: always
ports:
- "80"

View File

@ -34,7 +34,7 @@ services:
- ETH_RPC_URL=http://go-ethereum:8545 - ETH_RPC_URL=http://go-ethereum:8545
command: ["sh", "-c", "yarn server"] command: ["sh", "-c", "yarn server"]
volumes: volumes:
- ../config/watcher-erc20/erc20-watcher.toml:/app/environments/local.toml - ../config/watcher-erc20/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml
ports: ports:
- "0.0.0.0:3002:3001" - "0.0.0.0:3002:3001"
- "0.0.0.0:9002:9001" - "0.0.0.0:9002:9001"

View File

@ -0,0 +1,90 @@
version: '3.2'
services:
# Starts the PostgreSQL database for watcher
mobymask-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=mobymask-watcher,mobymask-watcher-job-queue
- POSTGRES_EXTENSION=mobymask-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- mobymask_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15432:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
# Deploys MobyMask contract and generates an invite link
# Deployment is skipped if DEPLOYED_CONTRACT env is already set
mobymask:
image: cerc/mobymask:local
working_dir: /app/packages/server
env_file:
- ../config/watcher-mobymask-v2/optimism-params.env
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
- ENV=PROD
# Waits for L2 Optimism Geth and Node servers to be up before deploying contract
command:
- sh
- -c
- |
./wait-for-it.sh -h $${L2_GETH_HOST} -p $${L2_GETH_PORT} -s -t 0 && \
./wait-for-it.sh -h $${L2_NODE_HOST} -p $${L2_NODE_PORT} -s -t 0 && \
./deploy-and-generate-invite.sh
volumes:
- ../config/wait-for-it.sh:/app/packages/server/wait-for-it.sh
- ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json
- ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh
- mobymask_deployment:/app/packages/server
- fixturenet_geth_accounts:/geth-accounts:ro
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the mobymask-v2-watcher server
mobymask-watcher-server:
restart: unless-stopped
depends_on:
mobymask-watcher-db:
condition: service_healthy
mobymask:
condition: service_completed_successfully
image: cerc/watcher-mobymask-v2:local
env_file:
- ../config/watcher-mobymask-v2/optimism-params.env
- ../config/watcher-mobymask-v2/mobymask-params.env
command: ["sh", "start-server.sh"]
volumes:
- ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/packages/mobymask-v2-watcher/environments/watcher-config-template.toml
- ../config/watcher-mobymask-v2/peer.env:/app/packages/peer/.env
- ../config/watcher-mobymask-v2/relay-id.json:/app/packages/mobymask-v2-watcher/relay-id.json
- ../config/watcher-mobymask-v2/peer-id.json:/app/packages/mobymask-v2-watcher/peer-id.json
- ../config/watcher-mobymask-v2/start-server.sh:/app/packages/mobymask-v2-watcher/start-server.sh
- mobymask_deployment:/server
- fixturenet_geth_accounts:/geth-accounts:ro
# Expose GQL, metrics and relay node ports
ports:
- "0.0.0.0:3001:3001"
- "0.0.0.0:9001:9001"
- "0.0.0.0:9090:9090"
healthcheck:
test: ["CMD", "busybox", "nc", "localhost", "9090"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
mobymask_watcher_db_data:
mobymask_deployment:
fixturenet_geth_accounts:

View File

@ -24,37 +24,15 @@ services:
retries: 15 retries: 15
start_period: 10s start_period: 10s
mobymask-watcher-job-runner:
restart: unless-stopped
depends_on:
mobymask-watcher-db:
condition: service_healthy
image: cerc/watcher-mobymask:local
command: ["sh", "-c", "yarn job-runner"]
volumes:
- ../config/watcher-mobymask/mobymask-watcher.toml:/app/environments/local.toml
ports:
- "0.0.0.0:9000:9000"
extra_hosts:
- "ipld-eth-server:host-gateway"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "9000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
mobymask-watcher-server: mobymask-watcher-server:
restart: unless-stopped restart: unless-stopped
depends_on: depends_on:
mobymask-watcher-db: mobymask-watcher-db:
condition: service_healthy condition: service_healthy
mobymask-watcher-job-runner:
condition: service_healthy
image: cerc/watcher-mobymask:local image: cerc/watcher-mobymask:local
command: ["sh", "-c", "yarn server"] command: ["sh", "-c", "yarn server"]
volumes: volumes:
- ../config/watcher-mobymask/mobymask-watcher.toml:/app/environments/local.toml - ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
ports: ports:
- "0.0.0.0:3001:3001" - "0.0.0.0:3001:3001"
- "0.0.0.0:9001:9001" - "0.0.0.0:9001:9001"
@ -67,5 +45,21 @@ services:
retries: 15 retries: 15
start_period: 5s start_period: 5s
mobymask-watcher-job-runner:
restart: unless-stopped
depends_on:
mobymask-watcher-server:
condition: service_healthy
mobymask-watcher-db:
condition: service_healthy
image: cerc/watcher-mobymask:local
command: ["sh", "-c", "yarn job-runner"]
volumes:
- ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
ports:
- "0.0.0.0:9000:9000"
extra_hosts:
- "ipld-eth-server:host-gateway"
volumes: volumes:
mobymask_watcher_db_data: mobymask_watcher_db_data:

View File

@ -17,12 +17,7 @@ CERC_STATEDIFF_DB_PORT=5432
CERC_STATEDIFF_DB_NAME="cerc_testing" CERC_STATEDIFF_DB_NAME="cerc_testing"
CERC_STATEDIFF_DB_USER="vdbm" CERC_STATEDIFF_DB_USER="vdbm"
CERC_STATEDIFF_DB_PASSWORD="password" CERC_STATEDIFF_DB_PASSWORD="password"
CERC_STATEDIFF_DB_GOOSE_MIN_VER=${CERC_STATEDIFF_DB_GOOSE_MIN_VER:-18} CERC_STATEDIFF_DB_GOOSE_MIN_VER=23
CERC_STATEDIFF_DB_LOG_STATEMENTS="${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false}" CERC_STATEDIFF_DB_LOG_STATEMENTS="false"
CERC_STATEDIFF_WORKERS=2
CERC_GETH_VMODULE="statediff/*=5,rpc/*=5" CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"
CERC_GETH_VERBOSITY=${CERC_GETH_VERBOSITY:-3}
# Used by Lighthouse
SECONDS_PER_ETH1_BLOCK=${SECONDS_PER_ETH1_BLOCK:-3}

View File

@ -0,0 +1,118 @@
#!/bin/bash
# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
# so we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# trace evm
TRACE="--trace"
# TRACE=""
# validate dependencies are installed
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
# remove existing daemon and client
rm -rf ~/.laconic*
make install
laconicd config keyring-backend $KEYRING
laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Set gas limit in genesis
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# disable produce empty block
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi
if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
fi
# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
# Sign genesis transaction
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
# Collect genesis tx
laconicd collect-gentxs
# Run this to ensure everything worked and that the genesis file is setup correctly
laconicd validate-genesis
if [[ $1 == "pending" ]]; then
echo "pending mode is on, please wait for the first block committed."
fi
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground

View File

@ -1,9 +1,7 @@
services: services:
registry: cns:
rpcEndpoint: 'http://laconicd:26657' restEndpoint: 'http://laconicd:1317'
gqlEndpoint: 'http://laconicd:9473/api' gqlEndpoint: 'http://laconicd:9473/api'
userKey: REPLACE_WITH_MYKEY userKey: REPLACE_WITH_MYKEY
bondId: bondId:
chainId: laconic_9000-1 chainId: laconic_9000-1
gas: 350000
fees: 2000000alnt

View File

@ -0,0 +1,35 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Check existing config if it exists
if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
echo "Found existing L2 config, cross-checking with L1 deployment config"
SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')
GEN_L2_CONF=$(cat /app/rollup.json)
GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')
if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
echo "Config cross-checked, exiting"
exit 0
fi
echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting"
exit 1
fi
op-node genesis l2 \
--deploy-config /contracts-bedrock/deploy-config/getting-started.json \
--deployment-dir /contracts-bedrock/deployments/getting-started/ \
--outfile.l2 /app/genesis.json \
--outfile.rollup /app/rollup.json \
--l1-rpc $L1_RPC
openssl rand -hex 32 > /app/jwt.txt

View File

@ -0,0 +1,14 @@
# Change if pointing to an external L1 endpoint
# L1 endpoint
L1_CHAIN_ID=1212
L1_RPC="http://fixturenet-eth-geth-1:8545"
L1_HOST="fixturenet-eth-geth-1"
L1_PORT=8545
# Credentials for accounts on L1 to send balance to Optimism Proxy contract from
# (enables them to do transactions on L2)
L1_ADDRESS=
L1_PRIV_KEY=
L1_ADDRESS_2=
L1_PRIV_KEY_2=

View File

@ -0,0 +1,120 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
echo "Using L1 RPC endpoint ${L1_RPC}"
IMPORT_1="import './verify-contract-deployment'"
IMPORT_2="import './rekey-json'"
IMPORT_3="import './send-balance'"
# Append mounted tasks to tasks/index.ts file if not present
if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
echo "$IMPORT_1" >> tasks/index.ts
echo "$IMPORT_2" >> tasks/index.ts
echo "$IMPORT_3" >> tasks/index.ts
fi
# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $L1_CHAIN_ID,/}" hardhat.config.ts
# Exit if a deployment already exists (on restarts)
# Note: fixturenet-eth-geth currently starts fresh on a restart
if [ -d "deployments/getting-started" ]; then
echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"
# Read JSON file into variable
SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)
# Parse JSON into variables
SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')
if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
echo "Deployment verification successful, exiting"
exit 0
else
echo "Deployment verification failed, please clear L1 deployment volume before starting"
exit 1
fi
fi
# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json
# Read JSON file into variable
KEYS_JSON=$(cat /l2-accounts/keys.json)
# Parse JSON into variables
ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address')
ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey')
PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address')
BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')
# Read the private key of L1 accounts
if [ -f /geth-accounts/accounts.csv ]; then
echo "Using L1 account credentials from the mounted volume"
L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Using L1 account credentials from env"
fi
# Select a finalized L1 block as the starting point for roll ups
until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$L1_RPC"); do
echo "Waiting for a finalized L1 block to exist, retrying after 10s"
sleep 10
done
L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')
echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"
# Send balances to the above L2 addresses
yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${L1_PRIV_KEY}" --network getting-started
echo "Balances sent to L2 accounts"
# Update the deployment config
sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
jq --arg chainid "$L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"
echo "Updated the deployment config"
# Create a .env file
echo "L1_RPC=$L1_RPC" > .env
echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env
echo "Deploying the L1 smart contracts, this will take a while..."
# Deploy the L1 smart contracts
yarn hardhat deploy --network getting-started
echo "Deployed the L1 smart contracts"
# Read Proxy contract's JSON and get the address
PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json)
PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address')
# Send balance to the above Proxy contract in L1 for reflecting balance in L2
# First account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${L1_PRIV_KEY}" --network getting-started
# Second account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${L1_PRIV_KEY_2}" --network getting-started
echo "Balance sent to Proxy L2 contract"
echo "Use following accounts for transactions in L2:"
echo "${L1_ADDRESS}"
echo "${L1_ADDRESS_2}"
echo "Done"

View File

@ -0,0 +1,36 @@
const fs = require('fs')
// Get the command-line argument
const configFile = process.argv[2]
const adminAddress = process.argv[3]
const proposerAddress = process.argv[4]
const batcherAddress = process.argv[5]
const sequencerAddress = process.argv[6]
const blockHash = process.argv[7]
// Read the JSON file
const configData = fs.readFileSync(configFile)
const configObj = JSON.parse(configData)
// Update the finalSystemOwner property with the ADMIN_ADDRESS value
configObj.finalSystemOwner =
configObj.portalGuardian =
configObj.controller =
configObj.l2OutputOracleChallenger =
configObj.proxyAdminOwner =
configObj.baseFeeVaultRecipient =
configObj.l1FeeVaultRecipient =
configObj.sequencerFeeVaultRecipient =
configObj.governanceTokenOwner =
adminAddress
configObj.l2OutputOracleProposer = proposerAddress
configObj.batchSenderAddress = batcherAddress
configObj.p2pSequencerAddress = sequencerAddress
configObj.l1StartingBlockTag = blockHash
// Write the updated JSON object back to the file
fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2))

View File

@ -4,16 +4,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x set -x
fi fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" # Get BATCHER_KEY from keys.json
BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')
# Start op-batcher
L2_RPC="http://op-geth:8545"
ROLLUP_RPC="http://op-node:8547"
BATCHER_KEY=$(cat /l2-accounts/accounts.json | jq -r .BatcherKey)
op-batcher \ op-batcher \
--l2-eth-rpc=$L2_RPC \ --l2-eth-rpc=http://op-geth:8545 \
--rollup-rpc=$ROLLUP_RPC \ --rollup-rpc=http://op-node:8547 \
--poll-interval=1s \ --poll-interval=1s \
--sub-safety-margin=6 \ --sub-safety-margin=6 \
--num-confirmations=1 \ --num-confirmations=1 \
@ -23,5 +19,6 @@ op-batcher \
--rpc.port=8548 \ --rpc.port=8548 \
--rpc.enable-admin \ --rpc.enable-admin \
--max-channel-duration=1 \ --max-channel-duration=1 \
--l1-eth-rpc=$CERC_L1_RPC \ --target-l1-tx-size-bytes=2048 \
--private-key="${BATCHER_KEY#0x}" --l1-eth-rpc=$L1_RPC \
--private-key=$BATCHER_KEY

View File

@ -0,0 +1,77 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# TODO: Add in container build or use other tool
echo "Installing jq"
apk update && apk add jq
# Get SEQUENCER key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
# Initialize op-geth if datadir/geth not found
if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then
echo "Found existing datadir, checking block signer key"
BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key)
if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then
echo "Sequencer and block signer keys match, skipping initialization"
else
echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting"
exit 1
fi
else
echo "Initializing op-geth"
mkdir -p datadir
echo "pwd" > datadir/password
echo $SEQUENCER_KEY > datadir/block-signer-key
geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key
while [ ! -f "/op-node/jwt.txt" ]
do
echo "Config files not created. Checking after 5 seconds."
sleep 5
done
echo "Config files created by op-node, proceeding with the initialization..."
geth init --datadir=datadir /op-node/genesis.json
echo "Node Initialized"
fi
SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"')
echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}"
# Run op-geth
geth \
--datadir ./datadir \
--http \
--http.corsdomain="*" \
--http.vhosts="*" \
--http.addr=0.0.0.0 \
--http.api=web3,debug,eth,txpool,net,engine \
--ws \
--ws.addr=0.0.0.0 \
--ws.port=8546 \
--ws.origins="*" \
--ws.api=debug,eth,txpool,net,engine \
--syncmode=full \
--gcmode=full \
--nodiscover \
--maxpeers=0 \
--networkid=42069 \
--authrpc.vhosts="*" \
--authrpc.addr=0.0.0.0 \
--authrpc.port=8551 \
--authrpc.jwtsecret=/op-node/jwt.txt \
--rollup.disabletxpoolgossip=true \
--password=./datadir/password \
--allow-insecure-unlock \
--mine \
--miner.etherbase=$SEQUENCER_ADDRESS \
--unlock=$SEQUENCER_ADDRESS
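Once op-geth is up, asking for the chain ID over JSON-RPC is a simple health check; 42069 is 0xa455 in hex, and the localhost endpoint below assumes the HTTP port is published to the host:
# Expect {"jsonrpc":"2.0","id":1,"result":"0xa455"} (0xa455 == 42069)
curl -s -X POST -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' \
  http://localhost:8545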

View File

@ -0,0 +1,23 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Get SEQUENCER KEY from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
op-node \
--l2=http://op-geth:8551 \
--l2.jwt-secret=/op-node-data/jwt.txt \
--sequencer.enabled \
--sequencer.l1-confs=3 \
--verifier.l1-confs=3 \
--rollup.config=/op-node-data/rollup.json \
--rpc.addr=0.0.0.0 \
--rpc.port=8547 \
--p2p.disable \
--rpc.enable-admin \
--p2p.sequencer.key=$SEQUENCER_KEY \
--l1=$L1_RPC \
--l1.rpckind=any
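To confirm op-node is deriving blocks, its RPC can be asked for sync status; optimism_syncStatus is the method op-node exposes for this (worth double-checking against the op-node version in use), and the endpoint assumes port 8547 is reachable from the host:
# Reports the L1/L2 heads as seen by op-node
curl -s -X POST -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"optimism_syncStatus","params":[],"id":1}' \
  http://localhost:8547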

View File

@ -0,0 +1,69 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
echo "Using L2 RPC endpoint ${L2_GETH_RPC}"
if [ -f /geth-accounts/accounts.csv ]; then
echo "Using L1 private key from the mounted volume"
# Read the private key of L1 account to deploy contract
PRIVATE_KEY_DEPLOYER=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
else
echo "Using PRIVATE_KEY_DEPLOYER from env"
fi
# Set the private key
jq --arg privateKey "$PRIVATE_KEY_DEPLOYER" '.privateKey = $privateKey' secrets-template.json > secrets.json
# Set the RPC URL
jq --arg rpcUrl "$L2_GETH_RPC" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json
# Set the MobyMask app base URI
jq --arg baseURI "$MOBYMASK_APP_BASE_URI" '.baseURI = $baseURI' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json
export RPC_URL="${L2_GETH_RPC}"
# Check if DEPLOYED_CONTRACT environment variable set to skip contract deployment
if [[ -n "$DEPLOYED_CONTRACT" ]]; then
echo "DEPLOYED_CONTRACT is set to '$DEPLOYED_CONTRACT'"
echo "Exiting without deploying contract"
exit 0
fi
# Check and exit if a deployment already exists (on restarts)
if [ -f ./config.json ]; then
echo "config.json already exists, checking the contract deployment"
# Read JSON file
DEPLOYMENT_DETAILS=$(cat config.json)
CONTRACT_ADDRESS=$(echo "$DEPLOYMENT_DETAILS" | jq -r '.address')
cd ../hardhat
if yarn verifyDeployment --network optimism --contract "${CONTRACT_ADDRESS}"; then
echo "Deployment verfication successful"
cd ../server
else
echo "Deployment verfication failed, please clear MobyMask deployment volume before starting"
exit 1
fi
fi
# Wait until balance for deployer account is updated
cd ../hardhat
while true; do
ACCOUNT_BALANCE=$(yarn balance --network optimism "$PRIVATE_KEY_DEPLOYER" | grep ETH)
if [ "$ACCOUNT_BALANCE" != "0.0 ETH" ]; then
echo "Account balance updated: $ACCOUNT_BALANCE"
break # exit the loop
fi
echo "Account balance not updated: $ACCOUNT_BALANCE"
echo "Checking after 2 seconds"
sleep 2
done
cd ../server
npm run deployAndGenerateInvite
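deployAndGenerateInvite writes the deployment details that the restart check above relies on; assuming it produces the same config.json the script looks for, the deployed contract address can be read back with:
# Read back the deployed MobyMask contract address (path assumed from the restart check above)
jq -r '.address' ./config.json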

View File

@ -0,0 +1,9 @@
{
"name": "MobyMask",
"relayNodes": [
"/ip4/127.0.0.1/tcp/9090/ws/p2p/12D3KooWSPCsVkHVyLQoCqhu2YRPvvM7o6r6NRYyLM5zeA6Uig5t"
],
"peer": {
"enableDebugInfo": true
}
}

View File

@ -0,0 +1,25 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Use config from mounted volume if available (when running web-app along with watcher stack)
if [ -f /server/config.json ]; then
echo "Merging config for deployed contract from mounted volume"
# Merging config files to get deployed contract address
jq -s '.[0] * .[1]' /app/src/mobymask-app-config.json /server/config.json > /app/src/config.json
else
echo "Setting deployed contract details from env"
# Set config values from environment variables
jq --arg address "$DEPLOYED_CONTRACT" \
--argjson chainId $CHAIN_ID \
--argjson relayNodes "$RELAY_NODES" \
'.address = $address | .chainId = $chainId | .relayNodes = $relayNodes' \
/app/src/mobymask-app-config.json > /app/src/config.json
fi
REACT_APP_WATCHER_URI="$APP_WATCHER_URL/graphql" npm run build
serve -s build
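The jq -s '.[0] * .[1]' call above slurps both files into an array and deep-merges them, with keys from the second file taking precedence; a minimal illustration with throwaway files:
# Second object's keys override the first's, other keys are preserved
echo '{"address": "", "chainId": 42069}' > /tmp/base.json
echo '{"address": "0x1234"}' > /tmp/override.json
jq -s '.[0] * .[1]' /tmp/base.json /tmp/override.json
# => { "address": "0x1234", "chainId": 42069 }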

View File

@ -0,0 +1,20 @@
# Change if pointing web app to external watcher endpoint
WATCHER_HOST="mobymask-watcher-server"
WATCHER_PORT=3001
APP_WATCHER_URL="http://localhost:3001"
# Base URI for mobymask-app (used for generating invite)
MOBYMASK_APP_BASE_URI="http://127.0.0.1:3002/#"
# Set to false for disabling watcher peer to send txs to L2
ENABLE_PEER_L2_TXS=true
# Set deployed MobyMask contract address to avoid deploying contract in stack
# mobymask-app will use this contract address in config if run separately
DEPLOYED_CONTRACT=
# Chain ID is used by mobymask web-app for txs
CHAIN_ID=42069
# Set relay nodes to be used by web-apps
RELAY_NODES=["/ip4/127.0.0.1/tcp/9090/ws/p2p/12D3KooWSPCsVkHVyLQoCqhu2YRPvvM7o6r6NRYyLM5zeA6Uig5t"]

View File

@ -0,0 +1,13 @@
# Change if pointing to an external optimism geth endpoint
# L2 endpoints
L2_GETH_RPC="http://op-geth:8545"
L2_GETH_HOST="op-geth"
L2_GETH_PORT=8545
L2_NODE_HOST="op-node"
L2_NODE_PORT=8547
# Credentials for accounts to perform txs on L2
PRIVATE_KEY_DEPLOYER=
PRIVATE_KEY_PEER=

View File

@ -0,0 +1,5 @@
{
"id": "12D3KooWK6myjc8r1KBnfP9igp31qJkPaVfsKDjKrjoSefV5SDEo",
"privKey": "CAESQJMHbMaH+UEOtjGOzXYtoPO/cdHakCtN1hcnknIWzx/6ie1lxb+8kfzBjwt7apfj8fHlTCYSIVK8Q2AWu9a2h3g=",
"pubKey": "CAESIIntZcW/vJH8wY8Le2qX4/Hx5UwmEiFSvENgFrvWtod4"
}

View File

@ -0,0 +1 @@
RELAY="/ip4/127.0.0.1/tcp/9090/ws/p2p/12D3KooWSPCsVkHVyLQoCqhu2YRPvvM7o6r6NRYyLM5zeA6Uig5t"

View File

@ -0,0 +1,5 @@
{
"id": "12D3KooWSPCsVkHVyLQoCqhu2YRPvvM7o6r6NRYyLM5zeA6Uig5t",
"privKey": "CAESQGsqG5o4VlWJZM9XlA3MjabyZOXWQ2MLZU5AhBQsjXGt9iSlGtTuNOrHX5xSRgLBxLuMoqWsjGxE/dDB9c46RI8=",
"pubKey": "CAESIPYkpRrU7jTqx1+cUkYCwcS7jKKlrIxsRP3QwfXOOkSP"
}
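The RELAY multiaddr used earlier in the stack is just the relay's listen address with this peer id appended; assuming the JSON above is saved as relay-peer-id.json (a hypothetical file name), it can be composed like so:
# Compose the relay multiaddr from the peer id file (file name assumed)
PEER_ID=$(jq -r '.id' relay-peer-id.json)
RELAY="/ip4/127.0.0.1/tcp/9090/ws/p2p/${PEER_ID}"
echo "$RELAY"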

Some files were not shown because too many files have changed in this diff.