Compare commits

7 commits:

- 88db2ff766
- 5f556e127a
- 926997b21c
- 0d91a62f84
- 7d27eaef0f
- 8ad2b692ec
- 54cc993fa4
.gitea/workflows/fixturenet-eth-plugeth-test.yml (new file, 47 lines)

@@ -0,0 +1,47 @@
name: Fixturenet-Eth-Plugeth-Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'

# Needed until we can incorporate docker startup into the executor container
env:
  DOCKER_HOST: unix:///var/run/dind.sock


jobs:
  test:
    name: "Run an Ethereum plugeth fixturenet test"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaroud this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: Start dockerd # Also needed until we can incorporate into the executor
        run: |
          dockerd -H $DOCKER_HOST --userland-proxy=false &
          sleep 5
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth-plugeth/run-test.sh
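Both of the new fixturenet-eth workflows use the trigger-file idiom: the `paths` filter first excludes every path with `'!**'` and then re-includes a single file under `.gitea/workflows/triggers/`, so a push is intended to run the job only when that trigger file is touched (the trigger files added later in this compare say exactly that). A minimal sketch of the idiom, using a hypothetical job and trigger path rather than anything from this repo:

```yaml
# Sketch only: hypothetical job illustrating the trigger-file pattern used above.
name: Example-Triggered-Job
on:
  push:
    branches: '*'
    paths:
      - '!**'                                    # exclude everything by default
      - '.gitea/workflows/triggers/example-job'  # re-include only the trigger file
jobs:
  example:
    runs-on: ubuntu-latest
    steps:
      - run: echo "Ran because the trigger file changed"
```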
.gitea/workflows/fixturenet-eth-test.yml (new file, 48 lines)

@@ -0,0 +1,48 @@
name: Fixturenet-Eth-Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-test'

# Needed until we can incorporate docker startup into the executor container
env:
  DOCKER_HOST: unix:///var/run/dind.sock


jobs:
  test:
    name: "Run an Ethereum fixturenet test"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaroud this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: Start dockerd # Also needed until we can incorporate into the executor
        run: |
          dockerd -H $DOCKER_HOST --userland-proxy=false &
          sleep 5
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth/run-test.sh
.gitea/workflows/fixturenet-laconicd-test.yml (modified)

@@ -6,12 +6,15 @@ on:
     paths:
       - '!**'
       - '.gitea/workflows/triggers/fixturenet-laconicd-test'
-  schedule:
-    - cron: '1 13 * * *'
+
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
+
 
 jobs:
   test:
-    name: "Run Laconicd fixturenet and Laconic CLI tests"
+    name: "Run an Laconicd fixturenet test"
     runs-on: ubuntu-latest
     steps:
       - name: 'Update'

@@ -39,28 +42,14 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run fixturenet-laconicd tests"
         run: ./tests/fixturenet-laconicd/run-test.sh
-      - name: "Run laconic CLI tests"
-        run: ./tests/fixturenet-laconicd/run-cli-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
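The `DOCKER_HOST` variable and the `Start dockerd` step added here recur across the modified workflows in this compare: they point the Docker client at a docker-in-docker socket and launch the daemon in the background before the tests run, with `sleep 5` as a crude wait for it to come up. A generic sketch of the pattern follows; the job name and the readiness loop are illustrative, not taken from the repo:

```yaml
# Generic sketch of the dockerd-in-job pattern used by these workflows.
name: Example-DinD-Job
env:
  DOCKER_HOST: unix:///var/run/dind.sock   # socket location assumed by the executor image
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Start dockerd
        run: |
          dockerd -H $DOCKER_HOST --userland-proxy=false &
          # Illustrative alternative to a fixed sleep: poll until the daemon answers
          for i in $(seq 1 30); do docker info >/dev/null 2>&1 && break; sleep 1; done
      - name: Use the daemon
        run: docker info
```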
Deleted workflow: Lint Checks (37 lines)

@@ -1,37 +0,0 @@
name: Lint Checks

on:
  pull_request:
    branches: '*'
  push:
    branches: '*'

jobs:
  test:
    name: "Run linter"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      - name: "Install Python"
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name : "Run flake8"
        uses: py-actions/flake8@v2
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
Publish workflow (modified):

@@ -35,7 +35,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Build local shiv package"
         id: build
         run: |

@@ -54,19 +54,3 @@
           # Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
           draft: ${{ endsWith('publish-test', github.ref ) }}
           files: ./laconic-so
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
.gitea/workflows/test-container-registry.yml (deleted, 69 lines)

@@ -1,69 +0,0 @@
name: Container Registry Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-container-registry'
      - '.gitea/workflows/test-container-registry.yml'
      - 'tests/container-registry/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '6 19 * * *'

jobs:
  test:
    name: "Run contaier registry hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaroud this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Install ed" # Only needed until we remove the need to edit the spec file
        run: apt update && apt install -y ed
      - name: "Run container registry deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/container-registry/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
.gitea/workflows/test-database.yml (deleted, 67 lines)

@@ -1,67 +0,0 @@
name: Database Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-database'
      - '.gitea/workflows/test-database.yml'
      - 'tests/database/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '5 18 * * *'

jobs:
  test:
    name: "Run database hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaroud this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Run database deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/database/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
Deploy test workflow (modified):

@@ -10,6 +10,9 @@ on:
     paths-ignore:
       - '.gitea/workflows/triggers/*'
 
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
 
 jobs:
   test:

@@ -33,26 +36,14 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run deploy tests"
         run: ./tests/deploy/run-deploy-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
.gitea/workflows/test-external-stack.yml (deleted, 58 lines)

@@ -1,58 +0,0 @@
name: External Stack Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-external-stack'
      - '.gitea/workflows/test-external-stack.yml'
      - 'tests/external-stack/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '8 19 * * *'

jobs:
  test:
    name: "Run external stack test suite"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaroud this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Run external stack tests"
        run: ./tests/external-stack/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
.gitea/workflows/test-k8s-deploy.yml (modified)

@@ -4,19 +4,20 @@ on:
   pull_request:
     branches: '*'
   push:
-    branches: '*'
-    paths:
-      - '!**'
-      - '.gitea/workflows/triggers/test-k8s-deploy'
-      - '.gitea/workflows/test-k8s-deploy.yml'
-      - 'tests/k8s-deploy/run-deploy-test.sh'
-  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '3 15 * * *'
+    branches:
+      - main
+      - ci-test
+    paths-ignore:
+      - '.gitea/workflows/triggers/*'
+
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
 
 jobs:
   test:
-    name: "Run deploy test suite on kind/k8s"
-    runs-on: ubuntu-22.04
+    name: "Run deploy test suite"
+    runs-on: ubuntu-latest
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3

@@ -35,35 +36,20 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Check cgroups version"
-        run: mount | grep cgroup
-      - name: "Install kind"
-        run: ./tests/scripts/install-kind.sh
-      - name: "Install Kubectl"
-        run: ./tests/scripts/install-kubectl.sh
-      - name: "Run k8s deployment test"
+      - name: Start dockerd # Also needed until we can incorporate into the executor
         run: |
-          source /opt/bash-utils/cgroup-helper.sh
-          join_cgroup
-          ./tests/k8s-deploy/run-deploy-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
+      - name: "Install Go"
+        uses: actions/setup-go@v4
         with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
+          go-version: '1.21'
+      - name: "Install Kind"
+        run: go install sigs.k8s.io/kind@v0.20.0
+      - name: "Debug Kind"
+        run: kind create cluster --retain && docker logs kind-control-plane
.gitea/workflows/test-k8s-deployment-control.yml (deleted, 69 lines)

@@ -1,69 +0,0 @@
name: K8s Deployment Control Test

on:
  pull_request:
    branches: '*'
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-k8s-deployment-control'
      - '.gitea/workflows/test-k8s-deployment-control.yml'
      - 'tests/k8s-deployment-control/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '3 30 * * *'

jobs:
  test:
    name: "Run deployment control suite on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaroud this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Run k8s deployment control test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/k8s-deployment-control/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
Webapp test workflow (modified):

@@ -10,6 +10,10 @@ on:
     paths-ignore:
       - '.gitea/workflows/triggers/*'
 
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
+
 jobs:
   test:
     name: "Run webapp test suite"

@@ -32,28 +36,14 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Install wget" # 20240109 - Only needed until the executors are updated.
-        run: apt update && apt install -y wget
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run webapp tests"
         run: ./tests/webapp-test/run-webapp-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
Smoke test workflow (modified):

@@ -10,6 +10,9 @@ on:
     paths-ignore:
       - '.gitea/workflows/triggers/*'
 
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
 
 jobs:
   test:

@@ -33,26 +36,16 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run smoke tests"
         run: ./tests/smoke-test/run-smoke-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
.gitea/workflows/triggers/fixturenet-eth-plugeth-test (new file, 2 lines)

@@ -0,0 +1,2 @@
Change this file to trigger running the fixturenet-eth-plugeth-test CI job
trigger
.gitea/workflows/triggers/fixturenet-eth-test (new file, 2 lines)

@@ -0,0 +1,2 @@
Change this file to trigger running the fixturenet-eth-test CI job
Trigger file for fixturenet-laconicd-test (modified):

@@ -1,10 +1,2 @@
 Change this file to trigger running the fixturenet-laconicd-test CI job
-Trigger
-Trigger
-Trigger
-Trigger
-Trigger
-Trigger
-Trigger
-Trigger
-Trigger
Deleted trigger file (test-container-registry):

@@ -1 +0,0 @@
Change this file to trigger running the test-container-registry CI job
Deleted trigger file (test-database):

@@ -1,2 +0,0 @@
Change this file to trigger running the test-database CI job
Trigger test run
Deleted trigger file (external-stack):

@@ -1,2 +0,0 @@
Change this file to trigger running the external-stack CI job
trigger
Deleted trigger file (test-k8s-deploy):

@@ -1,2 +0,0 @@
Change this file to trigger running the test-k8s-deploy CI job
Trigger test on PR branch
Installation instructions (modified):

@@ -29,10 +29,10 @@ chmod +x ~/.docker/cli-plugins/docker-compose
 Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
 a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
 
-Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
+Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
 
 ```bash
-curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
 ```
 
 Give it execute permissions:

@@ -52,7 +52,7 @@ Version: 1.1.0-7a607c2-202304260513
 Save the distribution url to `~/.laconic-so/config.yml`:
 ```bash
 mkdir ~/.laconic-so
-echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml
+echo "distribution-url: https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so" > ~/.laconic-so/config.yml
 ```
 
 ### Update
Developer setup instructions (modified):

@@ -26,7 +26,7 @@ In addition to the pre-requisites listed in the [README](/README.md), the follow
 
 1. Clone this repository:
    ```
-   $ git clone https://git.vdb.to/cerc-io/stack-orchestrator.git
+   $ git clone https://github.com/cerc-io/stack-orchestrator.git
    ```
 
 2. Enter the project directory:
Adding-a-new-stack doc (modified):

@@ -1,10 +1,10 @@
 # Adding a new stack
 
-See [this PR](https://git.vdb.to/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://git.vdb.to/cerc-io/stack-orchestrator/pull/435) is another good example.
+See [this PR](https://github.com/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://github.com/cerc-io/stack-orchestrator/pull/435) is another good example.
 
 For external developers, we recommend forking this repo and adding your stack directly to your fork. This initially requires running in "developer mode" as described [here](/docs/CONTRIBUTING.md). Check out the [Namada stack](https://github.com/vknowable/stack-orchestrator/blob/main/app/data/stacks/public-namada/digitalocean_quickstart.md) from Knowable to see how that is done.
 
-Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack.
+Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://github.com/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack.
 
 ## Example
 
build-npms doc section (modified):

@@ -51,7 +51,7 @@ $ laconic-so build-npms --include <package-name>
 ```
 e.g.
 ```
-$ laconic-so build-npms --include registry-sdk
+$ laconic-so build-npms --include laconic-sdk
 ```
 Build the packages for a stack:
 ```
Deleted doc: Fetching pre-built container images (9 lines)

@@ -1,9 +0,0 @@
# Fetching pre-built container images
When Stack Orchestrator deploys a stack containing a suite of one or more containers it expects images for those containers to be on the local machine with a tag of the form `<image-name>:local` Images for these containers can be built from source (and optionally base container images from public registries) with the `build-containers` subcommand.

However, the task of building a large number of containers from source may consume considerable time and machine resources. This is where the `fetch-containers` subcommand steps in. It is designed to work exactly like `build-containers` but instead the images, pre-built, are fetched from an image registry then re-tagged for deployment. It can be used in place of `build-containers` for any stack provided the necessary containers, built for the local machine architecture (e.g. arm64 or x86-64) have already been published in an image registry.
## Usage
To use `fetch-containers`, provide an image registry path, a username and token/password with read access to the registry, and optionally specify `--force-local-overwrite`. If this argument is not specified, if there is already a locally built or previously fetched image for a stack container on the machine, it will not be overwritten and a warning issued.
```
$ laconic-so --stack mobymask-v3-demo fetch-containers --image-registry git.vdb.to/cerc-io --registry-username <registry-user> --registry-token <registry-token> --force-local-overwrite
```
Gitea / laconicd fixturenet doc (modified):

@@ -56,7 +56,7 @@ laconic-so --stack fixturenet-laconicd build-npms
 Navigate to the Gitea console and switch to the `cerc-io` user then find the `Packages` tab to confirm that these two npm packages have been published:
 
 - `@cerc-io/laconic-registry-cli`
-- `@cerc-io/registry-sdk`
+- `@cerc-io/laconic-sdk`
 
 ### Build and deploy fixturenet containers
 

@@ -74,7 +74,7 @@ laconic-so --stack fixturenet-laconicd deploy logs
 ### Test with the registry CLI
 
 ```bash
-laconic-so --stack fixturenet-laconicd deploy exec cli "laconic registry status"
+laconic-so --stack fixturenet-laconicd deploy exec cli "laconic cns status"
 ```
 
 Try additional CLI commands, documented [here](https://github.com/cerc-io/laconic-registry-cli#operations).
Deleted doc: K8S Deployment Enhancements (27 lines)

@@ -1,27 +0,0 @@
# K8S Deployment Enhancements
## Controlling pod placement
The placement of pods created as part of a stack deployment can be controlled to either avoid certain nodes, or require certain nodes.
### Pod/Node Affinity
Node affinity rules applied to pods target node labels. The effect is that a pod can only be placed on a node having the specified label value. Note that other pods that do not have any node affinity rules can also be placed on those same nodes. Thus node affinity for a pod controls where that pod can be placed, but does not control where other pods are placed.

Node affinity for stack pods is specified in the deployment's `spec.yml` file as follows:
```
node-affinities:
  - label: nodetype
    value: typeb
```
This example denotes that the stack's pods should only be placed on nodes that have the label `nodetype` with value `typeb`.
### Node Taint Toleration
K8s nodes can be given one or more "taints". These are special fields (distinct from labels) with a name (key) and optional value.
When placing pods, the k8s scheduler will only assign a pod to a tainted node if the pod posesses a corresponding "toleration".
This is metadata associated with the pod that specifies that the pod "tolerates" a given taint.
Therefore taint toleration provides a mechanism by which only certain pods can be placed on specific nodes, and provides a complementary mechanism to node affinity.

Taint toleration for stack pods is specified in the deployment's `spec.yml` file as follows:
```
node-tolerations:
  - key: nodetype
    value: typeb
```
This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
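For orientation, the deleted doc's `node-affinities` and `node-tolerations` entries correspond to standard Kubernetes pod-spec fields. A rough pod-spec equivalent of the two examples above is sketched below; this mapping is an illustration under assumed defaults (image name, taint effect), not the exact manifest Stack Orchestrator generates:

```yaml
# Approximate Kubernetes equivalent of the spec.yml examples above (illustrative only).
apiVersion: v1
kind: Pod
metadata:
  name: stack-pod-example
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: nodetype        # node label targeted by the affinity rule
                operator: In
                values: ["typeb"]
  tolerations:
    - key: nodetype                  # taint key tolerated by the pod
      operator: Equal
      value: typeb
      effect: NoSchedule             # assumed effect; taints may also use NoExecute/PreferNoSchedule
  containers:
    - name: app
      image: nginx                   # placeholder image
```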
Laconicd fixturenet with console tutorial (modified):

@@ -1,8 +1,9 @@
 # Running a laconicd fixturenet with console
 
-The following tutorial explains the steps to run a laconicd fixturenet with CLI and web console that displays records in the registry. It is designed as an introduction to Stack Orchestrator and to showcase one component of the Laconic Stack. Prior to Stack Orchestrator, the following repositories had to be cloned and setup manually:
+The following tutorial explains the steps to run a laconicd fixturenet with CLI and web console that displays records in the registry. It is designed as an introduction to Stack Orchestrator and to showcase one component of the Laconic Stack. Prior to Stack Orchestrator, the following 4 repositories had to be cloned and setup manually:
 
 - https://git.vdb.to/cerc-io/laconicd
+- https://git.vdb.to/cerc-io/laconic-sdk
 - https://git.vdb.to/cerc-io/laconic-registry-cli
 - https://git.vdb.to/cerc-io/laconic-console
 

@@ -14,140 +15,132 @@ To avoid hiccups on Mac M1/M2 and any local machine nuances that may affect the
 16 GB Memory / 8 Intel vCPUs / 160 GB Disk.
 
 1. Login to the droplet as root (either by SSH key or password set in the DO console)
 ```
 ssh root@IP
 ```
 
-1. Get the install script, give it executable permissions, and run it:
+2. Get the install script, give it executable permissions, and run it:
 ```
 curl -o install.sh https://raw.githubusercontent.com/cerc-io/stack-orchestrator/main/scripts/quick-install-linux.sh
 ```
 ```
 chmod +x install.sh
 ```
 ```
 bash install.sh
 ```
 
-1. Confirm docker was installed and activate the changes in `~/.profile`:
+3. Confirm docker was installed and activate the changes in `~/.profile`:
 ```
 docker run hello-world
 ```
 ```
 source ~/.profile
 ```
 
-1. Verify installation:
+4. Verify installation:
 ```
 laconic-so version
 ```
 
 ## Setup the laconic fixturenet stack
 
 1. Get the repositories
 
 ```
-laconic-so --stack fixturenet-laconic-loaded setup-repositories --include git.vdb.to/cerc-io/laconicd
+laconic-so --stack fixturenet-laconic-loaded setup-repositories --include git.vdb.to/cerc-io/laconicd,git.vdb.to/cerc-io/laconic-sdk,git.vdb.to/cerc-io/laconic-registry-cli,git.vdb.to/cerc-io/laconic-console
 ```
 
-1. Build the containers:
+2. Set this environment variable to the Laconic self-hosted Gitea instance:
 
 ```
-laconic-so --stack fixturenet-laconic-loaded build-containers
+export CERC_NPM_REGISTRY_URL=https://git.vdb.to/api/packages/cerc-io/npm/
 ```
 
-It's possible to run into an `ESOCKETTIMEDOUT` error, e.g., `error An unexpected error occurred: "https://registry.yarnpkg.com/@material-ui/icons/-/icons-4.11.3.tgz: ESOCKETTIMEDOUT"`. This may happen even if you have a great internet connection. In that case, re-run the `build-containers` command.
+3. Build the containers:
 
-1. Set this environment variable to your droplet's IP address or fully qualified DNS host name if it has one:
+```
+laconic-so --stack fixturenet-laconic-loaded build-containers
+```
+
+It's possible to run into an `ESOCKETTIMEDOUT` error, e.g., `error An unexpected error occurred: "https://registry.yarnpkg.com/@material-ui/icons/-/icons-4.11.3.tgz: ESOCKETTIMEDOUT"`. This may happen even if you have a great internet connection. In that case, re-run the `build-containers` command.
 
-```
-export BACKEND_ENDPOINT=http://<your-IP-or-hostname>:9473
-```
-e.g.
-```
-export BACKEND_ENDPOINT=http://my-test-server.example.com:9473
-```
-
-1. Create a deployment directory for the stack:
-```
-laconic-so --stack fixturenet-laconic-loaded deploy init --output laconic-loaded.spec --map-ports-to-host any-same --config LACONIC_HOSTED_ENDPOINT=$BACKEND_ENDPOINT
+4. Set this environment variable to your droplet's IP address:
+
+```
+export LACONIC_HOSTED_ENDPOINT=http://<your-IP>
+```
 
-# Update port mapping in the laconic-loaded.spec file to resolve port conflicts on host if any
-```
-```
-laconic-so --stack fixturenet-laconic-loaded deploy create --deployment-dir laconic-loaded-deployment --spec-file laconic-loaded.spec
-```
-2. Start the stack:
+5. Deploy the stack:
 
 ```
-laconic-so deployment --dir laconic-loaded-deployment start
+laconic-so --stack fixturenet-laconic-loaded deploy up
 ```
 
-3. Check the logs:
+6. Check the logs:
 
 ```
-laconic-so deployment --dir laconic-loaded-deployment logs
+laconic-so --stack fixturenet-laconic-loaded deploy logs
 ```
 
 You'll see output from `laconicd` and the block height should be >1 to confirm it is running:
 
 ```
-laconicd-1 | 6:12AM INF indexed block events height=16 module=txindex
-laconicd-1 | 6:12AM INF Timed out dur=2993.893332 height=17 module=consensus round=0 step=RoundStepNewHeight
-laconicd-1 | 6:12AM INF received proposal module=consensus proposal="Proposal{17/0 (E15D03C180CE607AE8340A1325A0C134DFB4E1ADD992E173C701EBD362523267:1:DF138772FEF0, -1) 6A6F3B0A42B3 @ 2024-07-25T06:12:31.952967053Z}" proposer=86970D950BC9C16F3991A52D9C6DC55BA478A7C6
-laconicd-1 | 6:12AM INF received complete proposal block hash=E15D03C180CE607AE8340A1325A0C134DFB4E1ADD992E173C701EBD362523267 height=17 module=consensus
-laconicd-1 | 6:12AM INF finalizing commit of block hash=E15D03C180CE607AE8340A1325A0C134DFB4E1ADD992E173C701EBD362523267 height=17 module=consensus num_txs=0 root=AF4941107DC718ED1425E77A3DC7F1154FB780B7A7DE20288DC43442203527E3
-laconicd-1 | 6:12AM INF finalized block block_app_hash=26A665360BB1EE64E54F97F2A5AB7F621B33A86D9896574000C05DE63F43F788 height=17 module=state num_txs_res=0 num_val_updates=0
-laconicd-1 | 6:12AM INF executed block app_hash=26A665360BB1EE64E54F97F2A5AB7F621B33A86D9896574000C05DE63F43F788 height=17 module=state
-laconicd-1 | 6:12AM INF committed state block_app_hash=AF4941107DC718ED1425E77A3DC7F1154FB780B7A7DE20288DC43442203527E3 height=17 module=state
-laconicd-1 | 6:12AM INF indexed block events height=17 module=txindex
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:29PM INF indexed block exents height=12 module=txindex server=node
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF Timed out dur=4976.960115 height=13 module=consensus round=0 server=node step=1
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received proposal module=consensus proposal={"Type":32,"block_id":{"hash":"D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4","parts":{"hash":"22411A20B7F14CDA33244420FBDDAF24450C0628C7A06034FF22DAC3699DDCC8","total":1}},"height":13,"pol_round":-1,"round":0,"signature":"DEuqnaQmvyYbUwckttJmgKdpRu6eVm9i+9rQ1pIrV2PidkMNdWRZBLdmNghkIrUzGbW8Xd7UVJxtLRmwRASgBg==","timestamp":"2023-04-18T21:30:01.49450663Z"} server=node
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received complete proposal block hash=D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4 height=13 module=consensus server=node
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF finalizing commit of block hash={} height=13 module=consensus num_txs=0 root=1A8CA1AF139CCC80EC007C6321D8A63A46A793386EE2EDF9A5CA0AB2C90728B7 server=node
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF minted coins from module account amount=2059730459416582643aphoton from=mint module=x/bank
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF executed block height=13 module=state num_invalid_txs=0 num_valid_txs=0 server=node
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF commit synced commit=436F6D6D697449447B5B363520313037203630203232372039352038352032303820313334203231392032303520313433203130372031343920313431203139203139322038362031323720362031383520323533203137362031333820313735203135392031383620323334203135382031323120313431203230342037335D3A447D
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF committed state app_hash=416B3CE35F55D086DBCD8F6B958D13C0567F06B9FDB08AAF9FBAEA9E798DCC49 height=13 module=state num_txs=0 server=node
+laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF indexed block exents height=13 module=txindex server=node
 ```
 
-4. Confirm operation of the registry CLI:
+7. Confirm operation of the registry CLI:
 
 ```
-laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
+laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns status"
 ```
 
 ```
 {
   "version": "0.3.0",
   "node": {
-    "id": "6e072894aa1f5d9535a1127a0d7a7f8e65100a2c",
+    "id": "4216af2ac9f68bda33a38803fc1b5c9559312c1d",
     "network": "laconic_9000-1",
     "moniker": "localtestnet"
   },
   "sync": {
-    "latestBlockHash": "260102C283D0411CFBA0270F7DC182650FFCA737A2F6F652A985F6065696F590",
-    "latestBlockHeight": "49",
-    "latestBlockTime": "2024-07-25 06:14:05.626744215 +0000 UTC",
-    "catchingUp": false
+    "latest_block_hash": "1BDF4CB9AE2390DA65BCF997C83133C18014FCDDCAE03708488F0B56FCEEA429",
+    "latest_block_height": "5",
+    "latest_block_time": "2023-08-09 16:00:30.386903172 +0000 UTC",
+    "catching_up": false
   },
   "validator": {
-    "address": "86970D950BC9C16F3991A52D9C6DC55BA478A7C6",
-    "votingPower": "1000000000000000"
+    "address": "651FBC700B747C76E90ACFC18CC9508C3D0905B9",
+    "voting_power": "1000000000000000"
   },
   "validators": [
     {
-      "address": "86970D950BC9C16F3991A52D9C6DC55BA478A7C6",
-      "votingPower": "1000000000000000",
-      "proposerPriority": "0"
+      "address": "651FBC700B747C76E90ACFC18CC9508C3D0905B9",
+      "voting_power": "1000000000000000",
+      "proposer_priority": "0"
     }
   ],
-  "numPeers": "0",
+  "num_peers": "0",
   "peers": [],
-  "diskUsage": "688K"
+  "disk_usage": "292.0K"
 }
 ```
 
 ## Configure Digital Ocean firewall
 
-(Note this step may not be necessary depending on the droplet image used)
-
 Let's open some ports.
 
 1. In the Digital Ocean web console, navigate to your droplet's main page. Select the "Networking" tab and scroll down to "Firewall".

@@ -186,13 +179,13 @@ wns
 1. The following command will create a bond and publish a record:
 
 ```
-laconic-so deployment --dir laconic-loaded-deployment exec cli ./scripts/create-demo-records.sh
+laconic-so --stack fixturenet-laconic-loaded deploy exec cli ./scripts/create-demo-records.sh
 ```
 
 You'll get an output like:
 
 ```
-Balance is: 9.9999e+25
+Balance is: 99998999999999998999600000
 Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
 Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
 ```

@@ -223,5 +216,5 @@ record:
 - e.g,:
 
 ```
-laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry record list"
+laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns record list"
 ```
Specification doc (modified):

@@ -1,6 +1,6 @@
 # Specification
 
-Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315).
+Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://github.com/cerc-io/stack-orchestrator/issues/315).
 
 ## Implementation
 
Deleted doc: Building and Running Webapps (64 lines)

@@ -1,64 +0,0 @@
### Building and Running Webapps

It is possible to build and run Next.js webapps using the `build-webapp` and `run-webapp` subcommands.

To make it easier to build once and deploy into different environments and with different configuration,
compilation and static page generation are separated in the `build-webapp` and `run-webapp` steps.

This offers much more flexibilty than standard Next.js build methods, since any environment variables accessed
via `process.env`, whether for pages or for API, will have values drawn from their runtime deployment environment,
not their build environment.

## Building

Building usually requires no additional configuration. By default, the Next.js version specified in `package.json`
is used, and either `yarn` or `npm` will be used automatically depending on which lock files are present. These
can be overidden with the build arguments `CERC_NEXT_VERSION` and `CERC_BUILD_TOOL` respectively. For example: `--extra-build-args "--build-arg CERC_NEXT_VERSION=13.4.12"`

**Example**:
```
$ cd ~/cerc
$ git clone git@git.vdb.to:cerc-io/test-progressive-web-app.git
$ laconic-so build-webapp --source-repo ~/cerc/test-progressive-web-app
...

Built host container for ~/cerc/test-progressive-web-app with tag:

cerc/test-progressive-web-app:local

To test locally run:

laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment.env

```

## Running

With `run-webapp` a new container will be launched on the local machine, with runtime configuration provided by `--env-file` (if specified) and published on an available port. Multiple instances can be launched with different configuration.

**Example**:
```
# Production env
$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/production.env

Image: cerc/test-progressive-web-app:local
ID: 4c6e893bf436b3e91a2b92ce37e30e499685131705700bd92a90d2eb14eefd05
URL: http://localhost:32768

# Dev env
$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/dev.env

Image: cerc/test-progressive-web-app:local
ID: 9ab96494f563aafb6c057d88df58f9eca81b90f8721a4e068493a289a976051c
URL: http://localhost:32769
```

## Deploying

Use the subcommand `deploy-webapp create` to make a deployment directory that can be subsequently deployed to a Kubernetes cluster.
Example commands are shown below, assuming that the webapp container image `cerc/test-progressive-web-app:local` has already been built:
```
$ laconic-so deploy-webapp create --kube-config ~/kubectl/k8s-kubeconfig.yaml --image-registry registry.digitalocean.com/laconic-registry --deployment-dir webapp-k8s-deployment --image cerc/test-progressive-web-app:local --url https://test-pwa-app.hosting.laconic.com/ --env-file test-webapp.env
$ laconic-so deployment --dir webapp-k8s-deployment push-images
$ laconic-so deployment --dir webapp-k8s-deployment start
```
@ -10,6 +10,3 @@ pydantic==1.10.9
|
|||||||
tomli==2.0.1
|
tomli==2.0.1
|
||||||
validators==0.22.0
|
validators==0.22.0
|
||||||
kubernetes>=28.1.0
|
kubernetes>=28.1.0
|
||||||
humanfriendly>=10.0
|
|
||||||
python-gnupg>=0.5.2
|
|
||||||
requests>=2.3.2
|
|
||||||
|
@ -41,4 +41,4 @@ runcmd:
|
|||||||
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||||
- systemctl enable docker
|
- systemctl enable docker
|
||||||
- systemctl start docker
|
- systemctl start docker
|
||||||
- git clone https://git.vdb.to/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator
|
- git clone https://github.com/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator
|
||||||
|
@ -31,5 +31,5 @@ runcmd:
|
|||||||
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||||
- systemctl enable docker
|
- systemctl enable docker
|
||||||
- systemctl start docker
|
- systemctl start docker
|
||||||
- curl -L -o /usr/local/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
|
- curl -L -o /usr/local/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
|
||||||
- chmod +x /usr/local/bin/laconic-so
|
- chmod +x /usr/local/bin/laconic-so
|
||||||
|
@ -1,19 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# Beginnings of a script to quickly spin up and test a deployment
|
|
||||||
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
if [[ -n "$1" ]]; then
|
|
||||||
stack_name=$1
|
|
||||||
else
|
|
||||||
stack_name="test"
|
|
||||||
fi
|
|
||||||
spec_file_name="${stack_name}-spec.yml"
|
|
||||||
deployment_dir_name="${stack_name}-deployment"
|
|
||||||
rm -f ${spec_file_name}
|
|
||||||
rm -rf ${deployment_dir_name}
|
|
||||||
laconic-so --stack ${stack_name} deploy --deploy-to compose init --output ${spec_file_name}
|
|
||||||
laconic-so --stack ${stack_name} deploy --deploy-to compose create --deployment-dir ${deployment_dir_name} --spec-file ${spec_file_name}
|
|
||||||
#laconic-so deployment --dir ${deployment_dir_name} start
|
|
||||||
#laconic-so deployment --dir ${deployment_dir_name} ps
|
|
||||||
#laconic-so deployment --dir ${deployment_dir_name} stop
|
|
@ -137,7 +137,7 @@ fi
|
|||||||
echo "**************************************************************************************"
|
echo "**************************************************************************************"
|
||||||
echo "Installing laconic-so"
|
echo "Installing laconic-so"
|
||||||
# install latest `laconic-so`
|
# install latest `laconic-so`
|
||||||
distribution_url=https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
|
distribution_url=https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
|
||||||
install_filename=${install_dir}/laconic-so
|
install_filename=${install_dir}/laconic-so
|
||||||
mkdir -p ${install_dir}
|
mkdir -p ${install_dir}
|
||||||
curl -L -o ${install_filename} ${distribution_url}
|
curl -L -o ${install_filename} ${distribution_url}
|
||||||
|
6
setup.py
6
setup.py
@ -4,18 +4,16 @@ with open("README.md", "r", encoding="utf-8") as fh:
|
|||||||
long_description = fh.read()
|
long_description = fh.read()
|
||||||
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
||||||
requirements = fh.read()
|
requirements = fh.read()
|
||||||
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
|
||||||
version = fh.readlines()[-1].strip(" \n")
|
|
||||||
setup(
|
setup(
|
||||||
name='laconic-stack-orchestrator',
|
name='laconic-stack-orchestrator',
|
||||||
version=version,
|
version='1.0.12',
|
||||||
author='Cerc',
|
author='Cerc',
|
||||||
author_email='info@cerc.io',
|
author_email='info@cerc.io',
|
||||||
license='GNU Affero General Public License',
|
license='GNU Affero General Public License',
|
||||||
description='Orchestrates deployment of the Laconic stack',
|
description='Orchestrates deployment of the Laconic stack',
|
||||||
long_description=long_description,
|
long_description=long_description,
|
||||||
long_description_content_type="text/markdown",
|
long_description_content_type="text/markdown",
|
||||||
url='https://git.vdb.to/cerc-io/stack-orchestrator',
|
url='https://github.com/cerc-io/stack-orchestrator',
|
||||||
py_modules=['stack_orchestrator'],
|
py_modules=['stack_orchestrator'],
|
||||||
packages=find_packages(),
|
packages=find_packages(),
|
||||||
install_requires=[requirements],
|
install_requires=[requirements],
|
||||||
|
@ -25,18 +25,14 @@ import sys
|
|||||||
from decouple import config
|
from decouple import config
|
||||||
import subprocess
|
import subprocess
|
||||||
import click
|
import click
|
||||||
|
import importlib.resources
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config
|
||||||
from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit
|
|
||||||
from stack_orchestrator.base import get_npm_registry_url
|
from stack_orchestrator.base import get_npm_registry_url
|
||||||
from stack_orchestrator.build.build_types import BuildContext
|
|
||||||
from stack_orchestrator.build.publish import publish_image
|
|
||||||
from stack_orchestrator.build.build_util import get_containers_in_scope
|
|
||||||
|
|
||||||
# TODO: find a place for this
|
# TODO: find a place for this
|
||||||
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
||||||
|
|
||||||
|
|
||||||
def make_container_build_env(dev_root_path: str,
|
def make_container_build_env(dev_root_path: str,
|
||||||
container_build_dir: str,
|
container_build_dir: str,
|
||||||
debug: bool,
|
debug: bool,
|
||||||
@ -62,73 +58,65 @@ def make_container_build_env(dev_root_path: str,
|
|||||||
return container_build_env
|
return container_build_env
|
||||||
|
|
||||||
|
|
||||||
def process_container(build_context: BuildContext) -> bool:
|
def process_container(container,
|
||||||
if not opts.o.quiet:
|
container_build_dir: str,
|
||||||
print(f"Building: {build_context.container}")
|
container_build_env: dict,
|
||||||
|
dev_root_path: str,
|
||||||
default_container_tag = f"{build_context.container}:local"
|
quiet: bool,
|
||||||
build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
|
verbose: bool,
|
||||||
|
dry_run: bool,
|
||||||
# Check if this is in an external stack
|
continue_on_error: bool,
|
||||||
if stack_is_external(build_context.stack):
|
):
|
||||||
container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
|
if not quiet:
|
||||||
temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
|
print(f"Building: {container}")
|
||||||
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
|
||||||
# Now check if the container exists in the external stack.
|
build_script_filename = os.path.join(build_dir, "build.sh")
|
||||||
if not temp_build_script_filename.exists():
|
if verbose:
|
||||||
# If not, revert to building an internal container
|
|
||||||
container_parent_dir = build_context.container_build_dir
|
|
||||||
else:
|
|
||||||
container_parent_dir = build_context.container_build_dir
|
|
||||||
|
|
||||||
build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
|
|
||||||
build_script_filename = build_dir.joinpath("build.sh")
|
|
||||||
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f"Build script filename: {build_script_filename}")
|
print(f"Build script filename: {build_script_filename}")
|
||||||
if os.path.exists(build_script_filename):
|
if os.path.exists(build_script_filename):
|
||||||
build_command = build_script_filename.as_posix()
|
build_command = build_script_filename
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if verbose:
|
||||||
print(f"No script file found: {build_script_filename}, using default build script")
|
print(f"No script file found: {build_script_filename}, using default build script")
|
||||||
repo_dir = build_context.container.split('/')[1]
|
repo_dir = container.split('/')[1]
|
||||||
# TODO: make this less of a hack -- should be specified in some metadata somewhere
|
# TODO: make this less of a hack -- should be specified in some metadata somewhere
|
||||||
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
|
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
|
||||||
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
|
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
|
||||||
build_command = os.path.join(build_context.container_build_dir,
|
build_command = os.path.join(container_build_dir,
|
||||||
"default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
|
"default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
|
||||||
if not opts.o.dry_run:
|
if not dry_run:
|
||||||
# No PATH at all causes failures with podman.
|
if verbose:
|
||||||
if "PATH" not in build_context.container_build_env:
|
print(f"Executing: {build_command} with environment: {container_build_env}")
|
||||||
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
build_result = subprocess.run(build_command, shell=True, env=container_build_env)
|
||||||
if opts.o.verbose:
|
if verbose:
|
||||||
print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
|
|
||||||
build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f"Return code is: {build_result.returncode}")
|
print(f"Return code is: {build_result.returncode}")
|
||||||
if build_result.returncode != 0:
|
if build_result.returncode != 0:
|
||||||
return False
|
print(f"Error running build for {container}")
|
||||||
|
if not continue_on_error:
|
||||||
|
print("FATAL Error: container build failed and --continue-on-error not set, exiting")
|
||||||
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
return True
|
print("****** Container Build Error, continuing because --continue-on-error is set")
|
||||||
else:
|
else:
|
||||||
print("Skipped")
|
print("Skipped")
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these containers")
|
@click.option('--include', help="only build these containers")
|
||||||
@click.option('--exclude', help="don\'t build these containers")
|
@click.option('--exclude', help="don\'t build these containers")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
|
|
||||||
@click.option("--image-registry", help="Specify the image registry for --publish-images")
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
'''build the set of containers required for a complete stack'''
|
'''build the set of containers required for a complete stack'''
|
||||||
|
|
||||||
|
quiet = ctx.obj.quiet
|
||||||
|
verbose = ctx.obj.verbose
|
||||||
|
dry_run = ctx.obj.dry_run
|
||||||
|
debug = ctx.obj.debug
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
continue_on_error = ctx.obj.continue_on_error
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
@ -139,45 +127,39 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
|
|||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
|
|
||||||
if not opts.o.quiet:
|
if not quiet:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print('Dev root directory doesn\'t exist, creating')
|
||||||
|
|
||||||
if publish_images:
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
if not image_registry:
|
from stack_orchestrator import data
|
||||||
error_exit("--image-registry must be supplied with --publish-images")
|
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
|
||||||
|
all_containers = container_list_file.read().splitlines()
|
||||||
|
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = []
|
||||||
|
if stack:
|
||||||
|
stack_config = get_parsed_stack_config(stack)
|
||||||
|
containers_in_scope = stack_config['containers']
|
||||||
|
else:
|
||||||
|
containers_in_scope = all_containers
|
||||||
|
|
||||||
|
if verbose:
|
||||||
|
print(f'Containers: {containers_in_scope}')
|
||||||
|
if stack:
|
||||||
|
print(f"Stack: {stack}")
|
||||||
|
|
||||||
container_build_env = make_container_build_env(dev_root_path,
|
container_build_env = make_container_build_env(dev_root_path,
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
opts.o.debug,
|
debug,
|
||||||
force_rebuild,
|
force_rebuild,
|
||||||
extra_build_args)
|
extra_build_args)
|
||||||
|
|
||||||
for container in containers_in_scope:
|
for container in containers_in_scope:
|
||||||
if include_exclude_check(container, include, exclude):
|
if include_exclude_check(container, include, exclude):
|
||||||
|
process_container(container, container_build_dir, container_build_env,
|
||||||
build_context = BuildContext(
|
dev_root_path, quiet, verbose, dry_run, continue_on_error)
|
||||||
stack,
|
|
||||||
container,
|
|
||||||
container_build_dir,
|
|
||||||
container_build_env,
|
|
||||||
dev_root_path
|
|
||||||
)
|
|
||||||
result = process_container(build_context)
|
|
||||||
if result:
|
|
||||||
if publish_images:
|
|
||||||
publish_image(f"{container}:local", image_registry)
|
|
||||||
else:
|
else:
|
||||||
print(f"Error running build for {build_context.container}")
|
if verbose:
|
||||||
if not opts.o.continue_on_error:
|
|
||||||
error_exit("container build failed and --continue-on-error not set, exiting")
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
print("****** Container Build Error, continuing because --continue-on-error is set")
|
|
||||||
else:
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
|
@ -1,29 +0,0 @@
|
|||||||
# Copyright © 2024 Vulcanize
|
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Affero General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
# You should have received a copy of the GNU Affero General Public License
|
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
|
|
||||||
from dataclasses import dataclass
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Mapping
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class BuildContext:
|
|
||||||
stack: str
|
|
||||||
container: str
|
|
||||||
container_build_dir: Path
|
|
||||||
container_build_env: Mapping[str,str]
|
|
||||||
dev_root_path: str
|
|
||||||
|
|
@ -1,41 +0,0 @@
|
|||||||
# Copyright © 2024 Vulcanize
|
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Affero General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
# You should have received a copy of the GNU Affero General Public License
|
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import importlib.resources
|
|
||||||
|
|
||||||
from stack_orchestrator.opts import opts
|
|
||||||
from stack_orchestrator.util import get_parsed_stack_config, warn_exit
|
|
||||||
|
|
||||||
|
|
||||||
def get_containers_in_scope(stack: str):
|
|
||||||
|
|
||||||
containers_in_scope = []
|
|
||||||
if stack:
|
|
||||||
stack_config = get_parsed_stack_config(stack)
|
|
||||||
if "containers" not in stack_config or stack_config["containers"] is None:
|
|
||||||
warn_exit(f"stack {stack} does not define any containers")
|
|
||||||
containers_in_scope = stack_config['containers']
|
|
||||||
else:
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
|
||||||
from stack_orchestrator import data
|
|
||||||
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
|
|
||||||
containers_in_scope = container_list_file.read().splitlines()
|
|
||||||
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f'Containers: {containers_in_scope}')
|
|
||||||
if stack:
|
|
||||||
print(f"Stack: {stack}")
|
|
||||||
|
|
||||||
return containers_in_scope
|
|
@ -21,69 +21,48 @@
|
|||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
|
||||||
|
|
||||||
from decouple import config
|
from decouple import config
|
||||||
import click
|
import click
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from stack_orchestrator.build import build_containers
|
from stack_orchestrator.build import build_containers
|
||||||
from stack_orchestrator.deploy.webapp.util import determine_base_container, TimedLogger
|
|
||||||
from stack_orchestrator.build.build_types import BuildContext
|
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--base-container')
|
@click.option('--base-container', default="cerc/nextjs-base")
|
||||||
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args):
|
||||||
'''build the specified webapp container'''
|
'''build the specified webapp container'''
|
||||||
logger = TimedLogger()
|
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
debug = ctx.obj.debug
|
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
|
dry_run = ctx.obj.dry_run
|
||||||
|
debug = ctx.obj.debug
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
continue_on_error = ctx.obj.continue_on_error
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
|
|
||||||
if verbose:
|
if not quiet:
|
||||||
logger.log(f'Dev Root is: {dev_root_path}')
|
print(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not base_container:
|
|
||||||
base_container = determine_base_container(source_repo)
|
|
||||||
|
|
||||||
# First build the base container.
|
# First build the base container.
|
||||||
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
|
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
|
||||||
force_rebuild, extra_build_args)
|
force_rebuild, extra_build_args)
|
||||||
|
|
||||||
if verbose:
|
build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet,
|
||||||
logger.log(f"Building base container: {base_container}")
|
verbose, dry_run, continue_on_error)
|
||||||
|
|
||||||
build_context_1 = BuildContext(
|
|
||||||
stack,
|
|
||||||
base_container,
|
|
||||||
container_build_dir,
|
|
||||||
container_build_env,
|
|
||||||
dev_root_path,
|
|
||||||
)
|
|
||||||
ok = build_containers.process_container(build_context_1)
|
|
||||||
if not ok:
|
|
||||||
logger.log("ERROR: Build failed.")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
logger.log(f"Base container {base_container} build finished.")
|
|
||||||
|
|
||||||
# Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
|
# Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
|
||||||
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
||||||
@ -91,27 +70,8 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
|
|||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
|
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
|
||||||
base_container.replace("/", "-"),
|
base_container.replace("/", "-"),
|
||||||
"Dockerfile.webapp")
|
"Dockerfile.webapp")
|
||||||
if not tag:
|
|
||||||
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
||||||
tag = f"cerc/{webapp_name}:local"
|
container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local"
|
||||||
|
|
||||||
container_build_env["CERC_CONTAINER_BUILD_TAG"] = tag
|
build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet,
|
||||||
|
verbose, dry_run, continue_on_error)
|
||||||
if verbose:
|
|
||||||
logger.log(f"Building app container: {tag}")
|
|
||||||
|
|
||||||
build_context_2 = BuildContext(
|
|
||||||
stack,
|
|
||||||
base_container,
|
|
||||||
container_build_dir,
|
|
||||||
container_build_env,
|
|
||||||
dev_root_path,
|
|
||||||
)
|
|
||||||
ok = build_containers.process_container(build_context_2)
|
|
||||||
if not ok:
|
|
||||||
logger.log("ERROR: Build failed.")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
logger.log(f"App container {base_container} build finished.")
|
|
||||||
logger.log("build-webapp complete", show_step_time=False, show_total_time=True)
|
|
||||||
|
@ -1,195 +0,0 @@
|
|||||||
# Copyright © 2024 Vulcanize
|
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Affero General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
# You should have received a copy of the GNU Affero General Public License
|
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import click
|
|
||||||
from dataclasses import dataclass
|
|
||||||
import json
|
|
||||||
import platform
|
|
||||||
from python_on_whales import DockerClient
|
|
||||||
from python_on_whales.components.manifest.cli_wrapper import ManifestCLI, ManifestList
|
|
||||||
from python_on_whales.utils import run
|
|
||||||
import requests
|
|
||||||
from typing import List
|
|
||||||
|
|
||||||
from stack_orchestrator.opts import opts
|
|
||||||
from stack_orchestrator.util import include_exclude_check, error_exit
|
|
||||||
from stack_orchestrator.build.build_util import get_containers_in_scope
|
|
||||||
|
|
||||||
# Experimental fetch-container command
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class RegistryInfo:
|
|
||||||
registry: str
|
|
||||||
registry_username: str
|
|
||||||
registry_token: str
|
|
||||||
|
|
||||||
|
|
||||||
# Extending this code to support the --verbose option; consider contributing upstream
|
|
||||||
# https://github.com/gabrieldemarmiesse/python-on-whales/blob/master/python_on_whales/components/manifest/cli_wrapper.py#L129
|
|
||||||
class ExtendedManifestCLI(ManifestCLI):
|
|
||||||
def inspect_verbose(self, x: str) -> ManifestList:
|
|
||||||
"""Returns a Docker manifest list object."""
|
|
||||||
json_str = run(self.docker_cmd + ["manifest", "inspect", "--verbose", x])
|
|
||||||
return json.loads(json_str)
|
|
||||||
|
|
||||||
|
|
||||||
def _local_tag_for(container: str):
|
|
||||||
return f"{container}:local"
|
|
||||||
|
|
||||||
|
|
||||||
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
|
||||||
# Emulate this:
|
|
||||||
# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
|
||||||
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
|
||||||
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
|
||||||
# registry looks like: git.vdb.to/cerc-io
|
|
||||||
registry_parts = registry_info.registry.split("/")
|
|
||||||
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Fetching tags from: {url}")
|
|
||||||
response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
|
|
||||||
if response.status_code == 200:
|
|
||||||
tag_info = response.json()
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"container tags list: {tag_info}")
|
|
||||||
tags_array = tag_info["tags"]
|
|
||||||
return tags_array
|
|
||||||
else:
|
|
||||||
error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
|
|
||||||
|
|
||||||
|
|
||||||
def _find_latest(candidate_tags: List[str]):
|
|
||||||
# Lex sort should give us the latest first
|
|
||||||
sorted_candidates = sorted(candidate_tags)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"sorted candidates: {sorted_candidates}")
|
|
||||||
return sorted_candidates[-1]
|
|
||||||
|
|
||||||
|
|
||||||
def _filter_for_platform(container: str,
|
|
||||||
registry_info: RegistryInfo,
|
|
||||||
tag_list: List[str]) -> List[str] :
|
|
||||||
filtered_tags = []
|
|
||||||
this_machine = platform.machine()
|
|
||||||
# Translate between Python and docker platform names
|
|
||||||
if this_machine == "x86_64":
|
|
||||||
this_machine = "amd64"
|
|
||||||
if this_machine == "aarch64":
|
|
||||||
this_machine = "arm64"
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Python says the architecture is: {this_machine}")
|
|
||||||
docker = DockerClient()
|
|
||||||
for tag in tag_list:
|
|
||||||
remote_tag = f"{registry_info.registry}/{container}:{tag}"
|
|
||||||
manifest_cmd = ExtendedManifestCLI(docker.client_config)
|
|
||||||
manifest = manifest_cmd.inspect_verbose(remote_tag)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"manifest: {manifest}")
|
|
||||||
image_architecture = manifest["Descriptor"]["platform"]["architecture"]
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"image_architecture: {image_architecture}")
|
|
||||||
if this_machine == image_architecture:
|
|
||||||
filtered_tags.append(tag)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Tags filtered for platform: {filtered_tags}")
|
|
||||||
return filtered_tags
|
|
||||||
|
|
||||||
|
|
||||||
def _get_latest_image(container: str, registry_info: RegistryInfo):
|
|
||||||
all_tags = _get_tags_for_container(container, registry_info)
|
|
||||||
tags_for_platform = _filter_for_platform(container, registry_info, all_tags)
|
|
||||||
if len(tags_for_platform) > 0:
|
|
||||||
latest_tag = _find_latest(tags_for_platform)
|
|
||||||
return f"{container}:{latest_tag}"
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def _fetch_image(tag: str, registry_info: RegistryInfo):
|
|
||||||
docker = DockerClient()
|
|
||||||
remote_tag = f"{registry_info.registry}/{tag}"
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Attempting to pull this image: {remote_tag}")
|
|
||||||
docker.image.pull(remote_tag)
|
|
||||||
|
|
||||||
|
|
||||||
def _exists_locally(container: str):
|
|
||||||
docker = DockerClient()
|
|
||||||
return docker.image.exists(_local_tag_for(container))
|
|
||||||
|
|
||||||
|
|
||||||
def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
|
|
||||||
docker = DockerClient()
|
|
||||||
docker.image.tag(f"{registry}/{remote_tag}", local_tag)
|
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
|
||||||
@click.option('--include', help="only fetch these containers")
|
|
||||||
@click.option('--exclude', help="don\'t fetch these containers")
|
|
||||||
@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
|
|
||||||
@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
|
|
||||||
@click.option("--registry-username", required=True, help="Specify the image registry username")
|
|
||||||
@click.option("--registry-token", required=True, help="Specify the image registry access token")
|
|
||||||
@click.pass_context
|
|
||||||
def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
|
|
||||||
'''EXPERIMENTAL: fetch the images for a stack from remote registry'''
|
|
||||||
|
|
||||||
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
|
||||||
docker = DockerClient()
|
|
||||||
if not opts.o.quiet:
|
|
||||||
print("Logging into container registry:")
|
|
||||||
docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
|
|
||||||
# Generate list of target containers
|
|
||||||
stack = ctx.obj.stack
|
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
|
||||||
all_containers_found = True
|
|
||||||
for container in containers_in_scope:
|
|
||||||
local_tag = _local_tag_for(container)
|
|
||||||
if include_exclude_check(container, include, exclude):
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Processing: {container}")
|
|
||||||
# For each container, attempt to find the latest of a set of
|
|
||||||
# images with the correct name and platform in the specified registry
|
|
||||||
image_to_fetch = _get_latest_image(container, registry_info)
|
|
||||||
if not image_to_fetch:
|
|
||||||
print(f"Warning: no image found to fetch for container: {container}")
|
|
||||||
all_containers_found = False
|
|
||||||
continue
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Fetching: {image_to_fetch}")
|
|
||||||
_fetch_image(image_to_fetch, registry_info)
|
|
||||||
# Now check if the target container image already exists locally
|
|
||||||
if (_exists_locally(container)):
|
|
||||||
if not opts.o.quiet:
|
|
||||||
print(f"Container image {container} already exists locally")
|
|
||||||
# if so, fail unless the user specified force-local-overwrite
|
|
||||||
if (force_local_overwrite):
|
|
||||||
# In that case remove the existing :local tag
|
|
||||||
if not opts.o.quiet:
|
|
||||||
print(f"Warning: overwriting local tag from this image: {container} because "
|
|
||||||
"--force-local-overwrite was specified")
|
|
||||||
else:
|
|
||||||
if not opts.o.quiet:
|
|
||||||
print(f"Skipping local tagging for this image: {container} because that would "
|
|
||||||
"overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
|
|
||||||
continue
|
|
||||||
# Tag the fetched image with the :local tag
|
|
||||||
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
|
||||||
else:
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f"Excluding: {container}")
|
|
||||||
if not all_containers_found:
|
|
||||||
print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
|
|
@ -1,48 +0,0 @@
|
|||||||
# Copyright © 2024 Vulcanize
|
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Affero General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
# You should have received a copy of the GNU Affero General Public License
|
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from datetime import datetime
|
|
||||||
from python_on_whales import DockerClient
|
|
||||||
|
|
||||||
from stack_orchestrator.opts import opts
|
|
||||||
from stack_orchestrator.util import error_exit
|
|
||||||
|
|
||||||
|
|
||||||
def _publish_tag_for_image(local_image_tag: str, remote_repo: str, version: str):
|
|
||||||
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
|
||||||
(image_name, image_version) = local_image_tag.split(":")
|
|
||||||
if image_version == "local":
|
|
||||||
return f"{remote_repo}/{image_name}:{version}"
|
|
||||||
else:
|
|
||||||
error_exit("Asked to publish a non-locally built image")
|
|
||||||
|
|
||||||
|
|
||||||
def publish_image(local_tag, registry):
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f"Publishing this image: {local_tag} to this registry: {registry}")
|
|
||||||
docker = DockerClient()
|
|
||||||
# Figure out the target image tag
|
|
||||||
# Eventually this version will be generated from the source repo state
|
|
||||||
# Using a timestamp is an intermediate step
|
|
||||||
version = datetime.now().strftime("%Y%m%d%H%M")
|
|
||||||
remote_tag = _publish_tag_for_image(local_tag, registry, version)
|
|
||||||
# Tag the image thus
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Tagging {local_tag} to {remote_tag}")
|
|
||||||
docker.image.tag(local_tag, remote_tag)
|
|
||||||
# Push it to the desired registry
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f"Pushing image {remote_tag}")
|
|
||||||
docker.image.push(remote_tag)
|
|
@ -1,41 +0,0 @@
|
|||||||
# Copyright © 2023 Vulcanize
|
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Affero General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
# You should have received a copy of the GNU Affero General Public License
|
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
cluster_name_prefix = "laconic-"
|
|
||||||
stack_file_name = "stack.yml"
|
|
||||||
spec_file_name = "spec.yml"
|
|
||||||
config_file_name = "config.env"
|
|
||||||
deployment_file_name = "deployment.yml"
|
|
||||||
compose_dir_name = "compose"
|
|
||||||
compose_deploy_type = "compose"
|
|
||||||
k8s_kind_deploy_type = "k8s-kind"
|
|
||||||
k8s_deploy_type = "k8s"
|
|
||||||
cluster_id_key = "cluster-id"
|
|
||||||
kube_config_key = "kube-config"
|
|
||||||
deploy_to_key = "deploy-to"
|
|
||||||
network_key = "network"
|
|
||||||
http_proxy_key = "http-proxy"
|
|
||||||
image_registry_key = "image-registry"
|
|
||||||
configmaps_key = "configmaps"
|
|
||||||
resources_key = "resources"
|
|
||||||
volumes_key = "volumes"
|
|
||||||
security_key = "security"
|
|
||||||
annotations_key = "annotations"
|
|
||||||
labels_key = "labels"
|
|
||||||
replicas_key = "replicas"
|
|
||||||
node_affinities_key = "node-affinities"
|
|
||||||
node_tolerations_key = "node-tolerations"
|
|
||||||
kind_config_filename = "kind-config.yml"
|
|
||||||
kube_config_filename = "kubeconfig.yml"
|
|
@ -1,15 +0,0 @@
|
|||||||
services:
|
|
||||||
registry:
|
|
||||||
image: registry:2.8
|
|
||||||
restart: always
|
|
||||||
environment:
|
|
||||||
REGISTRY_LOG_LEVEL: ${REGISTRY_LOG_LEVEL}
|
|
||||||
volumes:
|
|
||||||
- config:/config:ro
|
|
||||||
- registry-data:/var/lib/registry
|
|
||||||
ports:
|
|
||||||
- "5000"
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
config:
|
|
||||||
registry-data:
|
|
@ -1,80 +0,0 @@
|
|||||||
|
|
||||||
# From: https://raw.githubusercontent.com/blast-io/deployment/master/docker-compose.yml
|
|
||||||
services:
|
|
||||||
# generate jwt.txt if it's absent
|
|
||||||
generate-jwt:
|
|
||||||
image: blastio/openssl
|
|
||||||
volumes:
|
|
||||||
- blast-data:/blast:rw
|
|
||||||
command: >
|
|
||||||
sh -c "[ ! -f /blast/jwt.txt ] && openssl rand -hex 32 | tr -d '\n' > /blast/jwt.txt || exit 0"
|
|
||||||
# initialise geth db
|
|
||||||
geth-init:
|
|
||||||
image: blastio/blast-geth:${NETWORK:-testnet-sepolia}
|
|
||||||
volumes:
|
|
||||||
- blast-data:/blast:rw
|
|
||||||
- ../config/fixturenet-blast/genesis.json:/blast/genesis.json
|
|
||||||
entrypoint: /bin/sh
|
|
||||||
command: >
|
|
||||||
-c "[ ! -d /blast/${GETH_DATA_DIR:-blast-geth-data}/geth ] && /usr/local/bin/geth init --datadir=/blast/${GETH_DATA_DIR:-blast-geth-data} /blast/genesis.json || exit 0"
|
|
||||||
depends_on:
|
|
||||||
generate-jwt:
|
|
||||||
condition: service_completed_successfully
|
|
||||||
env_file:
|
|
||||||
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
|
||||||
blast-geth:
|
|
||||||
image: blastio/blast-geth:${NETWORK:-testnet-sepolia}
|
|
||||||
volumes:
|
|
||||||
- blast-data:/blast
|
|
||||||
ports:
|
|
||||||
- "9545"
|
|
||||||
- "9546"
|
|
||||||
command: >
|
|
||||||
--datadir=/blast/${GETH_DATA_DIR:-blast-geth-data}
|
|
||||||
--http
|
|
||||||
--http.corsdomain="*"
|
|
||||||
--http.vhosts="*"
|
|
||||||
--http.addr=0.0.0.0
|
|
||||||
--http.port=9545
|
|
||||||
--http.api=web3,debug,eth,txpool,net,engine
|
|
||||||
--ws
|
|
||||||
--ws.addr=0.0.0.0
|
|
||||||
--ws.port=9546
|
|
||||||
--ws.origins="*"
|
|
||||||
--ws.api=debug,eth,txpool,net,engine
|
|
||||||
--authrpc.addr="0.0.0.0"
|
|
||||||
--authrpc.port="8551"
|
|
||||||
--authrpc.vhosts="*"
|
|
||||||
--authrpc.jwtsecret=/blast/jwt.txt
|
|
||||||
--syncmode=full
|
|
||||||
--gcmode=archive
|
|
||||||
--nodiscover
|
|
||||||
--maxpeers=0
|
|
||||||
--rollup.disabletxpoolgossip=true
|
|
||||||
env_file:
|
|
||||||
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
|
||||||
depends_on:
|
|
||||||
geth-init:
|
|
||||||
condition: service_completed_successfully
|
|
||||||
op-node:
|
|
||||||
image: blastio/blast-optimism:${NETWORK:-testnet-sepolia}
|
|
||||||
volumes:
|
|
||||||
- blast-data:/blast
|
|
||||||
- ../config/fixturenet-blast/rollup.json:/blast/rollup.json
|
|
||||||
ports:
|
|
||||||
- "9003"
|
|
||||||
command: >
|
|
||||||
op-node
|
|
||||||
--l1="${CERC_L1_RPC}"
|
|
||||||
--l1.rpckind="any"
|
|
||||||
--l1.trustrpc=true
|
|
||||||
--l2="http://blast-geth:8551"
|
|
||||||
--l2.jwt-secret=/blast/jwt.txt
|
|
||||||
--rollup.config="/blast/rollup.json"
|
|
||||||
depends_on:
|
|
||||||
- blast-geth
|
|
||||||
env_file:
|
|
||||||
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
blast-data:
|
|
@ -4,6 +4,6 @@ services:
|
|||||||
image: cerc/laconic-console-host:local
|
image: cerc/laconic-console-host:local
|
||||||
environment:
|
environment:
|
||||||
- CERC_WEBAPP_FILES_DIR=${CERC_WEBAPP_FILES_DIR:-/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production}
|
- CERC_WEBAPP_FILES_DIR=${CERC_WEBAPP_FILES_DIR:-/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production}
|
||||||
- LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost:9473}
|
- LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost}
|
||||||
ports:
|
ports:
|
||||||
- "80"
|
- "80"
|
||||||
|
@ -2,14 +2,10 @@ services:
|
|||||||
laconicd:
|
laconicd:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
image: cerc/laconicd:local
|
image: cerc/laconicd:local
|
||||||
command: ["bash", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
|
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
|
||||||
environment:
|
|
||||||
TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED:-false}
|
|
||||||
TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY:-false}
|
|
||||||
ONBOARDING_ENABLED: ${ONBOARDING_ENABLED:-false}
|
|
||||||
volumes:
|
volumes:
|
||||||
# The cosmos-sdk node's database directory:
|
# The cosmos-sdk node's database directory:
|
||||||
- laconicd-data:/root/.laconicd
|
- laconicd-data:/root/.laconicd/data
|
||||||
# TODO: look at folding these scripts into the container
|
# TODO: look at folding these scripts into the container
|
||||||
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
|
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
|
||||||
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
|
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
|
||||||
@ -20,14 +16,15 @@ services:
|
|||||||
- "26657"
|
- "26657"
|
||||||
- "26656"
|
- "26656"
|
||||||
- "9473"
|
- "9473"
|
||||||
|
- "8545"
|
||||||
|
- "8546"
|
||||||
- "9090"
|
- "9090"
|
||||||
|
- "9091"
|
||||||
- "1317"
|
- "1317"
|
||||||
|
|
||||||
cli:
|
cli:
|
||||||
image: cerc/laconic-registry-cli:local
|
image: cerc/laconic-registry-cli:local
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
|
- ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
|
||||||
- ${BASE_DIR:-~/cerc}/laconic-registry-cli:/laconic-registry-cli
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
laconicd-data:
|
laconicd-data:
|
||||||
|
@ -6,8 +6,8 @@ services:
|
|||||||
# Deploys the L1 smart contracts (outputs to volume l1_deployment)
|
# Deploys the L1 smart contracts (outputs to volume l1_deployment)
|
||||||
fixturenet-optimism-contracts:
|
fixturenet-optimism-contracts:
|
||||||
restart: on-failure
|
restart: on-failure
|
||||||
image: cerc/optimism-contracts:local
|
|
||||||
hostname: fixturenet-optimism-contracts
|
hostname: fixturenet-optimism-contracts
|
||||||
|
image: cerc/optimism-contracts:local
|
||||||
env_file:
|
env_file:
|
||||||
- ../config/fixturenet-optimism/l1-params.env
|
- ../config/fixturenet-optimism/l1-params.env
|
||||||
environment:
|
environment:
|
||||||
@ -17,49 +17,27 @@ services:
|
|||||||
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
|
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
|
||||||
CERC_L1_ADDRESS: ${CERC_L1_ADDRESS}
|
CERC_L1_ADDRESS: ${CERC_L1_ADDRESS}
|
||||||
CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY}
|
CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY}
|
||||||
|
CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2}
|
||||||
|
CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2}
|
||||||
|
# Waits for L1 endpoint to be up before running the script
|
||||||
|
command: |
|
||||||
|
"./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh"
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
|
- ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
|
||||||
- ../config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh:/app/packages/contracts-bedrock/deploy-contracts.sh
|
- ../config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
|
||||||
|
- ../config/optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
|
||||||
|
- ../config/optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
|
||||||
|
- ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
|
||||||
|
- ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
|
||||||
- l2_accounts:/l2-accounts
|
- l2_accounts:/l2-accounts
|
||||||
- l1_deployment:/l1-deployment
|
- l1_deployment:/app/packages/contracts-bedrock
|
||||||
- l2_config:/l2-config
|
|
||||||
# Waits for L1 endpoint to be up before running the contract deploy script
|
|
||||||
command: |
|
|
||||||
"./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./deploy-contracts.sh"
|
|
||||||
|
|
||||||
# Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
|
|
||||||
op-geth:
|
|
||||||
restart: always
|
|
||||||
image: cerc/optimism-l2geth:local
|
|
||||||
hostname: op-geth
|
|
||||||
depends_on:
|
|
||||||
op-node:
|
|
||||||
condition: service_started
|
|
||||||
volumes:
|
|
||||||
- ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
|
|
||||||
- l2_config:/l2-config:ro
|
|
||||||
- l2_accounts:/l2-accounts:ro
|
|
||||||
- l2_geth_data:/datadir
|
|
||||||
entrypoint: "sh"
|
|
||||||
command: "/run-op-geth.sh"
|
|
||||||
ports:
|
|
||||||
- "8545"
|
|
||||||
- "8546"
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "nc", "-vz", "localhost:8545"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 10s
|
|
||||||
retries: 100
|
|
||||||
start_period: 10s
|
|
||||||
extra_hosts:
|
extra_hosts:
|
||||||
- "host.docker.internal:host-gateway"
|
- "host.docker.internal:host-gateway"
|
||||||
|
|
||||||
# Runs the L2 consensus client (Sequencer node)
|
# Generates the config files required for L2 (outputs to volume l2_config)
|
||||||
# Generates the L2 config files if not already present (outputs to volume l2_config)
|
op-node-l2-config-gen:
|
||||||
op-node:
|
restart: on-failure
|
||||||
restart: always
|
|
||||||
image: cerc/optimism-op-node:local
|
image: cerc/optimism-op-node:local
|
||||||
hostname: op-node
|
|
||||||
depends_on:
|
depends_on:
|
||||||
fixturenet-optimism-contracts:
|
fixturenet-optimism-contracts:
|
||||||
condition: service_completed_successfully
|
condition: service_completed_successfully
|
||||||
@ -69,19 +47,61 @@ services:
|
|||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_L1_RPC: ${CERC_L1_RPC}
|
CERC_L1_RPC: ${CERC_L1_RPC}
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/fixturenet-optimism/run-op-node.sh:/run-op-node.sh
|
- ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
|
||||||
- l1_deployment:/l1-deployment:ro
|
- l1_deployment:/contracts-bedrock:ro
|
||||||
- l2_config:/l2-config
|
- l2_config:/app
|
||||||
|
command: ["sh", "/app/generate-l2-config.sh"]
|
||||||
|
extra_hosts:
|
||||||
|
- "host.docker.internal:host-gateway"
|
||||||
|
|
||||||
|
# Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
|
||||||
|
op-geth:
|
||||||
|
restart: always
|
||||||
|
image: cerc/optimism-l2geth:local
|
||||||
|
depends_on:
|
||||||
|
op-node-l2-config-gen:
|
||||||
|
condition: service_started
|
||||||
|
volumes:
|
||||||
|
- ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
|
||||||
|
- l2_config:/op-node:ro
|
||||||
- l2_accounts:/l2-accounts:ro
|
- l2_accounts:/l2-accounts:ro
|
||||||
|
- l2_geth_data:/datadir
|
||||||
entrypoint: "sh"
|
entrypoint: "sh"
|
||||||
command: "/run-op-node.sh"
|
command: "/run-op-geth.sh"
|
||||||
ports:
|
ports:
|
||||||
- "8547"
|
- "0.0.0.0:8545:8545"
|
||||||
|
- "0.0.0.0:8546:8546"
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "nc", "-vz", "localhost:8545"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 10
|
||||||
|
start_period: 10s
|
||||||
|
|
||||||
|
# Runs the L2 consensus client (Sequencer node)
|
||||||
|
op-node:
|
||||||
|
restart: always
|
||||||
|
image: cerc/optimism-op-node:local
|
||||||
|
depends_on:
|
||||||
|
op-geth:
|
||||||
|
condition: service_healthy
|
||||||
|
env_file:
|
||||||
|
- ../config/fixturenet-optimism/l1-params.env
|
||||||
|
environment:
|
||||||
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
|
CERC_L1_RPC: ${CERC_L1_RPC}
|
||||||
|
volumes:
|
||||||
|
- ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
|
||||||
|
- l2_config:/op-node-data:ro
|
||||||
|
- l2_accounts:/l2-accounts:ro
|
||||||
|
command: ["sh", "/app/run-op-node.sh"]
|
||||||
|
ports:
|
||||||
|
- "0.0.0.0:8547:8547"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "localhost:8547"]
|
test: ["CMD", "nc", "-vz", "localhost:8547"]
|
||||||
interval: 30s
|
interval: 30s
|
||||||
timeout: 10s
|
timeout: 10s
|
||||||
retries: 100
|
retries: 10
|
||||||
start_period: 10s
|
start_period: 10s
|
||||||
extra_hosts:
|
extra_hosts:
|
||||||
- "host.docker.internal:host-gateway"
|
- "host.docker.internal:host-gateway"
|
||||||
@ -90,7 +110,6 @@ services:
|
|||||||
op-batcher:
|
op-batcher:
|
||||||
restart: always
|
restart: always
|
||||||
image: cerc/optimism-op-batcher:local
|
image: cerc/optimism-op-batcher:local
|
||||||
hostname: op-batcher
|
|
||||||
depends_on:
|
depends_on:
|
||||||
op-node:
|
op-node:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
@ -110,7 +129,7 @@ services:
|
|||||||
command: |
|
command: |
|
||||||
"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
|
"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
|
||||||
ports:
|
ports:
|
||||||
- "8548"
|
- "127.0.0.1:8548:8548"
|
||||||
extra_hosts:
|
extra_hosts:
|
||||||
- "host.docker.internal:host-gateway"
|
- "host.docker.internal:host-gateway"
|
||||||
|
|
||||||
@@ -118,29 +137,25 @@ services:
op-proposer:
restart: always
image: cerc/optimism-op-proposer:local
-    hostname: op-proposer
depends_on:
op-node:
condition: service_healthy
-    op-geth:
-    condition: service_healthy
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
-    CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID}
volumes:
- ../config/network/wait-for-it.sh:/wait-for-it.sh
- ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh
-    - l1_deployment:/l1-deployment:ro
+    - l1_deployment:/contracts-bedrock:ro
- l2_accounts:/l2-accounts:ro
entrypoint: ["sh", "-c"]
# Waits for L1 endpoint to be up before running the proposer
command: |
"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh"
ports:
-    - "8560"
+    - "127.0.0.1:8560:8560"
extra_hosts:
- "host.docker.internal:host-gateway"
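The wait-for-it command above mixes two kinds of substitution: ${VAR:-default} is expanded by Compose on the host at config time, while $${VAR} is escaped so a literal ${VAR} reaches the container and is expanded by the shell there. A small sketch of the same idea (CERC_L1_HOST is borrowed from the stack; DEFAULT_TARGET_HOST is an illustrative name):

services:
  escape-demo:
    image: busybox
    environment:
      # Set inside the container; referenced below via the escaped $${...} form
      DEFAULT_TARGET_HOST: fixturenet-eth-geth-1
    entrypoint: ["sh", "-c"]
    # ${CERC_L1_HOST:-...} is resolved by Compose before the container starts;
    # $${DEFAULT_TARGET_HOST} arrives as ${DEFAULT_TARGET_HOST} and is expanded by sh.
    command: |
      "echo waiting for ${CERC_L1_HOST:-$${DEFAULT_TARGET_HOST}}"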
@@ -6,7 +6,6 @@ services:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
RUN_BOOTNODE: "true"
-    CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
image: cerc/fixturenet-plugeth-plugeth:local
volumes:
- fixturenet_plugeth_bootnode_geth_data:/root/ethdata
@@ -1,36 +0,0 @@
version: '3.7'

services:
# Runs an Urbit fake ship and attempts an app installation using given data
# Uploads the app glob to given IPFS endpoint
# From urbit_app_builds volume:
# - takes app build from ${CERC_URBIT_APP}/build (waits for it to appear)
# - takes additional mark files from ${CERC_URBIT_APP}/mar
# - takes the docket file from ${CERC_URBIT_APP}/desk.docket-0
urbit-fake-ship:
restart: unless-stopped
image: tloncorp/vere
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_URBIT_APP: ${CERC_URBIT_APP}
CERC_ENABLE_APP_INSTALL: ${CERC_ENABLE_APP_INSTALL:-true}
CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs:5001}
CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs:8080}
entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-app.sh && tail -f /dev/null"]
volumes:
- urbit_data:/urbit
- urbit_app_builds:/app-builds
- ../config/urbit/run-urbit-ship.sh:/urbit/run-urbit-ship.sh
- ../config/urbit/deploy-app.sh:/urbit/deploy-app.sh
ports:
- "80"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "80"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s

volumes:
urbit_data:
urbit_app_builds:
@@ -1,31 +0,0 @@
version: "3.7"

services:
grafana:
image: grafana/grafana:10.2.3
restart: always
environment:
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL}
CERC_GRAFANA_ALERTS_SUBGRAPH_IDS: ${CERC_GRAFANA_ALERTS_SUBGRAPH_IDS}
volumes:
- ../config/monitoring/grafana/provisioning:/etc/grafana/provisioning
- ../config/monitoring/grafana/dashboards:/etc/grafana/dashboards
- ../config/monitoring/update-grafana-alerts-config.sh:/update-grafana-alerts-config.sh
- grafana_storage:/var/lib/grafana
user: root
entrypoint: ["bash", "-c"]
command: |
"/update-grafana-alerts-config.sh && /run.sh"
ports:
- "3000"
extra_hosts:
- "host.docker.internal:host-gateway"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3000"]
interval: 30s
timeout: 5s
retries: 10
start_period: 3s

volumes:
grafana_storage:
@@ -16,13 +16,8 @@ services:
postgres_pass: password
postgres_db: graph-node
ethereum: ${ETH_NETWORKS:-lotus-fixturenet:http://lotus-node-1:1234/rpc/v1}
-    # Env varaibles reference: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
GRAPH_LOG: debug
ETHEREUM_REORG_THRESHOLD: 3
-    GRAPH_ETHEREUM_JSON_RPC_TIMEOUT: ${GRAPH_ETHEREUM_JSON_RPC_TIMEOUT:-180}
-    GRAPH_ETHEREUM_REQUEST_RETRIES: ${GRAPH_ETHEREUM_REQUEST_RETRIES:-10}
-    GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE: ${GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE:-2000}
-    GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS: ${GRAPH_ETHEREUM_BLOCK_INGESTOR_MAX_CONCURRENT_JSON_RPC_CALLS_FOR_TXN_RECEIPTS:-1000}
entrypoint: ["bash", "-c"]
# Wait for ETH RPC endpoint to be up when running with fixturenet-lotus
command: |
@@ -32,7 +27,6 @@ services:
- "8001"
- "8020"
- "8030"
-    - "8040"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "8020"]
interval: 30s
@@ -1,24 +1,13 @@
version: "3.2"

# See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up
services:
ipfs:
image: ipfs/kubo:v0.24.0
restart: always
volumes:
-    - ipfs-import:/import
+    - ./ipfs/import:/import
-    - ipfs-data:/data/ipfs
+    - ./ipfs/data:/data/ipfs
ports:
-    - "4001"
+    - "0.0.0.0:8080:8080"
-    - "8080"
+    - "0.0.0.0:4001:4001"
- "0.0.0.0:5001:5001"
-    healthcheck:
-    test: ["CMD", "nc", "-v", "localhost", "5001"]
-    interval: 20s
-    timeout: 5s
-    retries: 15
-    start_period: 10s
-
-    volumes:
-    ipfs-import:
-    ipfs-data:
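The ipfs service switches from Docker-managed named volumes to host bind mounts under ./ipfs/. A hedged sketch of the difference between the two forms (paths are illustrative):

services:
  ipfs:
    image: ipfs/kubo:v0.24.0
    volumes:
      - ipfs-data:/data/ipfs     # named volume: created and managed by Docker
      - ./ipfs/import:/import    # bind mount: a directory next to the compose file on the host

volumes:
  ipfs-data: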
@@ -1,10 +0,0 @@
services:
laconic-explorer:
restart: unless-stopped
image: cerc/ping-pub:local
environment:
- LACONIC_LACONICD_API_URL=${LACONIC_LACONICD_API_URL:-http://localhost:1317}
- LACONIC_LACONICD_RPC_URL=${LACONIC_LACONICD_RPC_URL:-http://localhost:26657}
- LACONIC_LACONICD_CHAIN_ID=${LACONIC_LACONICD_CHAIN_ID:-chain-id-not-set}
ports:
- "5173"
@@ -1,83 +0,0 @@

# From: https://raw.githubusercontent.com/blast-io/deployment/master/docker-compose.yml
services:
# generate jwt.txt if it's absent
generate-jwt:
image: blastio/openssl
volumes:
- blast-data:/blast:rw
command: >
sh -c "[ ! -f /blast/jwt.txt ] && openssl rand -hex 32 | tr -d '\n' > /blast/jwt.txt || exit 0"
# initialise geth db
geth-init:
image: blastio/blast-geth:${NETWORK:-mainnet}
volumes:
- blast-data:/blast:rw
entrypoint: /bin/sh
command: >
-c "[ ! -d /blast/${GETH_DATA_DIR:-blast-geth-data}/geth ] && /usr/local/bin/geth init --datadir=/blast/${GETH_DATA_DIR:-blast-geth-data} /blast/genesis.json || exit 0"
depends_on:
generate-jwt:
condition: service_completed_successfully
env_file:
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
blast-geth:
image: blastio/blast-geth:${NETWORK:-mainnet}
volumes:
- blast-data:/blast
ports:
- "9545"
- "9546"
- "6060"
command: >
--datadir=/blast/${GETH_DATA_DIR:-blast-geth-data}
--http
--http.corsdomain="*"
--http.vhosts="*"
--http.addr=0.0.0.0
--http.port=9545
--http.api=web3,debug,eth,txpool,net,engine
--ws
--ws.addr=0.0.0.0
--ws.port=9546
--ws.origins="*"
--ws.api=debug,eth,txpool,net,engine
--authrpc.addr="0.0.0.0"
--authrpc.port="8551"
--authrpc.vhosts="*"
--authrpc.jwtsecret=/blast/jwt.txt
--syncmode=full
--metrics
--metrics.addr=0.0.0.0
--gcmode=archive
--nodiscover
--maxpeers=0
--rollup.disabletxpoolgossip=true
env_file:
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
depends_on:
geth-init:
condition: service_completed_successfully
op-node:
image: blastio/blast-optimism:${NETWORK:-mainnet}
volumes:
- blast-data:/blast
ports:
- "9003"
- "7300"
command: >
op-node
--l1="https://eth-mainnet-1.vdb.to/"
--metrics.enabled
--l1.rpckind="any"
--l1.trustrpc=true
--l2="http://blast-geth:8551"
--l2.jwt-secret=/blast/jwt.txt
--rollup.config="/blast/rollup.json"
depends_on:
- blast-geth
env_file:
- ../config/mainnet-blast/${NETWORK:-mainnet}.config

volumes:
blast-data:
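The removed blast stack used one-shot init containers (generate-jwt, geth-init) that later services wait on via service_completed_successfully. A minimal sketch of that ordering pattern, with illustrative names and commands rather than the blast images:

services:
  generate-secret:
    image: alpine
    volumes:
      - shared-data:/data
    # One-shot job: create the secret only if it does not exist yet, then exit 0
    command: sh -c "[ -f /data/secret.txt ] || head -c 32 /dev/urandom | base64 > /data/secret.txt"

  app:
    image: busybox
    volumes:
      - shared-data:/data
    command: ["sh", "-c", "cat /data/secret.txt && sleep infinity"]
    depends_on:
      generate-secret:
        # Starts only after the init container has exited successfully
        condition: service_completed_successfully

volumes:
  shared-data: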
@@ -1,12 +0,0 @@
version: "3.2"

services:
mars:
image: cerc/mars-v2:local
restart: always
ports:
- "3000:3000"
environment:
- URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com
- URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
@@ -1,20 +0,0 @@
version: "3.2"

services:
mars:
image: cerc/mars:local
restart: always
ports:
- "3000:3000"
environment:
- URL_OSMOSIS_GQL=https://osmosis-node.marsprotocol.io/GGSFGSFGFG34/osmosis-hive-front/graphql
- URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com
- URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com
- URL_NEUTRON_GQL=https://neutron.rpc.p2p.world/qgrnU6PsQZA8F9S5Fb8Fn3tV3kXmMBl2M9bcc9jWLjQy8p/hive/graphql
- URL_NEUTRON_REST=https://rest-kralum.neutron-1.neutron.org
- URL_NEUTRON_RPC=https://rpc-kralum.neutron-1.neutron.org
- URL_NEUTRON_TEST_GQL=https://testnet-neutron-gql.marsprotocol.io/graphql
- URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
- URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
@@ -1,15 +0,0 @@
version: '3.8'

services:
node-exporter:
image: prom/node-exporter:latest
restart: unless-stopped
command:
- '--web.listen-address=:9100'
- '--path.rootfs=/host'
- '--collector.systemd'
- '--collector.processes'
network_mode: host
pid: host
volumes:
- '/:/host:ro,rslave'
@@ -1,22 +0,0 @@
version: "3.2"

services:
osmosis-front-end:
image: cerc/osmosis-front-end-urbit:local
restart: on-failure
environment:
- NEXT_PUBLIC_WEB_API_BASE_URL=${CERC_WEB_API_BASE_URL}
- ASSET_LIST_COMMIT_HASH=a326bcefc51372b4912be5a2a2fa84a5d142a438
- NEXT_PUBLIC_BASEPATH=/apps/osmosis
- NEXT_PUBLIC_URBIT_DEPLOYMENT=true
working_dir: /app/packages/web
command: ["./build-app-for-urbit.sh"]
volumes:
- ../config/osmosis/build-app-for-urbit.sh:/app/packages/web/build-app-for-urbit.sh
- ../config/osmosis/.env.production:/app/packages/web/.env.production
- urbit_app_builds:/app-builds
- ../config/osmosis/urbit-files/mar:/app/packages/web/mar
- ../config/osmosis/urbit-files/desk.docket-0:/app/packages/web/desk.docket-0

volumes:
urbit_app_builds:
@@ -3,34 +3,6 @@ version: "3.2"
services:
osmosis-front-end:
image: cerc/osmosis-front-end:local
-    restart: on-failure
-    environment:
-    - NEXT_PUBLIC_WEB_API_BASE_URL=${CERC_WEB_API_BASE_URL}
-    - ASSET_LIST_COMMIT_HASH=a326bcefc51372b4912be5a2a2fa84a5d142a438
-    working_dir: /app/packages/web
-    command: ["./build-app.sh"]
-    volumes:
-    - ../config/osmosis/build-app.sh:/app/packages/web/build-app.sh
-    - ../config/osmosis/.env.production:/app/packages/web/.env.production
-    - app_builds:/app-builds
-
-    nginx:
-    image: nginx:1.23-alpine
restart: always
-    depends_on:
-    osmosis-front-end:
-    condition: service_completed_successfully
-    volumes:
-    - ../config/osmosis/nginx:/etc/nginx/conf.d
-    - app_builds:/usr/share/nginx
ports:
-    - "80"
+    - "3002:3002" #TODO make `3000` when using the deployment feature
-    healthcheck:
-    test: ["CMD", "nc", "-vz", "localhost", "80"]
-    interval: 20s
-    timeout: 5s
-    retries: 15
-    start_period: 5s
-
-    volumes:
-    app_builds:
@@ -1,8 +0,0 @@
version: "3.2"

services:
ping-pub:
image: cerc/ping-pub:local
restart: always
ports:
- "5173:5173"
@@ -1,79 +0,0 @@
version: "3.7"

services:
prometheus:
image: prom/prometheus:v2.49.1
restart: always
volumes:
- ../config/monitoring/prometheus:/etc/prometheus
- prometheus_data:/prometheus
ports:
- "9090"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "9090"]
interval: 30s
timeout: 5s
retries: 10
start_period: 3s
extra_hosts:
- "host.docker.internal:host-gateway"

blackbox:
image: prom/blackbox-exporter:latest
restart: always
volumes:
- ../config/monitoring/blackbox.yml:/etc/blackbox_exporter/config.yml
ports:
- '9115'
extra_hosts:
- "host.docker.internal:host-gateway"

ethereum-chain-head-exporter:
image: cerc/watcher-ts:local
restart: always
working_dir: /app/packages/cli
environment:
ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT:-https://mainnet.infura.io/v3}
ETH_RPC_API_KEY: ${CERC_INFURA_KEY}
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
ports:
- '5000'
extra_hosts:
- "host.docker.internal:host-gateway"

filecoin-chain-head-exporter:
image: cerc/watcher-ts:local
restart: always
working_dir: /app/packages/cli
environment:
ETH_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT:-https://api.node.glif.io/rpc/v1}
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
ports:
- '5000'
extra_hosts:
- "host.docker.internal:host-gateway"

graph-node-upstream-head-exporter:
image: cerc/watcher-ts:local
restart: always
working_dir: /app/packages/cli
environment:
ETH_RPC_ENDPOINT: ${GRAPH_NODE_RPC_ENDPOINT}
command: ["sh", "-c", "yarn export-metrics:chain-heads"]
ports:
- '5000'
extra_hosts:
- "host.docker.internal:host-gateway"

postgres-exporter:
image: quay.io/prometheuscommunity/postgres-exporter
restart: always
volumes:
- ../config/monitoring/postgres-exporter.yml:/postgres_exporter.yml
ports:
- '9187'
extra_hosts:
- "host.docker.internal:host-gateway"

volumes:
prometheus_data:
@@ -1,22 +0,0 @@
version: "3.2"

services:
proxy-server:
image: cerc/watcher-ts:local
restart: on-failure
working_dir: /app/packages/cli
environment:
ENABLE_PROXY: ${CERC_ENABLE_PROXY:-true}
PROXY_UPSTREAM: ${CERC_PROXY_UPSTREAM}
PROXY_ORIGIN_HEADER: ${CERC_PROXY_ORIGIN_HEADER}
command: ["sh", "-c", "./run.sh"]
volumes:
- ../config/proxy-server/run.sh:/app/packages/cli/run.sh
ports:
- "4000"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "4000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
@@ -1,20 +0,0 @@
services:

database:
image: cerc/test-database-container:local
restart: always
volumes:
- db-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: "test-user"
POSTGRES_DB: "test-db"
POSTGRES_PASSWORD: "password"
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
ports:
- "5432"

test-client:
image: cerc/test-database-client:local

volumes:
db-data:
@@ -5,16 +5,10 @@ services:
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
-    CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
-    CERC_TEST_PARAM_3: ${CERC_TEST_PARAM_3:-FAILED}
volumes:
-    - test-data-bind:/data
+    - test-data:/data
-    - test-data-auto:/data2
-    - test-config:/config:ro
ports:
- "80"

volumes:
-    test-data-bind:
+    test-data:
-    test-data-auto:
-    test-config:
@@ -1,18 +0,0 @@
version: "3.2"

services:
uniswap-interface:
image: cerc/uniswap-interface:local
restart: on-failure
environment:
- REACT_APP_INFURA_KEY=${CERC_INFURA_KEY}
- REACT_APP_AWS_API_ENDPOINT=${CERC_UNISWAP_GQL}
command: ["./build-app.sh"]
volumes:
- ../config/uniswap-interface/build-app.sh:/app/build-app.sh
- urbit_app_builds:/app-builds
- ../config/uniswap-interface/urbit-files/mar:/app/mar
- ../config/uniswap-interface/urbit-files/desk.docket-0:/app/desk.docket-0

volumes:
urbit_app_builds:
@@ -1,78 +0,0 @@
version: '3.2'

services:
ajna-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=ajna-watcher,ajna-watcher-job-queue
- POSTGRES_EXTENSION=ajna-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- ajna_watcher_db_data:/var/lib/postgresql/data
ports:
- "5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s

ajna-watcher-job-runner:
restart: unless-stopped
depends_on:
ajna-watcher-db:
condition: service_healthy
image: cerc/watcher-ajna:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-job-runner.sh"]
volumes:
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-ajna/start-job-runner.sh:/app/start-job-runner.sh
ports:
- "9000"
healthcheck:
test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"

ajna-watcher-server:
restart: unless-stopped
depends_on:
ajna-watcher-db:
condition: service_healthy
ajna-watcher-job-runner:
condition: service_healthy
image: cerc/watcher-ajna:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
command: ["bash", "./start-server.sh"]
volumes:
- ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-ajna/start-server.sh:/app/start-server.sh
- ajna_watcher_gql_logs_data:/app/gql-logs
ports:
- "3008"
- "9001"
healthcheck:
test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"

volumes:
ajna_watcher_db_data:
ajna_watcher_gql_logs_data:
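The removed ajna watcher illustrates the layout used by most watchers in this stack: a Postgres service, a job-runner gated on the database, and a GraphQL server gated on both. A stripped-down sketch of that dependency chain, with placeholder images and commands rather than the watcher's own (the real stack also gates the server on the job-runner's healthcheck):

services:
  watcher-db:
    image: postgres:14-alpine
    environment:
      - POSTGRES_USER=vdbm
      - POSTGRES_PASSWORD=password
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "vdbm"]
      interval: 20s
      timeout: 5s
      retries: 15

  watcher-job-runner:
    image: busybox                      # placeholder for the indexing process
    command: ["sh", "-c", "echo indexing && sleep infinity"]
    depends_on:
      watcher-db:
        condition: service_healthy

  watcher-server:
    image: busybox                      # placeholder for the GraphQL server
    command: ["sh", "-c", "echo serving && sleep infinity"]
    depends_on:
      watcher-db:
        condition: service_healthy
      watcher-job-runner:
        condition: service_started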
@@ -10,7 +10,6 @@ services:
- POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-sending-watcher,delegated-sending-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue
- POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-sending-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto,
- POSTGRES_PASSWORD=password
-    command: ["postgres", "-c", "max_connections=200"]
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- watcher_db_data:/var/lib/postgresql/data
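The watcher databases rely on the postgres image's docker-entrypoint-initdb.d hook: the mounted multiple-postgressql-databases.sh script is expected to read POSTGRES_MULTIPLE_DATABASES and POSTGRES_EXTENSION and create one database (and extension) per entry. A rough sketch of the pattern with a smaller database list; the script's exact behaviour is assumed here, not taken from this repo:

services:
  watcher-db:
    image: postgres:14-alpine
    environment:
      - POSTGRES_USER=vdbm
      - POSTGRES_PASSWORD=password
      # Assumed to be parsed by the init script below: comma-separated database names
      - POSTGRES_MULTIPLE_DATABASES=example-watcher,example-watcher-job-queue
      # Assumed format <database>:<extension>, e.g. pgcrypto for the job queue database
      - POSTGRES_EXTENSION=example-watcher-job-queue:pgcrypto
    volumes:
      # Scripts in this directory run once, when the data directory is first initialised
      - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
      - watcher_db_data:/var/lib/postgresql/data
    ports:
      - "5432"

volumes:
  watcher_db_data: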
@@ -23,38 +22,6 @@ services:
retries: 15
start_period: 10s

-    # Starts the azimuth-watcher job runner
-    azimuth-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-    watcher-db:
-    condition: service_healthy
-    environment:
-    CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-    CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
-    CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
-    CERC_HISTORICAL_BLOCK_RANGE: 500
-    CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB
-    CONTRACT_NAME: Azimuth
-    STARTING_BLOCK: 6784880
-    working_dir: /app/packages/azimuth-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-    - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
-    - ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
-    - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/azimuth-watcher/start-job-runner.sh
-    ports:
-    - "9000"
-    healthcheck:
-    test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
-    interval: 20s
-    timeout: 5s
-    retries: 15
-    start_period: 5s
-    extra_hosts:
-    - "host.docker.internal:host-gateway"
-
# Starts the azimuth-watcher server
azimuth-watcher-server:
image: cerc/watcher-azimuth:local
@@ -62,55 +29,22 @@ services:
depends_on:
watcher-db:
condition: service_healthy
-    azimuth-watcher-job-runner:
-    condition: service_healthy
+    env_file:
+    - ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-    CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
-    CERC_IPLD_ETH_GQL_ENDPOINT: ${CERC_IPLD_ETH_GQL_ENDPOINT}
+    CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
+    CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/azimuth-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/azimuth-watcher/start-server.sh
-    - azimuth_watcher_gql_logs_data:/app/packages/azimuth-watcher/gql-logs
ports:
- "3001"
-    - "9001"
healthcheck:
-    test: ["CMD", "nc", "-vz", "127.0.0.1", "3001"]
+    test: ["CMD", "nc", "-vz", "localhost", "3001"]
interval: 20s
timeout: 5s
retries: 15

The hunks that follow (@@ -125,55 +59,22 @@ through @@ -503,24 +239,22 @@) apply the same set of changes to each of the remaining watcher server services (censures, claims, conditional-star-release, delegated-sending, ecliptic, linear-star-release and polls, ports 3002-3008): the depends_on entry for the matching job-runner is replaced by env_file ../config/watcher-azimuth/watcher-params.env, CERC_ETH_RPC_ENDPOINTS and CERC_IPLD_ETH_GQL_ENDPOINT are replaced by CERC_IPLD_ETH_RPC and CERC_IPLD_ETH_GQL, the <name>_watcher_gql_logs_data volume mount and the second exposed port are dropped, and the healthcheck target changes from 127.0.0.1 to localhost. The per-contract job-runner services are also removed entirely; each followed the azimuth-watcher-job-runner layout above with these values:

censures-watcher-job-runner: Censures at 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189, STARTING_BLOCK 6784954, port 9002
claims-watcher-job-runner: Claims at 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A, STARTING_BLOCK 6784941, port 9004
conditional-star-release-watcher-job-runner: ConditionalStarRelease at 0x8C241098C3D3498Fe1261421633FD57986D74AeA, STARTING_BLOCK 6828004, port 9006
delegated-sending-watcher-job-runner: DelegatedSending at 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76, STARTING_BLOCK 6784956, port 9008
ecliptic-watcher-job-runner: Ecliptic at 0x33EeCbf908478C10614626A9D304bfe18B78DD73, STARTING_BLOCK 13692129, port 9010
linear-star-release-watcher-job-runner: LinearStarRelease at 0x86cd9cd0992F04231751E3761De45cEceA5d1801, STARTING_BLOCK 6784943, port 9012
polls-watcher-job-runner: Polls at 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4, STARTING_BLOCK 6784912, port 9014

@@ -558,7 +292,7 @@ services:
ports:
- "0.0.0.0:4000:4000"
healthcheck:
-    test: ["CMD", "nc", "-vz", "127.0.0.1", "4000"]
+    test: ["CMD", "nc", "-vz", "localhost", "4000"]
interval: 20s
timeout: 5s
retries: 15
@@ -568,11 +302,3 @@ services:

volumes:
watcher_db_data:
-    azimuth_watcher_gql_logs_data:
-    censures_watcher_gql_logs_data:
-    claims_watcher_gql_logs_data:
-    conditional_star_release_watcher_gql_logs_data:
-    delegated_sending_watcher_gql_logs_data:
-    ecliptic_watcher_gql_logs_data:
-    linear_star_release_watcher_gql_logs_data:
-    polls_watcher_gql_logs_data:
@@ -29,15 +29,15 @@ services:
image: cerc/watcher-merkl-sushiswap-v3:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-    CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+    CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
command: ["bash", "./start-job-runner.sh"]
volumes:
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-merkl-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh
ports:
-    - "9002:9000"
+    - "9000"
healthcheck:
-    test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
+    test: ["CMD", "nc", "-v", "localhost", "9000"]
interval: 20s
timeout: 5s
retries: 15
@@ -55,17 +55,17 @@ services:
image: cerc/watcher-merkl-sushiswap-v3:local
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-    CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
+    CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+    SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560}
command: ["bash", "./start-server.sh"]
volumes:
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh
-    - merkl_sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
ports:
- "127.0.0.1:3007:3008"
-    - "9003:9001"
+    - "9001"
healthcheck:
-    test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
+    test: ["CMD", "nc", "-v", "localhost", "3008"]
interval: 20s
timeout: 5s
retries: 15
@@ -75,4 +75,3 @@ services:

volumes:
merkl_sushiswap_v3_watcher_db_data:
-    merkl_sushiswap_v3_watcher_gql_logs_data:
@ -29,15 +29,15 @@ services:
|
|||||||
image: cerc/watcher-sushiswap-v3:local
|
image: cerc/watcher-sushiswap-v3:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
command: ["bash", "./start-job-runner.sh"]
|
command: ["bash", "./start-job-runner.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh
|
- ../config/watcher-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh
|
||||||
ports:
|
ports:
|
||||||
- "9000:9000"
|
- "9000"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "9000"]
|
test: ["CMD", "nc", "-v", "localhost", "9000"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -55,17 +55,17 @@ services:
|
|||||||
image: cerc/watcher-sushiswap-v3:local
|
image: cerc/watcher-sushiswap-v3:local
|
||||||
environment:
|
environment:
|
||||||
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
|
||||||
CERC_ETH_RPC_ENDPOINTS: ${CERC_ETH_RPC_ENDPOINTS}
|
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
|
||||||
|
SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560}
|
||||||
command: ["bash", "./start-server.sh"]
|
command: ["bash", "./start-server.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
|
||||||
- ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh
|
- ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh
|
||||||
- sushiswap_v3_watcher_gql_logs_data:/app/gql-logs
|
|
||||||
ports:
|
ports:
|
||||||
- "127.0.0.1:3008:3008"
|
- "127.0.0.1:3008:3008"
|
||||||
- "9001:9001"
|
- "9001"
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: ["CMD", "nc", "-vz", "127.0.0.1", "3008"]
|
test: ["CMD", "nc", "-v", "localhost", "3008"]
|
||||||
interval: 20s
|
interval: 20s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 15
|
retries: 15
|
||||||
@ -75,4 +75,3 @@ services:
|
|||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
sushiswap_v3_watcher_db_data:
|
sushiswap_v3_watcher_db_data:
|
||||||
sushiswap_v3_watcher_gql_logs_data:
|
|
||||||
@@ -1,8 +0,0 @@
services:
  webapp:
    image: cerc/webapp-container:local
    restart: always
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    ports:
      - "80"
@@ -1,2 +0,0 @@
GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.s2.testblast.io
OP_NODE_P2P_BOOTNODES=enr:-J-4QM3GLUFfKMSJQuP1UvuKQe8DyovE7Eaiit0l6By4zjTodkR4V8NWXJxNmlg8t8rP-Q-wp3jVmeAOml8cjMj__ROGAYznzb_HgmlkgnY0gmlwhA-cZ_eHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAiuDqvB-AsVSRmnnWr6OHfjgY8YfNclFy9p02flKzXnOg3RjcIJ2YYN1ZHCCdmE,enr:-J-4QDCVpByqQ8nFqCS9aHicqwUfXgzFDslvpEyYz19lvkHLIdtcIGp2d4q5dxHdjRNTO6HXCsnIKxUeuZSPcEbyVQCGAYznzz0RgmlkgnY0gmlwhANiQfuHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAy3AtF2Jh_aPdOohg506Hjmtx-fQ1AKmu71C7PfkWAw9g3RjcIJ2YYN1ZHCCdmE
@@ -1,57 +0,0 @@
{
"config": {
"chainId": 608943043,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"muirGlacierBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"arrowGlacierBlock": 0,
"grayGlacierBlock": 0,
"mergeNetsplitBlock": 0,
"shanghaiTime": 0,
"bedrockBlock": 0,
"regolithTime": 0,
"canyonTime": 0,
"terminalTotalDifficulty": 0,
"terminalTotalDifficultyPassed": true,
"optimism": {
"eip1559Elasticity": 6,
"eip1559Denominator": 50,
"eip1559DenominatorCanyon": 250
}
},
"alloc": {
"0000000000000000000000000000000000000000": {
"balance": "0x1"
},
"4200000000000000000000000000000000000000": {
"code": "0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100ae5780638f283970146100db578063f851a440146100fb5761005d565b3661005d5761005b610110565b005b61005b610110565b34801561007157600080fd5b5061005b610080366004610521565b6101c8565b61009861009336600461053c565b61020e565b6040516100a591906105bf565b60405180910390f35b3480156100ba57600080fd5b506100c361033e565b6040516001600160a01b0390911681526020016100a5565b3480156100e757600080fd5b5061005b6100f6366004610521565b6103a9565b34801561010757600080fd5b506100c36103e4565b600061013a7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5490565b90506001600160a01b0381166101a55760405162461bcd60e51b815260206004820152602560248201527f50726f78793a20696d706c656d656e746174696f6e206e6f7420696e697469616044820152641b1a5e995960da1b60648201526084015b60405180910390fd5b3660008037600080366000845af43d6000803e806101c2573d6000fd5b503d6000f35b600080516020610625833981519152546001600160a01b0316336001600160a01b031614806101f5575033155b156102065761020381610432565b50565b610203610110565b60606102266000805160206106258339815191525490565b6001600160a01b0316336001600160a01b03161480610243575033155b1561032f5761025184610432565b600080856001600160a01b0316858560405161026e929190610614565b600060405180830381855af49150503d80600081146102a9576040519150601f19603f3d011682016040523d82523d6000602084013e6102ae565b606091505b5091509150816103265760405162461bcd60e51b815260206004820152603960248201527f50726f78793a2064656c656761746563616c6c20746f206e657720696d706c6560448201527f6d656e746174696f6e20636f6e7472616374206661696c656400000000000000606482015260840161019c565b91506103379050565b610337610110565b9392505050565b60006103566000805160206106258339815191525490565b6001600160a01b0316336001600160a01b03161480610373575033155b1561039e57507f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5490565b6103a6610110565b90565b600080516020610625833981519152546001600160a01b0316336001600160a01b031614806103d6575033155b15610206576102038161048e565b60006103fc6000805160206106258339815191525490565b6001600160a01b0316336001600160a01b03161480610419575033155b1561039e57506000805160206106258339815191525490565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc8181556040516001600160a01b038316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a25050565b60006104a66000805160206106258339815191525490565b600080516020610625833981519152838155604080516001600160a01b0380851682528616602082015292935090917f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f910160405180910390a1505050565b80356001600160a01b038116811461051c57600080fd5b919050565b60006020828403121561053357600080fd5b61033782610505565b60008060006040848603121561055157600080fd5b61055a84610505565b9250602084013567ffffffffffffffff8082111561057757600080fd5b818601915086601f83011261058b57600080fd5b81358181111561059a57600080fd5b8760208285010111156105ac57600080fd5b6020830194508093505050509250925092565b600060208083528351808285015260005b818110156105ec578581018301518582016040015282016105d0565b818111156105fe576000604083870101525b50601f01601f1916929092016040019392505050565b818382376000910190815291905056feb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103a164736f6c634300080f000a",
"storage": {
"0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x000000000000000000000000c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30000",
"0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000004200000000000000000000000000000000000018"
},
"balance": "0x0",
"flags": 1
}
},
"nonce": "0x0",
"timestamp": "0x659b7460",
"extraData": "0x424544524f434b",
"gasLimit": "0x1c9c380",
"difficulty": "0x0",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x4200000000000000000000000000000000000011",
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"baseFeePerGas": "0x3b9aca00",
"excessBlobGas": null,
"blobGasUsed": null
}
@@ -1,31 +0,0 @@
{
"genesis": {
"l1": {
"hash": "0x17728cf4d8e0b4f292d2390a869fd7c632d39e72efb00ca3462b4387c6aa2437",
"number": 5044255
},
"l2": {
"hash": "0x26a1c0faad7b041f34569a1bb383f00ab74b335883a44bed53e9f41ced5fd906",
"number": 0
},
"l2_time": 1704686688,
"system_config": {
"batcherAddr": "0xba26fee2fa917443e05e65de8d4350bcd2f59222",
"overhead": "0x00000000000000000000000000000000000000000000000000000000000000bc",
"scalar": "0x00000000000000000000000000000000000000000000000000000000000a6fe0",
"gasLimit": 30000000
}
},
"block_time": 2,
"max_sequencer_drift": 600,
"seq_window_size": 3600,
"channel_timeout": 300,
"l1_chain_id": 11155111,
"l2_chain_id": 608943043,
"regolith_time": 0,
"canyon_time": 0,
"batch_inbox_address": "0x1c3b85a2108784eab6a4bf56cdd6f722e415b331",
"deposit_contract_address": "0x2757e4430e694f27b73ec9c02257cab3a498c8c5",
"l1_system_config_address": "0x329faf078c364a316e08bf6a17b7eee6ae75a613",
"protocol_versions_address": "0x0000000000000000000000000000000000000000"
}
@@ -23,6 +23,3 @@ CERC_STATEDIFF_WORKERS=2

CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"
CERC_GETH_VERBOSITY=${CERC_GETH_VERBOSITY:-3}
-
- # Used by Lighthouse
- SECONDS_PER_ETH1_BLOCK=${SECONDS_PER_ETH1_BLOCK:-3}
@@ -8,109 +8,111 @@ KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
- KEYALGO="secp256k1"
+ KEYALGO="eth_secp256k1"
- LOGLEVEL="${LOGLEVEL:-info}"
+ LOGLEVEL="info"
- DENOM="alnt"
+ # trace evm
+ TRACE="--trace"
+ # TRACE=""

+ # validate dependencies are installed
+ command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }

- if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
- # validate dependencies are installed
- command -v jq > /dev/null 2>&1 || {
- echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"
- exit 1
- }
+ # remove existing daemon and client
+ rm -rf ~/.laconic*

- # remove existing daemon and client
- rm -rf $HOME/.laconicd/*
+ make install

- if [ -n "`which make`" ]; then
- make install
- fi
+ laconicd config keyring-backend $KEYRING
+ laconicd config chain-id $CHAINID

- laconicd config set client chain-id $CHAINID
- laconicd config set client keyring-backend $KEYRING
+ # if $KEY exists it should be deleted
+ laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO

- # if $KEY exists it should be deleted
- laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
+ # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
+ laconicd init $MONIKER --chain-id $CHAINID

- # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
- laconicd init $MONIKER --chain-id $CHAINID --default-denom $DENOM
+ # Change parameter token denominations to aphoton
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ # Custom modules
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json

- update_genesis() {
- jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&
- mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
- }
+ if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then

- if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."

- update_genesis '.app_state["registry"]["params"]["record_rent_duration"]="60s"'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
- update_genesis '.app_state["registry"]["params"]["authority_grace_period"]="60s"'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
- update_genesis '.app_state["registry"]["params"]["authority_rent_duration"]="60s"'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi

if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."

- update_genesis '.app_state["registry"]["params"]["authority_auction_enabled"]=true'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
- update_genesis '.app_state["registry"]["params"]["authority_rent_duration"]="60s"'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
- update_genesis '.app_state["registry"]["params"]["authority_grace_period"]="300s"'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
- update_genesis '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
- update_genesis '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"'
+ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi

- if [[ "$ONBOARDING_ENABLED" == "true" ]]; then
- echo "Enabling validator onboarding."
- update_genesis '.app_state["onboarding"]["params"]["onboarding_enabled"]=true'
- fi
+ # increase block time (?)
+ cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json

+ # Set gas limit in genesis
+ cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json

- # increase block time (?)
- update_genesis '.consensus["params"]["block"]["time_iota_ms"]="1000"'

- # Set gas limit in genesis
- update_genesis '.consensus["params"]["block"]["max_gas"]="10000000"'

- # disable produce empty block
- if [[ "$OSTYPE" == "darwin"* ]]; then
+ # disable produce empty block
+ if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi

- # Enable telemetry (prometheus metrics: http://localhost:1317/metrics?format=prometheus)
+ if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
- sed -i '' 's/enabled = false/enabled = true/g' $HOME/.laconicd/config/app.toml
- sed -i '' 's/prometheus-retention-time = 0/prometheus-retention-time = 60/g' $HOME/.laconicd/config/app.toml
- sed -i '' 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
+ sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
- sed -i 's/enabled = false/enabled = true/g' $HOME/.laconicd/config/app.toml
- sed -i 's/prometheus-retention-time = 0/prometheus-retention-time = 60/g' $HOME/.laconicd/config/app.toml
- sed -i 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
+ sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
+ fi

# Allocate genesis accounts (cosmos formatted addresses)
- # 10^30 alnt | 10^12 lnt
- laconicd genesis add-genesis-account $KEY 1000000000000000000000000000000$DENOM --keyring-backend $KEYRING
+ laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING

# Sign genesis transaction
- # 10^24 alnt | 10^6 lnt
- laconicd genesis gentx $KEY 1000000000000000000000000$DENOM --keyring-backend $KEYRING --chain-id $CHAINID
+ laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID

# Collect genesis tx
- laconicd genesis collect-gentxs
+ laconicd collect-gentxs

# Run this to ensure everything worked and that the genesis file is setup correctly
- laconicd genesis validate
+ laconicd validate-genesis
- else
- echo "Using existing database at $HOME/.laconicd. To replace, run '`basename $0` clean'"
+ if [[ $1 == "pending" ]]; then
+ echo "pending mode is on, please wait for the first block committed."
fi

# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
- laconicd start \
- --pruning=nothing \
- --log_level $LOGLEVEL \
- --minimum-gas-prices=1$DENOM \
- --api.enable \
- --rpc.laddr="tcp://0.0.0.0:26657" \
- --gql-server --gql-playground
+ laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground
@@ -1,9 +1,9 @@
services:
- registry:
+ cns:
- rpcEndpoint: 'http://laconicd:26657'
+ restEndpoint: 'http://laconicd:1317'
gqlEndpoint: 'http://laconicd:9473/api'
userKey: REPLACE_WITH_MYKEY
bondId:
chainId: laconic_9000-1
gas: 350000
- fees: 2000000alnt
+ fees: 200000aphoton
37
stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh
Executable file
@@ -0,0 +1,37 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi

CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

# Check existing config if it exists
if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
echo "Found existing L2 config, cross-checking with L1 deployment config"

SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')

GEN_L2_CONF=$(cat /app/rollup.json)
GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')

if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
echo "Config cross-checked, exiting"
exit 0
fi

echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting"
exit 1
fi

op-node genesis l2 \
--deploy-config /contracts-bedrock/deploy-config/getting-started.json \
--deployment-dir /contracts-bedrock/deployments/getting-started/ \
--outfile.l2 /app/genesis.json \
--outfile.rollup /app/rollup.json \
--l1-rpc $CERC_L1_RPC

openssl rand -hex 32 > /app/jwt.txt
@@ -1,172 +0,0 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi

CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"

export DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID"
# Optional create2 salt for deterministic deployment of contract implementations
export IMPL_SALT=$(openssl rand -hex 32)

echo "Using L1 RPC endpoint ${CERC_L1_RPC}"

# Exit if a deployment already exists (on restarts)
if [ -d "/l1-deployment/$DEPLOYMENT_CONTEXT" ]; then
echo "Deployment directory /l1-deployment/$DEPLOYMENT_CONTEXT, checking OptimismPortal deployment"

OPTIMISM_PORTAL_ADDRESS=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/OptimismPortal.json | jq -r .address)
contract_code=$(cast code $OPTIMISM_PORTAL_ADDRESS --rpc-url $CERC_L1_RPC)

if [ -z "${contract_code#0x}" ]; then
echo "Error: A deployment directory was found in the volume, but no contract code was found on-chain at the associated address. Please clear L1 deployment volume before restarting."
exit 1
else
echo "Deployment found, exiting (successfully)."
exit 0
fi
fi

wait_for_block() {
local block="$1" # Block to wait for
local timeout="$2" # Max time to wait in seconds

echo "Waiting for block $block."
i=0
loops=$(($timeout/10))
while [ -z "$block_result" ] && [[ "$i" -lt "$loops" ]]; do
sleep 10
echo "Checking..."
block_result=$(cast block $block --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true)
i=$(($i + 1))
done
}

# We need four accounts and their private keys for the deployment: Admin, Proposer, Batcher, and Sequencer
# If $CERC_L1_ADDRESS and $CERC_L1_PRIV_KEY have been set, we'll assign it to Admin and generate/fund the remaining three accounts from it
# If not, we'll assume the L1 is the stack's own fixturenet-eth and use the pre-funded accounts/keys from $CERC_L1_ACCOUNTS_CSV_URL
if [ -n "$CERC_L1_ADDRESS" ] && [ -n "$CERC_L1_PRIV_KEY" ]; then
wallet1=$(cast wallet new)
wallet2=$(cast wallet new)
wallet3=$(cast wallet new)
# Admin
ADMIN=$CERC_L1_ADDRESS
ADMIN_KEY=$CERC_L1_PRIV_KEY
# Proposer
PROPOSER=$(echo "$wallet1" | awk '/Address:/{print $2}')
PROPOSER_KEY=$(echo "$wallet1" | awk '/Private key:/{print $3}')
# Batcher
BATCHER=$(echo "$wallet2" | awk '/Address:/{print $2}')
BATCHER_KEY=$(echo "$wallet2" | awk '/Private key:/{print $3}')
# Sequencer
SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}')
SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}')

echo "Funding accounts."
wait_for_block 1 300
cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY
cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 10ether $BATCHER --private-key $ADMIN_KEY
cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 2ether $SEQ --private-key $ADMIN_KEY
else
curl -o accounts.csv $CERC_L1_ACCOUNTS_CSV_URL
# Admin
ADMIN=$(awk -F ',' 'NR == 1 {print $2}' accounts.csv)
ADMIN_KEY=$(awk -F ',' 'NR == 1 {print $3}' accounts.csv)
# Proposer
PROPOSER=$(awk -F ',' 'NR == 2 {print $2}' accounts.csv)
PROPOSER_KEY=$(awk -F ',' 'NR == 2 {print $3}' accounts.csv)
# Batcher
BATCHER=$(awk -F ',' 'NR == 3 {print $2}' accounts.csv)
BATCHER_KEY=$(awk -F ',' 'NR == 3 {print $3}' accounts.csv)
# Sequencer
SEQ=$(awk -F ',' 'NR == 4 {print $2}' accounts.csv)
SEQ_KEY=$(awk -F ',' 'NR == 4 {print $3}' accounts.csv)
fi

echo "Using accounts:"
echo -e "Admin: $ADMIN\nProposer: $PROPOSER\nBatcher: $BATCHER\nSequencer: $SEQ"

# These accounts will be needed by other containers, so write them to a shared volume
echo "Writing accounts/private keys to volume l2_accounts."
accounts_json=$(jq -n \
--arg Admin "$ADMIN" --arg AdminKey "$ADMIN_KEY" \
--arg Proposer "$PROPOSER" --arg ProposerKey "$PROPOSER_KEY" \
--arg Batcher "$BATCHER" --arg BatcherKey "$BATCHER_KEY" \
--arg Seq "$SEQ" --arg SeqKey "$SEQ_KEY" \
'{Admin: $Admin, AdminKey: $AdminKey, Proposer: $Proposer, ProposerKey: $ProposerKey, Batcher: $Batcher, BatcherKey: $BatcherKey, Seq: $Seq, SeqKey: $SeqKey}')
echo "$accounts_json" > "/l2-accounts/accounts.json"

# Get a finalized L1 block to set as the starting point for the L2 deployment
# If the chain is a freshly created fixturenet-eth, a finalized block won't be available for many minutes; rather than wait, we can use block 1
echo "Checking L1 for finalized block..."
finalized=$(cast block finalized --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true)

if [ -n "$finalized" ]; then
# finalized block was found
start_block=$finalized
else
# assume fresh chain and use block 1 instead
echo "No finalized block. Using block 1 instead."
# wait for 20 or so blocks to be safe
wait_for_block 24 300
start_block=$(cast block 1 --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true)
fi

if [ -z "$start_block" ]; then
echo "Unable to query chain for starting block. Exiting..."
exit 1
fi

BLOCKHASH=$(echo $start_block | awk -F ' ' '{print $2}')
HEIGHT=$(echo $start_block | awk -F ' ' '{print $4}')
TIMESTAMP=$(echo $start_block | awk -F ' ' '{print $6}')

echo "Using block as deployment point:"
echo "Height: $HEIGHT"
echo "Hash: $BLOCKHASH"
echo "Timestamp: $TIMESTAMP"

# Fill out the deployment template (./deploy-config/getting-started.json) with our values:
echo "Writing deployment config."
deploy_config_file="deploy-config/$DEPLOYMENT_CONTEXT.json"
cp deploy-config/getting-started.json $deploy_config_file
sed -i "s/\"l1ChainID\": .*/\"l1ChainID\": $DEPLOYMENT_CONTEXT,/g" $deploy_config_file
sed -i "s/ADMIN/$ADMIN/g" $deploy_config_file
sed -i "s/PROPOSER/$PROPOSER/g" $deploy_config_file
sed -i "s/BATCHER/$BATCHER/g" $deploy_config_file
sed -i "s/SEQUENCER/$SEQ/g" $deploy_config_file
sed -i "s/BLOCKHASH/$BLOCKHASH/g" $deploy_config_file
sed -i "s/TIMESTAMP/$TIMESTAMP/g" $deploy_config_file

mkdir -p deployments/$DEPLOYMENT_CONTEXT

# Deployment requires the create2 deterministic proxy contract be published on L1 at address 0x4e59b44847b379578588920ca78fbf26c0b4956c
# See: https://github.com/Arachnid/deterministic-deployment-proxy
echo "Deploying create2 proxy contract..."
echo "Funding deployment signer address"
deployment_signer="0x3fab184622dc19b6109349b94811493bf2a45362"
cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 0.5ether $deployment_signer --private-key $ADMIN_KEY
echo "Deploying contract..."
raw_bytes="0xf8a58085174876e800830186a08080b853604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf31ba02222222222222222222222222222222222222222222222222222222222222222a02222222222222222222222222222222222222222222222222222222222222222"

cast publish --rpc-url $CERC_L1_RPC $raw_bytes

# Create the L2 deployment
echo "Deploying L1 Optimism contracts..."
forge script scripts/Deploy.s.sol:Deploy --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC
forge script scripts/Deploy.s.sol:Deploy --sig 'sync()' --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC

echo "*************************************"
echo "Done deploying contracts."

# Copy files needed by other containers to the appropriate shared volumes
echo "Copying deployment artifacts volume l1_deployment and deploy-config to volume l2_config"
cp -a /app/packages/contracts-bedrock/deployments/$DEPLOYMENT_CONTEXT /l1-deployment
cp /app/packages/contracts-bedrock/deploy-config/$DEPLOYMENT_CONTEXT.json /l2-config
openssl rand -hex 32 > /l2-config/l2-jwt.txt

echo "Deployment successful. Exiting"
131
stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh
Executable file
@@ -0,0 +1,131 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi

CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"

echo "Using L1 RPC endpoint ${CERC_L1_RPC}"

IMPORT_1="import './verify-contract-deployment'"
IMPORT_2="import './rekey-json'"
IMPORT_3="import './send-balance'"

# Append mounted tasks to tasks/index.ts file if not present
if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
echo "$IMPORT_1" >> tasks/index.ts
echo "$IMPORT_2" >> tasks/index.ts
echo "$IMPORT_3" >> tasks/index.ts
fi

# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts

# Exit if a deployment already exists (on restarts)
# Note: fixturenet-eth-geth currently starts fresh on a restart
if [ -d "deployments/getting-started" ]; then
echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"

# Read JSON file into variable
SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)

# Parse JSON into variables
SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')

if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
echo "Deployment verfication successful, exiting"
exit 0
else
echo "Deployment verfication failed, please clear L1 deployment volume before starting"
exit 1
fi
fi

# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json

# Read JSON file into variable
KEYS_JSON=$(cat /l2-accounts/keys.json)

# Parse JSON into variables
ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address')
ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey')
PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address')
BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')

# Get the private keys of L1 accounts
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"

CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Couldn't fetch L1 account credentials, using them from env"
fi

# Send balances to the above L2 addresses
yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started

echo "Balances sent to L2 accounts"

# Select a finalized L1 block as the starting point for roll ups
until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do
echo "Waiting for a finalized L1 block to exist, retrying after 10s"
sleep 10
done

L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')

echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"

# Update the deployment config
sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json

node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"

echo "Updated the deployment config"

# Create a .env file
echo "L1_RPC=$CERC_L1_RPC" > .env
echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env

echo "Deploying the L1 smart contracts, this will take a while..."

# Deploy the L1 smart contracts
yarn hardhat deploy --network getting-started --tags l1

echo "Deployed the L1 smart contracts"

# Read Proxy contract's JSON and get the address
PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json)
PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address')

# Send balance to the above Proxy contract in L1 for reflecting balance in L2
# First account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
# Second account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started

echo "Balance sent to Proxy L2 contract"
echo "Use following accounts for transactions in L2:"
echo "${CERC_L1_ADDRESS}"
echo "${CERC_L1_ADDRESS_2}"
echo "Done"
@@ -0,0 +1,36 @@
const fs = require('fs')

// Get the command-line argument
const configFile = process.argv[2]
const adminAddress = process.argv[3]
const proposerAddress = process.argv[4]
const batcherAddress = process.argv[5]
const sequencerAddress = process.argv[6]
const blockHash = process.argv[7]

// Read the JSON file
const configData = fs.readFileSync(configFile)
const configObj = JSON.parse(configData)

// Update the finalSystemOwner property with the ADMIN_ADDRESS value
configObj.finalSystemOwner =
configObj.portalGuardian =
configObj.controller =
configObj.l2OutputOracleChallenger =
configObj.proxyAdminOwner =
configObj.baseFeeVaultRecipient =
configObj.l1FeeVaultRecipient =
configObj.sequencerFeeVaultRecipient =
configObj.governanceTokenOwner =
adminAddress

configObj.l2OutputOracleProposer = proposerAddress

configObj.batchSenderAddress = batcherAddress

configObj.p2pSequencerAddress = sequencerAddress

configObj.l1StartingBlockTag = blockHash

// Write the updated JSON object back to the file
fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2))
@@ -1,155 +0,0 @@
#!/bin/bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi

# To facilitate deploying the Optimism contracts, a few additional arguments have been added to the geth start command
# Otherwise this script is unchanged from the image's default startup script

ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2`
NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'`
NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'`
CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}"
CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}"

cd /opt/testnet/build/el
python3 -m http.server 9898 &
cd $HOME

START_CMD="geth"
if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --"
fi

# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script
cleanup() {
echo "Signal received, cleaning up..."

# Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process)
pkill -P ${geth_pid}
sleep 2
kill $(jobs -p)

wait
echo "Done"
}
trap 'cleanup' SIGINT SIGTERM

if [ "true" == "$RUN_BOOTNODE" ]; then
$START_CMD \
--datadir="${CERC_ETH_DATADIR}" \
--nodekeyhex="${BOOTNODE_KEY}" \
--nodiscover \
--ipcdisable \
--networkid=${NETWORK_ID} \
--netrestrict="${NETRESTRICT}" \
&

geth_pid=$!
else
cd /opt/testnet/accounts
./import_keys.sh

echo -n "$JWT" > /opt/testnet/build/el/jwtsecret

if [ "$CERC_RUN_STATEDIFF" == "detect" ] && [ -n "$CERC_STATEDIFF_DB_HOST" ]; then
dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short)
dig_status_code=$?
if [[ $dig_status_code = 0 && -n $dig_result ]]; then
echo "Statediff DB at $CERC_STATEDIFF_DB_HOST"
CERC_RUN_STATEDIFF="true"
else
echo "No statediff DB available."
CERC_RUN_STATEDIFF="false"
fi
fi

STATEDIFF_OPTS=""
if [ "$CERC_RUN_STATEDIFF" == "true" ]; then
ready=0
echo "Waiting for statediff DB..."
while [ $ready -eq 0 ]; do
sleep 1
export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD"
result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \
-p "$CERC_STATEDIFF_DB_PORT" \
-U "$CERC_STATEDIFF_DB_USER" \
-d "$CERC_STATEDIFF_DB_NAME" \
-t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }')
if [ -n "$result" ]; then
echo "DB ready..."
if [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then
ready=1
else
echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)"
fi
fi
done
STATEDIFF_OPTS="--statediff \
--statediff.db.host=$CERC_STATEDIFF_DB_HOST \
--statediff.db.name=$CERC_STATEDIFF_DB_NAME \
--statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \
--statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \
--statediff.db.port=$CERC_STATEDIFF_DB_PORT \
--statediff.db.user=$CERC_STATEDIFF_DB_USER \
--statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \
--statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \
--statediff.waitforsync=true \
--statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \
--statediff.writing=true"

if [ -d "${CERC_PLUGINS_DIR}" ]; then
# With plugeth, we separate the statediff options by prefixing with ' -- '
STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}"
fi
fi

# unlock account[0]
echo $ACCOUNT_PASSWORD > "$CERC_ETH_DATADIR/password"

$START_CMD \
--datadir="${CERC_ETH_DATADIR}" \
--bootnodes="${ENODE}" \
--allow-insecure-unlock \
--password="${CERC_ETH_DATADIR}/password" \
--unlock="$ETHERBASE" \
--rpc.allow-unprotected-txs \
--http \
--http.addr="0.0.0.0" \
--http.vhosts="*" \
--http.api="${CERC_GETH_HTTP_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
--http.corsdomain="*" \
--authrpc.addr="0.0.0.0" \
--authrpc.vhosts="*" \
--authrpc.jwtsecret="/opt/testnet/build/el/jwtsecret" \
--ws \
--ws.addr="0.0.0.0" \
--ws.origins="*" \
--ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
--http.corsdomain="*" \
--networkid="${NETWORK_ID}" \
--netrestrict="${NETRESTRICT}" \
--gcmode archive \
--txlookuplimit=0 \
--cache.preimages \
--syncmode=full \
--mine \
--miner.threads=1 \
--metrics \
--metrics.addr="0.0.0.0" \
--verbosity=${CERC_GETH_VERBOSITY:-3} \
--log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
--miner.etherbase="${ETHERBASE}" \
${STATEDIFF_OPTS} \
&

geth_pid=$!
fi

wait $geth_pid

if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then
while [ 1 -eq 1 ]; do
sleep 60
done
fi
@@ -6,14 +6,22 @@ fi

CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

- # Start op-batcher
- L2_RPC="http://op-geth:8545"
- ROLLUP_RPC="http://op-node:8547"
- BATCHER_KEY=$(cat /l2-accounts/accounts.json | jq -r .BatcherKey)
+ # Get Batcher key from keys.json
+ BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')

+ cleanup() {
+ echo "Signal received, cleaning up..."
+ kill ${batcher_pid}

+ wait
+ echo "Done"
+ }
+ trap 'cleanup' INT TERM

+ # Run op-batcher
op-batcher \
- --l2-eth-rpc=$L2_RPC \
+ --l2-eth-rpc=http://op-geth:8545 \
- --rollup-rpc=$ROLLUP_RPC \
+ --rollup-rpc=http://op-node:8547 \
--poll-interval=1s \
--sub-safety-margin=6 \
--num-confirmations=1 \
@@ -24,4 +32,8 @@ op-batcher \
--rpc.enable-admin \
--max-channel-duration=1 \
--l1-eth-rpc=$CERC_L1_RPC \
- --private-key="${BATCHER_KEY#0x}"
+ --private-key=$BATCHER_KEY \
+ &

+ batcher_pid=$!
+ wait $batcher_pid
@@ -4,36 +4,61 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi

- l2_genesis_file="/l2-config/genesis.json"
+ # TODO: Add in container build or use other tool
+ echo "Installing jq"
+ apk update && apk add jq

- # Check for genesis file; if necessary, wait on op-node to generate
- timeout=300 # 5 minutes
- start_time=$(date +%s)
- elapsed_time=0
- echo "Checking for L2 genesis file at location $l2_genesis_file"
- while [ ! -f "$l2_genesis_file" ] && [ $elapsed_time -lt $timeout ]; do
- echo "Waiting for L2 genesis file to be generated..."
- sleep 10
- current_time=$(date +%s)
- elapsed_time=$((current_time - start_time))
- done
+ # Get Sequencer key from keys.json
+ SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')

- if [ ! -f "$l2_genesis_file" ]; then
- echo "L2 genesis file not found after timeout of $timeout seconds. Exiting..."
+ # Initialize op-geth if datadir/geth not found
+ if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then
+ echo "Found existing datadir, checking block signer key"

+ BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key)

+ if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then
+ echo "Sequencer and block signer keys match, skipping initialization"
+ else
+ echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting"
exit 1
+ fi
+ else
+ echo "Initializing op-geth"

+ mkdir -p datadir
+ echo "pwd" > datadir/password
+ echo $SEQUENCER_KEY > datadir/block-signer-key

+ geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key

+ while [ ! -f "/op-node/jwt.txt" ]
+ do
+ echo "Config files not created. Checking after 5 seconds."
+ sleep 5
+ done

+ echo "Config files created by op-node, proceeding with the initialization..."

+ geth init --datadir=datadir /op-node/genesis.json
+ echo "Node Initialized"
fi

- # Initialize geth from our generated L2 genesis file (if not already initialized)
- data_dir="/datadir"
- if [ ! -d "$datadir/geth" ]; then
- geth init --datadir=$data_dir $l2_genesis_file
- fi
+ SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"')
+ echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}"

- # Start op-geth
- jwt_file="/l2-config/l2-jwt.txt"
+ cleanup() {
+ echo "Signal received, cleaning up..."
+ kill ${geth_pid}

+ wait
+ echo "Done"
+ }
+ trap 'cleanup' INT TERM

+ # Run op-geth
geth \
- --datadir=$data_dir \
+ --datadir ./datadir \
--http \
--http.corsdomain="*" \
--http.vhosts="*" \
@@ -52,5 +77,14 @@ geth \
--authrpc.vhosts="*" \
--authrpc.addr=0.0.0.0 \
--authrpc.port=8551 \
- --authrpc.jwtsecret=$jwt_file \
+ --authrpc.jwtsecret=/op-node/jwt.txt \
- --rollup.disabletxpoolgossip=true
+ --rollup.disabletxpoolgossip=true \
+ --password=./datadir/password \
+ --allow-insecure-unlock \
+ --mine \
+ --miner.etherbase=$SEQUENCER_ADDRESS \
+ --unlock=$SEQUENCER_ADDRESS \
+ &

+ geth_pid=$!
+ wait $geth_pid
@@ -4,42 +4,23 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
     set -x
 fi

-CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
-DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID"
-
-deploy_config_file="/l2-config/$DEPLOYMENT_CONTEXT.json"
-deployment_dir="/l1-deployment/$DEPLOYMENT_CONTEXT"
-genesis_outfile="/l2-config/genesis.json"
-rollup_outfile="/l2-config/rollup.json"
-
-# Generate L2 genesis (if not already done)
-if [ ! -f "$genesis_outfile" ] || [ ! -f "$rollup_outfile" ]; then
-    op-node genesis l2 \
-        --deploy-config $deploy_config_file \
-        --deployment-dir $deployment_dir \
-        --outfile.l2 $genesis_outfile \
-        --outfile.rollup $rollup_outfile \
-        --l1-rpc $CERC_L1_RPC
-fi
-
-# Start op-node
-SEQ_KEY=$(cat /l2-accounts/accounts.json | jq -r .SeqKey)
-jwt_file=/l2-config/l2-jwt.txt
-L2_AUTH="http://op-geth:8551"
-RPC_KIND=any # this can optionally be set to a preset for common node providers like Infura, Alchemy, etc.
+# Get Sequencer key from keys.json
+SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')

+# Run op-node
 op-node \
-    --l2=$L2_AUTH \
-    --l2.jwt-secret=$jwt_file \
+    --l2=http://op-geth:8551 \
+    --l2.jwt-secret=/op-node-data/jwt.txt \
     --sequencer.enabled \
-    --sequencer.l1-confs=5 \
-    --verifier.l1-confs=4 \
-    --rollup.config=$rollup_outfile \
+    --sequencer.l1-confs=3 \
+    --verifier.l1-confs=3 \
+    --rollup.config=/op-node-data/rollup.json \
     --rpc.addr=0.0.0.0 \
     --rpc.port=8547 \
     --p2p.disable \
     --rpc.enable-admin \
-    --p2p.sequencer.key="${SEQ_KEY#0x}" \
+    --p2p.sequencer.key=$SEQUENCER_KEY \
     --l1=$CERC_L1_RPC \
-    --l1.rpckind=$RPC_KIND
+    --l1.rpckind=any
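The removed line passes the sequencer key as `"${SEQ_KEY#0x}"` to strip a leading `0x` before handing it to op-node, while the added line passes `$SEQUENCER_KEY` through unchanged. For reference, a small sketch of that shell parameter expansion (values invented for illustration):

    # ${var#pattern} removes the shortest match of pattern from the front of var.
    SEQ_KEY="0xdeadbeef"
    echo "${SEQ_KEY#0x}"   # prints: deadbeef
    SEQ_KEY="deadbeef"
    echo "${SEQ_KEY#0x}"   # no leading 0x, value unchanged: deadbeef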
@@ -5,18 +5,32 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi

 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
-CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
-DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID"

-# Start op-proposer
-ROLLUP_RPC="http://op-node:8547"
-PROPOSER_KEY=$(cat /l2-accounts/accounts.json | jq -r .ProposerKey)
-L2OO_ADDR=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/L2OutputOracleProxy.json | jq -r .address)
+# Read the L2OutputOracle contract address from the deployment
+L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json)
+L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address')

+# Get Proposer key from keys.json
+PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"')
+
+cleanup() {
+    echo "Signal received, cleaning up..."
+    kill ${proposer_pid}
+    wait
+    echo "Done"
+}
+trap 'cleanup' INT TERM
+
+# Run op-proposer
 op-proposer \
-    --poll-interval=12s \
-    --rpc.port=8560 \
-    --rollup-rpc=$ROLLUP_RPC \
-    --l2oo-address="${L2OO_ADDR#0x}" \
-    --private-key="${PROPOSER_KEY#0x}" \
-    --l1-eth-rpc=$CERC_L1_RPC
+    --poll-interval 12s \
+    --rpc.port 8560 \
+    --rollup-rpc http://op-node:8547 \
+    --l2oo-address $L2OO_ADDR \
+    --private-key $PROPOSER_KEY \
+    --l1-eth-rpc $CERC_L1_RPC \
+    &
+
+proposer_pid=$!
+wait $proposer_pid
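Both the op-geth and op-proposer scripts end with the same background-and-trap pattern: start the daemon with `&`, record its PID, and forward INT/TERM so the container shuts down cleanly. A stripped-down sketch of that pattern, with `sleep` standing in for the real daemon:

    cleanup() {
        echo "Signal received, cleaning up..."
        kill ${child_pid}
        wait
    }
    trap 'cleanup' INT TERM

    sleep infinity &      # placeholder for the long-running daemon
    child_pid=$!
    wait $child_pid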
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@@ -1,32 +0,0 @@
-POSTGRES_DB=keycloak
-POSTGRES_USER=keycloak
-POSTGRES_PASSWORD=keycloak
-# Don't change this unless you also change the healthcheck in docker-compose-mainnet-eth-keycloak.yml
-PGPORT=35432
-KC_DB=postgres
-KC_DB_URL_HOST=keycloak-db
-KC_DB_URL_PORT=${PGPORT}
-KC_DB_URL_DATABASE=${POSTGRES_DB}
-KC_DB_USERNAME=${POSTGRES_USER}
-KC_DB_PASSWORD=${POSTGRES_PASSWORD}
-KC_DB_SCHEMA=public
-KC_HOSTNAME=localhost
-KC_HTTP_ENABLED="true"
-KC_HTTP_RELATIVE_PATH="/auth"
-KC_HOSTNAME_STRICT_HTTPS="false"
-KEYCLOAK_ADMIN=admin
-KEYCLOAK_ADMIN_PASSWORD=admin
-X_API_CHECK_REALM=cerc
-X_API_CHECK_CLIENT_ID="%user_id%"
-
-
-# keycloak-reg-api
-CERC_KCUSERREG_LISTEN_PORT=9292
-CERC_KCUSERREG_LISTEN_ADDR='0.0.0.0'
-CERC_KCUSERREG_API_URL='http://keycloak:8080/auth'
-CERC_KCUSERREG_REG_USER="${KEYCLOAK_ADMIN}"
-CERC_KCUSERREG_REG_PW="${KEYCLOAK_ADMIN_PASSWORD}"
-CERC_KCUSERREG_REG_CLIENT_ID='admin-cli'
-CERC_KCUSERREG_TARGET_REALM=cerc
-CERC_KCUSERREG_TARGET_GROUPS=eth
-CERC_KCUSERREG_CREATE_ENABLED=true
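The comment on `PGPORT` in the removed file refers to a compose healthcheck that must probe the same non-default port. A hedged sketch of the kind of check it means, assuming the database image provides `pg_isready`:

    # Hypothetical healthcheck command matching PGPORT=35432 above.
    pg_isready -h keycloak-db -p 35432 -U keycloak -d keycloak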
@@ -1,33 +0,0 @@
-# Enable startup script debug output.
-CERC_SCRIPT_DEBUG=false
-
-# Specify any other lighthouse CLI options.
-LIGHTHOUSE_OPTS=""
-
-# Override the advertised public IP (optional)
-# --enr-address
-#LIGHTHOUSE_ENR_ADDRESS=""
-
-# --checkpoint-sync-url
-LIGHTHOUSE_CHECKPOINT_SYNC_URL="https://beaconstate.ethstaker.cc"
-
-# --checkpoint-sync-url-timeout
-LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT=300
-
-# --datadir
-LIGHTHOUSE_DATADIR=/data
-
-# --debug-level
-LIGHTHOUSE_DEBUG_LEVEL=info
-
-# --http-port
-LIGHTHOUSE_HTTP_PORT=5052
-
-# --execution-jwt
-LIGHTHOUSE_JWTSECRET=/etc/mainnet-eth/jwtsecret
-
-# --metrics-port
-LIGHTHOUSE_METRICS_PORT=5054
-
-# --port --enr-udp-port --enr-tcp-port
-LIGHTHOUSE_NETWORK_PORT=9000
@@ -1,2 +0,0 @@
-GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.blast.io
-OP_NODE_P2P_BOOTNODES=enr:-J64QGwHl9uYLfC_cnmxSA6wQH811nkOWJDWjzxqkEUlJoZHWvI66u-BXgVcPCeMUmg0dBpFQAPotFchG67FHJMZ9OSGAY3d6wevgmlkgnY0gmlwhANizeSHb3BzdGFja4Sx_AQAiXNlY3AyNTZrMaECg4pk0cskPAyJ7pOmo9E6RqGBwV-Lex4VS9a3MQvu7PWDdGNwgnZhg3VkcIJ2YQ,enr:-J64QDge2jYBQtcNEpRqmKfci5E5BHAhNBjgv4WSdwH1_wPqbueq2bDj38-TSW8asjy5lJj1Xftui6Or8lnaYFCqCI-GAY3d6wf3gmlkgnY0gmlwhCO2D9yHb3BzdGFja4Sx_AQAiXNlY3AyNTZrMaEDo4aCTq7pCEN8om9U5n_VyWdambGnQhwHNwKc8o-OicaDdGNwgnZhg3VkcIJ2YQ
@@ -1,32 +0,0 @@
-{
-  "genesis": {
-    "l1": {
-      "hash": "0xfcfb8d586bdae763f1189988789211c69eb893a895e7ba48be3ca6289f0941b7",
-      "number": 19300102
-    },
-    "l2": {
-      "hash": "0xb689b35ef29d0bec5816938e0e52683c7257d2e325420ea69b739a2be4754b89",
-      "number": 0
-    },
-    "l2_time": 1708809815,
-    "system_config": {
-      "batcherAddr": "0x415c8893d514f9bc5211d36eeda4183226b84aa7",
-      "overhead": "0x00000000000000000000000000000000000000000000000000000000000000bc",
-      "scalar": "0x00000000000000000000000000000000000000000000000000000000000a6fe0",
-      "gasLimit": 30000000
-    }
-  },
-  "block_time": 2,
-  "max_sequencer_drift": 600,
-  "seq_window_size": 3600,
-  "channel_timeout": 300,
-  "l1_chain_id": 1,
-  "l2_chain_id": 81457,
-  "regolith_time": 0,
-  "canyon_time": 0,
-  "batch_inbox_address": "0xff00000000000000000000000000000000081457",
-  "deposit_contract_address": "0x0ec68c5b10f21effb74f2a5c61dfe6b08c0db6cb",
-  "l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
-  "protocol_versions_address": "0x0000000000000000000000000000000000000000"
-}
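This removed rollup config is the chain description an op-node consumes via `--rollup.config`. A quick way to sanity-check such a file before pointing a node at it, assuming `jq` is available and the file is saved locally as `rollup.json`:

    jq '{l1_chain_id, l2_chain_id, genesis_l1_block: .genesis.l1.number}' rollup.json
    # expected for the file above: l1_chain_id 1, l2_chain_id 81457 (Blast), genesis L1 block 19300102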
@@ -1,30 +0,0 @@
-#!/bin/bash
-if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then
-    set -x
-fi
-
-ENR_OPTS=""
-if [[ -n "$LIGHTHOUSE_ENR_ADDRESS" ]]; then
-    ENR_OPTS="--enr-address $LIGHTHOUSE_ENR_ADDRESS"
-fi
-
-exec lighthouse bn \
-  --checkpoint-sync-url "$LIGHTHOUSE_CHECKPOINT_SYNC_URL" \
-  --checkpoint-sync-url-timeout ${LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT} \
-  --datadir "$LIGHTHOUSE_DATADIR" \
-  --debug-level $LIGHTHOUSE_DEBUG_LEVEL \
-  --disable-deposit-contract-sync \
-  --disable-upnp \
-  --enr-tcp-port $LIGHTHOUSE_NETWORK_PORT \
-  --enr-udp-port $LIGHTHOUSE_NETWORK_PORT \
-  --execution-endpoint "$LIGHTHOUSE_EXECUTION_ENDPOINT" \
-  --execution-jwt /etc/mainnet-eth/jwtsecret \
-  --http \
-  --http-address 0.0.0.0 \
-  --http-port $LIGHTHOUSE_HTTP_PORT \
-  --metrics \
-  --metrics-address=0.0.0.0 \
-  --metrics-port $LIGHTHOUSE_METRICS_PORT \
-  --network mainnet \
-  --port $LIGHTHOUSE_NETWORK_PORT \
-  $ENR_OPTS $LIGHTHOUSE_OPTS
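Once a beacon node started this way is up, the standard beacon API it exposes on `LIGHTHOUSE_HTTP_PORT` (5052 in the removed env file) can be used to confirm it is syncing, for example (hostname assumed):

    curl -s http://localhost:5052/eth/v1/node/syncing | jq .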
@@ -1,9 +1,9 @@
 services:
-  registry:
-    rpcEndpoint: 'http://laconicd:26657'
+  cns:
+    restEndpoint: 'http://laconicd:1317'
     gqlEndpoint: 'http://laconicd:9473/api'
     userKey: REPLACE_WITH_MYKEY
     bondId:
     chainId: laconic_9000-1
     gas: 250000
-    fees: 2000000alnt
+    fees: 200000aphoton
@@ -1,15 +1,18 @@
 #!/bin/sh
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
     set -x
 fi

 #TODO: pass these in from the caller
+TRACE="--trace"
 LOGLEVEL="info"

 laconicd start \
     --pruning=nothing \
+    --evm.tracer=json $TRACE \
     --log_level $LOGLEVEL \
-    --minimum-gas-prices=1alnt \
+    --minimum-gas-prices=0.0001aphoton \
+    --json-rpc.api eth,txpool,personal,net,debug,web3,miner \
     --api.enable \
     --gql-server \
     --gql-playground
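With the added `--json-rpc.api` modules enabled, a basic liveness check against the node's Ethereum-compatible RPC looks like this (assuming the default Ethermint JSON-RPC port 8545 is exposed):

    curl -s -X POST -H 'Content-Type: application/json' \
      --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
      http://localhost:8545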
@@ -1,7 +0,0 @@
-modules:
-  http_2xx:
-    prober: http
-    timeout: 5s
-    http:
-      valid_status_codes: [] #default to 2xx
-      method: GET
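This removed module definition is what Prometheus's blackbox exporter selects when a scrape job sets `module=http_2xx`. A probe can also be exercised by hand against the exporter's `/probe` endpoint (default exporter port 9115 and target URL assumed):

    curl -s 'http://localhost:9115/probe?module=http_2xx&target=https://example.com'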
File diff suppressed because it is too large
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.