Compare commits

..

8 Commits

SHA1 Message Date
d4c4cc1ed0 Add comments.
Former-commit-id: 46f00e1d485661b6328b8ca6911a81455a346f6a
2023-01-30 11:24:56 -06:00
2e5eee0223 Add comments.
Former-commit-id: a3a60b76959056ba9cc40f0de3a8bd9a2ee2e17e
2023-01-30 11:23:45 -06:00
c0455e3e0b Simplify build.
Former-commit-id: 3a80606d6c20751fa12c911fc965380b49086710
2023-01-30 11:20:23 -06:00
cecc7a1274 EOL
Former-commit-id: 5f09e42422e5c458919925de751be93e028d05ba
2023-01-30 11:09:05 -06:00
97592b0fca Remove unneeded env
Former-commit-id: a3880de4cde7b5fdd43cd953a7c4b198cc445573
2023-01-30 11:08:27 -06:00
da3636bc17 Fix typo
Former-commit-id: 34413ea2607ead60df6905992b29808828d60448
2023-01-28 00:54:48 -06:00
22077dedc8 Remove generated files.
Former-commit-id: d0c506af881fee53f174f6d8380c1e2b36cdc3d6
2023-01-28 00:51:11 -06:00
ce43e3061c Add a pod for generating geth data as quickly as possible.
Former-commit-id: e8bdb205c7faa89c583b0652fac1801d0177a3c1
2023-01-28 00:49:46 -06:00
754 changed files with 2678 additions and 102976 deletions


@ -1,66 +0,0 @@
name: Fixturenet-Laconicd-Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/fixturenet-laconicd-test'
schedule:
- cron: '1 13 * * *'
jobs:
test:
name: "Run Laconicd fixturenet and Laconic CLI tests"
runs-on: ubuntu-latest
steps:
- name: 'Update'
run: apt-get update
- name: 'Setup jq'
run: apt-get install jq -y
- name: 'Check jq'
run: |
which jq
jq --version
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-laconicd tests"
run: ./tests/fixturenet-laconicd/run-test.sh
- name: "Run laconic CLI tests"
run: ./tests/fixturenet-laconicd/run-cli-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,37 +0,0 @@
name: Lint Checks
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run linter"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Run flake8"
uses: py-actions/flake8@v2
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,72 +0,0 @@
name: Publish
on:
push:
branches:
- main
- publish-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
publish:
name: "Build and publish"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Get build info"
id: build-info
run: |
build_tag=$(./scripts/create_build_tag_file.sh)
echo "build-tag=v${build_tag}" >> $GITHUB_OUTPUT
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Build local shiv package"
id: build
run: |
./scripts/build_shiv_package.sh
result_code=$?
echo "package-file=$(ls ./package/*)" >> $GITHUB_OUTPUT
exit $result_code
- name: "Stage artifact file"
run: |
cp ${{ steps.build.outputs.package-file }} ./laconic-so
- name: "Create release"
uses: https://gitea.com/cerc-io/action-gh-release@gitea-v2
with:
tag_name: ${{ steps.build-info.outputs.build-tag }}
# On the publish test branch, mark our release as a draft
# Hack using endsWith to work around Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
draft: ${{ endsWith('publish-test', github.ref ) }}
files: ./laconic-so
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,69 +0,0 @@
name: Container Registry Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-container-registry'
- '.gitea/workflows/test-container-registry.yml'
- 'tests/container-registry/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '6 19 * * *'
jobs:
test:
name: "Run contaier registry hosting test on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below workaroud this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Install ed" # Only needed until we remove the need to edit the spec file
run: apt update && apt install -y ed
- name: "Run container registry deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/container-registry/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,67 +0,0 @@
name: Database Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-database'
- '.gitea/workflows/test-database.yml'
- 'tests/database/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '5 18 * * *'
jobs:
test:
name: "Run database hosting test on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run database deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/database/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,58 +0,0 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches:
- main
- ci-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,58 +0,0 @@
name: External Stack Test
on:
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-external-stack'
- '.gitea/workflows/test-external-stack.yml'
- 'tests/external-stack/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '8 19 * * *'
jobs:
test:
name: "Run external stack test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run external stack tests"
run: ./tests/external-stack/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,69 +0,0 @@
name: K8s Deploy Test
on:
pull_request:
branches: '*'
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-k8s-deploy'
- '.gitea/workflows/test-k8s-deploy.yml'
- 'tests/k8s-deploy/run-deploy-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '3 15 * * *'
jobs:
test:
name: "Run deploy test suite on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run k8s deployment test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/k8s-deploy/run-deploy-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,69 +0,0 @@
name: K8s Deployment Control Test
on:
pull_request:
branches: '*'
push:
branches: '*'
paths:
- '!**'
- '.gitea/workflows/triggers/test-k8s-deployment-control'
- '.gitea/workflows/test-k8s-deployment-control.yml'
- 'tests/k8s-deployment-control/run-test.sh'
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
- cron: '30 3 * * *'
jobs:
test:
name: "Run deployment control suite on kind/k8s"
runs-on: ubuntu-22.04
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Check cgroups version"
run: mount | grep cgroup
- name: "Install kind"
run: ./tests/scripts/install-kind.sh
- name: "Install Kubectl"
run: ./tests/scripts/install-kubectl.sh
- name: "Run k8s deployment control test"
run: |
source /opt/bash-utils/cgroup-helper.sh
join_cgroup
./tests/k8s-deployment-control/run-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,59 +0,0 @@
name: Webapp Test
on:
pull_request:
branches: '*'
push:
branches:
- main
- ci-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
test:
name: "Run webapp test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Install wget" # 20240109 - Only needed until the executors are updated.
run: apt update && apt install -y wget
- name: "Run webapp tests"
run: ./tests/webapp-test/run-webapp-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,58 +0,0 @@
name: Smoke Test
on:
pull_request:
branches: '*'
push:
branches:
- main
- ci-test
paths-ignore:
- '.gitea/workflows/triggers/*'
jobs:
test:
name: "Run basic test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
# At present the stock setup-python action fails on Linux/aarch64
# Conditional steps below work around this by using deadsnakes for that case only
- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: '3.8'
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run smoke tests"
run: ./tests/smoke-test/run-smoke-test.sh
- name: Notify Vulcanize Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
- name: Notify DeepStack Slack on CI failure
if: ${{ always() && github.ref_name == 'main' }}
uses: ravsamhq/notify-slack-action@v2
with:
status: ${{ job.status }}
notify_when: 'failure'
env:
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}


@ -1,10 +0,0 @@
Change this file to trigger running the fixturenet-laconicd-test CI job
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger
Trigger


@ -1 +0,0 @@
Change this file to trigger running the test-container-registry CI job


@ -1,2 +0,0 @@
Change this file to trigger running the test-database CI job
Trigger test run


@ -1,2 +0,0 @@
Change this file to trigger running the external-stack CI job
trigger


@ -1,2 +0,0 @@
Change this file to trigger running the test-k8s-deploy CI job
Trigger test on PR branch


@ -1,30 +0,0 @@
name: Fixturenet-Eth Test
on:
push:
branches: '*'
paths:
- '!**'
- '.github/workflows/triggers/fixturenet-eth-test'
jobs:
test:
name: "Run fixturenet-eth test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-eth tests"
run: ./tests/fixturenet-eth/run-test.sh


@ -1,30 +0,0 @@
name: Fixturenet-Laconicd Test
on:
push:
branches: '*'
paths:
- '!**'
- '.github/workflows/triggers/fixturenet-laconicd-test'
jobs:
test:
name: "Run fixturenet-laconicd test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-laconicd tests"
run: ./tests/fixturenet-laconicd/run-test.sh


@ -1,21 +0,0 @@
name: Lint Checks
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run linter"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Run flake8"
uses: py-actions/flake8@v2


@ -1,46 +0,0 @@
name: Publish
on:
push:
branches:
- main
- publish-test
jobs:
publish:
name: "Build and publish"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Get build info"
id: build-info
run: |
build_tag=$(./scripts/create_build_tag_file.sh)
echo "build-tag=v${build_tag}" >> $GITHUB_OUTPUT
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Build local shiv package"
id: build
run: |
./scripts/build_shiv_package.sh
result_code=$?
echo "package-file=$(ls ./package/*)" >> $GITHUB_OUTPUT
exit $result_code
- name: "Stage artifact file"
run: |
cp ${{ steps.build.outputs.package-file }} ./laconic-so
- name: "Create release"
uses: softprops/action-gh-release@v1
with:
tag_name: ${{ steps.build-info.outputs.build-tag }}
# On the publish test branch, mark our release as a draft
# Hack using endsWith to work around Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
draft: ${{ endsWith('publish-test', github.ref ) }}
files: ./laconic-so


@ -1,29 +0,0 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh


@ -1,29 +0,0 @@
name: Webapp Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run webapp test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run webapp tests"
run: ./tests/webapp-test/run-webapp-test.sh


@ -1,29 +0,0 @@
name: Smoke Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run basic test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run smoke tests"
run: ./tests/smoke-test/run-smoke-test.sh


@ -1,2 +0,0 @@
Change this file to trigger running the fixturenet-eth-test CI job


@ -1,3 +0,0 @@
Change this file to trigger running the fixturenet-laconicd-test CI job
trigger

.gitignore

@ -5,6 +5,4 @@ laconic-so
laconic_stack_orchestrator.egg-info
__pycache__
*~
-package
-stack_orchestrator/data/build_tag.txt
-/build


@ -6,19 +6,13 @@ Stack Orchestrator allows building and deployment of a Laconic Stack on a single
## Install
-**To get started quickly** on a fresh Ubuntu instance (e.g, Digital Ocean); [try this script](./scripts/quick-install-linux.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
-For any other installation, follow along below and **adapt these instructions based on the specifics of your system.**
Ensure that the following are already installed:
-- [Python3](https://wiki.python.org/moin/BeginnersGuide/Download): `python3 --version` >= `3.8.10` (the Python3 shipped in Ubuntu 20+ is good to go)
+- [Python3](https://wiki.python.org/moin/BeginnersGuide/Download): `python3 --version` >= `3.10.8`
- [Docker](https://docs.docker.com/get-docker/): `docker --version` >= `20.10.21`
-- [jq](https://stedolan.github.io/jq/download/): `jq --version` >= `1.5`
-- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git): `git --version` >= `2.10.3`
+- [Docker Compose](https://docs.docker.com/compose/install/): `docker-compose --version` >= `2.13.0`
-Note: if installing docker-compose via package manager on Linux (as opposed to Docker Desktop), you must [install the plugin](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually), e.g.:
+Note: if installing docker-compose via package manager (as opposed to Docker Desktop), you must [install the plugin](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually), e.g., on Linux:
```bash
mkdir -p ~/.docker/cli-plugins
@ -26,50 +20,80 @@ curl -SL https://github.com/docker/compose/releases/download/v2.11.2/docker-comp
chmod +x ~/.docker/cli-plugins/docker-compose
```
-Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
-a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
-Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
+Next, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags), into a suitable directory (e.g. `~/bin`):
```bash
-curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
```
-Give it execute permissions:
+Give it permissions:
```bash
chmod +x ~/bin/laconic-so
```
Ensure `laconic-so` is on the [`PATH`](https://unix.stackexchange.com/a/26059)
-Verify operation (your version will probably be different, just check here that you see some version output and not an error):
-```
-laconic-so version
-Version: 1.1.0-7a607c2-202304260513
-```
-Save the distribution url to `~/.laconic-so/config.yml`:
-```bash
-mkdir ~/.laconic-so
-echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml
-```
-### Update Laconic Stack Orchestrator
-If Stack Orchestrator was installed using the process described above, it is able to subsequently self-update to the current latest version by running:
-```bash
-laconic-so update
-```
+Verify operation:
+```
+laconic-so --help
+Usage: python -m laconic-so [OPTIONS] COMMAND [ARGS]...
+Options:
+  --quiet
+  --verbose
+  --dry-run
+  --local-stack
+  -h, --help  Show this message and exit.
+Commands:
+  build-containers    build the set of containers required for a complete...
+  build-npms          build the set of npm packages required for a...
+  deploy-system       deploy a stack
+  setup-repositories  git clone the set of repositories required to build...
+```
## Usage
-The various [stacks](/stack_orchestrator/data/stacks) each contain instructions for running different stacks based on your use case. For example:
-- [self-hosted Gitea](/stack_orchestrator/data/stacks/build-support)
-- [an Optimism Fixturenet](/stack_orchestrator/data/stacks/fixturenet-optimism)
-- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded)
-- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo)
+Three sub-commands: `setup-repositories`, `build-containers` and `deploy-system` are generally run in order. The following is a slim example for standing up the `erc20-watcher`. Go further with the [erc20 watcher demo](/app/data/stacks/erc20) and other pieces of the stack, within the [`stacks` directory](/app/data/stacks).
+### Setup Repositories
+Clone the set of git repositories necessary to build a system:
+```bash
+laconic-so --verbose setup-repositories --include cerc-io/go-ethereum,cerc-io/ipld-eth-db,cerc-io/ipld-eth-server,cerc-io/watcher-ts
+```
+This will default to `~/cerc` or, if set, the environment variable `CERC_REPO_BASE_DIR`.
+### Build Containers
+Build the set of docker container images required to run a system. It takes around 10 minutes to build all the containers from scratch.
+```bash
+laconic-so --verbose build-containers --include cerc/go-ethereum,cerc/go-ethereum-foundry,cerc/ipld-eth-db,cerc/ipld-eth-server,cerc/watcher-erc20
+```
+### Deploy System
+Uses `docker-compose` to deploy a system (with most recently built container images).
+```bash
+laconic-so --verbose deploy-system --include ipld-eth-db,go-ethereum-foundry,ipld-eth-server,watcher-erc20 up
+```
+Check out the GraphQL playground here: [http://localhost:3002/graphql](http://localhost:3002/graphql)
+See the [erc20 watcher demo](/app/data/stacks/erc20) to continue further.
+### Cleanup
+```bash
+laconic-so --verbose deploy-system --include ipld-eth-db,go-ethereum-foundry,ipld-eth-server,watcher-erc20 down
+```
## Contributing
@ -79,4 +103,3 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
Native aarch64 is _not_ currently supported. x64 emulation on ARM64 macOS should work (not yet tested).
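A note on the clone location mentioned in the new README text above: `setup-repositories` clones into `~/cerc` unless `CERC_REPO_BASE_DIR` is set. A minimal sketch of overriding it (the directory name here is just an example):

```bash
# Hypothetical example: clone repositories somewhere other than ~/cerc
mkdir -p ~/src/cerc
export CERC_REPO_BASE_DIR=~/src/cerc
laconic-so --verbose setup-repositories --include cerc-io/go-ethereum
```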

app/build_containers.py

@ -0,0 +1,128 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
# Builds or pulls containers for the system components
# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc
# TODO: display the available list of containers; allow re-build of either all or specific containers
import os
import sys
from decouple import config
import subprocess
import click
import importlib.resources
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config
# TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
@click.command()
@click.option('--include', help="only build these containers")
@click.option('--exclude', help="don\'t build these containers")
@click.pass_context
def command(ctx, include, exclude):
'''build the set of containers required for a complete stack'''
quiet = ctx.obj.quiet
verbose = ctx.obj.verbose
dry_run = ctx.obj.dry_run
local_stack = ctx.obj.local_stack
stack = ctx.obj.stack
continue_on_error = ctx.obj.continue_on_error
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
container_build_dir = Path(__file__).absolute().parent.joinpath("data", "container-build")
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
if not quiet:
print(f'Dev Root is: {dev_root_path}')
if not os.path.isdir(dev_root_path):
print('Dev root directory doesn\'t exist, creating')
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
all_containers = container_list_file.read().splitlines()
containers_in_scope = []
if stack:
stack_config = get_parsed_stack_config(stack)
containers_in_scope = stack_config['containers']
else:
containers_in_scope = all_containers
if verbose:
print(f'Containers: {containers_in_scope}')
if stack:
print(f"Stack: {stack}")
# TODO: make this configurable
container_build_env = {
"CERC_NPM_URL": "http://gitea.local:3000/api/packages/cerc-io/npm/",
"CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default="<token-not-supplied>"),
"CERC_REPO_BASE_DIR": dev_root_path
}
def process_container(container):
if not quiet:
print(f"Building: {container}")
build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
build_script_filename = os.path.join(build_dir, "build.sh")
if verbose:
print(f"Build script filename: {build_script_filename}")
if os.path.exists(build_script_filename):
build_command = build_script_filename
else:
if verbose:
print(f"No script file found: {build_script_filename}, using default build script")
repo_dir = container.split('/')[1]
# TODO: make this less of a hack -- should be specified in some metadata somewhere
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
repo_full_path = os.path.join(dev_root_path, repo_dir)
repo_dir_or_build_dir = repo_dir if os.path.exists(repo_full_path) else build_dir
build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container} {repo_dir_or_build_dir}"
if not dry_run:
if verbose:
print(f"Executing: {build_command}")
build_result = subprocess.run(build_command, shell=True, env=container_build_env)
if verbose:
print(f"Return code is: {build_result.returncode}")
if build_result.returncode != 0:
print(f"Error running build for {container}")
if not continue_on_error:
print("FATAL Error: container build failed and --continue-on-error not set, exiting")
sys.exit(1)
else:
print("****** Container Build Error, continuing because --continue-on-error is set")
else:
print("Skipped")
for container in containers_in_scope:
if include_exclude_check(container, include, exclude):
process_container(container)
else:
if verbose:
print(f"Excluding: {container}")


@ -1,4 +1,4 @@
-# Copyright © 2022, 2023 Vulcanize
+# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@ -20,25 +20,17 @@
import os
import sys
-from shutil import rmtree, copytree
from decouple import config
import click
import importlib.resources
from python_on_whales import docker, DockerException
-from stack_orchestrator.base import get_stack
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config
+from .util import include_exclude_check, get_parsed_stack_config
-builder_js_image_name = "cerc/builder-js:local"
@click.command()
@click.option('--include', help="only build these packages")
@click.option('--exclude', help="don\'t build these packages")
-@click.option("--force-rebuild", is_flag=True, default=False,
-help="Override existing target package version check -- force rebuild")
-@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.pass_context
-def command(ctx, include, exclude, force_rebuild, extra_build_args):
+def command(ctx, include, exclude):
'''build the set of npm packages required for a complete stack'''
quiet = ctx.obj.quiet
@ -49,41 +41,20 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
stack = ctx.obj.stack
continue_on_error = ctx.obj.continue_on_error
-_ensure_prerequisites()
-# build-npms depends on having access to a writable package registry
-# so we check here that it is available
-package_registry_stack = get_stack(ctx.obj, "package-registry")
-registry_available = package_registry_stack.ensure_available()
-if not registry_available:
-print("FATAL: no npm registry available for build-npms command")
-sys.exit(1)
-npm_registry_url = package_registry_stack.get_url()
-npm_registry_url_token = config("CERC_NPM_AUTH_TOKEN", default=None)
-if not npm_registry_url_token:
-print("FATAL: CERC_NPM_AUTH_TOKEN is not defined")
-sys.exit(1)
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
-build_root_path = os.path.join(dev_root_path, "build-trees")
-if verbose:
+if not quiet:
print(f'Dev Root is: {dev_root_path}')
if not os.path.isdir(dev_root_path):
print('Dev root directory doesn\'t exist, creating')
-os.makedirs(dev_root_path)
-if not os.path.isdir(dev_root_path):
-print('Build root directory doesn\'t exist, creating')
-os.makedirs(build_root_path)
# See: https://stackoverflow.com/a/20885799/1701505
-from stack_orchestrator import data
+from . import data
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
all_packages = package_list_file.read().splitlines()
@ -103,42 +74,21 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
print(f"Building npm package: {package}")
repo_dir = package
repo_full_path = os.path.join(dev_root_path, repo_dir)
-# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
-repo_copy_path = os.path.join(build_root_path, repo_dir)
-# First delete any old build tree
-if os.path.isdir(repo_copy_path):
-if verbose:
-print(f"Deleting old build tree: {repo_copy_path}")
-if not dry_run:
-rmtree(repo_copy_path)
-# Now copy the repo into the build tree location
-if verbose:
-print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
-if not dry_run:
-copytree(repo_full_path, repo_copy_path)
-build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
+# TODO: make the npm registry url configurable.
+build_command = ["sh", "-c", "cd /workspace && build-npm-package-local-dependencies.sh http://gitea.local:3000/api/packages/cerc-io/npm/"]
if not dry_run:
if verbose:
print(f"Executing: {build_command}")
-# Originally we used the PEP 584 merge operator:
-# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
-# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
-envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
-"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
-}
-envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
-envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
-envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
+envs = {"CERC_NPM_AUTH_TOKEN": os.environ["CERC_NPM_AUTH_TOKEN"]} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
try:
-docker.run(builder_js_image_name,
+docker.run("cerc/builder-js",
remove=True,
interactive=True,
tty=True,
user=f"{os.getuid()}:{os.getgid()}",
envs=envs,
-# TODO: detect this host name in npm_registry_url rather than hard-wiring it
add_hosts=[("gitea.local", "host-gateway")],
-volumes=[(repo_copy_path, "/workspace")],
+volumes=[(repo_full_path, "/workspace")],
command=build_command
)
# Note that although the docs say that build_result should contain
@ -161,13 +111,3 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
else:
if verbose:
print(f"Excluding: {package}")
-def _ensure_prerequisites():
-# Check that the builder-js container is available and
-# Tell the user how to build it if not
-images = docker.image.list(builder_js_image_name)
-if len(images) == 0:
-print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
-print("Please run this command to create it: laconic-so --stack build-support build-containers")
-sys.exit(1)
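The removed comment above explains why this code uses `dict.update` rather than the PEP 584 merge operator; a small self-contained illustration of the two equivalent forms (the token value is a placeholder):

```python
debug = True

# PEP 584 (Python 3.9+): dict union operator
envs_new = {"CERC_NPM_AUTH_TOKEN": "<token>"} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})

# Python 3.8-compatible equivalent using dict.update
envs_old = {"CERC_NPM_AUTH_TOKEN": "<token>"}
envs_old.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})

assert envs_new == envs_old
```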


@ -0,0 +1,67 @@
version: '3.7'
services:
datanet-eth-bootnode-geth:
hostname: datanet-eth-bootnode-geth
env_file:
- ../config/datanet-eth/datanet-eth.env
environment:
RUN_BOOTNODE: "true"
image: cerc/datanet-eth-geth:local
ports:
- "9898"
- "30303"
datanet-eth-geth-1:
hostname: datanet-eth-geth-1
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "false"
CERC_RUN_STATEDIFF: "false"
CERC_STATEDIFF_DB_NODE_ID: 1
env_file:
- ../config/datanet-eth/datanet-eth.env
image: cerc/datanet-eth-geth:local
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
depends_on:
- datanet-eth-bootnode-geth
ports:
- "8545"
- "40000"
- "6060"
datanet-eth-geth-2:
hostname: datanet-eth-geth-2
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
env_file:
- ../config/datanet-eth/datanet-eth.env
image: cerc/datanet-eth-geth:local
depends_on:
- datanet-eth-bootnode-geth
datanet-tx-spammer:
restart: always
image: cerc/tx-spammer:local
environment:
ACCOUNTS_CSV_URL: http://datanet-eth-bootnode-geth:9898/accounts.csv
ETH_HTTP_PATH: http://datanet-eth-geth-1:8545
LOG_LEVEL: info
SPAMMER_COMMAND: autoSend
ETH_CALL_FREQ: 0
ETH_SEND_FREQ: 0
depends_on:
datanet-eth-bootnode-geth:
condition: service_started
datanet-eth-geth-1:
condition: service_healthy


@ -13,8 +13,6 @@ services:
grafana:
restart: always
image: grafana/grafana
-environment:
-- GF_SECURITY_ADMIN_PASSWORD=changeme6325
volumes:
- ../config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards:/etc/grafana/provisioning/dashboards
- ../config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources:/etc/grafana/provisioning/datasources


@ -2,34 +2,28 @@ version: '3.7'
services:
fixturenet-eth-bootnode-geth:
-restart: always
hostname: fixturenet-eth-bootnode-geth
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-geth:local
-volumes:
-- fixturenet_eth_bootnode_geth_data:/root/ethdata
ports:
- "9898"
- "30303"
fixturenet-eth-geth-1:
-restart: always
hostname: fixturenet-eth-geth-1
cap_add:
- SYS_PTRACE
environment:
-CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
+CERC_REMOTE_DEBUG: "true"
-CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect}
+CERC_RUN_STATEDIFF: "detect"
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local
-volumes:
-- fixturenet_eth_geth_1_data:/root/ethdata
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
@ -40,12 +34,10 @@ services:
- fixturenet-eth-bootnode-geth
ports:
- "8545"
-- "8546"
- "40000"
- "6060"
fixturenet-eth-geth-2:
-restart: always
hostname: fixturenet-eth-geth-2
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
@ -53,28 +45,19 @@ services:
timeout: 10s
retries: 10
start_period: 3s
-environment:
-CERC_KEEP_RUNNING_AFTER_GETH_EXIT: "true"
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local
depends_on:
- fixturenet-eth-bootnode-geth
-volumes:
-- fixturenet_eth_geth_2_data:/root/ethdata
-ports:
-- "8545"
-- "8546"
fixturenet-eth-bootnode-lighthouse:
-restart: always
hostname: fixturenet-eth-bootnode-lighthouse
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-lighthouse:local
fixturenet-eth-lighthouse-1:
-restart: always
hostname: fixturenet-eth-lighthouse-1
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@ -89,8 +72,6 @@ services:
ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551"
image: cerc/fixturenet-eth-lighthouse:local
-volumes:
-- fixturenet_eth_lighthouse_1_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
@ -100,7 +81,6 @@ services:
- "8001"
fixturenet-eth-lighthouse-2:
-restart: always
hostname: fixturenet-eth-lighthouse-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@ -116,17 +96,8 @@ services:
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551"
LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0"
image: cerc/fixturenet-eth-lighthouse:local
-volumes:
-- fixturenet_eth_lighthouse_2_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-2:
condition: service_healthy
-volumes:
-fixturenet_eth_bootnode_geth_data:
-fixturenet_eth_geth_1_data:
-fixturenet_eth_geth_2_data:
-fixturenet_eth_lighthouse_1_data:
-fixturenet_eth_lighthouse_2_data:


@ -0,0 +1,21 @@
version: "3.2"
services:
laconicd:
restart: unless-stopped
image: cerc/laconicd:local
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
volumes:
# TODO: look at folding this script into the container
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
# TODO: determine which of the ports below is really needed
ports:
- "6060"
- "26657"
- "26656"
- "9473"
- "8545"
- "8546"
- "9090"
- "9091"
- "1317"


@ -8,7 +8,7 @@ services:
condition: service_healthy
image: cerc/go-ethereum-foundry:local
healthcheck:
-test: ["CMD", "nc", "-vz", "localhost", "8545"]
+test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
timeout: 3s
retries: 10


@ -7,9 +7,11 @@ services:
condition: service_healthy
image: cerc/ipld-eth-server:local
environment:
-SERVER_HTTP_PATH: 0.0.0.0:8081
-SERVER_GRAPHQL: "true"
-SERVER_GRAPHQLPATH: 0.0.0.0:8082
+IPLD_SERVER_GRAPHQL: "true"
+IPLD_POSTGRAPHILEPATH: http://graphql:5000
+ETH_SERVER_HTTPPATH: 0.0.0.0:8081
+ETH_SERVER_GRAPHQL: "true"
+ETH_SERVER_GRAPHQLPATH: 0.0.0.0:8082
VDB_COMMAND: "serve"
ETH_CHAIN_CONFIG: "/tmp/chain.json"
DATABASE_NAME: cerc_testing
@ -25,8 +27,8 @@ services:
PROM_HTTP: "true"
PROM_HTTP_ADDR: "0.0.0.0"
PROM_HTTP_PORT: "8090"
-LOG_LEVEL: "debug"
+LOGRUS_LEVEL: "debug"
-CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
+CERC_REMOTE_DEBUG: "true"
volumes:
- type: bind
source: ../config/ipld-eth-server/chain.json


@ -0,0 +1,7 @@
version: "3.2"
services:
test:
image: cerc/test-container:local
restart: always
ports:
- "80"


@ -34,12 +34,12 @@ services:
- ETH_RPC_URL=http://go-ethereum:8545
command: ["sh", "-c", "yarn server"]
volumes:
-- ../config/watcher-erc20/erc20-watcher.toml:/app/environments/local.toml
+- ../config/watcher-erc20/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml
ports:
- "0.0.0.0:3002:3001"
- "0.0.0.0:9002:9001"
healthcheck:
-test: ["CMD", "nc", "-vz", "localhost", "3001"]
+test: ["CMD", "nc", "-v", "localhost", "3002"]
interval: 20s
timeout: 5s
retries: 15


@ -24,37 +24,15 @@ services:
retries: 15
start_period: 10s
-mobymask-watcher-job-runner:
-restart: unless-stopped
-depends_on:
-mobymask-watcher-db:
-condition: service_healthy
-image: cerc/watcher-mobymask:local
-command: ["sh", "-c", "yarn job-runner"]
-volumes:
-- ../config/watcher-mobymask/mobymask-watcher.toml:/app/environments/local.toml
-ports:
-- "0.0.0.0:9000:9000"
-extra_hosts:
-- "ipld-eth-server:host-gateway"
-healthcheck:
-test: ["CMD", "nc", "-v", "localhost", "9000"]
-interval: 20s
-timeout: 5s
-retries: 15
-start_period: 5s
mobymask-watcher-server:
restart: unless-stopped
depends_on:
mobymask-watcher-db:
condition: service_healthy
-mobymask-watcher-job-runner:
-condition: service_healthy
image: cerc/watcher-mobymask:local
command: ["sh", "-c", "yarn server"]
volumes:
-- ../config/watcher-mobymask/mobymask-watcher.toml:/app/environments/local.toml
+- ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
ports:
- "0.0.0.0:3001:3001"
- "0.0.0.0:9001:9001"
@ -67,5 +45,21 @@ services:
retries: 15
start_period: 5s
+mobymask-watcher-job-runner:
+restart: unless-stopped
+depends_on:
+mobymask-watcher-server:
+condition: service_healthy
+mobymask-watcher-db:
+condition: service_healthy
+image: cerc/watcher-mobymask:local
+command: ["sh", "-c", "yarn job-runner"]
+volumes:
+- ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
+ports:
+- "0.0.0.0:9000:9000"
+extra_hosts:
+- "ipld-eth-server:host-gateway"
volumes:
mobymask_watcher_db_data:

View File

@@ -0,0 +1,9 @@
# The password used to access test accounts (eg, via personal_unlockAccount). The password is the same for all accounts.
ACCOUNT_PASSWORD=secret1212
# ENODE of the geth bootnode.
BOOTNODE_KEY="b0ac22adcad37213c7c565810a50f1772291e7b0ce53fb73e7ec2a3c75bc13b5"
ENODE="enode://af22c29c316ad069cf48a09a4ad5cf04a251b411e45098888d114c6dd7f489a13786620d5953738762afa13711d4ffb3b19aa5de772d8af72f851f7e9c5b164a@datanet-eth-bootnode-geth:30303"
# JWT is required by the startup script, but won't be used without lighthouse.
JWT="0x6cdcac3501046a08e186730dd8bd136cfaf0fdc1fc955f6e15ad3068c0ff2af0"

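ACCOUNT_PASSWORD above is the passphrase a client supplies to personal_unlockAccount. A hedged sketch of unlocking one of the test accounts over JSON-RPC, where the account address and RPC endpoint are placeholders (geth serves HTTP on port 8545 by default when --http.port is not set):

    curl -s -X POST -H 'Content-Type: application/json' \
      --data '{"jsonrpc":"2.0","method":"personal_unlockAccount","params":["0xYourTestAccount","secret1212",300],"id":1}' \
      http://localhost:8545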
View File

@@ -17,12 +17,7 @@ CERC_STATEDIFF_DB_PORT=5432
 CERC_STATEDIFF_DB_NAME="cerc_testing"
 CERC_STATEDIFF_DB_USER="vdbm"
 CERC_STATEDIFF_DB_PASSWORD="password"
-CERC_STATEDIFF_DB_GOOSE_MIN_VER=${CERC_STATEDIFF_DB_GOOSE_MIN_VER:-18}
-CERC_STATEDIFF_DB_LOG_STATEMENTS="${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false}"
-CERC_STATEDIFF_WORKERS=2
+CERC_STATEDIFF_DB_GOOSE_MIN_VER=23
+CERC_STATEDIFF_DB_LOG_STATEMENTS="false"

 CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"
-CERC_GETH_VERBOSITY=${CERC_GETH_VERBOSITY:-3}
-
-# Used by Lighthouse
-SECONDS_PER_ETH1_BLOCK=${SECONDS_PER_ETH1_BLOCK:-3}

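The removed lines use the shell's ${VAR:-default} expansion, which lets the host environment override a value at deploy time, while the replacement lines pin fixed values. A minimal illustration of the idiom (the variable name is arbitrary):

    unset GOOSE_MIN_VER
    echo "${GOOSE_MIN_VER:-18}"   # prints 18: the default applies
    GOOSE_MIN_VER=23
    echo "${GOOSE_MIN_VER:-18}"   # prints 23: the environment wins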
View File

@@ -0,0 +1,118 @@
#!/bin/sh
# Originally from: https://github.com/cerc-io/laconicd/blob/main/init.sh
# TODO: fold this back into the laconicd repo
KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# to trace evm
TRACE="--trace"
# TRACE=""
# validate dependencies are installed
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
# remove existing daemon and client
rm -rf ~/.laconic*
make install
laconicd config keyring-backend $KEYRING
laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for laconic (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_NAMESERVICE_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Set gas limit in genesis
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# disable produce empty block
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi
if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
fi
# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
# Sign genesis transaction
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
# Collect genesis tx
laconicd collect-gentxs
# Run this to ensure everything worked and that the genesis file is set up correctly
laconicd validate-genesis
if [[ $1 == "pending" ]]; then
echo "pending mode is on, please wait for the first block committed."
fi
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground

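Every genesis edit in the script above repeats the same cat | jq > tmp && mv round trip, because jq cannot edit a file in place. A hypothetical helper (not part of the script) that would express the same updates more compactly:

    GENESIS=$HOME/.laconicd/config/genesis.json
    update_genesis() {
        # Apply a jq filter and write the result back via a temp file
        jq "$1" "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS"
    }
    update_genesis '.app_state["staking"]["params"]["bond_denom"]="aphoton"'
    update_genesis '.app_state["mint"]["params"]["mint_denom"]="aphoton"'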
File diff suppressed because it is too large.

View File

@@ -9,8 +9,8 @@
   # Checkpoint interval in number of blocks.
   checkpointInterval = 2000

-  # Enable state creation
-  enableState = true
+  # IPFS API address (can be taken from the output on running the IPFS daemon).
+  # ipfsApiAddr = "/ip4/127.0.0.1/tcp/5001"

   # Boolean to filter logs by contract.
   filterLogs = true
@@ -51,6 +51,3 @@
   maxCompletionLagInSecs = 300
   jobDelayInMilliSecs = 100
   eventsInBatch = 50
-  blockDelayInMilliSecs = 2000
-  prefetchBlocksInMem = false
-  prefetchBlockCount = 10

View File

@@ -16,12 +16,6 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NO
 RUN mkdir /scripts
 COPY install-dependencies.sh /scripts

-# Override the definition of GERBIL_PATH in the base image, but
-# is safe because (at present) no gerbil packages are installed in the base image
-# We do this in order to allow a set of pre-installed packages from the container
-# to be used with an arbitrary, potentially different set of projects bind mounted
-# at /src
-ENV GERBIL_PATH=/.gerbil
 RUN bash /scripts/install-dependencies.sh

 # Needed to prevent git from raging about /src
View File

@@ -10,7 +10,6 @@ DEPS=(github.com/fare/gerbil-utils
       github.com/vyzo/gerbil-libp2p
       ) ;
 for i in ${DEPS[@]} ; do
-    echo "Installing gerbil package: $i"
-    gxpkg install $i
+    gxpkg install $i &&
     gxpkg build $i
 done

View File

@@ -1,6 +1,6 @@
 # Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile
 # [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster
-ARG VARIANT=20-bullseye-slim
+ARG VARIANT=16-bullseye
 FROM node:${VARIANT}

 ARG USERNAME=node
@@ -8,8 +8,6 @@ ARG NPM_GLOBAL=/usr/local/share/npm-global

 # Add NPM global to PATH.
 ENV PATH=${NPM_GLOBAL}/bin:${PATH}
-# Prevents npm from printing version warnings
-ENV NPM_CONFIG_UPDATE_NOTIFIER=false

 RUN \
     # Configure global npm install location, use group to adapt to UID/GID changes
@@ -24,37 +22,27 @@ RUN \
     && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \
     # Install eslint
     && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \
-    # Install semver
-    && su ${USERNAME} -c "umask 0002 && npm install -g semver" \
-    # Install pnpm
-    && su ${USERNAME} -c "umask 0002 && npm install -g pnpm" \
-    # Install bun
-    && su ${USERNAME} -c "umask 0002 && npm install -g bun@1.1.x" \
     && npm cache clean --force > /dev/null 2>&1

 # [Optional] Uncomment this section to install additional OS packages.
 RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
-    && apt-get -y install --no-install-recommends jq gettext-base git
+    && apt-get -y install --no-install-recommends jq

 # [Optional] Uncomment if you want to install an additional version of node using nvm
 # ARG EXTRA_NODE_VERSION=10
 # RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}"

-# We do this to get a yq binary from the published container, for the correct architecture we're building here
-COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq
-
-COPY scripts /scripts
-
 # [Optional] Uncomment if you want to install more global node modules
 # RUN su node -c "npm install -g <your-package-list-here>"

-RUN mkdir -p /config
-
-# Install simple web server for now (use nginx perhaps later)
-RUN yarn global add http-server
-
-# Expose port for http
-EXPOSE 80
-
-# Default command sleeps forever so docker doesn't kill it
-CMD ["/scripts/start-serving-app.sh"]
+RUN mkdir /scripts
+COPY build-npm-package.sh /scripts
+COPY yarn-local-registry-fixup.sh /scripts
+COPY build-npm-package-local-dependencies.sh /scripts
+ENV PATH="${PATH}:/scripts"
+
+COPY entrypoint.sh .
+ENTRYPOINT ["./entrypoint.sh"]
+# Placeholder CMD : generally this will be overridden at run time like :
+# docker run -it -v /home/builder/cerc/laconic-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build'
+CMD node --version

View File

@@ -0,0 +1,34 @@
#!/bin/bash
# Usage: build-npm-package-local-dependencies.sh <registry-url> <publish-with-this-version>
# Runs build-npm-package.sh after first fixing up yarn.lock to use a local
# npm registry for all packages in a specific scope (currently @cerc-io)
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
if ! [[ $# -eq 1 || $# -eq 2 ]]; then
echo "Illegal number of parameters" >&2
exit 1
fi
if [[ -z "${CERC_NPM_AUTH_TOKEN}" ]]; then
echo "CERC_NPM_AUTH_TOKEN is not set" >&2
exit 1
fi
# Exit on error
set -e
local_npm_registry_url=$1
package_publish_version=$2
# TODO: make this a parameter and allow a list of scopes
npm_scope_for_local="@cerc-io"
# We need to configure the local registry
npm config set ${npm_scope_for_local}:registry ${local_npm_registry_url}
npm config set -- ${local_npm_registry_url}:_authToken ${CERC_NPM_AUTH_TOKEN}
# Find the set of dependencies from the specified scope
mapfile -t dependencies_from_scope < <(cat package.json | jq -r '.dependencies | with_entries(if (.key|test("^'${npm_scope_for_local}'/.*$")) then ( {key: .key, value: .value } ) else empty end ) | keys[]')
echo "Fixing up dependencies"
for package in "${dependencies_from_scope[@]}"
do
echo "Fixing up package ${package}"
yarn-local-registry-fixup.sh $package ${local_npm_registry_url}
done
echo "Running build"
build-npm-package.sh ${local_npm_registry_url} ${package_publish_version}

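A hedged usage sketch for the script above, run from the root of the package being built; the registry URL and version are placeholders, and CERC_NPM_AUTH_TOKEN must be set (the script exits otherwise):

    export CERC_NPM_AUTH_TOKEN=<token-for-the-local-registry>
    build-npm-package-local-dependencies.sh \
      http://gitea.example.com/api/packages/cerc-io/npm/ 0.1.0-local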
View File

@@ -22,24 +22,15 @@ set -e
 # Get the name of this package from package.json since we weren't passed that
 package_name=$( cat package.json | jq -r .name )
 local_npm_registry_url=$1
-npm config set @cerc-io:registry ${local_npm_registry_url}
-npm config set @lirewine:registry ${local_npm_registry_url}
-# Workaround bug in npm unpublish where it needs the url to be of the form //<foo> and not http://<foo>
-local_npm_registry_url_fixed=$( echo ${local_npm_registry_url} | sed -e 's/^http[s]\{0,1\}://')
-npm config set -- ${local_npm_registry_url_fixed}:_authToken ${CERC_NPM_AUTH_TOKEN}
+npm config set @lirewine:registry ${local_npm_registry_url}
+npm config set @cerc-io:registry ${local_npm_registry_url}
+npm config set -- ${local_npm_registry_url}:_authToken ${CERC_NPM_AUTH_TOKEN}
 # First check if the version of this package we're trying to build already exists in the registry
 package_exists=$( yarn info --json ${package_name}@${package_publish_version} 2>/dev/null | jq -r .data.dist.tarball )
 if [[ ! -z "$package_exists" && "$package_exists" != "null" ]]; then
-  echo "${package_publish_version} of ${package_name} already exists in the registry"
-  if [[ ${CERC_FORCE_REBUILD} == "true" ]]; then
-    # Attempt to unpublish the existing package
-    echo "NOTE: unpublishing existing package version since force rebuild is enabled"
-    npm unpublish --force ${package_name}@${package_publish_version}
-  else
-    echo "skipping build since target version already exists"
-    exit 0
-  fi
+  echo "${package_publish_version} of ${package_name} already exists in the registry, skipping build"
+  exit 0
 fi
 echo "Build and publish ${package_name} version ${package_publish_version}"
 yarn install
 yarn build

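The removed workaround reflects how npm keys per-registry auth: in .npmrc the _authToken key is the registry URL with its protocol stripped, and npm unpublish in particular resolves credentials against that //-form key. With an illustrative host, the resulting config lines look roughly like:

    @cerc-io:registry=http://gitea.example.com/api/packages/cerc-io/npm/
    //gitea.example.com/api/packages/cerc-io/npm/:_authToken=abc123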
View File

@@ -0,0 +1,2 @@
#!/bin/sh
exec "$@"

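The exec "$@" pattern replaces the shell with whatever command follows the image name, so the payload runs as PID 1 and receives stop signals directly rather than through an intermediate shell. The placeholder CMD in the Dockerfile above shows the intended shape of an invocation:

    # Everything after the image name becomes "$@" inside entrypoint.sh
    docker run -it -v /home/builder/cerc/laconic-sdk:/workspace cerc/builder-js \
      sh -c 'cd /workspace && yarn && yarn build'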
View File

@@ -18,17 +18,16 @@ fi
 set -e
 target_package=$1
 local_npm_registry_url=$2
-# Extract the actual version pinned in yarn.lock
-# See: https://stackoverflow.com/questions/60454251/how-to-know-the-version-of-currently-installed-package-from-yarn-lock
-versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name')
+# TODO: use jq rather than sed here:
+versioned_target_package=$(grep ${target_package} package.json | sed -e 's#[[:space:]]\{1,\}\"\('${target_package}'\)\":[[:space:]]\{1,\}\"\(.*\)\",#\1@\2#' )
 # Use yarn info to get URL checksums etc from the new registry
 yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
 # First check if the target version actually exists.
 # If it doesn't exist there will be no .data.dist.tarball element,
 # and jq will output the string "null"
 package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)
-if [[ "$yarn_info_output" == "" || $package_tarball == "null" ]]; then
-  echo "FATAL: Target package version ($versioned_target_package) not found (or bad npm auth token)" >&2
+if [[ $package_tarball == "null" ]]; then
+  echo "FATAL: Target package version ($versioned_target_package) not found" >&2
   exit 1
 fi
 # Code below parses out the values we need

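The TODO comment suggests replacing the sed extraction with jq; one possible equivalent for the versioned_target_package line, producing the same name@range output from package.json (an untested sketch, not the repo's code):

    versioned_target_package=$(jq -r --arg pkg "$target_package" \
        '.dependencies[$pkg] as $v | "\($pkg)@\($v)"' package.json)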
View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
#
# Build cerc/datanet-eth-geth
set -e
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# Make sure the "build" directory is empty.
rm -rf $SCRIPT_DIR/build
# Copy the fixture-net scripts and config.
cp -rp $SCRIPT_DIR/../cerc-fixturenet-eth-geth $SCRIPT_DIR/build
# Then remove terminal_total_difficulty and replace it with capped_maximum_difficulty.
# This has two effects:
# (1) Disables the Merge (so all we need is geth, not lighthouse).
# (2) Maintains a fast block rate, since the difficulty will never exceed the capped value.
sed -i '' 's/^terminal_total_difficulty:.*$/capped_maximum_difficulty: 1/' $SCRIPT_DIR/build/genesis/el/el-config.yaml
# Build the image.
docker build -t cerc/datanet-eth-geth:local ${SCRIPT_DIR}/build
# Clean up the "build" directory.
rm -rf $SCRIPT_DIR/build

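Note that sed -i '' is the BSD/macOS spelling of in-place editing and fails under GNU sed. A portable sketch of the same substitution, if the script ever needs to run on Linux as well:

    f=$SCRIPT_DIR/build/genesis/el/el-config.yaml
    sed 's/^terminal_total_difficulty:.*$/capped_maximum_difficulty: 1/' "$f" > "$f.tmp" \
      && mv "$f.tmp" "$f"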
View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Build cerc/eth-probe
docker build -t cerc/eth-probe:local ${CERC_REPO_BASE_DIR}/eth-probe

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Build cerc/eth-statediff-fill-service
docker build -t cerc/eth-statediff-fill-service:local ${CERC_REPO_BASE_DIR}/eth-statediff-fill-service

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Build cerc/eth-statediff-service
docker build -t cerc/eth-statediff-service:local ${CERC_REPO_BASE_DIR}/eth-statediff-service

View File

@@ -0,0 +1,27 @@
FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
# Using the same golang image as used to build geth: https://github.com/cerc-io/go-ethereum/blob/HEAD/Dockerfile
FROM golang:1.18-alpine as delve
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM cerc/go-ethereum:local as geth
FROM alpine:latest
RUN apk add --no-cache python3 python3-dev py3-pip curl wget jq build-base gettext libintl openssl bash bind-tools postgresql-client
COPY --from=delve /go/bin/dlv /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
COPY --from=ethgen /apps /apps
RUN cd /apps/el-gen && pip3 install -r requirements.txt
COPY genesis /opt/testnet
COPY run-el.sh /opt/testnet/run.sh
RUN cd /opt/testnet && make genesis-el
COPY --from=geth /usr/local/bin/geth /usr/local/bin/
RUN geth init /opt/testnet/build/el/geth.json && rm -f ~/.ethereum/geth/nodekey
ENTRYPOINT ["/opt/testnet/run.sh"]

View File

@@ -1,8 +1,6 @@
 #!/usr/bin/env bash
 # Build cerc/fixturenet-eth-geth
-source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-docker build -t cerc/fixturenet-eth-geth:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
+docker build -t cerc/fixturenet-eth-geth:local -f ${SCRIPT_DIR}/Dockerfile $SCRIPT_DIR

View File

@@ -12,6 +12,6 @@ for line in `cat ../build/el/accounts.csv`; do
   echo ""
   echo "$ADDRESS"

-  geth account import --datadir=~/ethdata --password .pw.$$ .key.$$
+  geth account import --password .pw.$$ .key.$$
   rm -f .pw.$$ .key.$$
 done

View File

@@ -1,4 +1,5 @@
 from web3.auto import w3
+import json
 import ruamel.yaml as yaml
 import sys

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
set -e
# See: https://github.com/skylenet/ethereum-genesis-generator/blob/master/entrypoint.sh
rm -rf ../build/el
mkdir -p ../build/el
tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX)
envsubst < el-config.yaml > $tmp_dir/genesis-config.yaml
ttd=`cat $tmp_dir/genesis-config.yaml | grep terminal_total_difficulty | awk '{ print $2 }'`
homestead_block=`cat $tmp_dir/genesis-config.yaml | grep homestead_block | awk '{ print $2 }'`
eip150_block=`cat $tmp_dir/genesis-config.yaml | grep eip150_block | awk '{ print $2 }'`
eip155_block=`cat $tmp_dir/genesis-config.yaml | grep eip155_block | awk '{ print $2 }'`
eip158_block=`cat $tmp_dir/genesis-config.yaml | grep eip158_block | awk '{ print $2 }'`
byzantium_block=`cat $tmp_dir/genesis-config.yaml | grep byzantium_block | awk '{ print $2 }'`
constantinople_block=`cat $tmp_dir/genesis-config.yaml | grep constantinople_block | awk '{ print $2 }'`
petersburg_block=`cat $tmp_dir/genesis-config.yaml | grep petersburg_block | awk '{ print $2 }'`
istanbul_block=`cat $tmp_dir/genesis-config.yaml | grep istanbul_block | awk '{ print $2 }'`
berlin_block=`cat $tmp_dir/genesis-config.yaml | grep berlin_block | awk '{ print $2 }'`
london_block=`cat $tmp_dir/genesis-config.yaml | grep london_block | awk '{ print $2 }'`
merge_fork_block=`cat $tmp_dir/genesis-config.yaml | grep merge_fork_block | awk '{ print $2 }'`
capped_maximum_difficulty=`cat $tmp_dir/genesis-config.yaml | grep capped_maximum_difficulty | awk '{ print $2 }'`
python3 ../accounts/mnemonic_to_csv.py $tmp_dir/genesis-config.yaml > ../build/el/accounts.csv
python3 /apps/el-gen/genesis_geth.py $tmp_dir/genesis-config.yaml | \
jq ".config.homesteadBlock=$homestead_block" | \
jq ".config.eip150Block=$eip150_block" | \
jq ".config.eip155Block=$eip155_block" | \
jq ".config.eip158Block=$eip158_block" | \
jq ".config.byzantiumBlock=$byzantium_block" | \
jq ".config.constantinopleBlock=$constantinople_block" | \
jq ".config.petersburgBlock=$petersburg_block" | \
jq ".config.istanbulBlock=$istanbul_block" | \
jq ".config.berlinBlock=$berlin_block" | \
jq ".config.londonBlock=$london_block" | \
jq ".config.mergeForkBlock=$merge_fork_block" > ../build/el/geth.json
if [ -n "$ttd" ]; then
cat ../build/el/geth.json | jq ".config.terminalTotalDifficulty=$ttd" > ../build/el/geth.json.jq
mv ../build/el/geth.json.jq ../build/el/geth.json
else
cat ../build/el/geth.json | jq "del(.config.terminalTotalDifficulty)" > ../build/el/geth.json.jq
mv ../build/el/geth.json.jq ../build/el/geth.json
fi
if [ -n "$capped_maximum_difficulty" ]; then
cat ../build/el/geth.json | jq ".config.cappedMaximumDifficulty=$capped_maximum_difficulty" > ../build/el/geth.json.jq
mv ../build/el/geth.json.jq ../build/el/geth.json
else
cat ../build/el/geth.json | jq "del(.config.cappedMaximumDifficulty)" > ../build/el/geth.json.jq
mv ../build/el/geth.json.jq ../build/el/geth.json
fi

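Each fork-block variable above is extracted with an identical grep | awk pair. A hypothetical helper that states the pattern once (a naive single-level YAML lookup, same behavior as the inline version):

    yaml_value() {
        # "key: value" -> value
        grep "^$1:" $tmp_dir/genesis-config.yaml | awk '{ print $2 }'
    }
    ttd=$(yaml_value terminal_total_difficulty)
    london_block=$(yaml_value london_block)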
View File

@@ -10,8 +10,22 @@ el_premine_addrs: {}
 chain_id: 1212
 deposit_contract_address: "0x1212121212121212121212121212121212121212"
 genesis_timestamp: 0
-genesis_delay: 0
-deneb_fork_epoch: 0
-# note: only needed as workaround https://github.com/ethpandaops/ethereum-genesis-generator/pull/105
-electra_fork_epoch: 0
-slot_duration_in_seconds: 3
+terminal_total_difficulty: 1000
+homestead_block: 1
+eip150_block: 1
+eip155_block: 1
+eip158_block: 1
+byzantium_block: 1
+constantinople_block: 1
+petersburg_block: 1
+istanbul_block: 1
+berlin_block: 1
+london_block: 1
+merge_fork_block: 1
+
+clique:
+  enabled: false
+  signers:
+    - 36d56343bc308d4ffaac2f793d121aba905fa6cc
+    - 5e762d4a3847cadaf40a4b0c39574b0ff6698c78
+    - 15d7acc1019fdf8ab4f0f7bd31ec1487ecb5a2bd

View File

@@ -6,44 +6,25 @@ fi
 ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2`
 NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'`
-NETRESTRICT=`ip addr | grep -w inet | grep -v '127.0' | awk '{print $2}'`
-CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}"
-CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}"
+NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'`
+
+HOME_DIR=`pwd`

 cd /opt/testnet/build/el
 python3 -m http.server 9898 &
-cd $HOME
+cd $HOME_DIR

 START_CMD="geth"
 if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
     START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --"
 fi

-# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script
-cleanup() {
-    echo "Signal received, cleaning up..."
-    # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process)
-    pkill -P ${geth_pid}
-    sleep 2
-    kill $(jobs -p)
-    wait
-    echo "Done"
-}
-trap 'cleanup' SIGINT SIGTERM
-
 if [ "true" == "$RUN_BOOTNODE" ]; then
     $START_CMD \
-      --datadir="${CERC_ETH_DATADIR}" \
       --nodekeyhex="${BOOTNODE_KEY}" \
       --nodiscover \
       --ipcdisable \
       --networkid=${NETWORK_ID} \
-      --netrestrict="${NETRESTRICT}" \
-      &
-    geth_pid=$!
+      --netrestrict="${NETRESTRICT}"
 else
     cd /opt/testnet/accounts
     ./import_keys.sh
@@ -65,8 +46,8 @@ else
     STATEDIFF_OPTS=""
     if [ "$CERC_RUN_STATEDIFF" == "true" ]; then
       ready=0
-      echo "Waiting for statediff DB..."
       while [ $ready -eq 0 ]; do
+        echo "Waiting for statediff DB..."
         sleep 1
         export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD"
         result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \
@@ -74,16 +55,12 @@ else
                -U "$CERC_STATEDIFF_DB_USER" \
                -d "$CERC_STATEDIFF_DB_NAME" \
                -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }')
-        if [ -n "$result" ]; then
+        if [ -n "$result" ] && [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then
           echo "DB ready..."
-          if [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then
-            ready=1
-          else
-            echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)"
-          fi
+          ready=1
         fi
       done

-      STATEDIFF_OPTS="--statediff \
+      STATEDIFF_OPTS="--statediff=true \
       --statediff.db.host=$CERC_STATEDIFF_DB_HOST \
       --statediff.db.name=$CERC_STATEDIFF_DB_NAME \
       --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \
@@ -91,41 +68,21 @@ else
       --statediff.db.port=$CERC_STATEDIFF_DB_PORT \
       --statediff.db.user=$CERC_STATEDIFF_DB_USER \
       --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \
-      --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \
       --statediff.waitforsync=true \
-      --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \
       --statediff.writing=true"
-
-      if [ -d "${CERC_PLUGINS_DIR}" ]; then
-        # With plugeth, we separate the statediff options by prefixing with ' -- '
-        STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}"
-      fi
-    fi
-
-    OTHER_OPTS=""
-    # miner options were removed in v1.12
-    GETH_VERSION=$(geth --version | grep -io '[0-9][0-9a-z.-]*')
-    if echo -e "$GETH_VERSION\n1.12" | sort -Vc; then
-      OTHER_OPTS="--miner.threads=1"
-    fi
+    fi

     $START_CMD \
-      --datadir="${CERC_ETH_DATADIR}" \
       --bootnodes="${ENODE}" \
       --allow-insecure-unlock \
       --http \
       --http.addr="0.0.0.0" \
       --http.vhosts="*" \
-      --http.api="${CERC_GETH_HTTP_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
+      --http.api="eth,web3,net,admin,personal,debug,statediff" \
       --http.corsdomain="*" \
       --authrpc.addr="0.0.0.0" \
       --authrpc.vhosts="*" \
       --authrpc.jwtsecret="/opt/testnet/build/el/jwtsecret" \
-      --ws \
-      --ws.addr="0.0.0.0" \
-      --ws.origins="*" \
-      --ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
-      --http.corsdomain="*" \
       --networkid="${NETWORK_ID}" \
       --netrestrict="${NETRESTRICT}" \
       --gcmode archive \
@@ -133,22 +90,10 @@ else
       --cache.preimages \
       --syncmode=full \
       --mine \
+      --miner.threads=1 \
       --metrics \
       --metrics.addr="0.0.0.0" \
       --verbosity=${CERC_GETH_VERBOSITY:-3} \
-      --log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
-      --miner.etherbase="${ETHERBASE}" \
-      ${OTHER_OPTS} \
-      ${STATEDIFF_OPTS} \
-      &
-    geth_pid=$!
-fi
-
-wait $geth_pid
-
-if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then
-  while [ 1 -eq 1 ]; do
-    sleep 60
-  done
+      --vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
+      --miner.etherbase="${ETHERBASE}" ${STATEDIFF_OPTS}
 fi

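The DB-readiness loop above can be reproduced by hand with the credentials from the fixturenet env file earlier in this diff (cerc_testing / vdbm / password); the host and port assume the database is published locally, which this diff does not show:

    PGPASSWORD=password psql -h localhost -p 5432 -U vdbm -d cerc_testing \
      -t -c 'select max(version_id) from goose_db_version;'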
View File

@@ -1,5 +1,6 @@
-FROM cerc/lighthouse-cli:local AS lcli
-FROM cerc/fixturenet-eth-genesis:local AS fnetgen
+FROM sigp/lcli:v3.2.1 AS lcli
+FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
+FROM cerc/fixturenet-eth-geth:local AS fnetgeth

 FROM cerc/lighthouse:local

@@ -11,13 +12,16 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

-COPY --from=lcli /usr/local/bin/lcli /usr/local/bin/lcli
-COPY --from=fnetgen /opt/genesis/el /opt/testnet/el
-COPY --from=fnetgen /opt/genesis/build/el /opt/testnet/build/el
-
 COPY genesis /opt/testnet
 COPY run-cl.sh /opt/testnet/run.sh

+COPY --from=lcli /usr/local/bin/lcli /usr/local/bin/lcli
+COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/eth2-testnet-genesis
+COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/eth2-val-tools
+COPY --from=ethgen /apps /apps
+COPY --from=fnetgeth /opt/testnet/el /opt/testnet/el
+COPY --from=fnetgeth /opt/testnet/build/el /opt/testnet/build/el
+
 RUN cd /opt/testnet && make genesis-cl

 # Work around some bugs in lcli where the default path is always used.

Some files were not shown because too many files have changed in this diff.