Compare commits
No commits in common. "21f7b96eb4744b38ee2fcf7e9edcb21264d34a8e" and "fe6b15b6a62a7c3aef6a545e57e05fc04158740f" have entirely different histories.
21f7b96eb4...fe6b15b6a6
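Note: with unrelated histories there is no merge base, so a three-dot comparison degenerates to the union of both histories. A quick local check (assuming a hypothetical clone containing both commits):

  git merge-base 21f7b96eb4 fe6b15b6a6 || echo "no merge base: unrelated histories"
  git rev-list --left-right --count 21f7b96eb4...fe6b15b6a6   # commits unique to each side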
@@ -43,19 +43,3 @@ jobs:
        run: ./scripts/build_shiv_package.sh
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth-plugeth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -47,19 +47,3 @@ jobs:
          sleep 5
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth-plugeth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -45,19 +45,4 @@ jobs:
          sleep 5
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -11,7 +11,7 @@ on:

jobs:
  test:
    name: "Run Laconicd fixturenet and Laconic CLI tests"
    name: "Run an Laconicd fixturenet test"
    runs-on: ubuntu-latest
    steps:
      - name: 'Update'
@@ -46,21 +46,3 @@ jobs:
        run: ./scripts/build_shiv_package.sh
      - name: "Run fixturenet-laconicd tests"
        run: ./tests/fixturenet-laconicd/run-test.sh
      - name: "Run laconic CLI tests"
        run: ./tests/fixturenet-laconicd/run-cli-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -19,19 +19,3 @@ jobs:
          python-version: '3.8'
      - name : "Run flake8"
        uses: py-actions/flake8@v2
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -54,19 +54,3 @@ jobs:
          # Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
          draft: ${{ endsWith('publish-test', github.ref ) }}
          files: ./laconic-so
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -51,19 +51,4 @@ jobs:
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/container-registry/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -49,19 +49,4 @@ jobs:
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/database/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -47,19 +47,3 @@ jobs:
          sleep 5
      - name: "Run deploy tests"
        run: ./tests/deploy/run-deploy-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -1,58 +0,0 @@
name: External Stack Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-external-stack'
      - '.gitea/workflows/test-external-stack.yml'
      - 'tests/external-stack/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '8 19 * * *'

jobs:
  test:
    name: "Run external stack test suite"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaroud this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Run external stack tests"
        run: ./tests/external-stack/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -51,19 +51,4 @@ jobs:
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/k8s-deploy/run-deploy-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -49,19 +49,3 @@ jobs:
          sleep 5
      - name: "Run webapp tests"
        run: ./tests/webapp-test/run-webapp-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -47,19 +47,5 @@ jobs:
          sleep 5
      - name: "Run smoke tests"
        run: ./tests/smoke-test/run-smoke-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
@@ -1,6 +1,3 @@
Change this file to trigger running the fixturenet-laconicd-test CI job
Trigger
Trigger
Trigger
Trigger
Trigger
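These trigger files carry no meaningful content of their own; the workflows watch them through paths: filters (see the External Stack workflow above for the pattern), so touching the file forces a run. A hedged sketch — the exact trigger path below is assumed from the job name, not confirmed by this diff:

  date >> .gitea/workflows/triggers/fixturenet-laconicd-test
  git commit -am "Trigger fixturenet-laconicd-test CI job" && git push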
@@ -1,2 +0,0 @@
Change this file to trigger running the external-stack CI job
trigger
@@ -71,7 +71,7 @@ def process_container(build_context: BuildContext) -> bool:

    # Check if this is in an external stack
    if stack_is_external(build_context.stack):
        container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
        container_parent_dir = Path(build_context.stack).joinpath("container-build")
        temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
        temp_build_script_filename = temp_build_dir.joinpath("build.sh")
        # Now check if the container exists in the external stack.
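The two container_parent_dir lines above are the two sides of the change: one resolves container-build two directory levels above the stack directory, the other directly inside it. A worked illustration with a hypothetical stack path (the layout here is assumed, not taken from this diff):

  stack=/srv/external-stack/stack-orchestrator/stacks/my-stack
  # Path(stack).parent.parent / "container-build"
  echo "$(dirname "$(dirname "$stack")")/container-build"   # -> /srv/external-stack/stack-orchestrator/container-build
  # Path(stack) / "container-build"
  echo "$stack/container-build"                             # -> .../stacks/my-stack/container-build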
@@ -1,80 +0,0 @@

# From: https://raw.githubusercontent.com/blast-io/deployment/master/docker-compose.yml
services:
  # generate jwt.txt if it's absent
  generate-jwt:
    image: blastio/openssl
    volumes:
      - blast-data:/blast:rw
    command: >
      sh -c "[ ! -f /blast/jwt.txt ] && openssl rand -hex 32 | tr -d '\n' > /blast/jwt.txt || exit 0"
  # initialise geth db
  geth-init:
    image: blastio/blast-geth:${NETWORK:-testnet-sepolia}
    volumes:
      - blast-data:/blast:rw
      - ../config/fixturenet-blast/genesis.json:/blast/genesis.json
    entrypoint: /bin/sh
    command: >
      -c "[ ! -d /blast/${GETH_DATA_DIR:-blast-geth-data}/geth ] && /usr/local/bin/geth init --datadir=/blast/${GETH_DATA_DIR:-blast-geth-data} /blast/genesis.json || exit 0"
    depends_on:
      generate-jwt:
        condition: service_completed_successfully
    env_file:
      - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
  blast-geth:
    image: blastio/blast-geth:${NETWORK:-testnet-sepolia}
    volumes:
      - blast-data:/blast
    ports:
      - "9545"
      - "9546"
    command: >
      --datadir=/blast/${GETH_DATA_DIR:-blast-geth-data}
      --http
      --http.corsdomain="*"
      --http.vhosts="*"
      --http.addr=0.0.0.0
      --http.port=9545
      --http.api=web3,debug,eth,txpool,net,engine
      --ws
      --ws.addr=0.0.0.0
      --ws.port=9546
      --ws.origins="*"
      --ws.api=debug,eth,txpool,net,engine
      --authrpc.addr="0.0.0.0"
      --authrpc.port="8551"
      --authrpc.vhosts="*"
      --authrpc.jwtsecret=/blast/jwt.txt
      --syncmode=full
      --gcmode=archive
      --nodiscover
      --maxpeers=0
      --rollup.disabletxpoolgossip=true
    env_file:
      - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
    depends_on:
      geth-init:
        condition: service_completed_successfully
  op-node:
    image: blastio/blast-optimism:${NETWORK:-testnet-sepolia}
    volumes:
      - blast-data:/blast
      - ../config/fixturenet-blast/rollup.json:/blast/rollup.json
    ports:
      - "9003"
    command: >
      op-node
      --l1="${CERC_L1_RPC}"
      --l1.rpckind="any"
      --l1.trustrpc=true
      --l2="http://blast-geth:8551"
      --l2.jwt-secret=/blast/jwt.txt
      --rollup.config="/blast/rollup.json"
    depends_on:
      - blast-geth
    env_file:
      - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config

volumes:
  blast-data:
@@ -3,9 +3,6 @@ services:
    restart: unless-stopped
    image: cerc/laconicd:local
    command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
    environment:
      TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED}
      TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY}
    volumes:
      # The cosmos-sdk node's database directory:
      - laconicd-data:/root/.laconicd
@@ -28,7 +25,6 @@ services:
    image: cerc/laconic-registry-cli:local
    volumes:
      - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
      - ${BASE_DIR:-~/cerc}/laconic-registry-cli:/laconic-registry-cli

volumes:
  laconicd-data:
@@ -6,7 +6,6 @@ services:
      - ../config/fixturenet-eth/fixturenet-eth.env
    environment:
      RUN_BOOTNODE: "true"
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    image: cerc/fixturenet-plugeth-plugeth:local
    volumes:
      - fixturenet_plugeth_bootnode_geth_data:/root/ethdata
@@ -2,7 +2,7 @@ version: "3.7"

services:
  grafana:
    image: grafana/grafana:10.2.3
    image: grafana/grafana:10.2.2
    restart: always
    environment:
      GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL}
@@ -16,12 +16,8 @@ services:
      postgres_pass: password
      postgres_db: graph-node
      ethereum: ${ETH_NETWORKS:-lotus-fixturenet:http://lotus-node-1:1234/rpc/v1}
      # Env varaibles reference: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
      GRAPH_LOG: debug
      ETHEREUM_REORG_THRESHOLD: 3
      GRAPH_ETHEREUM_JSON_RPC_TIMEOUT: ${GRAPH_ETHEREUM_JSON_RPC_TIMEOUT:-180}
      GRAPH_ETHEREUM_REQUEST_RETRIES: ${GRAPH_ETHEREUM_REQUEST_RETRIES:-10}
      GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE: ${GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE:-2000}
    entrypoint: ["bash", "-c"]
    # Wait for ETH RPC endpoint to be up when running with fixturenet-lotus
    command: |
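The ${VAR:-default} forms above are resolved by Compose from the deploying shell's environment, so each tunable can be overridden per deployment without editing the file. A minimal sketch — the service name graph-node is assumed here, not shown in this hunk:

  GRAPH_ETHEREUM_JSON_RPC_TIMEOUT=300 docker compose up -d graph-node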
@@ -1,83 +0,0 @@

# From: https://raw.githubusercontent.com/blast-io/deployment/master/docker-compose.yml
services:
  # generate jwt.txt if it's absent
  generate-jwt:
    image: blastio/openssl
    volumes:
      - blast-data:/blast:rw
    command: >
      sh -c "[ ! -f /blast/jwt.txt ] && openssl rand -hex 32 | tr -d '\n' > /blast/jwt.txt || exit 0"
  # initialise geth db
  geth-init:
    image: blastio/blast-geth:${NETWORK:-mainnet}
    volumes:
      - blast-data:/blast:rw
    entrypoint: /bin/sh
    command: >
      -c "[ ! -d /blast/${GETH_DATA_DIR:-blast-geth-data}/geth ] && /usr/local/bin/geth init --datadir=/blast/${GETH_DATA_DIR:-blast-geth-data} /blast/genesis.json || exit 0"
    depends_on:
      generate-jwt:
        condition: service_completed_successfully
    env_file:
      - ../config/mainnet-blast/${NETWORK:-mainnet}.config
  blast-geth:
    image: blastio/blast-geth:${NETWORK:-mainnet}
    volumes:
      - blast-data:/blast
    ports:
      - "9545"
      - "9546"
      - "6060"
    command: >
      --datadir=/blast/${GETH_DATA_DIR:-blast-geth-data}
      --http
      --http.corsdomain="*"
      --http.vhosts="*"
      --http.addr=0.0.0.0
      --http.port=9545
      --http.api=web3,debug,eth,txpool,net,engine
      --ws
      --ws.addr=0.0.0.0
      --ws.port=9546
      --ws.origins="*"
      --ws.api=debug,eth,txpool,net,engine
      --authrpc.addr="0.0.0.0"
      --authrpc.port="8551"
      --authrpc.vhosts="*"
      --authrpc.jwtsecret=/blast/jwt.txt
      --syncmode=full
      --metrics
      --metrics.addr=0.0.0.0
      --gcmode=archive
      --nodiscover
      --maxpeers=0
      --rollup.disabletxpoolgossip=true
    env_file:
      - ../config/mainnet-blast/${NETWORK:-mainnet}.config
    depends_on:
      geth-init:
        condition: service_completed_successfully
  op-node:
    image: blastio/blast-optimism:${NETWORK:-mainnet}
    volumes:
      - blast-data:/blast
    ports:
      - "9003"
      - "7300"
    command: >
      op-node
      --l1="https://eth-mainnet-1.vdb.to/"
      --metrics.enabled
      --l1.rpckind="any"
      --l1.trustrpc=true
      --l2="http://blast-geth:8551"
      --l2.jwt-secret=/blast/jwt.txt
      --rollup.config="/blast/rollup.json"
    depends_on:
      - blast-geth
    env_file:
      - ../config/mainnet-blast/${NETWORK:-mainnet}.config

volumes:
  blast-data:
@@ -1,76 +0,0 @@
version: '3.2'

services:
  ajna-watcher-db:
    restart: unless-stopped
    image: postgres:14-alpine
    environment:
      - POSTGRES_USER=vdbm
      - POSTGRES_MULTIPLE_DATABASES=ajna-watcher,ajna-watcher-job-queue
      - POSTGRES_EXTENSION=ajna-watcher-job-queue:pgcrypto
      - POSTGRES_PASSWORD=password
    volumes:
      - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
      - ajna_watcher_db_data:/var/lib/postgresql/data
    ports:
      - "5432"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "5432"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s

  ajna-watcher-job-runner:
    restart: unless-stopped
    depends_on:
      ajna-watcher-db:
        condition: service_healthy
    image: cerc/watcher-ajna:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
    command: ["bash", "./start-job-runner.sh"]
    volumes:
      - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
      - ../config/watcher-ajna/start-job-runner.sh:/app/start-job-runner.sh
    ports:
      - "9000"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "9000"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

  ajna-watcher-server:
    restart: unless-stopped
    depends_on:
      ajna-watcher-db:
        condition: service_healthy
      ajna-watcher-job-runner:
        condition: service_healthy
    image: cerc/watcher-ajna:local
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
    command: ["bash", "./start-server.sh"]
    volumes:
      - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml
      - ../config/watcher-ajna/start-server.sh:/app/start-server.sh
    ports:
      - "3008"
      - "9001"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "3008"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"

volumes:
  ajna_watcher_db_data:
@@ -1,2 +0,0 @@
GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.s2.testblast.io
OP_NODE_P2P_BOOTNODES=enr:-J-4QM3GLUFfKMSJQuP1UvuKQe8DyovE7Eaiit0l6By4zjTodkR4V8NWXJxNmlg8t8rP-Q-wp3jVmeAOml8cjMj__ROGAYznzb_HgmlkgnY0gmlwhA-cZ_eHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAiuDqvB-AsVSRmnnWr6OHfjgY8YfNclFy9p02flKzXnOg3RjcIJ2YYN1ZHCCdmE,enr:-J-4QDCVpByqQ8nFqCS9aHicqwUfXgzFDslvpEyYz19lvkHLIdtcIGp2d4q5dxHdjRNTO6HXCsnIKxUeuZSPcEbyVQCGAYznzz0RgmlkgnY0gmlwhANiQfuHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAy3AtF2Jh_aPdOohg506Hjmtx-fQ1AKmu71C7PfkWAw9g3RjcIJ2YYN1ZHCCdmE
@@ -1,57 +0,0 @@
{
  "config": {
    "chainId": 608943043,
    "homesteadBlock": 0,
    "eip150Block": 0,
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0,
    "constantinopleBlock": 0,
    "petersburgBlock": 0,
    "istanbulBlock": 0,
    "muirGlacierBlock": 0,
    "berlinBlock": 0,
    "londonBlock": 0,
    "arrowGlacierBlock": 0,
    "grayGlacierBlock": 0,
    "mergeNetsplitBlock": 0,
    "shanghaiTime": 0,
    "bedrockBlock": 0,
    "regolithTime": 0,
    "canyonTime": 0,
    "terminalTotalDifficulty": 0,
    "terminalTotalDifficultyPassed": true,
    "optimism": {
      "eip1559Elasticity": 6,
      "eip1559Denominator": 50,
      "eip1559DenominatorCanyon": 250
    }
  },
  "alloc": {
    "0000000000000000000000000000000000000000": {
      "balance": "0x1"
    },
    "4200000000000000000000000000000000000000": {
      "code": "0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100ae5780638f283970146100db578063f851a440146100fb5761005d565b3661005d5761005b610110565b005b61005b610110565b34801561007157600080fd5b5061005b610080366004610521565b6101c8565b61009861009336600461053c565b61020e565b6040516100a591906105bf565b60405180910390f35b3480156100ba57600080fd5b506100c361033e565b6040516001600160a01b0390911681526020016100a5565b3480156100e757600080fd5b5061005b6100f6366004610521565b6103a9565b34801561010757600080fd5b506100c36103e4565b600061013a7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5490565b90506001600160a01b0381166101a55760405162461bcd60e51b815260206004820152602560248201527f50726f78793a20696d706c656d656e746174696f6e206e6f7420696e697469616044820152641b1a5e995960da1b60648201526084015b60405180910390fd5b3660008037600080366000845af43d6000803e806101c2573d6000fd5b503d6000f35b600080516020610625833981519152546001600160a01b0316336001600160a01b031614806101f5575033155b156102065761020381610432565b50565b610203610110565b60606102266000805160206106258339815191525490565b6001600160a01b0316336001600160a01b03161480610243575033155b1561032f5761025184610432565b600080856001600160a01b0316858560405161026e929190610614565b600060405180830381855af49150503d80600081146102a9576040519150601f19603f3d011682016040523d82523d6000602084013e6102ae565b606091505b5091509150816103265760405162461bcd60e51b815260206004820152603960248201527f50726f78793a2064656c656761746563616c6c20746f206e657720696d706c6560448201527f6d656e746174696f6e20636f6e7472616374206661696c656400000000000000606482015260840161019c565b91506103379050565b610337610110565b9392505050565b60006103566000805160206106258339815191525490565b6001600160a01b0316336001600160a01b03161480610373575033155b1561039e57507f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5490565b6103a6610110565b90565b600080516020610625833981519152546001600160a01b0316336001600160a01b031614806103d6575033155b15610206576102038161048e565b60006103fc6000805160206106258339815191525490565b6001600160a01b0316336001600160a01b03161480610419575033155b1561039e57506000805160206106258339815191525490565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc8181556040516001600160a01b038316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a25050565b60006104a66000805160206106258339815191525490565b600080516020610625833981519152838155604080516001600160a01b0380851682528616602082015292935090917f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f910160405180910390a1505050565b80356001600160a01b038116811461051c57600080fd5b919050565b60006020828403121561053357600080fd5b61033782610505565b60008060006040848603121561055157600080fd5b61055a84610505565b9250602084013567ffffffffffffffff8082111561057757600080fd5b818601915086601f83011261058b57600080fd5b81358181111561059a57600080fd5b8760208285010111156105ac57600080fd5b6020830194508093505050509250925092565b600060208083528351808285015260005b818110156105ec578581018301518582016040015282016105d0565b818111156105fe576000604083870101525b50601f01601f1916929092016040019392505050565b818382376000910190815291905056feb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103a164736f6c634300080f000a",
      "storage": {
        "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x000000000000000000000000c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30000",
        "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000004200000000000000000000000000000000000018"
      },
      "balance": "0x0",
      "flags": 1
    }
  },
  "nonce": "0x0",
  "timestamp": "0x659b7460",
  "extraData": "0x424544524f434b",
  "gasLimit": "0x1c9c380",
  "difficulty": "0x0",
  "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "coinbase": "0x4200000000000000000000000000000000000011",
  "number": "0x0",
  "gasUsed": "0x0",
  "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "baseFeePerGas": "0x3b9aca00",
  "excessBlobGas": null,
  "blobGasUsed": null
}
@@ -1,31 +0,0 @@
{
  "genesis": {
    "l1": {
      "hash": "0x17728cf4d8e0b4f292d2390a869fd7c632d39e72efb00ca3462b4387c6aa2437",
      "number": 5044255
    },
    "l2": {
      "hash": "0x26a1c0faad7b041f34569a1bb383f00ab74b335883a44bed53e9f41ced5fd906",
      "number": 0
    },
    "l2_time": 1704686688,
    "system_config": {
      "batcherAddr": "0xba26fee2fa917443e05e65de8d4350bcd2f59222",
      "overhead": "0x00000000000000000000000000000000000000000000000000000000000000bc",
      "scalar": "0x00000000000000000000000000000000000000000000000000000000000a6fe0",
      "gasLimit": 30000000
    }
  },
  "block_time": 2,
  "max_sequencer_drift": 600,
  "seq_window_size": 3600,
  "channel_timeout": 300,
  "l1_chain_id": 11155111,
  "l2_chain_id": 608943043,
  "regolith_time": 0,
  "canyon_time": 0,
  "batch_inbox_address": "0x1c3b85a2108784eab6a4bf56cdd6f722e415b331",
  "deposit_contract_address": "0x2757e4430e694f27b73ec9c02257cab3a498c8c5",
  "l1_system_config_address": "0x329faf078c364a316e08bf6a17b7eee6ae75a613",
  "protocol_versions_address": "0x0000000000000000000000000000000000000000"
}
@@ -22,7 +22,4 @@ CERC_STATEDIFF_DB_LOG_STATEMENTS="${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false}"
CERC_STATEDIFF_WORKERS=2

CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"
CERC_GETH_VERBOSITY=${CERC_GETH_VERBOSITY:-3}

# Used by Lighthouse
SECONDS_PER_ETH1_BLOCK=${SECONDS_PER_ETH1_BLOCK:-3}
CERC_GETH_VERBOSITY=${CERC_GETH_VERBOSITY:-3}
@@ -102,17 +102,6 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
  fi
fi

# Enable telemetry (prometheus metrics: http://localhost:1317/metrics?format=prometheus)
if [[ "$OSTYPE" == "darwin"* ]]; then
  sed -i '' 's/enabled = false/enabled = true/g' $HOME/.laconicd/config/app.toml
  sed -i '' 's/prometheus-retention-time = 0/prometheus-retention-time = 60/g' $HOME/.laconicd/config/app.toml
  sed -i '' 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
else
  sed -i 's/enabled = false/enabled = true/g' $HOME/.laconicd/config/app.toml
  sed -i 's/prometheus-retention-time = 0/prometheus-retention-time = 60/g' $HOME/.laconicd/config/app.toml
  sed -i 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
fi

# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
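Once the node is running with telemetry enabled, the endpoint named in the comment above can be spot-checked; a minimal sketch, assuming the host and port from that comment:

  curl -s 'http://localhost:1317/metrics?format=prometheus' | head -n 5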
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@@ -1,32 +0,0 @@
POSTGRES_DB=keycloak
POSTGRES_USER=keycloak
POSTGRES_PASSWORD=keycloak
# Don't change this unless you also change the healthcheck in docker-compose-mainnet-eth-keycloak.yml
PGPORT=35432
KC_DB=postgres
KC_DB_URL_HOST=keycloak-db
KC_DB_URL_PORT=${PGPORT}
KC_DB_URL_DATABASE=${POSTGRES_DB}
KC_DB_USERNAME=${POSTGRES_USER}
KC_DB_PASSWORD=${POSTGRES_PASSWORD}
KC_DB_SCHEMA=public
KC_HOSTNAME=localhost
KC_HTTP_ENABLED="true"
KC_HTTP_RELATIVE_PATH="/auth"
KC_HOSTNAME_STRICT_HTTPS="false"
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=admin
X_API_CHECK_REALM=cerc
X_API_CHECK_CLIENT_ID="%user_id%"


# keycloak-reg-api
CERC_KCUSERREG_LISTEN_PORT=9292
CERC_KCUSERREG_LISTEN_ADDR='0.0.0.0'
CERC_KCUSERREG_API_URL='http://keycloak:8080/auth'
CERC_KCUSERREG_REG_USER="${KEYCLOAK_ADMIN}"
CERC_KCUSERREG_REG_PW="${KEYCLOAK_ADMIN_PASSWORD}"
CERC_KCUSERREG_REG_CLIENT_ID='admin-cli'
CERC_KCUSERREG_TARGET_REALM=cerc
CERC_KCUSERREG_TARGET_GROUPS=eth
CERC_KCUSERREG_CREATE_ENABLED=true
@@ -1,33 +0,0 @@
# Enable startup script debug output.
CERC_SCRIPT_DEBUG=false

# Specify any other lighthouse CLI options.
LIGHTHOUSE_OPTS=""

# Override the advertised public IP (optional)
# --enr-address
#LIGHTHOUSE_ENR_ADDRESS=""

# --checkpoint-sync-url
LIGHTHOUSE_CHECKPOINT_SYNC_URL="https://beaconstate.ethstaker.cc"

# --checkpoint-sync-url-timeout
LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT=300

# --datadir
LIGHTHOUSE_DATADIR=/data

# --debug-level
LIGHTHOUSE_DEBUG_LEVEL=info

# --http-port
LIGHTHOUSE_HTTP_PORT=5052

# --execution-jwt
LIGHTHOUSE_JWTSECRET=/etc/mainnet-eth/jwtsecret

# --metrics-port
LIGHTHOUSE_METRICS_PORT=5054

# --port --enr-udp-port --enr-tcp-port
LIGHTHOUSE_NETWORK_PORT=9000
@@ -1,2 +0,0 @@
GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.blast.io
OP_NODE_P2P_BOOTNODES=enr:-J64QGwHl9uYLfC_cnmxSA6wQH811nkOWJDWjzxqkEUlJoZHWvI66u-BXgVcPCeMUmg0dBpFQAPotFchG67FHJMZ9OSGAY3d6wevgmlkgnY0gmlwhANizeSHb3BzdGFja4Sx_AQAiXNlY3AyNTZrMaECg4pk0cskPAyJ7pOmo9E6RqGBwV-Lex4VS9a3MQvu7PWDdGNwgnZhg3VkcIJ2YQ,enr:-J64QDge2jYBQtcNEpRqmKfci5E5BHAhNBjgv4WSdwH1_wPqbueq2bDj38-TSW8asjy5lJj1Xftui6Or8lnaYFCqCI-GAY3d6wf3gmlkgnY0gmlwhCO2D9yHb3BzdGFja4Sx_AQAiXNlY3AyNTZrMaEDo4aCTq7pCEN8om9U5n_VyWdambGnQhwHNwKc8o-OicaDdGNwgnZhg3VkcIJ2YQ
@@ -1,32 +0,0 @@
{
  "genesis": {
    "l1": {
      "hash": "0xfcfb8d586bdae763f1189988789211c69eb893a895e7ba48be3ca6289f0941b7",
      "number": 19300102
    },
    "l2": {
      "hash": "0xb689b35ef29d0bec5816938e0e52683c7257d2e325420ea69b739a2be4754b89",
      "number": 0
    },
    "l2_time": 1708809815,
    "system_config": {
      "batcherAddr": "0x415c8893d514f9bc5211d36eeda4183226b84aa7",
      "overhead": "0x00000000000000000000000000000000000000000000000000000000000000bc",
      "scalar": "0x00000000000000000000000000000000000000000000000000000000000a6fe0",
      "gasLimit": 30000000
    }
  },
  "block_time": 2,
  "max_sequencer_drift": 600,
  "seq_window_size": 3600,
  "channel_timeout": 300,
  "l1_chain_id": 1,
  "l2_chain_id": 81457,
  "regolith_time": 0,
  "canyon_time": 0,
  "batch_inbox_address": "0xff00000000000000000000000000000000081457",
  "deposit_contract_address": "0x0ec68c5b10f21effb74f2a5c61dfe6b08c0db6cb",
  "l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
  "protocol_versions_address": "0x0000000000000000000000000000000000000000"
}
@@ -1,30 +0,0 @@
#!/bin/bash
if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then
  set -x
fi

ENR_OPTS=""
if [[ -n "$LIGHTHOUSE_ENR_ADDRESS" ]]; then
  ENR_OPTS="--enr-address $LIGHTHOUSE_ENR_ADDRESS"
fi

exec lighthouse bn \
  --checkpoint-sync-url "$LIGHTHOUSE_CHECKPOINT_SYNC_URL" \
  --checkpoint-sync-url-timeout ${LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT} \
  --datadir "$LIGHTHOUSE_DATADIR" \
  --debug-level $LIGHTHOUSE_DEBUG_LEVEL \
  --disable-deposit-contract-sync \
  --disable-upnp \
  --enr-tcp-port $LIGHTHOUSE_NETWORK_PORT \
  --enr-udp-port $LIGHTHOUSE_NETWORK_PORT \
  --execution-endpoint "$LIGHTHOUSE_EXECUTION_ENDPOINT" \
  --execution-jwt /etc/mainnet-eth/jwtsecret \
  --http \
  --http-address 0.0.0.0 \
  --http-port $LIGHTHOUSE_HTTP_PORT \
  --metrics \
  --metrics-address=0.0.0.0 \
  --metrics-port $LIGHTHOUSE_METRICS_PORT \
  --network mainnet \
  --port $LIGHTHOUSE_NETWORK_PORT \
  $ENR_OPTS $LIGHTHOUSE_OPTS
File diff suppressed because it is too large
@@ -65,12 +65,3 @@ scrape_configs:
        target_label: instance
      - target_label: __address__
        replacement: postgres-exporter:9187

  - job_name: laconicd
    metrics_path: /metrics
    scrape_interval: 30s
    static_configs:
      # Add laconicd REST endpoint target with host and port (1317)
      # - targets: ['example-host:1317']
    params:
      format: ['prometheus']
@@ -50,6 +50,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -126,6 +142,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -202,6 +234,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -278,6 +326,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -354,6 +418,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -430,6 +510,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -506,6 +602,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -582,6 +694,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -660,6 +788,22 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -736,69 +880,7 @@ groups:
              legendFormat: __auto
              range: false
              refId: latest_external
          - refId: condition
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: __expr__
            model:
              conditions:
                - evaluator:
                    params:
                      - 0
                      - 0
                    type: gt
                  operator:
                    type: and
                  query:
                    params: []
                  reducer:
                    params: []
                    type: avg
                  type: query
              datasource:
                name: Expression
                type: __expr__
                uid: __expr__
              expression: ${diff} >= 16
              intervalMs: 1000
              maxDataPoints: 43200
              refId: condition
              type: math
        noDataState: Alerting
        execErrState: Alerting
        for: 15m
        annotations:
          summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
        isPaused: false

      # Ajna
      - uid: ajna_diff_external
        title: ajna_watcher_head_tracking
        condition: condition
        data:
          - refId: diff
            relativeTimeRange:
              from: 600
              to: 0
            datasourceUid: PBFA97CFB590B2093
            model:
              datasource:
                type: prometheus
                uid: PBFA97CFB590B2093
              disableTextWrap: false
              editorMode: code
              expr: latest_block_number - on(chain) group_right sync_status_block_number{job="ajna", instance="ajna", kind="latest_indexed"}
              fullMetaSearch: false
              includeNullMetadata: true
              instant: true
              intervalMs: 1000
              legendFormat: __auto
              maxDataPoints: 43200
              range: false
              refId: diff
              useBackend: false
          - refId: latest_external
          - refId: latest_indexed
            relativeTimeRange:
              from: 600
              to: 0
@@ -808,12 +890,12 @@ groups:
                type: prometheus
                uid: PBFA97CFB590B2093
              editorMode: code
              expr: latest_block_number{chain="filecoin"}
              expr: sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
              hide: false
              instant: true
              legendFormat: __auto
              range: false
              refId: latest_external
              refId: latest_indexed
          - refId: condition
            relativeTimeRange:
              from: 600
@@ -1,20 +0,0 @@
#!/bin/sh

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi
set -u

echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"

# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")

# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml

echo "Running job-runner..."
DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js
@@ -1,20 +0,0 @@
#!/bin/sh

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi
set -u

echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}"

# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
  sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ")

# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml

echo "Running server..."
DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js
@@ -1,98 +0,0 @@
[server]
  host = "0.0.0.0"
  port = 3008
  kind = "active"
  gqlPath = "/"

  # Checkpointing state.
  checkpointing = true

  # Checkpoint interval in number of blocks.
  checkpointInterval = 2000

  # Enable state creation
  # CAUTION: Disable only if state creation is not desired or can be filled subsequently
  enableState = false

  subgraphPath = "./subgraph-build"

  # Interval to restart wasm instance periodically
  wasmRestartBlocksInterval = 20

  # Interval in number of blocks at which to clear entities cache.
  clearEntitiesCacheInterval = 1000

  # Max block range for which to return events in eventsInRange GQL query.
  # Use -1 for skipping check on block range.
  maxEventsBlockRange = 1000

  # Flag to specify whether RPC endpoint supports block hash as block tag parameter
  rpcSupportsBlockHashParam = false

  # GQL cache settings
  [server.gqlCache]
    enabled = true

    # Max in-memory cache size (in bytes) (default 8 MB)
    # maxCacheSize

    # GQL cache-control max-age settings (in seconds)
    maxAge = 15
    timeTravelMaxAge = 86400 # 1 day

[metrics]
  host = "0.0.0.0"
  port = 9000
  [metrics.gql]
    port = 9001

[database]
  type = "postgres"
  host = "ajna-watcher-db"
  port = 5432
  database = "ajna-watcher"
  username = "vdbm"
  password = "password"
  synchronize = true
  logging = false

[upstream]
  [upstream.ethServer]
    rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT"

    # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client)
    rpcClient = true

    # Boolean flag to specify if rpcProviderEndpoint is an FEVM RPC endpoint
    isFEVM = true

    # Boolean flag to filter event logs by contracts
    filterLogsByAddresses = true
    # Boolean flag to filter event logs by topics
    filterLogsByTopics = true

  [upstream.cache]
    name = "requests"
    enabled = false
    deleteOnStart = false

[jobQueue]
  dbConnectionString = "postgres://vdbm:password@ajna-watcher-db/ajna-watcher-job-queue"
  maxCompletionLagInSecs = 300
  jobDelayInMilliSecs = 100
  eventsInBatch = 50
  subgraphEventsOrder = true
  # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
  blockDelayInMilliSecs = 30000

  # Boolean to switch between modes of processing events when starting the server.
  # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
  # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head).
  useBlockRanges = true

  # Block range in which logs are fetched during historical blocks processing
  historicalLogsBlockRange = 2000

  # Max block range of historical processing after which it waits for completion of events processing
  # If set to -1 historical processing does not wait for events processing and completes till latest canonical block
  historicalMaxFetchAhead = 10000
@@ -1,23 +1,26 @@
FROM ethpandaops/ethereum-genesis-generator:3.0.0 AS ethgen
FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen

FROM golang:1.20-alpine as builder

RUN apk add --no-cache python3 py3-pip make bash envsubst jq
RUN apk add --no-cache python3 py3-pip

COPY genesis /opt/genesis

# Install ethereum-genesis-generator tools
COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
COPY --from=ethgen /apps /apps
RUN cd /apps/el-gen && pip3 install --break-system-packages -r requirements.txt
RUN pip3 install --break-system-packages --upgrade "web3==v6.15.1"
# web3==5.24.0 used by el-gen is broken on python 3.11
RUN pip3 install --break-system-packages --upgrade "web3==6.5.0"
RUN pip3 install --break-system-packages --upgrade "typing-extensions"

# Install tool to generate initial block
RUN go install github.com/cerc-io/eth-dump-genblock@b29516740fc01cf1d1d623acbfd0e9a2b6440a96

# Build genesis config
COPY genesis /opt/genesis
RUN apk add --no-cache make bash envsubst jq
RUN cd /opt/genesis && make genesis-el

# Snag the genesis block info.
RUN go install github.com/cerc-io/eth-dump-genblock@latest
RUN eth-dump-genblock /opt/genesis/build/el/geth.json > /opt/genesis/build/el/genesis_block.json

FROM alpine:latest
@@ -9,8 +9,32 @@ mkdir -p ../build/el
tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX)
envsubst < el-config.yaml > $tmp_dir/genesis-config.yaml

python3 /apps/el-gen/genesis_geth.py $tmp_dir/genesis-config.yaml | \
  jq 'del(.config.pragueTime)' \
  > ../build/el/geth.json
ttd=`cat $tmp_dir/genesis-config.yaml | grep terminal_total_difficulty | awk '{ print $2 }'`
homestead_block=`cat $tmp_dir/genesis-config.yaml | grep homestead_block | awk '{ print $2 }'`
eip150_block=`cat $tmp_dir/genesis-config.yaml | grep eip150_block | awk '{ print $2 }'`
eip155_block=`cat $tmp_dir/genesis-config.yaml | grep eip155_block | awk '{ print $2 }'`
eip158_block=`cat $tmp_dir/genesis-config.yaml | grep eip158_block | awk '{ print $2 }'`
byzantium_block=`cat $tmp_dir/genesis-config.yaml | grep byzantium_block | awk '{ print $2 }'`
constantinople_block=`cat $tmp_dir/genesis-config.yaml | grep constantinople_block | awk '{ print $2 }'`
petersburg_block=`cat $tmp_dir/genesis-config.yaml | grep petersburg_block | awk '{ print $2 }'`
istanbul_block=`cat $tmp_dir/genesis-config.yaml | grep istanbul_block | awk '{ print $2 }'`
berlin_block=`cat $tmp_dir/genesis-config.yaml | grep berlin_block | awk '{ print $2 }'`
london_block=`cat $tmp_dir/genesis-config.yaml | grep london_block | awk '{ print $2 }'`
merge_fork_block=`cat $tmp_dir/genesis-config.yaml | grep merge_fork_block | awk '{ print $2 }'`

python3 /apps/el-gen/genesis_geth.py $tmp_dir/genesis-config.yaml | \
  jq ".config.terminalTotalDifficulty=$ttd" | \
  jq ".config.homesteadBlock=$homestead_block" | \
  jq ".config.eip150Block=$eip150_block" | \
  jq ".config.eip155Block=$eip155_block" | \
  jq ".config.eip158Block=$eip158_block" | \
  jq ".config.byzantiumBlock=$byzantium_block" | \
  jq ".config.constantinopleBlock=$constantinople_block" | \
  jq ".config.petersburgBlock=$petersburg_block" | \
  jq ".config.istanbulBlock=$istanbul_block" | \
  jq ".config.berlinBlock=$berlin_block" | \
  jq ".config.londonBlock=$london_block" | \
  jq ".config.mergeForkBlock=$merge_fork_block" | \
  jq ".config.mergeNetsplitBlock=$merge_fork_block" \
  > ../build/el/geth.json
python3 ../accounts/mnemonic_to_csv.py $tmp_dir/genesis-config.yaml > ../build/el/accounts.csv
@@ -10,8 +10,22 @@ el_premine_addrs: {}
chain_id: 1212
deposit_contract_address: "0x1212121212121212121212121212121212121212"
genesis_timestamp: 0
genesis_delay: 0
deneb_fork_epoch: 0
# note: only needed as workaround https://github.com/ethpandaops/ethereum-genesis-generator/pull/105
electra_fork_epoch: 0
slot_duration_in_seconds: 3
terminal_total_difficulty: 1000
homestead_block: 1
eip150_block: 1
eip155_block: 1
eip158_block: 1
byzantium_block: 1
constantinople_block: 1
petersburg_block: 1
istanbul_block: 1
berlin_block: 1
london_block: 1
merge_fork_block: 1

clique:
  enabled: false
  signers:
    - 36d56343bc308d4ffaac2f793d121aba905fa6cc
    - 5e762d4a3847cadaf40a4b0c39574b0ff6698c78
    - 15d7acc1019fdf8ab4f0f7bd31ec1487ecb5a2bd
@@ -6,7 +6,7 @@ fi

ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2`
NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'`
NETRESTRICT=`ip addr | grep -w inet | grep -v '127.0' | awk '{print $2}'`
NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'`
CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}"
CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}"
@@ -102,13 +102,6 @@ else
  fi
fi

OTHER_OPTS=""
# miner options were removed in v1.12
GETH_VERSION=$(geth --version | grep -io '[0-9][0-9a-z.-]*')
if echo -e "$GETH_VERSION\n1.12" | sort -Vc; then
  OTHER_OPTS="--miner.threads=1"
fi

$START_CMD \
  --datadir="${CERC_ETH_DATADIR}" \
  --bootnodes="${ENODE}" \
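The sort -Vc line above is a compact version comparison: sort -Vc exits 0 only when its input is already in version order, so the branch is taken when the installed geth is not newer than 1.12 (miner options were dropped in v1.12). The same idiom as a standalone sketch — the helper name is ours, not from the repo:

  version_le() {
    # exit 0 iff $1 <= $2 when compared as version strings
    printf '%s\n%s\n' "$1" "$2" | sort -V -c 2>/dev/null
  }
  version_le "1.11.6" "1.12" && echo "pre-1.12 geth: keep --miner.threads"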
@@ -133,12 +126,12 @@ else
  --cache.preimages \
  --syncmode=full \
  --mine \
  --miner.threads=1 \
  --metrics \
  --metrics.addr="0.0.0.0" \
  --verbosity=${CERC_GETH_VERBOSITY:-3} \
  --log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
  --miner.etherbase="${ETHERBASE}" \
  ${OTHER_OPTS} \
  ${STATEDIFF_OPTS} \
  &
@@ -1,4 +1,5 @@
FROM cerc/lighthouse-cli:local AS lcli
FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
FROM cerc/fixturenet-eth-genesis:local AS fnetgen

FROM cerc/lighthouse:local
@@ -11,13 +12,16 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*

COPY --from=lcli /usr/local/bin/lcli /usr/local/bin/lcli
COPY --from=fnetgen /opt/genesis/el /opt/testnet/el
COPY --from=fnetgen /opt/genesis/build/el /opt/testnet/build/el

COPY genesis /opt/testnet
COPY run-cl.sh /opt/testnet/run.sh

COPY --from=lcli /usr/local/bin/lcli /usr/local/bin/lcli
COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/eth2-testnet-genesis
COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/eth2-val-tools
COPY --from=ethgen /apps /apps
COPY --from=fnetgen /opt/genesis/el /opt/testnet/el
COPY --from=fnetgen /opt/genesis/build/el /opt/testnet/build/el

RUN cd /opt/testnet && make genesis-cl

# Work around some bugs in lcli where the default path is always used.
@ -10,6 +10,7 @@ set -Eeuo pipefail
|
||||
source ./vars.env
|
||||
|
||||
SUBSCRIBE_ALL_SUBNETS=
|
||||
DEBUG_LEVEL=${DEBUG_LEVEL:-debug}
|
||||
|
||||
# Get positional arguments
|
||||
data_dir=$DATADIR/node_${NODE_NUMBER}
|
||||
|
@ -9,6 +9,8 @@ set -Eeuo pipefail
|
||||
|
||||
source ./vars.env
|
||||
|
||||
DEBUG_LEVEL=${1:-info}
|
||||
|
||||
echo "Starting bootnode"
|
||||
|
||||
# Clean up existing ENR dir to avoid node connectivity issues on a restart
|
||||
|
@ -1,6 +1,5 @@
#!/usr/bin/env bash

# See https://github.com/sigp/lighthouse/scripts/local_testnet/setup.sh
#
# Deploys the deposit contract and makes deposits for $VALIDATOR_COUNT insecure deterministic validators.
# Produces a testnet specification and a genesis state where the genesis time
@ -25,26 +24,23 @@ echo "(Note: errors of the form 'WARN: Scrypt parameters are too weak...' below
lcli \
  new-testnet \
  --spec $SPEC_PRESET \
  --testnet-dir $TESTNET_DIR \
  --deposit-contract-address $ETH1_DEPOSIT_CONTRACT_ADDRESS \
  --testnet-dir $TESTNET_DIR \
  --min-genesis-active-validator-count $GENESIS_VALIDATOR_COUNT \
  --validator-count $VALIDATOR_COUNT \
  --min-genesis-time $GENESIS_TIME \
  --genesis-delay $GENESIS_DELAY \
  --genesis-fork-version $GENESIS_FORK_VERSION \
  --altair-fork-epoch $ALTAIR_FORK_EPOCH \
  --bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \
  --capella-fork-epoch $CAPELLA_FORK_EPOCH \
  --deneb-fork-epoch $DENEB_FORK_EPOCH \
  --bellatrix-fork-epoch $MERGE_FORK_EPOCH \
  --eth1-id $ETH1_CHAIN_ID \
  --eth1-block-hash $ETH1_BLOCK_HASH \
  --eth1-follow-distance 1 \
  --seconds-per-slot $SECONDS_PER_SLOT \
  --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \
  --interop-genesis-state \
  --force

echo Specification and genesis.ssz generated at $TESTNET_DIR.
echo Specification generated at $TESTNET_DIR.
echo "Generating $VALIDATOR_COUNT validators concurrently... (this may take a while)"

lcli \
@ -54,3 +50,13 @@ lcli \
  --node-count $BN_COUNT

echo Validators generated with keystore passwords at $DATADIR.
echo "Building genesis state... (this might take a while)"

lcli \
  interop-genesis \
  --spec $SPEC_PRESET \
  --genesis-time $GENESIS_TIME \
  --testnet-dir $TESTNET_DIR \
  $GENESIS_VALIDATOR_COUNT

echo Created genesis state in $TESTNET_DIR

@ -8,6 +8,8 @@ set -Eeuo pipefail

source ./vars.env

DEBUG_LEVEL=info

BUILDER_PROPOSALS=

# Get options

@ -25,9 +25,7 @@ BOOTNODE_PORT=${BOOTNODE_PORT:-4242}

# Hard fork configuration
ALTAIR_FORK_EPOCH=${ALTAIR_FORK_EPOCH:-0}
BELLATRIX_FORK_EPOCH=${BELLATRIX_FORK_EPOCH:-0}
CAPELLA_FORK_EPOCH=${CAPELLA_FORK_EPOCH:-0}
DENEB_FORK_EPOCH=${DENEB_FORK_EPOCH:-0}
MERGE_FORK_EPOCH=${MERGE_FORK_EPOCH:-0}

# Spec version (mainnet or minimal)
SPEC_PRESET=${SPEC_PRESET:-mainnet}
@ -53,6 +51,3 @@ ETH1_TTD=${ETH1_TTD:-`cat $ETH1_GENESIS_JSON | jq -r '.config.terminalTotalDiffi
ETH1_DEPOSIT_CONTRACT_ADDRESS=${ETH1_DEPOSIT_CONTRACT_ADDRESS:-`cat $ETH1_CONFIG_YAML | grep 'deposit_contract_address' | awk '{ print $2 }' | sed 's/"//g'`}
ETH1_DEPOSIT_CONTRACT_BLOCK=${ETH1_DEPOSIT_CONTRACT_BLOCK:-0x0}
SUGGESTED_FEE_RECIPIENT=`cat ../build/el/accounts.csv | head -1 | cut -d',' -f2`

# --debug-level
DEBUG_LEVEL=${LIGHTHOUSE_DEBUG_LEVEL:-debug}

@ -1,10 +1,9 @@
FROM sigp/lighthouse:v5.1.2
ARG TAG_SUFFIX="-modern"
FROM sigp/lighthouse:v4.3.0${TAG_SUFFIX}

RUN apt-get update && apt-get -y upgrade \
  && apt-get -y install bash netcat curl less jq wget \
  && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN apt-get update; apt-get install bash netcat curl less jq wget -y;

WORKDIR /root
WORKDIR /root/
ADD start-lighthouse.sh .

ENTRYPOINT [ "./start-lighthouse.sh" ]

@ -6,4 +6,4 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

docker build -t cerc/lighthouse:local ${build_command_args} ${SCRIPT_DIR}
docker build -t cerc/lighthouse:local ${build_command_args} --build-arg TAG_SUFFIX="" ${SCRIPT_DIR}

@ -134,7 +134,6 @@ VOLUME /var/lib/lotus


EXPOSE 1234
EXPOSE 1235
EXPOSE 2345
EXPOSE 3456
EXPOSE 1777

@ -3,10 +3,8 @@
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

git stash

# Use a release version tag to match the modified Dockerfile replaced in next step
git -C ${CERC_REPO_BASE_DIR}/lotus checkout v1.27.0-rc1-c
git -C ${CERC_REPO_BASE_DIR}/lotus checkout master

# Replace repo's Dockerfile with modified one
cp ${SCRIPT_DIR}/Dockerfile ${CERC_REPO_BASE_DIR}/lotus/Dockerfile

@ -2,4 +2,4 @@
# Build cerc/test-container
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR

@ -1,10 +0,0 @@
FROM node:18.17.1-alpine3.18

RUN apk --update --no-cache add git python3 alpine-sdk bash curl jq

WORKDIR /app

COPY . .

RUN echo "Installing dependencies and building ajna-watcher-ts" && \
  yarn && yarn build

@ -1,9 +0,0 @@
#!/usr/bin/env bash
# Build cerc/watcher-ajna

source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

docker build -t cerc/watcher-ajna:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/ajna-watcher-ts

@ -1,118 +0,0 @@
# Ajna Watcher

## Setup

Clone required repositories:

```bash
laconic-so --stack ajna setup-repositories --git-ssh --pull
```

Build the container images:

```bash
laconic-so --stack ajna build-containers
```

## Deploy

Create a spec file for the deployment:

```bash
laconic-so --stack ajna deploy init --output ajna-spec.yml
```

### Ports

Edit `network` in the spec file to map container ports to host ports as required:

```yml
...
network:
  ports:
    ajna-watcher-db:
      - 15432:5432
    ajna-watcher-job-runner:
      - 9000:9000
    ajna-watcher-server:
      - 3008:3008
      - 9001:9001
```

### Create a deployment

Create a deployment from the spec file:

```bash
laconic-so --stack ajna deploy create --spec-file ajna-spec.yml --deployment-dir ajna-deployment
```

### Configuration

Inside the deployment directory, open the `config.env` file and set the following env variables:

```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
```

### Start the deployment

```bash
laconic-so deployment --dir ajna-deployment start
```

* To list and monitor the running containers:

  ```bash
  # With status
  docker ps -a

  # Check logs for a container
  docker logs -f <CONTAINER_ID>
  ```

* Open the GQL playground at <http://localhost:3008/graphql>

  ```graphql
  # Example query
  query {
    _meta {
      block {
        hash
        number
        timestamp
      }
      deployment
      hasIndexingErrors
    }

    accounts {
      id
      txCount
      tokensDelegated
      rewardsClaimed
    }
  }
  ```

## Clean up

Stop all the ajna services running in the background:

```bash
# Only stop the docker containers
laconic-so deployment --dir ajna-deployment stop

# Run 'start' to restart the deployment
```

To stop all the ajna services and also delete data:

```bash
# Stop the docker containers
laconic-so deployment --dir ajna-deployment stop --delete-volumes

# Remove deployment directory (deployment will have to be recreated for a re-run)
rm -r ajna-deployment
```

@ -1,9 +0,0 @@
version: "1.0"
name: ajna
description: "Ajna watcher stack"
repos:
  - git.vdb.to/cerc-io/ajna-watcher-ts@v0.1.1
containers:
  - cerc/watcher-ajna
pods:
  - watcher-ajna

@ -1,26 +0,0 @@
# Blast stack

## Clone required repositories
```
$ laconic-so --stack fixturenet-blast setup-repositories
```
## Build the stack's containers
```
$ laconic-so --stack fixturenet-blast build-containers
```
## Create a deployment of the stack
```
$ laconic-so --stack fixturenet-blast deploy init --map-ports-to-host any-same --output blast-spec.yml
```
[Insert details on how to configure the stack]
```
$ laconic-so --stack fixturenet-blast deploy create --deployment-dir blast-deployment --spec-file blast-spec.yml
```
## Start the stack
```
$ laconic-so deployment --dir blast-deployment start
```
Check logs:
```
$ laconic-so deployment --dir blast-deployment logs
```

@ -1,17 +0,0 @@
version: "1.0"
name: fixturenet-blast
description: "A blast devnet stack"
repos:
  - github.com/blast-io/blast
  - git.vdb.to/cerc-io/lighthouse
containers:
  - cerc/webapp-base
  - cerc/lighthouse
  - cerc/lighthouse-cli
  - cerc/foundry
  - cerc/fixturenet-eth-lighthouse

pods:
  - fixturenet-blast
  - foundry

@ -76,19 +76,6 @@ export ETH_RPC_PORT=
# The ethereum network(s) graph-node will connect to
# Set this to a space-separated list of the networks where each entry has the form NAME:URL
export ETH_NETWORKS=

# Optional:

# Timeout for ETH RPC requests in seconds (default: 180s)
export GRAPH_ETHEREUM_JSON_RPC_TIMEOUT=

# Number of times to retry ETH RPC requests (default: 10)
export GRAPH_ETHEREUM_REQUEST_RETRIES=

# Maximum number of blocks to scan for triggers in each request (default: 2000)
export GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE=

# Ref: https://git.vdb.to/cerc-io/graph-node/src/branch/master/docs/environment-variables.md
```

Example env file:
@ -98,10 +85,6 @@ export ETH_RPC_HOST=filecoin.chainup.net
export ETH_RPC_PORT=443

export ETH_NETWORKS=filecoin:https://filecoin.chainup.net/rpc/v1

export GRAPH_ETHEREUM_JSON_RPC_TIMEOUT=360
export GRAPH_ETHEREUM_REQUEST_RETRIES=5
export GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE=50
```

Set the environment variables:

@ -1,26 +0,0 @@
# Blast stack

## Clone required repositories
```
$ laconic-so --stack mainnet-blast setup-repositories
```
## Build the stack's containers
```
$ laconic-so --stack mainnet-blast build-containers
```
## Create a deployment of the stack
```
$ laconic-so --stack mainnet-blast deploy init --map-ports-to-host any-same --output blast-spec.yml
```
[Insert details on how to configure the stack]
```
$ laconic-so --stack mainnet-blast deploy create --deployment-dir blast-deployment --spec-file blast-spec.yml
```
## Start the stack
```
$ laconic-so deployment --dir blast-deployment start
```
Check logs:
```
$ laconic-so deployment --dir blast-deployment logs
```

@ -1,39 +0,0 @@
# Copyright © 2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.


from pathlib import Path
from shutil import copy
import yaml


def create(context, extra_args):
    # Our goal here is just to copy the json files for blast
    yml_path = context.deployment_dir.joinpath("spec.yml")
    with open(yml_path, 'r') as file:
        data = yaml.safe_load(file)

    mount_point = data['volumes']['blast-data']
    if mount_point[0] == "/":
        deploy_dir = Path(mount_point)
    else:
        deploy_dir = context.deployment_dir.joinpath(mount_point)

    command_context = extra_args[2]
    compose_file = [f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f][0]
    source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "genesis.json")
    copy(source_config_file, deploy_dir)
    source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "rollup.json")
    copy(source_config_file, deploy_dir)

@ -1,12 +0,0 @@
version: "1.0"
name: mainnet-blast
description: "A blast stack"
repos:
  - github.com/blast-io/blast
  - git.vdb.to/cerc-io/lighthouse
containers:
  - cerc/webapp-base
  - cerc/lighthouse
  - cerc/lighthouse-cli
pods:
  - mainnet-blast

@ -16,55 +16,26 @@ laconic-so --stack merkl-sushiswap-v3 build-containers

## Deploy

Create a spec file for the deployment:

```bash
laconic-so --stack merkl-sushiswap-v3 deploy init --output merkl-sushiswap-v3-spec.yml
```

### Ports

Edit `network` in the spec file to map container ports to host ports as required:

```
...
network:
  ports:
    merkl-sushiswap-v3-watcher-db:
      - '5432'
    merkl-sushiswap-v3-watcher-job-runner:
      - 9002:9000
    merkl-sushiswap-v3-watcher-server:
      - 127.0.0.1:3007:3008
      - 9003:9001
```

### Create a deployment

Create a deployment from the spec file:

```bash
laconic-so --stack merkl-sushiswap-v3 deploy create --spec-file merkl-sushiswap-v3-spec.yml --deployment-dir merkl-sushiswap-v3-deployment
```

### Configuration

Inside the deployment directory, open the `config.env` file and set the following env variables:
Create and update an env file to be used in the next step:

```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
CERC_ETH_RPC_ENDPOINT=
```

### Deploy the stack

```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
```

### Start the deployment

```bash
laconic-so deployment --dir merkl-sushiswap-v3-deployment start
laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env-file <PATH_TO_ENV_FILE> up
```

* To list and monitor the running containers:

  ```bash
  laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 ps

  # With status
  docker ps -a

@ -75,7 +46,6 @@ laconic-so deployment --dir merkl-sushiswap-v3-deployment start
* Open the GQL playground at http://localhost:3007/graphql

  ```graphql
  # Example query
  {
    _meta {
      block {
@ -84,7 +54,7 @@ laconic-so deployment --dir merkl-sushiswap-v3-deployment start
      }
      hasIndexingErrors
    }


    factories {
      id
      poolCount
@ -94,21 +64,18 @@ laconic-so deployment --dir merkl-sushiswap-v3-deployment start

## Clean up

Stop all the merkl-sushiswap-v3 services running in the background:
Stop all the services running in the background:

```bash
# Only stop the docker containers
laconic-so deployment --dir merkl-sushiswap-v3-deployment stop

# Run 'start' to restart the deployment
laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 down
```

To stop all the merkl-sushiswap-v3 services and also delete data:
Clear volumes created by this stack:

```bash
# Stop the docker containers
laconic-so deployment --dir merkl-sushiswap-v3-deployment stop --delete-volumes
# List all relevant volumes
docker volume ls -q --filter "name=merkl_sushiswap_v3"

# Remove deployment directory (deployment will have to be recreated for a re-run)
rm -r merkl-sushiswap-v3-deployment
# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=merkl_sushiswap_v3")
```

@ -2,7 +2,7 @@ version: "1.0"
|
||||
name: merkl-sushiswap-v3
|
||||
description: "SushiSwap v3 watcher stack"
|
||||
repos:
|
||||
- github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.7
|
||||
- github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.6
|
||||
containers:
|
||||
- cerc/watcher-merkl-sushiswap-v3
|
||||
pods:
|
||||
|
@ -4,7 +4,6 @@
|
||||
* Comes with the following built-in exporters / dashboards:
|
||||
* Chain Head Exporter - for tracking chain heads given external ETH RPC endpoints
|
||||
* Watchers dashboard
|
||||
* laconicd dashboard
|
||||
* [Prometheus Blackbox](https://grafana.com/grafana/dashboards/7587-prometheus-blackbox-exporter/) - for tracking HTTP endpoints
|
||||
* [NodeJS Application Dashboard](https://grafana.com/grafana/dashboards/11159-nodejs-application-dashboard/) - for default NodeJS metrics
|
||||
* [PostgreSQL Database](https://grafana.com/grafana/dashboards/9628-postgresql-database/) - for monitoring Postgres dbs
|
||||
@ -100,7 +99,6 @@ laconic-so --stack monitoring deploy create --spec-file monitoring-spec.yml --de
|
||||
- targets:
|
||||
- <HTTP_ENDPOINT_1>
|
||||
- <HTTP_ENDPOINT_2>
|
||||
- <LACONICD_GQL_ENDPOINT>
|
||||
```
|
||||
|
||||
* Postgres (in-stack exporter):
|
||||
@ -118,16 +116,6 @@ laconic-so --stack monitoring deploy create --spec-file monitoring-spec.yml --de
|
||||
```
|
||||
* Add database credentials to be used in `auth_modules` in the postgres-exporter config file (`monitoring-deployment/config/monitoring/postgres-exporter.yml`)

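  A minimal sketch of one such entry (the module name, credentials, and options below are placeholders, not values shipped with this stack):

  ```yml
  auth_modules:
    example_db:           # placeholder module name
      type: userpass
      userpass:
        username: <DB_USER>      # placeholder credential
        password: <DB_PASSWORD>  # placeholder credential
      options:
        sslmode: disable
  ```
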
* laconicd: update the `laconicd` job with a laconicd node's REST endpoint host and port:

  ```yml
  ...
  - job_name: laconicd
    static_configs:
      - targets: ['example-host:1317']
  ...
  ```

Note: Use `host.docker.internal` as the host to access ports on the host machine.

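For example, if a laconicd node's REST API is listening on port 1317 of the host machine, the target above could be written as follows (a sketch; the port is a placeholder, not a value mandated by this stack):

```yml
- job_name: laconicd
  static_configs:
    # host.docker.internal resolves to the container host
    - targets: ['host.docker.internal:1317']
```
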
### Grafana Config

@ -46,11 +46,6 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
      static_configs:
        - targets:
            - <AZIMUTH_GATEWAY_GQL_ENDPOINT>
            - <LACONICD_GQL_ENDPOINT>
  ...
  - job_name: laconicd
    static_configs:
      - targets: ['LACONICD_REST_HOST:LACONICD_REST_PORT']
  ...
  - job_name: azimuth
    scrape_interval: 10s
@ -103,16 +98,6 @@ Add the following scrape configs to prometheus config file (`monitoring-watchers
        labels:
          instance: 'merkl_sushiswap'
          chain: 'filecoin'

  - job_name: ajna
    scrape_interval: 20s
    metrics_path: /metrics
    scheme: http
    static_configs:
      - targets: ['AJNA_WATCHER_HOST:AJNA_WATCHER_PORT']
        labels:
          instance: 'ajna'
          chain: 'filecoin'
  ```

Add a scrape config, as done above, for any additional watcher to add it to the Watchers dashboard.

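For instance, a job for a hypothetical `example-watcher` would follow the same shape as the entries above (host, port, and labels are placeholders):

```yml
- job_name: example-watcher
  scrape_interval: 20s
  metrics_path: /metrics
  scheme: http
  static_configs:
    # placeholder target; point this at the watcher's metrics port
    - targets: ['EXAMPLE_WATCHER_HOST:EXAMPLE_WATCHER_PORT']
      labels:
        instance: 'example-watcher'
        chain: 'filecoin'
```
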
@ -1,7 +1,7 @@
version: "0.1"
name: monitoring
repos:
  - github.com/cerc-io/watcher-ts@v0.2.81
  - github.com/cerc-io/watcher-ts@v0.2.79
containers:
  - cerc/watcher-ts
pods:

@ -55,7 +55,7 @@ ports:
Create deployment:

```bash
laconic-so --stack sushiswap-subgraph deploy create --spec-file sushiswap-subgraph-spec.yml --deployment-dir sushiswap-subgraph-deployment
laconic-so deploy create --spec-file sushiswap-subgraph-spec.yml --deployment-dir sushiswap-subgraph-deployment
```

## Start the stack

@ -16,55 +16,26 @@ laconic-so --stack sushiswap-v3 build-containers

## Deploy

Create a spec file for the deployment:

```bash
laconic-so --stack sushiswap-v3 deploy init --output sushiswap-v3-spec.yml
```

### Ports

Edit `network` in the spec file to map container ports to host ports as required:

```
...
network:
  ports:
    sushiswap-v3-watcher-db:
      - '5432'
    sushiswap-v3-watcher-job-runner:
      - 9000:9000
    sushiswap-v3-watcher-server:
      - 127.0.0.1:3008:3008
      - 9001:9001
```

### Create a deployment

Create a deployment from the spec file:

```bash
laconic-so --stack sushiswap-v3 deploy create --spec-file sushiswap-v3-spec.yml --deployment-dir sushiswap-v3-deployment
```

### Configuration

Inside the deployment directory, open the `config.env` file and set the following env variables:
Create and update an env file to be used in the next step:

```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
CERC_ETH_RPC_ENDPOINT=
```

### Deploy the stack

```bash
# External Filecoin (ETH RPC) endpoint to point the watcher to
CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1
```

### Start the deployment

```bash
laconic-so deployment --dir sushiswap-v3-deployment start
laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 --env-file <PATH_TO_ENV_FILE> up
```

* To list and monitor the running containers:

  ```bash
  laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 ps

  # With status
  docker ps -a

@ -72,43 +43,20 @@ laconic-so deployment --dir sushiswap-v3-deployment start
  docker logs -f <CONTAINER_ID>
  ```

* Open the GQL playground at http://localhost:3008/graphql

  ```graphql
  # Example query
  {
    _meta {
      block {
        number
        timestamp
      }
      hasIndexingErrors
    }

    factories {
      id
      poolCount
    }
  }
  ```

## Clean up

Stop all the sushiswap-v3 services running in the background:
Stop all the services running in the background:

```bash
# Only stop the docker containers
laconic-so deployment --dir sushiswap-v3-deployment stop

# Run 'start' to restart the deployment
laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 down
```

To stop all the sushiswap-v3 services and also delete data:
Clear volumes created by this stack:

```bash
# Stop the docker containers
laconic-so deployment --dir sushiswap-v3-deployment stop --delete-volumes
# List all relevant volumes
docker volume ls -q --filter "name=sushiswap_v3"

# Remove deployment directory (deployment will have to be recreated for a re-run)
rm -r sushiswap-v3-deployment
# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=sushiswap_v3")
```

@ -2,7 +2,7 @@ version: "1.0"
|
||||
name: sushiswap-v3
|
||||
description: "SushiSwap v3 watcher stack"
|
||||
repos:
|
||||
- github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.7
|
||||
- github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.6
|
||||
containers:
|
||||
- cerc/watcher-sushiswap-v3
|
||||
pods:
|
||||
|
@ -2,10 +2,10 @@ version: "1.0"
|
||||
name: webapp-deployer-backend
|
||||
description: "Deployer for webapps"
|
||||
repos:
|
||||
- git.vdb.to/cerc-io/webapp-deployment-status-api
|
||||
- git.vdb.to/telackey/webapp-deployment-status-api
|
||||
containers:
|
||||
- cerc/webapp-deployer-backend
|
||||
pods:
|
||||
- name: webapp-deployer-backend
|
||||
repository: git.vdb.to/cerc-io/webapp-deployment-status-api
|
||||
repository: git.vdb.to/telackey/webapp-deployment-status-api
|
||||
path: ./
|
||||
|
@ -27,7 +27,6 @@ from pathlib import Path
from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
from stack_orchestrator.util import resolve_compose_file
from stack_orchestrator.deploy.deployer import Deployer, DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer
from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
@ -325,10 +324,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
            pod_path = pod["path"]
            if include_exclude_check(pod_name, include, exclude):
                if pod_repository is None or pod_repository == "internal":
                    if deployment:
                        compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
                    else:
                        compose_file_name = resolve_compose_file(stack, pod_name)
                    compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
                else:
                    if deployment:
                        compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
@ -340,7 +336,6 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
                    if pod_post_start_command is not None:
                        post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
            else:
                # TODO: fix this code for external stack with scripts
                pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
                compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
                pod_pre_start_command = pod.get("pre_start_command")

@ -16,7 +16,7 @@
import os
from typing import List, Any
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list
from stack_orchestrator.opts import opts


@ -27,7 +27,7 @@ def _container_image_from_service(stack: str, service: str):
    pods = get_pod_list(parsed_stack)
    yaml = get_yaml()
    for pod in pods:
        pod_file_path = resolve_compose_file(stack, pod)
        pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        if "services" in parsed_pod_file:
            services = parsed_pod_file["services"]

@ -26,8 +26,7 @@ from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
                                     global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
                                     get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file,
                                     resolve_config_dir)
                                     get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file)
from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
@ -44,7 +43,7 @@ def _get_ports(stack):
    pods = get_pod_list(parsed_stack)
    yaml = get_yaml()
    for pod in pods:
        pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
        pod_file_path = get_pod_file_path(parsed_stack, pod)
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        if "services" in parsed_pod_file:
            for svc_name, svc in parsed_pod_file["services"].items():
@ -80,7 +79,7 @@ def _get_named_volumes(stack):
        return ret

    for pod in pods:
        pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
        pod_file_path = get_pod_file_path(parsed_stack, pod)
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
@ -481,9 +480,10 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
    os.mkdir(destination_compose_dir)
    destination_pods_dir = deployment_dir_path.joinpath("pods")
    os.mkdir(destination_pods_dir)
    data_dir = Path(__file__).absolute().parent.parent.joinpath("data")
    yaml = get_yaml()
    for pod in pods:
        pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
        pod_file_path = get_pod_file_path(parsed_stack, pod)
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
        destination_pod_dir = destination_pods_dir.joinpath(pod)
@ -497,7 +497,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
        config_dirs = {pod}
        config_dirs = config_dirs.union(extra_config_dirs)
        for config_dir in config_dirs:
            source_config_dir = resolve_config_dir(stack_name, config_dir)
            source_config_dir = data_dir.joinpath("config", config_dir)
            if os.path.exists(source_config_dir):
                destination_config_dir = deployment_dir_path.joinpath("config", config_dir)
                # If the same config dir appears in multiple pods, it may already have been copied

@ -39,12 +39,11 @@ def process_app_deployment_request(
    app_deployment_request,
    deployment_record_namespace,
    dns_record_namespace,
    default_dns_suffix,
    dns_suffix,
    deployment_parent_dir,
    kube_config,
    image_registry,
    force_rebuild,
    fqdn_policy,
    logger
):
    logger.log("BEGIN - process_app_deployment_request")
@ -57,15 +56,14 @@ def process_app_deployment_request(
    requested_name = hostname_for_deployment_request(app_deployment_request, laconic)
    logger.log(f"Determined requested name: {requested_name}")

    # HACK
    if "." in requested_name:
        if "allow" == fqdn_policy or "preexisting" == fqdn_policy:
            fqdn = requested_name
        else:
            raise Exception(f"{requested_name} is invalid: only unqualified hostnames are allowed.")
    else:
        fqdn = f"{requested_name}.{default_dns_suffix}"
        raise Exception("Only unqualified hostnames allowed at this time.")

    fqdn = f"{requested_name}.{dns_suffix}"

    # 3. check ownership of existing dnsrecord vs this request
    # TODO: Support foreign DNS
    dns_crn = f"{dns_record_namespace}/{fqdn}"
    dns_record = laconic.get_record(dns_crn)
    if dns_record:
@ -77,9 +75,7 @@ def process_app_deployment_request(
            logger.log(f"Matched DnsRecord ownership: {matched_owner}")
        else:
            raise Exception("Unable to confirm ownership of DnsRecord %s for request %s" %
                            (dns_crn, app_deployment_request.id))
    elif "preexisting" == fqdn_policy:
        raise Exception(f"No pre-existing DnsRecord {dns_crn} could be found for request {app_deployment_request.id}.")
                            (dns_record.id, app_deployment_request.id))

    # 4. get build and runtime config from request
    env_filename = None
@ -195,7 +191,6 @@ def dump_known_requests(filename, requests, status="SEEN"):
@click.option("--state-file", help="File to store state about previously seen requests.")
@click.option("--only-update-state", help="Only update the state file, don't process any requests.", is_flag=True)
@click.option("--dns-suffix", help="DNS domain to use eg, laconic.servesthe.world")
@click.option("--fqdn-policy", help="How to handle requests with an FQDN: prohibit, allow, preexisting", default="prohibit")
@click.option("--record-namespace-dns", help="eg, crn://laconic/dns")
@click.option("--record-namespace-deployments", help="eg, crn://laconic/deployments")
@click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
@ -206,7 +201,7 @@ def dump_known_requests(filename, requests, status="SEEN"):
@click.pass_context
def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,  # noqa: C901
            request_id, discover, state_file, only_update_state,
            dns_suffix, fqdn_policy, record_namespace_dns, record_namespace_deployments, dry_run,
            dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run,
            include_tags, exclude_tags, force_rebuild, log_dir):
    if request_id and discover:
        print("Cannot specify both --request-id and --discover", file=sys.stderr)
@ -225,10 +220,6 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
        print("--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required", file=sys.stderr)
        sys.exit(2)

    if fqdn_policy not in ["prohibit", "allow", "preexisting"]:
        print("--fqdn-policy must be one of 'prohibit', 'allow', or 'preexisting'", file=sys.stderr)
        sys.exit(2)

    # Split CSV and clean up values.
    include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
    exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
@ -256,10 +247,7 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
    requests_by_name = {}
    skipped_by_name = {}
    for r in requests:
        if r.id in previous_requests and previous_requests[r.id].get("status", "") != "RETRY":
            print(f"Skipping request {r.id}, we've already seen it.")
            continue

        # TODO: Do this _after_ filtering deployments and cancellations to minimize round trips.
        app = laconic.get_record(r.attributes.application)
        if not app:
            print("Skipping request %s, cannot locate app." % r.id)
@ -346,7 +334,6 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
                        kube_config,
                        image_registry,
                        force_rebuild,
                        fqdn_policy,
                        logger
                    )
                    status = "DEPLOYED"

@ -17,7 +17,6 @@ import click

from stack_orchestrator.command_types import CommandOptions
from stack_orchestrator.repos import setup_repositories
from stack_orchestrator.repos import fetch_stack
from stack_orchestrator.build import build_containers, fetch_containers
from stack_orchestrator.build import build_npms
from stack_orchestrator.build import build_webapp
@ -51,7 +50,6 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
    ctx.obj = command_options


cli.add_command(fetch_stack.command, "fetch-stack")
cli.add_command(setup_repositories.command, "setup-repositories")
cli.add_command(build_containers.command, "build-containers")
cli.add_command(fetch_containers.command, "fetch-containers")

@ -1,45 +0,0 @@
# Copyright © 2022, 2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.

# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc


import click
import os

from decouple import config
from git import exc

from stack_orchestrator.opts import opts
from stack_orchestrator.repos.setup_repositories import process_repo
from stack_orchestrator.util import error_exit


@click.command()
@click.argument('stack-locator')
@click.option('--git-ssh', is_flag=True, default=False)
@click.option('--check-only', is_flag=True, default=False)
@click.option('--pull', is_flag=True, default=False)
@click.pass_context
def command(ctx, stack_locator, git_ssh, check_only, pull):
    '''optionally resolve then git clone a repository containing one or more stack definitions'''
    dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
    if not opts.o.quiet:
        print(f"Dev Root is: {dev_root_path}")
    try:
        process_repo(pull, check_only, git_ssh, dev_root_path, None, stack_locator)
    except exc.GitCommandError as error:
        error_exit(f"\n******* git command returned error exit status:\n{error}")

@ -26,7 +26,6 @@ import importlib.resources
from pathlib import Path
import yaml
from stack_orchestrator.constants import stack_file_name
from stack_orchestrator.opts import opts
from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit, warn_exit


@ -88,8 +87,8 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):


# TODO: fix the messy arg list here
def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
    if opts.o.verbose:
def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
    if verbose:
        print(f"Processing repo: {fully_qualified_repo}")
    repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
    git_ssh_prefix = f"git@{repo_host}:"
@ -101,7 +100,7 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
    (current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(
        full_filesystem_repo_path
    ) if is_present else (None, None)
    if not opts.o.quiet:
    if not quiet:
        present_text = f"already exists active {'branch' if is_branch else 'tag'}: {current_repo_branch_or_tag}" if is_present \
            else 'Needs to be fetched'
        print(f"Checking: {full_filesystem_repo_path}: {present_text}")
@ -112,25 +111,25 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
            sys.exit(1)
        else:
            if pull:
                if opts.o.verbose:
                if verbose:
                    print(f"Running git pull for {full_filesystem_repo_path}")
                if not check_only:
                    if is_branch:
                        git_repo = git.Repo(full_filesystem_repo_path)
                        origin = git_repo.remotes.origin
                        origin.pull(progress=None if opts.o.quiet else GitProgress())
                        origin.pull(progress=None if quiet else GitProgress())
                    else:
                        print("skipping pull because this repo checked out a tag")
                else:
                    print("(git pull skipped)")
    if not is_present:
        # Clone
        if opts.o.verbose:
        if verbose:
            print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
        if not opts.o.dry_run:
        if not dry_run:
            git.Repo.clone_from(full_github_repo_path,
                                full_filesystem_repo_path,
                                progress=None if opts.o.quiet else GitProgress())
                                progress=None if quiet else GitProgress())
        else:
            print("(git clone skipped)")
    # Checkout the requested branch, if one was specified
@ -151,13 +150,13 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
            current_repo_branch_or_tag and (
                current_repo_branch_or_tag != branch_to_checkout)
        ):
            if not opts.o.quiet:
            if not quiet:
                print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
            git_repo = git.Repo(full_filesystem_repo_path)
            # git checkout works for both branches and tags
            git_repo.git.checkout(branch_to_checkout)
        else:
            if opts.o.verbose:
            if verbose:
                print(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")


@ -183,18 +182,36 @@ def parse_branches(branches_string):
@click.option('--check-only', is_flag=True, default=False)
@click.option('--pull', is_flag=True, default=False)
@click.option("--branches", help="override branches for repositories")
@click.option('--branches-file', help="checkout branches specified in this file")
@click.pass_context
def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches_file):
    '''git clone the set of repositories required to build the complete system from source'''

    quiet = opts.o.quiet
    verbose = opts.o.verbose
    stack = opts.o.stack
    quiet = ctx.obj.quiet
    verbose = ctx.obj.verbose
    dry_run = ctx.obj.dry_run
    stack = ctx.obj.stack

    branches_array = []

    # TODO: branches file needs to be re-worked in the context of stacks
    if branches_file:
        if branches:
            print("Error: can't specify both --branches and --branches-file")
            sys.exit(1)
        else:
            if verbose:
                print(f"loading branches from: {branches_file}")
            with open(branches_file) as branches_file_open:
                branches_array = branches_file_open.read().splitlines()

    print(f"branches: {branches}")
    if branches:
        branches_array = parse_branches(branches)
        if branches_file:
            print("Error: can't specify both --branches and --branches-file")
            sys.exit(1)
        else:
            branches_array = parse_branches(branches)

    if branches_array and verbose:
        print(f"Branches are: {branches_array}")
@ -254,6 +271,7 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):

    for repo in repos:
        try:
            process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, repo)
            process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, repo)
        except git.exc.GitCommandError as error:
            error_exit(f"\n******* git command returned error exit status:\n{error}")
            print(f"\n******* git command returned error exit status:\n{error}")
            sys.exit(1)

@ -94,38 +94,10 @@ def get_plugin_code_paths(stack) -> List[Path]:
    return list(result)


# Find a config directory, looking first in any external stack
# and if not found there, internally
def resolve_config_dir(stack, config_dir_name: str):
    if stack_is_external(stack):
        # First try looking in the external stack for the compose file
        config_base = Path(stack).parent.parent.joinpath("config")
        proposed_dir = config_base.joinpath(config_dir_name)
        if proposed_dir.exists():
            return proposed_dir
        # If we don't find it fall through to the internal case
    config_base = get_internal_config_dir()
    return config_base.joinpath(config_dir_name)


# Find a compose file, looking first in any external stack
# and if not found there, internally
def resolve_compose_file(stack, pod_name: str):
    if stack_is_external(stack):
        # First try looking in the external stack for the compose file
        compose_base = Path(stack).parent.parent.joinpath("compose")
        proposed_file = compose_base.joinpath(f"docker-compose-{pod_name}.yml")
        if proposed_file.exists():
            return proposed_file
        # If we don't find it fall through to the internal case
    compose_base = get_internal_compose_file_dir()
    return compose_base.joinpath(f"docker-compose-{pod_name}.yml")


def get_pod_file_path(stack, parsed_stack, pod_name: str):
def get_pod_file_path(parsed_stack, pod_name: str):
    pods = parsed_stack["pods"]
    if type(pods[0]) is str:
        result = resolve_compose_file(stack, pod_name)
        result = os.path.join(get_compose_file_dir(), f"docker-compose-{pod_name}.yml")
    else:
        for pod in pods:
            if pod["name"] == pod_name:
@ -159,7 +131,7 @@ def pod_has_scripts(parsed_stack, pod_name: str):
    return result


def get_internal_compose_file_dir():
def get_compose_file_dir():
    # TODO: refactor to use common code with deploy command
    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
    data_dir = Path(__file__).absolute().parent.joinpath("data")
@ -167,7 +139,7 @@ def get_internal_compose_file_dir():
    return source_compose_dir


def get_internal_config_dir():
def get_config_file_dir():
    # TODO: refactor to use common code with deploy command
    data_dir = Path(__file__).absolute().parent.joinpath("data")
    source_config_dir = data_dir.joinpath("config")

@ -1,185 +0,0 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi
# Dump environment variables for debugging
echo "Environment variables:"
env

if [ "$1" == "from-path" ]; then
  TEST_TARGET_SO="laconic-so"
else
  TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
fi

delete_cluster_exit () {
  $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
  exit 1
}

# Test basic stack-orchestrator deploy
echo "Running stack-orchestrator external stack deploy test"
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
# Clone the external test stack
$TEST_TARGET_SO fetch-stack git.vdb.to/cerc-io/test-external-stack
stack_name="$CERC_REPO_BASE_DIR/test-external-stack/stack-orchestrator/stacks/test-external-stack"
TEST_TARGET_SO_STACK="$TEST_TARGET_SO --stack ${stack_name}"
# Test bringing the test container up and down
# with and without volume removal
$TEST_TARGET_SO_STACK setup-repositories
$TEST_TARGET_SO_STACK build-containers
# Test deploy command execution
$TEST_TARGET_SO_STACK deploy setup $CERC_REPO_BASE_DIR
# Check that we now have the expected output directory
container_output_dir=$CERC_REPO_BASE_DIR/container-output-dir
if [ ! -d "$container_output_dir" ]; then
  echo "deploy setup test: output directory not present"
  echo "deploy setup test: FAILED"
  exit 1
fi
if [ ! -f "$container_output_dir/output-file" ]; then
  echo "deploy setup test: output file not present"
  echo "deploy setup test: FAILED"
  exit 1
fi
output_file_content=$(<$container_output_dir/output-file)
if [ ! "$output_file_content" == "output-data" ]; then
  echo "deploy setup test: output file contents not correct"
  echo "deploy setup test: FAILED"
  exit 1
fi
# Check that we now have the expected output file
$TEST_TARGET_SO_STACK deploy up
# Test deploy port command
deploy_port_output=$( $TEST_TARGET_SO_STACK deploy port test 80 )
if [[ "$deploy_port_output" =~ ^0.0.0.0:[1-9][0-9]* ]]; then
  echo "Deploy port test: passed"
else
  echo "Deploy port test: FAILED"
  exit 1
fi
$TEST_TARGET_SO_STACK deploy down
# The next time we bring the container up the volume will be old (from the previous run above)
$TEST_TARGET_SO_STACK deploy up
log_output_1=$( $TEST_TARGET_SO_STACK deploy logs )
if [[ "$log_output_1" == *"filesystem is old"* ]]; then
  echo "Retain volumes test: passed"
else
  echo "Retain volumes test: FAILED"
  exit 1
fi
$TEST_TARGET_SO_STACK deploy down --delete-volumes
# Now when we bring the container up the volume will be new again
$TEST_TARGET_SO_STACK deploy up
log_output_2=$( $TEST_TARGET_SO_STACK deploy logs )
if [[ "$log_output_2" == *"filesystem is fresh"* ]]; then
  echo "Delete volumes test: passed"
else
  echo "Delete volumes test: FAILED"
  exit 1
fi
$TEST_TARGET_SO_STACK deploy down --delete-volumes
# Basic test of creating a deployment
test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
$TEST_TARGET_SO_STACK deploy init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED,CERC_TEST_PARAM_3=FAST
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
  echo "deploy init test: spec file not present"
  echo "deploy init test: FAILED"
  exit 1
fi
echo "deploy init test: passed"
$TEST_TARGET_SO_STACK deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
  echo "deploy create test: deployment directory not present"
  echo "deploy create test: FAILED"
  exit 1
fi
echo "deploy create test: passed"
# Check the file written by the create command in the stack now exists
if [ ! -f "$test_deployment_dir/create-file" ]; then
  echo "deploy create test: create output file not present"
  echo "deploy create test: FAILED"
  exit 1
fi
# And has the right content
create_file_content=$(<$test_deployment_dir/create-file)
if [ ! "$create_file_content" == "create-command-output-data" ]; then
  echo "deploy create test: create output file contents not correct"
  echo "deploy create test: FAILED"
  exit 1
fi

# Add a config file to be picked up by the ConfigMap before starting.
echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config

echo "deploy create output file test: passed"
# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
# Check logs command works
log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
  echo "deployment logs test: passed"
else
  echo "deployment logs test: FAILED"
  exit 1
fi
# Check the config variable CERC_TEST_PARAM_1 was passed correctly
if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
  echo "deployment config test: passed"
else
  echo "deployment config test: FAILED"
  exit 1
fi
# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then
  echo "deployment compose config test: passed"
else
  echo "deployment compose config test: FAILED"
  exit 1
fi
# Check the config variable CERC_TEST_PARAM_3 was passed correctly
if [[ "$log_output_3" == *"Test-param-3: FAST"* ]]; then
  echo "deployment config test: passed"
else
  echo "deployment config test: FAILED"
  exit 1
fi

# Check that the ConfigMap is mounted and contains the expected content.
log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
  echo "deployment ConfigMap test: passed"
else
  echo "deployment ConfigMap test: FAILED"
  delete_cluster_exit
fi

# Stop then start again and check the volume was preserved
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
# Sleep a bit just in case
# sleep for longer to check if that's why the subsequent create cluster fails
sleep 20
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_5" == *"filesystem is old"* ]]; then
  echo "Retain volumes test: passed"
else
  echo "Retain volumes test: FAILED"
  delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"

@ -1,43 +0,0 @@
#!/usr/bin/env bash

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Laconic registry CLI tests"
env
cat /etc/hosts
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )

export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX)
echo "$(date +"%Y-%m-%d %T"): Cloning laconic-registry-cli repository into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack fixturenet-laconicd setup-repositories --include git.vdb.to/cerc-io/laconic-registry-cli

echo "$(date +"%Y-%m-%d %T"): Starting stack"
TEST_AUCTION_ENABLED=true BASE_DIR=${CERC_REPO_BASE_DIR} $TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd up
echo "$(date +"%Y-%m-%d %T"): Stack started"

# Verify that the fixturenet is up and running
$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd ps

# Get the fixturenet account address
laconicd_account_address=$(docker exec laconicd-laconicd-1 laconicd keys list | awk '/- address:/ {print $3}')

# Copy over config
docker exec laconicd-cli-1 cp config.yml laconic-registry-cli/

# Wait for the laconicd endpoint to come up
echo "Waiting for the RPC endpoint to come up"
docker exec laconicd-laconicd-1 sh -c "curl --retry 20 --retry-delay 3 --retry-connrefused http://127.0.0.1:9473/api"

# Run the tests
echo "Running the tests"
docker exec -e TEST_ACCOUNT=$laconicd_account_address laconicd-cli-1 sh -c 'cd laconic-registry-cli && yarn && yarn test'

# Clean up
$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd down --delete-volumes
echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories"
rm -rf $CERC_REPO_BASE_DIR
echo "$(date +"%Y-%m-%d %T"): Test finished"

@ -12,7 +12,6 @@ cat /etc/hosts
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a new unique repo dir
export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX)

echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO"
echo "$(date +"%Y-%m-%d %T"): Test version command"
reported_version_string=$( $TEST_TARGET_SO version )

@ -32,14 +32,14 @@ set +e

CONTAINER_ID=$(docker run -p 3000:80 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local)
sleep 3
wget --tries 20 --retry-connrefused --waitretry=3 -O test.before -m http://localhost:3000
wget -t 7 -O test.before -m http://localhost:3000

docker logs $CONTAINER_ID
docker remove -f $CONTAINER_ID

CONTAINER_ID=$(docker run -p 3000:80 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local)
sleep 3
wget --tries 20 --retry-connrefused --waitretry=3 -O test.after -m http://localhost:3000
wget -t 7 -O test.after -m http://localhost:3000

docker logs $CONTAINER_ID
docker remove -f $CONTAINER_ID