Dboreham/mainnet eth (#464)

This commit is contained in:
parent 1f9131ff5a
commit 29fc611885

56  app/data/compose/docker-compose-mainnet-eth.yml  Normal file
@@ -0,0 +1,56 @@
services:
  mainnet-eth-geth-1:
    restart: always
    hostname: mainnet-eth-geth-1
    cap_add:
      - SYS_PTRACE
    environment:
      CERC_REMOTE_DEBUG: "true"
      CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect}
      CERC_STATEDIFF_DB_NODE_ID: 1
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    image: cerc/go-ethereum:local
    entrypoint: /bin/sh
    command: -c "/opt/run-geth.sh"
    volumes:
      - mainnet_eth_geth_1_data:/root/ethdata
      - mainnet_eth_config_data:/etc/mainnet-eth
      - ../config/mainnet-eth/scripts/run-geth.sh:/opt/run-geth.sh
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "8545"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 3s
    ports:
      - "8545"
      - "40000"
      - "6060"

  mainnet-eth-lighthouse-1:
    restart: always
    hostname: mainnet-eth-lighthouse-1
    healthcheck:
      test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 30s
    environment:
      EXECUTION_ENDPOINT: "http://mainnet-eth-geth-1:8551"
    image: cerc/lighthouse:local
    entrypoint: /bin/sh
    command: -c "/opt/run-lighthouse.sh"
    volumes:
      - mainnet_eth_lighthouse_1_data:/var/lighthouse-data-dir
      - mainnet_eth_config_data:/etc/mainnet-eth
      - ../config/mainnet-eth/scripts/run-lighthouse.sh:/opt/run-lighthouse.sh
    ports:
      - "8001"

volumes:
  mainnet_eth_config_data:
  mainnet_eth_geth_1_data:
  mainnet_eth_lighthouse_1_data:
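Both services define Docker healthchecks: a TCP probe of geth's JSON-RPC port 8545 and an HTTP probe of the lighthouse beacon API on port 8001. A minimal sketch of confirming that the checks are passing once the pod is up; the container name below is taken from the README's `ps` output further down and is only an example (your cluster ID will differ):
```
# List the stack's containers together with Docker's reported health state
docker ps --filter "name=mainnet-eth" --format "table {{.Names}}\t{{.Status}}"

# Or inspect a single container's health status directly (example name; substitute your own)
docker inspect --format '{{.State.Health.Status}}' \
    laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-geth-1-1
```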
52  app/data/config/mainnet-eth/scripts/run-geth.sh  Executable file
@@ -0,0 +1,52 @@
#!/bin/sh
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
    set -x
fi

CERC_ETH_DATADIR=/root/ethdata

START_CMD="geth"
if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
    START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --"
fi

# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script
cleanup() {
    echo "Signal received, cleaning up..."

    # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process)
    pkill -P ${geth_pid}
    sleep 2
    kill $(jobs -p)

    wait
    echo "Done"
}
# Wire the handler up so SIGINT/SIGTERM actually reach geth (see the article linked above)
trap 'cleanup' SIGINT SIGTERM

$START_CMD \
  --datadir="${CERC_ETH_DATADIR}" \
  --authrpc.addr="0.0.0.0" \
  --authrpc.port 8551 \
  --authrpc.vhosts="*" \
  --authrpc.jwtsecret="/etc/mainnet-eth/jwtsecret" \
  --ws \
  --ws.addr="0.0.0.0" \
  --ws.origins="*" \
  --ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
  --http.corsdomain="*" \
  --gcmode full \
  --txlookuplimit=0 \
  --cache.preimages \
  --syncmode=snap \
  &

geth_pid=$!


wait $geth_pid

if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then
  while [ 1 -eq 1 ]; do
    sleep 60
  done
fi
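When `CERC_REMOTE_DEBUG` is `true` the script wraps geth in Delve listening on port 40000, which the compose file publishes on an ephemeral host port. A minimal sketch of attaching a debugger from the host, assuming the Delve CLI (`dlv`) is installed there; the container name and mapped port come from the README's `ps` output and are illustrative only:
```
# Discover which host port Docker mapped to the in-container Delve port 40000
docker port laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-geth-1-1 40000
# e.g. prints 0.0.0.0:49184 -- then attach a Delve client to that port
dlv connect localhost:49184
```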
22  app/data/config/mainnet-eth/scripts/run-lighthouse.sh  Executable file
@@ -0,0 +1,22 @@
#!/bin/bash
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
    set -x
fi

DEBUG_LEVEL=${CERC_LIGHTHOUSE_DEBUG_LEVEL:-info}

data_dir=/var/lighthouse-data-dir

network_port=9001
http_port=8001
authrpc_port=8551

exec lighthouse \
  bn \
  --debug-level $DEBUG_LEVEL \
  --datadir $data_dir \
  --network mainnet \
  --execution-endpoint $EXECUTION_ENDPOINT \
  --execution-jwt /etc/mainnet-eth/jwtsecret \
  --disable-deposit-contract-sync \
  --checkpoint-sync-url https://beaconstate.ethstaker.cc
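The compose healthcheck above polls `http://localhost:8001/eth/v2/beacon/blocks/head` inside the lighthouse container, and port 8001 is published to an ephemeral host port. As a rough sketch (assuming the beacon HTTP API is in fact reachable on that published port), the same endpoint plus the standard `/eth/v1/node/syncing` endpoint can be queried from the host to watch sync state; the container name is illustrative:
```
# Find the host port mapped to the lighthouse HTTP API
docker port laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 8001
# e.g. prints 0.0.0.0:49187 -- then query the beacon node
curl -s http://localhost:49187/eth/v1/node/syncing
curl -s http://localhost:49187/eth/v2/beacon/blocks/head | head -c 200; echo
```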
104  app/data/stacks/mainnet-eth/README.md  Normal file
@@ -0,0 +1,104 @@
# mainnet-eth

Deploys a "head-tracking" mainnet Ethereum stack comprising a [go-ethereum](https://github.com/cerc-io/go-ethereum) execution layer node and a [lighthouse](https://github.com/sigp/lighthouse) consensus layer node.

## Clone required repositories

```
$ laconic-so --stack mainnet-eth setup-repositories
```

## Build containers

```
$ laconic-so --stack mainnet-eth build-containers
```

## Create a deployment

```
$ laconic-so --stack mainnet-eth deploy init --output mainnet-eth-spec.yml
$ laconic-so deploy create --spec-file mainnet-eth-spec.yml --deployment-dir mainnet-eth-deployment
```
## Start the stack
```
$ laconic-so deployment --dir mainnet-eth-deployment start
```
Display stack status:
```
$ laconic-so deployment --dir mainnet-eth-deployment ps
Running containers:
id: f39608eca04d72d6b0f1f3acefc5ebb52908da06e221d20c7138f7e3dff5e423, name: laconic-ef641b4d13eb61ed561b19be67063241-foundry-1, ports:
id: 4052b1eddd886ae0d6b41f9ff22e68a70f267b2bfde10f4b7b79b5bd1eeddcac, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-geth-1-1, ports: 30303/tcp, 30303/udp, 0.0.0.0:49184->40000/tcp, 0.0.0.0:49185->6060/tcp, 0.0.0.0:49186->8545/tcp, 8546/tcp
id: ac331232e597944b621b3b8942ace5dafb14524302cab338ff946c7f6e5a1d52, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1, ports: 0.0.0.0:49187->8001/tcp
```
See stack logs:
```
$ laconic-so deployment --dir mainnet-eth-deployment logs
time="2023-07-25T09:46:29-06:00" level=warning msg="The \"CERC_SCRIPT_DEBUG\" variable is not set. Defaulting to a blank string."
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.362 INFO Logging to file path: "/var/lighthouse-data-dir/beacon/logs/beacon.log"
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Lighthouse started version: Lighthouse/v4.1.0-693886b
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Configured for network name: mainnet
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Data directory initialised datadir: /var/lighthouse-data-dir
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Deposit contract address: 0x00000000219ab540356cbb839cbe05303d7705fa, deploy_block: 11184524
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.424 INFO Starting checkpoint sync remote_url: https://beaconstate.ethstaker.cc/, service: beacon
```
## Monitoring stack sync progress
Both go-ethereum and lighthouse will engage in an initial chain sync phase that will last up to several hours, depending on hardware performance and network capacity.
Syncing can be monitored by looking for these log messages:
```
Jul 24 12:34:17.001 INFO Downloading historical blocks est_time: 5 days 11 hrs, speed: 14.67 slots/sec, distance: 6932481 slots (137 weeks 3 days), service: slot_notifier
INFO [07-24|12:14:52.493] Syncing beacon headers downloaded=145,920 left=17,617,968 eta=1h23m32.815s
INFO [07-24|12:33:15.238] Syncing: chain download in progress synced=1.86% chain=148.94MiB headers=368,640@95.03MiB bodies=330,081@40.56MiB receipts=330,081@13.35MiB eta=37m54.505s
INFO [07-24|12:35:13.028] Syncing: state download in progress synced=1.32% state=4.64GiB accounts=2,850,314@677.57MiB slots=18,663,070@3.87GiB codes=26662@111.14MiB eta=3h18m0.699s
```
Once synced up, these log messages will be observed:
```
INFO Synced slot: 6952515, block: 0x5bcb…f6d9, epoch: 217266, finalized_epoch: 217264, finalized_root: 0x6342…2c5c, exec_hash: 0x8d8c…2443 (verified), peers: 31, service: slot_notifier
INFO [07-25|03:04:48.941] Imported new potential chain segment number=17,767,316 hash=84f6e7..bc2cb0 blocks=1 txs=137 mgas=16.123 elapsed=57.087ms mgasps=282.434 dirty=461.46MiB
INFO [07-25|03:04:49.042] Chain head was updated number=17,767,316 hash=84f6e7..bc2cb0 root=ca58b2..8258c1 elapsed=2.480111ms
```
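Sync progress can also be checked programmatically via geth's standard `eth_syncing` JSON-RPC method, assuming the HTTP-RPC endpoint on port 8545 is reachable (the compose healthcheck probes that port, and it is published on an ephemeral host port such as 49186 in the `ps` output above). A minimal sketch, with the container name again used only as an example:
```
# Resolve the host port mapped to geth's RPC port 8545, then ask for sync status;
# the call returns progress fields while syncing and `false` once the node is at head
geth_port=$(docker port laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-geth-1-1 8545 | head -1 | cut -d: -f2)
curl -s -X POST -H "Content-Type: application/json" \
    --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' \
    http://localhost:${geth_port}
```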
## Clean up

Stop the stack:
```
$ laconic-so deployment --dir mainnet-eth-deployment stop
```
This leaves data volumes in place, allowing the stack to be subsequently re-started.
To permanently *delete* the stack's data volumes, run:
```
$ laconic-so deployment --dir mainnet-eth-deployment stop --delete-data-volumes
```
After deleting the volumes, any subsequent re-start will begin chain sync from cold.
## Data volumes
Container data volumes are bind-mounted to specified paths in the host filesystem.
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
volumes:
  mainnet_eth_config_data: ./data/mainnet_eth_config_data
  mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data
  mainnet_eth_lighthouse_1_data: ./data/mainnet_eth_lighthouse_1_data
```
A synced-up stack will consume around 900GB of data volume space:
```
$ sudo du -h mainnet-eth-deployment/data/
150M    mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/freezer_db
25G     mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/chain_db
16K     mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/network
368M    mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/logs
26G     mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon
26G     mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data
8.0K    mainnet-eth-deployment/data/mainnet_eth_config_data
4.0K    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/keystore
527G    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/chaindata/ancient/chain
527G    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/chaindata/ancient
859G    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/chaindata
4.8M    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/nodes
242M    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/ethash
669M    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/triecache
860G    mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth
860G    mainnet-eth-deployment/data/mainnet_eth_geth_1_data
885G    mainnet-eth-deployment/data/
```
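Because the volumes are plain bind mounts described in the spec file, one way to place the ~900GB of chain data on a larger disk would be to edit the volume paths in `mainnet-eth-spec.yml` before running `laconic-so deploy create`. This is a sketch only; it assumes the deployment machinery accepts whatever host paths appear in the spec, which is not verified here:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
volumes:
  mainnet_eth_config_data: /big-disk/mainnet-eth/config_data
  mainnet_eth_geth_1_data: /big-disk/mainnet-eth/geth_1_data
  mainnet_eth_lighthouse_1_data: /big-disk/mainnet-eth/lighthouse_1_data
```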
31  app/data/stacks/mainnet-eth/deploy/commands.py  Normal file
@@ -0,0 +1,31 @@
# Copyright © 2023 Cerc

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.

from secrets import token_hex

def init(ctx):
    return None


def setup(ctx):
    return None


def create(ctx):
    # Generate the JWT secret and save to its config file
    secret = token_hex(32)
    jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret")
    with open(jwt_file_path, 'w+') as jwt_file:
        jwt_file.write(secret)
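The `create()` hook above writes a 32-byte hex JWT secret into the shared config volume; it is the same secret geth reads via `--authrpc.jwtsecret` and lighthouse via `--execution-jwt`. Purely as an illustration of what the hook produces (not a step the stack requires), an equivalent file could be generated by hand:
```
# Equivalent of Python's secrets.token_hex(32): 32 random bytes as 64 hex characters, no newline
openssl rand -hex 32 | tr -d "\n" > mainnet-eth-deployment/data/mainnet_eth_config_data/jwtsecret
```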
15  app/data/stacks/mainnet-eth/stack.yml  Normal file
@@ -0,0 +1,15 @@
version: "1.1"
name: mainnet-eth
description: "Ethereum Mainnet"
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/lighthouse
  - github.com/dboreham/foundry
containers:
  - cerc/go-ethereum
  - cerc/lighthouse
  - cerc/lighthouse-cli
  - cerc/foundry
pods:
  - mainnet-eth
  - foundry
@@ -61,7 +61,7 @@ def create_deploy_context(global_context, stack, include, exclude, cluster, env_
     return DeployCommandContext(cluster_context, docker)


-def up_operation(ctx, services_list):
+def up_operation(ctx, services_list, stay_attached=False):
     global_context = ctx.parent.parent.obj
     deploy_context = ctx.obj
     if not global_context.dry_run:
@@ -73,7 +73,7 @@ def up_operation(ctx, services_list):
         print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
         for pre_start_command in cluster_context.pre_start_commands:
             _run_command(global_context, cluster_context.cluster, pre_start_command)
-        deploy_context.docker.compose.up(detach=True, services=services_list)
+        deploy_context.docker.compose.up(detach=not stay_attached, services=services_list)
         for post_start_command in cluster_context.post_start_commands:
             _run_command(global_context, cluster_context.cluster, post_start_command)
         _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env)
@@ -54,22 +54,24 @@ def make_deploy_context(ctx):


 @command.command()
+@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
 @click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
 @click.pass_context
-def up(ctx, extra_args):
+def up(ctx, stay_attached, extra_args):
     ctx.obj = make_deploy_context(ctx)
     services_list = list(extra_args) or None
-    up_operation(ctx, services_list)
+    up_operation(ctx, services_list, stay_attached)


 # start is the preferred alias for up
 @command.command()
+@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
 @click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
 @click.pass_context
-def start(ctx, extra_args):
+def start(ctx, stay_attached, extra_args):
     ctx.obj = make_deploy_context(ctx)
     services_list = list(extra_args) or None
-    up_operation(ctx, services_list)
+    up_operation(ctx, services_list, stay_attached)


 @command.command()
@@ -91,7 +93,7 @@ def down(ctx, delete_volumes, extra_args):
 def stop(ctx, delete_volumes, extra_args):
     # TODO: add cluster name and env file here
     ctx.obj = make_deploy_context(ctx)
-    down_operation(ctx, delete_volumes, extra_args, None)
+    down_operation(ctx, delete_volumes, extra_args)


 @command.command()
@@ -14,6 +14,7 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.

 import click
+from dataclasses import dataclass
 from importlib import util
 import os
 from pathlib import Path
@@ -21,6 +22,11 @@ from shutil import copyfile, copytree
 import sys
 from app.util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml

+
+@dataclass
+class DeploymentContext:
+    stack: str
+    deployment_dir: Path

 def _make_default_deployment_dir():
     return "deployment-001"
@@ -111,6 +117,18 @@ def call_stack_deploy_setup(stack):
     return imported_stack.setup(None)


+# TODO: fold this with function above
+def call_stack_deploy_create(deployment_context):
+    # Link with the python file in the stack
+    # Call a function in it
+    # If no function found, return None
+    python_file_path = get_stack_file_path(deployment_context.stack).parent.joinpath("deploy", "commands.py")
+    spec = util.spec_from_file_location("commands", python_file_path)
+    imported_stack = util.module_from_spec(spec)
+    spec.loader.exec_module(imported_stack)
+    return imported_stack.create(deployment_context)
+
+
 # Inspect the pod yaml to find config files referenced in subdirectories
 # other than the one associated with the pod
 def _find_extra_config_dirs(parsed_pod_file, pod):
@@ -138,7 +156,8 @@ def init(ctx, output):
     verbose = global_options(ctx).verbose
     default_spec_file_content = call_stack_deploy_init(stack)
     spec_file_content = {"stack": stack}
-    spec_file_content.update(default_spec_file_content)
+    if default_spec_file_content:
+        spec_file_content.update(default_spec_file_content)
     if verbose:
         print(f"Creating spec file for stack: {stack}")
     named_volumes = _get_named_volumes(stack)
@@ -197,6 +216,9 @@ def create(ctx, spec_file, deployment_dir):
             # If the same config dir appears in multiple pods, it may already have been copied
             if not os.path.exists(destination_config_dir):
                 copytree(source_config_dir, destination_config_dir)
+    # Delegate to the stack's Python code
+    deployment_context = DeploymentContext(stack_name, Path(deployment_dir))
+    call_stack_deploy_create(deployment_context)


 @click.command()
@@ -209,4 +231,3 @@ def create(ctx, spec_file, deployment_dir):
 def setup(ctx, node_moniker, key_name, initialize_network, join_network, create_network):
     stack = global_options(ctx).stack
     call_stack_deploy_setup(stack)
-
32  tests/mainnet-eth/run-test.sh  Executable file
@@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

echo "Running stack-orchestrator Ethereum mainnet test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a new unique repo dir
export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-mainnet-eth-test.XXXXXXXXXX)
DEPLOYMENT_DIR=mainnet-eth-deployment-test
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack mainnet-eth setup-repositories
$TEST_TARGET_SO --stack mainnet-eth build-containers
$TEST_TARGET_SO --stack mainnet-eth deploy init --output mainnet-eth-spec.yml
$TEST_TARGET_SO deploy create --spec-file mainnet-eth-spec.yml --deployment-dir $DEPLOYMENT_DIR
# Start the stack
$TEST_TARGET_SO deployment --dir $DEPLOYMENT_DIR start
# Verify that the stack is up and running
$TEST_TARGET_SO deployment --dir $DEPLOYMENT_DIR ps
#TODO: add a check that the container logs show good startup
$TEST_TARGET_SO deployment --dir $DEPLOYMENT_DIR stop --delete-volumes
echo "Removing deployment directory"
rm -rf $DEPLOYMENT_DIR
echo "Removing cloned repositories"
rm -rf $CERC_REPO_BASE_DIR
exit $test_result