Merge branch 'main' into ci-test

David Boreham 2023-07-27 20:14:45 -06:00
commit 16bef78312
24 changed files with 484 additions and 96 deletions


@ -15,7 +15,7 @@
import os
from abc import ABC, abstractmethod
from .deploy import get_stack_status
from app.deploy import get_stack_status
from decouple import config
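This first hunk shows the pattern repeated throughout the commit: relative imports (`from .deploy import ...`) become absolute (`from app.deploy import ...`), so the modules resolve the same way whether they are imported as part of the `app` package or loaded dynamically (as the per-stack `deploy/commands.py` loader added later in this commit does). A minimal sketch of the shared packaged-data pattern these files use, assuming the tool is installed as a package named `app` with a `data` subpackage:

```
# Sketch only: assumes an installed "app" package whose "data" subpackage
# ships plain-text list files, as in this repository.
import importlib.resources
from app import data  # absolute import: resolves the same from any caller

def read_list_file(name):
    # importlib.resources locates the file inside the installed package,
    # so this works from a wheel or zipapp as well as a source checkout.
    with importlib.resources.open_text(data, name) as list_file:
        return list_file.read().splitlines()
```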


@ -27,8 +27,8 @@ import subprocess
import click
import importlib.resources
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config
from .base import get_npm_registry_url
from app.util import include_exclude_check, get_parsed_stack_config
from app.base import get_npm_registry_url
# TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
@ -67,7 +67,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
print('Dev root directory doesn\'t exist, creating')
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
from app import data
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
all_containers = container_list_file.read().splitlines()


@ -25,8 +25,8 @@ from decouple import config
import click
import importlib.resources
from python_on_whales import docker, DockerException
from .base import get_stack
from .util import include_exclude_check, get_parsed_stack_config
from app.base import get_stack
from app.util import include_exclude_check, get_parsed_stack_config
builder_js_image_name = "cerc/builder-js:local"
@ -81,7 +81,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
os.makedirs(build_root_path)
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
from app import data
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
all_packages = package_list_file.read().splitlines()


@ -24,9 +24,9 @@ services:
"./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh"
volumes:
- ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
- ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
- ../config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
- ../config/optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
- ../config/optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
- ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
- ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
- l2_accounts:/l2-accounts


@ -0,0 +1,56 @@
services:
mainnet-eth-geth-1:
restart: always
hostname: mainnet-eth-geth-1
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "true"
CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect}
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
image: cerc/go-ethereum:local
entrypoint: /bin/sh
command: -c "/opt/run-geth.sh"
volumes:
- mainnet_eth_geth_1_data:/root/ethdata
- mainnet_eth_config_data:/etc/mainnet-eth
- ../config/mainnet-eth/scripts/run-geth.sh:/opt/run-geth.sh
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
ports:
- "8545"
- "40000"
- "6060"
mainnet-eth-lighthouse-1:
restart: always
hostname: mainnet-eth-lighthouse-1
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
interval: 30s
timeout: 10s
retries: 10
start_period: 30s
environment:
EXECUTION_ENDPOINT: "http://mainnet-eth-geth-1:8551"
image: cerc/lighthouse:local
entrypoint: /bin/sh
command: -c "/opt/run-lighthouse.sh"
volumes:
- mainnet_eth_lighthouse_1_data:/var/lighthouse-data-dir
- mainnet_eth_config_data:/etc/mainnet-eth
- ../config/mainnet-eth/scripts/run-lighthouse.sh:/opt/run-lighthouse.sh
ports:
- "8001"
volumes:
mainnet_eth_config_data:
mainnet_eth_geth_1_data:
mainnet_eth_lighthouse_1_data:
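Note that the `ports` entries name only container ports (`"8545"` and so on), so Docker publishes each on an ephemeral host port at startup; the `ps` output later in this commit shows the effect (e.g. `0.0.0.0:49186->8545/tcp`). A sketch of resolving the mapped port with python_on_whales, which the deployer already depends on (the container name is illustrative, not a real one):

```
# Sketch: look up the ephemeral host port mapped to geth's 8545.
# The container name below is hypothetical; take the real one from `ps` output.
from python_on_whales import docker

container = docker.container.inspect("laconic-<cluster-id>-mainnet-eth-geth-1-1")
print(container.network_settings.ports.get("8545/tcp"))
# e.g. [{'HostIp': '0.0.0.0', 'HostPort': '49186'}]
```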


@ -2,7 +2,7 @@ services:
laconicd:
restart: "no"
image: cerc/laconicd:local
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
command: ["/bin/sh", "-c", "while :; do sleep 600; done"]
volumes:
# The cosmos-sdk node's database directory:
- laconicd-data:/root/.laconicd/data


@ -0,0 +1,52 @@
#!/bin/sh
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_ETH_DATADIR=/root/ethdata
START_CMD="geth"
if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --"
fi
# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script
cleanup() {
echo "Signal received, cleaning up..."
# Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process)
pkill -P ${geth_pid}
sleep 2
kill $(jobs -p)
wait
echo "Done"
}
trap cleanup INT TERM
$START_CMD \
--datadir="${CERC_ETH_DATADIR}" \
--authrpc.addr="0.0.0.0" \
--authrpc.port 8551 \
--authrpc.vhosts="*" \
--authrpc.jwtsecret="/etc/mainnet-eth/jwtsecret" \
--ws \
--ws.addr="0.0.0.0" \
--ws.origins="*" \
--ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
--http.corsdomain="*" \
--gcmode full \
--txlookuplimit=0 \
--cache.preimages \
--syncmode=snap \
&
geth_pid=$!
wait $geth_pid
if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then
while [ 1 -eq 1 ]; do
sleep 60
done
fi


@ -0,0 +1,22 @@
#!/bin/bash
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
set -x
fi
DEBUG_LEVEL=${CERC_LIGHTHOUSE_DEBUG_LEVEL:-info}
data_dir=/var/lighthouse-data-dir
network_port=9001
http_port=8001
authrpc_port=8551
exec lighthouse \
bn \
--debug-level $DEBUG_LEVEL \
--datadir $data_dir \
--network mainnet \
--execution-endpoint $EXECUTION_ENDPOINT \
--execution-jwt /etc/mainnet-eth/jwtsecret \
--disable-deposit-contract-sync \
--checkpoint-sync-url https://beaconstate.ethstaker.cc


@ -0,0 +1,104 @@
# mainnet-eth
Deploys a "head-tracking" mainnet Ethereum stack comprising a [go-ethereum](https://github.com/cerc-io/go-ethereum) execution layer node and a [lighthouse](https://github.com/sigp/lighthouse) consensus layer node.
## Clone required repositories
```
$ laconic-so --stack mainnet-eth setup-repositories
```
## Build containers
```
$ laconic-so --stack mainnet-eth build-containers
```
## Create a deployment
```
$ laconic-so --stack mainnet-eth deploy init --output mainnet-eth-spec.yml
$ laconic-so deploy create --spec-file mainnet-eth-spec.yml --deployment-dir mainnet-eth-deployment
```
## Start the stack
```
$ laconic-so deployment --dir mainnet-eth-deployment start
```
Display stack status:
```
$ laconic-so deployment --dir mainnet-eth-deployment ps
Running containers:
id: f39608eca04d72d6b0f1f3acefc5ebb52908da06e221d20c7138f7e3dff5e423, name: laconic-ef641b4d13eb61ed561b19be67063241-foundry-1, ports:
id: 4052b1eddd886ae0d6b41f9ff22e68a70f267b2bfde10f4b7b79b5bd1eeddcac, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-geth-1-1, ports: 30303/tcp, 30303/udp, 0.0.0.0:49184->40000/tcp, 0.0.0.0:49185->6060/tcp, 0.0.0.0:49186->8545/tcp, 8546/tcp
id: ac331232e597944b621b3b8942ace5dafb14524302cab338ff946c7f6e5a1d52, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1, ports: 0.0.0.0:49187->8001/tcp
```
See stack logs:
```
$ laconic-so deployment --dir mainnet-eth-deployment logs
time="2023-07-25T09:46:29-06:00" level=warning msg="The \"CERC_SCRIPT_DEBUG\" variable is not set. Defaulting to a blank string."
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.362 INFO Logging to file path: "/var/lighthouse-data-dir/beacon/logs/beacon.log"
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Lighthouse started version: Lighthouse/v4.1.0-693886b
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Configured for network name: mainnet
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Data directory initialised datadir: /var/lighthouse-data-dir
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Deposit contract address: 0x00000000219ab540356cbb839cbe05303d7705fa, deploy_block: 11184524
laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-lighthouse-1-1 | Jul 25 15:45:13.424 INFO Starting checkpoint sync remote_url: https://beaconstate.ethstaker.cc/, service: beacon
```
## Monitoring stack sync progress
Both go-ethereum and lighthouse will engage in an initial chain sync phase that will last up to several hours depending on hardware performance and network capacity.
Syncing can be monitored by looking for these log messages:
```
Jul 24 12:34:17.001 INFO Downloading historical blocks est_time: 5 days 11 hrs, speed: 14.67 slots/sec, distance: 6932481 slots (137 weeks 3 days), service: slot_notifier
INFO [07-24|12:14:52.493] Syncing beacon headers downloaded=145,920 left=17,617,968 eta=1h23m32.815s
INFO [07-24|12:33:15.238] Syncing: chain download in progress synced=1.86% chain=148.94MiB headers=368,640@95.03MiB bodies=330,081@40.56MiB receipts=330,081@13.35MiB eta=37m54.505s
INFO [07-24|12:35:13.028] Syncing: state download in progress synced=1.32% state=4.64GiB accounts=2,850,314@677.57MiB slots=18,663,070@3.87GiB codes=26662@111.14MiB eta=3h18m0.699s
```
Once synced up, log messages like these will be observed:
```
INFO Synced slot: 6952515, block: 0x5bcb…f6d9, epoch: 217266, finalized_epoch: 217264, finalized_root: 0x6342…2c5c, exec_hash: 0x8d8c…2443 (verified), peers: 31, service: slot_notifier
INFO [07-25|03:04:48.941] Imported new potential chain segment number=17,767,316 hash=84f6e7..bc2cb0 blocks=1 txs=137 mgas=16.123 elapsed=57.087ms mgasps=282.434 dirty=461.46MiB
INFO [07-25|03:04:49.042] Chain head was updated number=17,767,316 hash=84f6e7..bc2cb0 root=ca58b2..8258c1 elapsed=2.480111ms
```
## Clean up
Stop the stack:
```
$ laconic-so deployment --dir mainnet-eth-deployment stop
```
This leaves data volumes in place, allowing the stack to be subsequently re-started.
To permanently *delete* the stack's data volumes run:
```
$ laconic-so deployment --dir mainnet-eth-deployment stop --delete-volumes
```
After deleting the volumes, any subsequent re-start will begin chain sync from cold.
## Data volumes
Container data volumes are bind-mounted to specified paths in the host filesystem.
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
volumes:
mainnet_eth_config_data: ./data/mainnet_eth_config_data
mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data
mainnet_eth_lighthouse_1_data: ./data/mainnet_eth_lighthouse_1_data
```
A synced-up stack will consume around 900GB of data volume space:
```
$ sudo du -h mainnet-eth-deployment/data/
150M mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/freezer_db
25G mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/chain_db
16K mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/network
368M mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon/logs
26G mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data/beacon
26G mainnet-eth-deployment/data/mainnet_eth_lighthouse_1_data
8.0K mainnet-eth-deployment/data/mainnet_eth_config_data
4.0K mainnet-eth-deployment/data/mainnet_eth_geth_1_data/keystore
527G mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/chaindata/ancient/chain
527G mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/chaindata/ancient
859G mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/chaindata
4.8M mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/nodes
242M mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/ethash
669M mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth/triecache
860G mainnet-eth-deployment/data/mainnet_eth_geth_1_data/geth
860G mainnet-eth-deployment/data/mainnet_eth_geth_1_data
885G mainnet-eth-deployment/data/
```


@ -0,0 +1,31 @@
# Copyright © 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from secrets import token_hex
def init(ctx):
return None
def setup(ctx):
return None
def create(ctx):
# Generate the JWT secret and save to its config file
secret = token_hex(32)
jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret")
with open(jwt_file_path, 'w+') as jwt_file:
jwt_file.write(secret)
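`token_hex(32)` yields a 64-character hex string, the shared-secret format both clients expect: geth reads it via `--authrpc.jwtsecret=/etc/mainnet-eth/jwtsecret` and lighthouse via `--execution-jwt /etc/mainnet-eth/jwtsecret`, both mounted from the `mainnet_eth_config_data` volume this hook populates. A sanity-check sketch (the path assumes the default `./data` volume layout from the README above):

```
# Sketch: verify the secret written by create() is what the clients expect.
from pathlib import Path

jwt_path = Path("mainnet-eth-deployment/data/mainnet_eth_config_data/jwtsecret")
secret = jwt_path.read_text().strip()
assert len(secret) == 64
int(secret, 16)  # raises ValueError if the file is not valid hex
```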


@ -0,0 +1,15 @@
version: "1.1"
name: mainnet-eth
description: "Ethereum Mainnet"
repos:
- github.com/cerc-io/go-ethereum
- github.com/cerc-io/lighthouse
- github.com/dboreham/foundry
containers:
- cerc/go-ethereum
- cerc/lighthouse
- cerc/lighthouse-cli
- cerc/foundry
pods:
- mainnet-eth
- foundry


@ -13,45 +13,50 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import os
from shutil import copyfile
import sys
from .util import get_stack_config_filename, get_parsed_deployment_spec
from dataclasses import dataclass
from app.util import get_yaml
from app.stack_state import State
default_spec_file_content = """stack: mainnet-laconic
data_dir: /my/path
node_name: my-node-name
default_spec_file_content = """config:
node_moniker: my-node-name
chain_id: my-chain-id
"""
init_help_text = """Add helpful text here on setting config variables.
"""
def make_default_deployment_dir():
return "deployment-001"
@click.command()
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
with open(output, "w") as output_file:
output_file.write(default_spec_file_content)
@dataclass
class VolumeMapping:
host_path: str
container_path: str
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.pass_context
def create(ctx, spec_file, deployment_dir):
# This function fails with a useful error message if the file doesn't exist
parsed_spec = get_parsed_deployment_spec(spec_file)
if ctx.debug:
print(f"parsed spec: {parsed_spec}")
if deployment_dir is None:
deployment_dir = make_default_deployment_dir()
if os.path.exists(deployment_dir):
print(f"Error: {deployment_dir} already exists")
sys.exit(1)
os.mkdir(deployment_dir)
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
stack_file = get_stack_config_filename(parsed_spec.stack)
copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
# In order to make this, we need the ability to run the stack
# In theory we can make this the same way as we would run deploy up
def run_container_command(ctx, container, command, mounts):
deploy_context = ctx.obj
pass
def setup(ctx):
node_moniker = "dbdb-node"
chain_id = "laconic_81337-1"
mounts = [
VolumeMapping("./path", "~/.laconicd")
]
output, status = run_container_command(ctx, "laconicd", f"laconicd init {node_moniker} --chain-id {chain_id}", mounts)
def init(command_context):
print(init_help_text)
yaml = get_yaml()
return yaml.load(default_spec_file_content)
def get_state(command_context):
print("Here we get state")
return State.CONFIGURED
def change_state(command_context):
pass
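Together with the mainnet-eth `commands.py` above, this file sketches the per-stack plugin surface the deployer now looks for in `<stack>/deploy/commands.py`: `init()`, `setup()`, `create()`, plus the state hooks. A minimal skeleton a new stack could start from (hook names are the ones this commit calls; the bodies are placeholders):

```
# Minimal per-stack deploy/commands.py skeleton (hook names per this commit).
from app.stack_state import State

def init(command_context):
    # Return default spec-file content (parsed yaml) or None.
    return None

def setup(command_context):
    # One-time node setup; no-op by default.
    return None

def create(command_context):
    # Post-create step; receives a DeploymentContext.
    return None

def get_state(command_context):
    return State.CONFIGURED

def change_state(command_context):
    pass
```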


@ -25,7 +25,4 @@ containers:
pods:
- mainnet-laconicd
- fixturenet-laconic-console
config:
cli:
key: laconicd.mykey
address: laconicd.myaddress


@ -26,9 +26,10 @@ import subprocess
from python_on_whales import DockerClient, DockerException
import click
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config, global_options2
from .deployment_create import create as deployment_create
from .deployment_create import init as deployment_init
from app.util import include_exclude_check, get_parsed_stack_config, global_options2
from app.deployment_create import create as deployment_create
from app.deployment_create import init as deployment_init
from app.deployment_create import setup as deployment_setup
class DeployCommandContext(object):
@ -60,7 +61,7 @@ def create_deploy_context(global_context, stack, include, exclude, cluster, env_
return DeployCommandContext(cluster_context, docker)
def up_operation(ctx, services_list):
def up_operation(ctx, services_list, stay_attached=False):
global_context = ctx.parent.parent.obj
deploy_context = ctx.obj
if not global_context.dry_run:
@ -72,7 +73,7 @@ def up_operation(ctx, services_list):
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
for pre_start_command in cluster_context.pre_start_commands:
_run_command(global_context, cluster_context.cluster, pre_start_command)
deploy_context.docker.compose.up(detach=True, services=services_list)
deploy_context.docker.compose.up(detach=not stay_attached, services=services_list)
for post_start_command in cluster_context.post_start_commands:
_run_command(global_context, cluster_context.cluster, post_start_command)
_orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env)
@ -263,7 +264,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
print(f"Using cluster name: {cluster}")
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
from app import data
with resources.open_text(data, "pod-list.txt") as pod_list_file:
all_pods = pod_list_file.read().splitlines()
@ -420,3 +421,4 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
command.add_command(deployment_init)
command.add_command(deployment_create)
command.add_command(deployment_setup)


@ -17,8 +17,8 @@ import click
from dataclasses import dataclass
from pathlib import Path
import sys
from .deploy import up_operation, down_operation, ps_operation, port_operation, exec_operation, logs_operation, create_deploy_context
from .util import global_options
from app.deploy import up_operation, down_operation, ps_operation, port_operation, exec_operation, logs_operation, create_deploy_context
from app.util import global_options
@dataclass
@ -54,22 +54,24 @@ def make_deploy_context(ctx):
@command.command()
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
def up(ctx, stay_attached, extra_args):
ctx.obj = make_deploy_context(ctx)
services_list = list(extra_args) or None
up_operation(ctx, services_list)
up_operation(ctx, services_list, stay_attached)
# start is the preferred alias for up
@command.command()
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def start(ctx, extra_args):
def start(ctx, stay_attached, extra_args):
ctx.obj = make_deploy_context(ctx)
services_list = list(extra_args) or None
up_operation(ctx, services_list)
up_operation(ctx, services_list, stay_attached)
@command.command()
@ -85,12 +87,13 @@ def down(ctx, delete_volumes, extra_args):
# stop is the preferred alias for down
@command.command()
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
@click.pass_context
def stop(ctx, extra_args):
def stop(ctx, delete_volumes, extra_args):
# TODO: add cluster name and env file here
ctx.obj = make_deploy_context(ctx)
down_operation(ctx, extra_args, None)
down_operation(ctx, delete_volumes, extra_args)
@command.command()
@ -127,15 +130,3 @@ def logs(ctx, extra_args):
@click.pass_context
def status(ctx):
print(f"Context: {ctx.parent.obj}")
#from importlib import resources, util
# TODO: figure out how to do this dynamically
#stack = "mainnet-laconic"
#module_name = "commands"
#spec = util.spec_from_file_location(module_name, "./app/data/stacks/" + stack + "/deploy/commands.py")
#imported_stack = util.module_from_spec(spec)
#spec.loader.exec_module(imported_stack)
#command.add_command(imported_stack.init)
#command.add_command(imported_stack.create)


@ -14,20 +14,18 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click
from dataclasses import dataclass
from importlib import util
import os
from pathlib import Path
from shutil import copyfile, copytree
import sys
import ruamel.yaml
from .util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options
from app.util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml
def _get_yaml():
# See: https://stackoverflow.com/a/45701840/1701505
yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True
yaml.indent(sequence=3, offset=1)
return yaml
@dataclass
class DeploymentContext:
stack: str
deployment_dir: Path
def _make_default_deployment_dir():
@ -47,7 +45,7 @@ def _get_named_volumes(stack):
named_volumes = []
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
yaml = _get_yaml()
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
@ -96,6 +94,41 @@ def _fixup_pod_file(pod, spec, compose_dir):
pod["volumes"][volume] = new_volume_spec
def call_stack_deploy_init(stack):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.init(None)
# TODO: fold this with function above
def call_stack_deploy_setup(stack):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.setup(None)
# TODO: fold this with function above
def call_stack_deploy_create(deployment_context):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deployment_context.stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.create(deployment_context)
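The repeated `# TODO: fold this with function above` points at the obvious refactor: a single loader that resolves the stack's `deploy/commands.py` and invokes a named hook, with a `getattr` guard so the "if no function found, return None" comment actually holds. A sketch of that fold, reusing imports already in this file (`util` here is `importlib.util`; the helper name is hypothetical):

```
def _call_stack_deploy_hook(stack, hook_name, arg):
    # Load <stack>/deploy/commands.py and call hook_name(arg) if defined.
    python_file_path = get_stack_file_path(stack).parent.joinpath("deploy", "commands.py")
    spec = util.spec_from_file_location("commands", python_file_path)
    imported_stack = util.module_from_spec(spec)
    spec.loader.exec_module(imported_stack)
    hook = getattr(imported_stack, hook_name, None)
    return hook(arg) if hook else None

# call_stack_deploy_init(stack) then reduces to:
#   _call_stack_deploy_hook(stack, "init", None)
```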
# Inspect the pod yaml to find config files referenced in subdirectories
# other than the one associated with the pod
def _find_extra_config_dirs(parsed_pod_file, pod):
@ -118,17 +151,20 @@ def _find_extra_config_dirs(parsed_pod_file, pod):
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
yaml = _get_yaml()
yaml = get_yaml()
stack = global_options(ctx).stack
verbose = global_options(ctx).verbose
default_spec_file_content = call_stack_deploy_init(stack)
spec_file_content = {"stack": stack}
if default_spec_file_content:
spec_file_content.update(default_spec_file_content)
if verbose:
print(f"Creating spec file for stack: {stack}")
named_volumes = _get_named_volumes(stack)
if named_volumes:
volume_descriptors = {}
for named_volume in named_volumes:
volume_descriptors[named_volume] = f"../data/{named_volume}"
volume_descriptors[named_volume] = f"./data/{named_volume}"
spec_file_content["volumes"] = volume_descriptors
with open(output, "w") as output_file:
yaml.dump(spec_file_content, output_file)
@ -160,7 +196,7 @@ def create(ctx, spec_file, deployment_dir):
destination_compose_dir = os.path.join(deployment_dir, "compose")
os.mkdir(destination_compose_dir)
data_dir = Path(__file__).absolute().parent.joinpath("data")
yaml = _get_yaml()
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
@ -180,3 +216,18 @@ def create(ctx, spec_file, deployment_dir):
# If the same config dir appears in multiple pods, it may already have been copied
if not os.path.exists(destination_config_dir):
copytree(source_config_dir, destination_config_dir)
# Delegate to the stack's Python code
deployment_context = DeploymentContext(stack_name, Path(deployment_dir))
call_stack_deploy_create(deployment_context)
@click.command()
@click.option("--node-moniker", help="Help goes here")
@click.option("--key-name", help="Help goes here")
@click.option("--initialize-network", is_flag=True, default=False, help="Help goes here")
@click.option("--join-network", is_flag=True, default=False, help="Help goes here")
@click.option("--create-network", is_flag=True, default=False, help="Help goes here")
@click.pass_context
def setup(ctx, node_moniker, key_name, initialize_network, join_network, create_network):
stack = global_options(ctx).stack
call_stack_deploy_setup(stack)


@ -25,7 +25,7 @@ import click
import importlib.resources
from pathlib import Path
import yaml
from .util import include_exclude_check
from app.util import include_exclude_check
class GitProgress(git.RemoteProgress):
@ -227,7 +227,7 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
os.makedirs(dev_root_path)
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
from app import data
with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file:
all_repos = repository_list_file.read().splitlines()

app/stack_state.py (new file, 22 lines)

@ -0,0 +1,22 @@
# Copyright © 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from enum import Enum
class State(Enum):
CREATED = 1
CONFIGURED = 2
STARTED = 3
STOPPED = 4
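`State` backs the per-stack `get_state()` hook (the mainnet-laconic `commands.py` above returns `State.CONFIGURED`). An illustrative use; only `State` and the hook signature come from this commit, the driver itself is hypothetical:

```
# Hypothetical driver sketch built on this commit's State enum.
from app.stack_state import State

def needs_start(get_state, command_context):
    return get_state(command_context) in (State.CREATED, State.CONFIGURED, State.STOPPED)
```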


@ -15,7 +15,7 @@
import os.path
import sys
import yaml
import ruamel.yaml
from pathlib import Path
@ -42,7 +42,7 @@ def get_parsed_stack_config(stack):
stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_file_path(stack)
try:
with stack_file_path:
stack_config = yaml.safe_load(open(stack_file_path, "r"))
stack_config = get_yaml().load(open(stack_file_path, "r"))
return stack_config
except FileNotFoundError as error:
# We try here to generate a useful diagnostic error
@ -60,7 +60,7 @@ def get_parsed_deployment_spec(spec_file):
spec_file_path = Path(spec_file)
try:
with spec_file_path:
deploy_spec = yaml.safe_load(open(spec_file_path, "r"))
deploy_spec = get_yaml().load(open(spec_file_path, "r"))
return deploy_spec
except FileNotFoundError as error:
# We try here to generate a useful diagnostic error
@ -69,6 +69,14 @@ def get_parsed_deployment_spec(spec_file):
sys.exit(1)
def get_yaml():
# See: https://stackoverflow.com/a/45701840/1701505
yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True
yaml.indent(sequence=3, offset=1)
return yaml
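Centralizing on ruamel's round-trip loader here (and switching `get_parsed_stack_config` and `get_parsed_deployment_spec` off `yaml.safe_load` above) means quoting survives a load/dump cycle, which matters when `deploy create` rewrites pod files. A quick sketch of the difference, assuming both YAML libraries are installed:

```
# Sketch: PyYAML drops quoting on round-trip; ruamel as configured here keeps it.
import io
import ruamel.yaml
import yaml as pyyaml

doc = 'command: "/bin/sh"\n'
print(pyyaml.dump(pyyaml.safe_load(doc)))  # command: /bin/sh   (quotes lost)

ry = ruamel.yaml.YAML()
ry.preserve_quotes = True
buf = io.StringIO()
ry.dump(ry.load(doc), buf)
print(buf.getvalue())  # command: "/bin/sh"  (quotes preserved)
```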
# TODO: this is fragile wrt to the subcommand depth
# See also: https://github.com/pallets/click/issues/108
def global_options(ctx):


@ -22,7 +22,7 @@ def command(ctx):
'''print tool version'''
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
from app import data
with importlib.resources.open_text(data, "build_tag.txt") as version_file:
# TODO: code better version that skips comment lines
version_string = version_file.read().splitlines()[1]

tests/mainnet-eth/run-test.sh (new executable file, 32 lines)

@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
echo "Running stack-orchestrator Ethereum mainnet test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a new unique repo dir
export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-mainnet-eth-test.XXXXXXXXXX)
DEPLOYMENT_DIR=mainnet-eth-deployment-test
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack mainnet-eth setup-repositories
$TEST_TARGET_SO --stack mainnet-eth build-containers
$TEST_TARGET_SO --stack mainnet-eth deploy init --output mainnet-eth-spec.yml
$TEST_TARGET_SO deploy create --spec-file mainnet-eth-spec.yml --deployment-dir $DEPLOYMENT_DIR
# Start the stack
$TEST_TARGET_SO deployment --dir $DEPLOYMENT_DIR start
# Verify that the stack is up and running
$TEST_TARGET_SO deployment --dir $DEPLOYMENT_DIR ps
#TODO: add a check that the container logs show good startup
$TEST_TARGET_SO deployment --dir $DEPLOYMENT_DIR stop --delete-volumes
echo "Removing deployment directory"
rm -rf $DEPLOYMENT_DIR
echo "Removing cloned repositories"
rm -rf $CERC_REPO_BASE_DIR
# Nothing sets $test_result; under `set -e` any failure above already aborts
exit 0