
Compare commits


2 Commits

SHA1: 69d9ae1b66
Message: [wip] factor out eth genesis & add plugeth loaded stack
  - fix geth flag
  - refactors genesis generation into new image
Date: 2023-06-29 00:45:51 +08:00

SHA1: c0c2501307
Message: forward more vars for debugging
  - forward CERC_GETH_VERBOSITY
  - forward CERC_STATEDIFF_DB_LOG_STATEMENTS
  - forward CERC_REMOTE_DEBUG
Date: 2023-06-29 00:45:35 +08:00
51 changed files with 374 additions and 1054 deletions

View File

@@ -15,9 +15,8 @@
 import os
 from abc import ABC, abstractmethod
+from .deploy_system import get_stack_status
 from decouple import config
-from .deploy import get_stack_status
-from .util import _log

 def get_stack(config, stack):

@@ -50,7 +49,7 @@ class package_registry_stack(base_stack):
         url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
         if url_from_environment:
             if self.config.verbose:
-                _log(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
+                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
             self.url = url_from_environment
         else:
             # Otherwise we expect to use the local package-registry stack

@@ -59,13 +58,13 @@ class package_registry_stack(base_stack):
         if registry_running:
             # If it is available, get its mapped port and construct its URL
             if self.config.debug:
-                _log("Found local package registry stack is up")
+                print("Found local package registry stack is up")
             # TODO: get url from deploy-stack
             self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
         else:
             # If not, print a message about how to start it and return fail to the caller
-            _log("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
-            _log("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
+            print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
+            print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
             return False
         return True

View File

@@ -27,7 +27,7 @@ import subprocess
 import click
 import importlib.resources
 from pathlib import Path
-from .util import _log, include_exclude_check, get_parsed_stack_config
+from .util import include_exclude_check, get_parsed_stack_config
 from .base import get_npm_registry_url

 # TODO: find a place for this

@@ -56,15 +56,15 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
     if local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        _log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
     else:
         dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

     if not quiet:
-        _log(f'Dev Root is: {dev_root_path}')
+        print(f'Dev Root is: {dev_root_path}')

     if not os.path.isdir(dev_root_path):
-        _log('Dev root directory doesn\'t exist, creating')
+        print('Dev root directory doesn\'t exist, creating')

     # See: https://stackoverflow.com/a/20885799/1701505
     from . import data

@@ -79,9 +79,9 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
     containers_in_scope = all_containers

     if verbose:
-        _log(f'Containers: {containers_in_scope}')
+        print(f'Containers: {containers_in_scope}')
     if stack:
-        _log(f"Stack: {stack}")
+        print(f"Stack: {stack}")

     # TODO: make this configurable
     container_build_env = {

@@ -102,16 +102,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
     def process_container(container):
         if not quiet:
-            _log(f"Building: {container}")
+            print(f"Building: {container}")
         build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
         build_script_filename = os.path.join(build_dir, "build.sh")
         if verbose:
-            _log(f"Build script filename: {build_script_filename}")
+            print(f"Build script filename: {build_script_filename}")
         if os.path.exists(build_script_filename):
             build_command = build_script_filename
         else:
             if verbose:
-                _log(f"No script file found: {build_script_filename}, using default build script")
+                print(f"No script file found: {build_script_filename}, using default build script")
             repo_dir = container.split('/')[1]
             # TODO: make this less of a hack -- should be specified in some metadata somewhere
             # Check if we have a repo for this container. If not, set the context dir to the container-build subdir

@@ -120,23 +120,23 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
             build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
         if not dry_run:
             if verbose:
-                _log(f"Executing: {build_command} with environment: {container_build_env}")
+                print(f"Executing: {build_command} with environment: {container_build_env}")
             build_result = subprocess.run(build_command, shell=True, env=container_build_env)
             if verbose:
-                _log(f"Return code is: {build_result.returncode}")
+                print(f"Return code is: {build_result.returncode}")
             if build_result.returncode != 0:
-                _log(f"Error running build for {container}")
+                print(f"Error running build for {container}")
                 if not continue_on_error:
-                    _log("FATAL Error: container build failed and --continue-on-error not set, exiting")
+                    print("FATAL Error: container build failed and --continue-on-error not set, exiting")
                     sys.exit(1)
                 else:
-                    _log("****** Container Build Error, continuing because --continue-on-error is set")
+                    print("****** Container Build Error, continuing because --continue-on-error is set")
         else:
-            _log("Skipped")
+            print("Skipped")

     for container in containers_in_scope:
         if include_exclude_check(container, include, exclude):
             process_container(container)
         else:
             if verbose:
-                _log(f"Excluding: {container}")
+                print(f"Excluding: {container}")

View File

@@ -25,6 +25,3 @@ services:
     image: cerc/laconic-registry-cli:local
     volumes:
       - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
-
-volumes:
-  laconicd-data:

View File

@@ -6,13 +6,23 @@ services:
       - ../config/fixturenet-eth/fixturenet-eth.env
     environment:
       RUN_BOOTNODE: "true"
-    image: cerc/fixturenet-plugeth-plugeth:local
+    image: cerc/fixturenet-eth-plugeth:local
     volumes:
       - fixturenet_plugeth_bootnode_geth_data:/root/ethdata
     ports:
       - "9898"
       - "30303"
+
+  # Workaround: since ethdata is mounted as a volume, we can't easily add the plugin lib as part of the image.
+  # Instead, this copies the lib from its image into the volume before running geth.
+  fixturenet-plugeth-plugin:
+    hostname: fixturenet-plugeth-plugin
+    image: cerc/plugeth-statediff:local
+    volumes:
+      - fixturenet_plugeth_geth_1_data:/root/ethdata
+    command: >-
+      sh -c "mkdir -p /root/ethdata/plugins && cp /usr/local/lib/statediff.so /root/ethdata/plugins/"

   fixturenet-eth-geth-1:
     restart: always
     hostname: fixturenet-eth-geth-1

@@ -25,7 +35,7 @@ services:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
     env_file:
       - ../config/fixturenet-eth/fixturenet-eth.env
-    image: cerc/fixturenet-plugeth-plugeth:local
+    image: cerc/fixturenet-eth-plugeth:local
     volumes:
       - fixturenet_plugeth_geth_1_data:/root/ethdata
     healthcheck:

@@ -35,6 +45,7 @@ services:
       retries: 10
       start_period: 3s
     depends_on:
+      - fixturenet-plugeth-plugin
       - fixturenet-eth-bootnode-geth
     ports:
       - "8545"

@@ -54,7 +65,7 @@ services:
       CERC_KEEP_RUNNING_AFTER_GETH_EXIT: "true"
     env_file:
       - ../config/fixturenet-eth/fixturenet-eth.env
-    image: cerc/fixturenet-plugeth-plugeth:local
+    image: cerc/fixturenet-eth-plugeth:local
     depends_on:
       - fixturenet-eth-bootnode-geth
     volumes:
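A quick way to check that the plugin-copy workaround above took effect, assuming the service names from this compose file (a sketch; the cluster name prefix will vary per deployment):

```bash
# After "deploy up", list the plugins dir inside the geth-1 data volume.
# statediff.so should be present if the plugin service ran first.
docker compose exec fixturenet-eth-geth-1 ls /root/ethdata/plugins
```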

View File

@@ -25,7 +25,7 @@ services:
       PROM_HTTP: "true"
       PROM_HTTP_ADDR: "0.0.0.0"
       PROM_HTTP_PORT: "8090"
-      LOG_LEVEL: "debug"
+      LOGRUS_LEVEL: "debug"
       CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
     volumes:
       - type: bind

View File

@@ -1,30 +0,0 @@
services:
  laconicd:
    restart: no
    image: cerc/laconicd:local
    command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
    volumes:
      # The cosmos-sdk node's database directory:
      - laconicd-data:/root/.laconicd/data
      # TODO: look at folding these scripts into the container
      - ../config/mainnet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
      - ../config/mainnet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
      - ../config/mainnet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
    # TODO: determine which of the ports below is really needed
    ports:
      - "6060"
      - "26657"
      - "26656"
      - "9473:9473"
      - "8545"
      - "8546"
      - "9090"
      - "9091"
      - "1317"
  cli:
    image: cerc/laconic-registry-cli:local
    volumes:
      - ../config/mainnet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml

volumes:
  laconicd-data:

View File

@@ -23,7 +23,7 @@ services:
       - peers_ids:/peers
       - mobymask_deployment:/server
     ports:
-      - "127.0.0.1:3002:80"
+      - "0.0.0.0:3002:80"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost", "80"]
       interval: 20s

@@ -55,7 +55,7 @@ services:
       - peers_ids:/peers
       - mobymask_deployment:/server
     ports:
-      - "127.0.0.1:3004:80"
+      - "0.0.0.0:3004:80"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost", "80"]
       interval: 20s

View File

@@ -18,7 +18,7 @@ services:
       - ../config/watcher-mobymask-v2/test-app-start.sh:/scripts/test-app-start.sh
       - peers_ids:/peers
     ports:
-      - "127.0.0.1:3003:80"
+      - "0.0.0.0:3003:80"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "80"]
       interval: 20s

View File

@@ -1,35 +0,0 @@
version: "3.8"

services:
  reth:
    restart: unless-stopped
    hostname: reth
    image: cerc/reth:local
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/start-reth.sh"]
    volumes:
      - ../config/reth/start-reth.sh:/docker-entrypoint-scripts.d/start-reth.sh
      - reth_data:/root/.local/share/reth
      - shared_data:/root/.shared_data
    ports:
      - "8545:8545" # http rpc
      - "8546:8546" # ws rpc
      - "30303:30303" # network listening port
      - "30303:30303/udp"
      - "8551" # consensus auth

  lighthouse:
    restart: unless-stopped
    hostname: lighthouse
    image: cerc/lighthouse:local
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/start-lighthouse.sh"]
    volumes:
      - ../config/reth/start-lighthouse.sh:/docker-entrypoint-scripts.d/start-lighthouse.sh
      - lighthouse_data:/root/.lighthouse/mainnet
      - shared_data:/root/.shared_data
    ports:
      - "8001"

volumes:
  reth_data:
  lighthouse_data:
  shared_data:

View File

@@ -5,7 +5,7 @@ services:
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
     volumes:
-      - test-data:/data
+      - test-data:/var
     ports:
       - "80"

View File

@@ -14,7 +14,7 @@ services:
       - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
       - mobymask_watcher_db_data:/var/lib/postgresql/data
     ports:
-      - "127.0.0.1:15432:5432"
+      - "0.0.0.0:15432:5432"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "5432"]
       interval: 20s

@@ -95,9 +95,9 @@ services:
       - mobymask_deployment:/server
     # Expose GQL, metrics and relay node ports
     ports:
-      - "127.0.0.1:3001:3001"
-      - "127.0.0.1:9001:9001"
-      - "127.0.0.1:9090:9090"
+      - "0.0.0.0:3001:3001"
+      - "0.0.0.0:9001:9001"
+      - "0.0.0.0:9090:9090"
     healthcheck:
       test: ["CMD", "busybox", "nc", "localhost", "9090"]
       interval: 20s

View File

@@ -1,118 +0,0 @@
#!/bin/bash
# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
# so we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# trace evm
TRACE="--trace"
# TRACE=""
# validate dependencies are installed
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
# remove existing daemon and client
rm -rf ~/.laconic*
make install
laconicd config keyring-backend $KEYRING
laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Set gas limit in genesis
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# disable produce empty block
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi
if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
fi
# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
# Sign genesis transaction
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
# Collect genesis tx
laconicd collect-gentxs
# Run this to ensure everything worked and that the genesis file is setup correctly
laconicd validate-genesis
if [[ $1 == "pending" ]]; then
echo "pending mode is on, please wait for the first block committed."
fi
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground
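All of the genesis edits above repeat one `cat | jq | mv` pattern; a hypothetical helper (not present in the file) makes the intent easier to see:

```bash
# Hypothetical refactor of the repeated pattern above; set_genesis is not
# defined anywhere in this repo.
GENESIS="$HOME/.laconicd/config/genesis.json"
set_genesis() {
    jq "$1" "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS"
}
set_genesis '.app_state["staking"]["params"]["bond_denom"]="aphoton"'
set_genesis '.consensus_params["block"]["max_gas"]="10000000"'
```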

View File

@@ -1,2 +0,0 @@
#!/bin/sh
laconicd keys show mykey | grep address | cut -d ' ' -f 3

View File

@@ -1,2 +0,0 @@
#!/bin/sh
echo y | laconicd keys export mykey --unarmored-hex --unsafe

View File

@@ -1,9 +0,0 @@
services:
  cns:
    restEndpoint: 'http://laconicd:1317'
    gqlEndpoint: 'http://laconicd:9473/api'
    userKey: REPLACE_WITH_MYKEY
    bondId:
    chainId: laconic_9000-1
    gas: 250000
    fees: 200000aphoton

View File

@@ -1,16 +0,0 @@
#!/bin/bash
# Wait for reth container to create jwt auth token
while [ ! -f /root/.shared_data/jwt.hex ]; do
echo "Jwt auth token not found, sleeping for 5s..."
sleep 5
done
echo "Jwt token found. Starting Lighthouse..."
export RUST_LOG=info
lighthouse bn \
--network mainnet \
--execution-endpoint http://reth:8551 \
--execution-jwt /root/.shared_data/jwt.hex \
--checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
--disable-deposit-contract-sync

View File

@@ -1,24 +0,0 @@
#!/bin/bash
# generate jwt token for reth/lighthouse authentication
echo "Installing OpenSSL..."
apt update
apt install openssl
echo "Generating jwt token for lighthouse auth..."
openssl rand -hex 32 | tr -d "\n" | tee /root/.shared_data/jwt.hex
# start reth
echo "Starting Reth..."
export RUST_LOG=info
reth node \
--authrpc.jwtsecret /root/.shared_data/jwt.hex \
--authrpc.addr 0.0.0.0 \
--authrpc.port 8551 \
--http \
--http.addr 0.0.0.0 \
--http.corsdomain * \
--http.api eth,web3,net,rpc \
--ws \
--ws.addr 0.0.0.0 \
--ws.origins * \
--ws.api eth,web3,net,rpc
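Note: `apt install openssl` with no `-y` prompts for confirmation, which can hang a non-interactive container start; a more defensive variant (an assumption, not what the deleted script did) would be:

```bash
# Non-interactive install (assumes a Debian-based image)
apt-get update && apt-get install -y openssl
```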

View File

@@ -2,20 +2,21 @@ FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a
 FROM golang:1.20-alpine as builder

-RUN apk add --no-cache python3 py3-pip
 COPY genesis /opt/genesis

-# Install ethereum-genesis-generator tools
 COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
 COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
 COPY --from=ethgen /apps /apps

+# Install ethereum-genesis-generator
+RUN apk add --no-cache python3 py3-pip
 RUN cd /apps/el-gen && pip3 install -r requirements.txt
-# web3==5.24.0 used by el-gen is broken on python 3.11
+# web3==5.24.0 is broken on python 3.11
 RUN pip3 install --upgrade "web3==6.5.0"

 # Build genesis config
-RUN apk add --no-cache make bash envsubst jq
+RUN apk add --no-cache bash envsubst jq
+RUN apk add --no-cache make
 RUN cd /opt/genesis && make genesis-el

 # Snag the genesis block info.
# Snag the genesis block info. # Snag the genesis block info.

View File

@@ -7,7 +7,8 @@ RUN go install github.com/go-delve/delve/cmd/dlv@latest

 FROM alpine:3.17

-RUN apk add --no-cache bash wget python3 bind-tools postgresql-client
+RUN apk add --no-cache bash wget python3
+RUN apk add --no-cache bind-tools postgresql-client

 COPY run-el.sh /opt/testnet/run.sh

View File

@@ -7,12 +7,11 @@ fi

 ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2`
 NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'`
 NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'`
-CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}"
-CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}"
+HOME_DIR=`pwd`

 cd /opt/testnet/build/el
 python3 -m http.server 9898 &
-cd $HOME
+cd $HOME_DIR

 START_CMD="geth"
 if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then

@@ -35,7 +34,7 @@ trap 'cleanup' SIGINT SIGTERM

 if [ "true" == "$RUN_BOOTNODE" ]; then
     $START_CMD \
-      --datadir="${CERC_ETH_DATADIR}" \
+      --datadir=~/ethdata \
       --nodekeyhex="${BOOTNODE_KEY}" \
       --nodiscover \
       --ipcdisable \

@@ -83,7 +82,7 @@ else
             fi
         fi
     done

-    STATEDIFF_OPTS="--statediff \
+    STATEDIFF_OPTS="--statediff=true \
      --statediff.db.host=$CERC_STATEDIFF_DB_HOST \
      --statediff.db.name=$CERC_STATEDIFF_DB_NAME \
      --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \

@@ -95,15 +94,10 @@ else
      --statediff.waitforsync=true \
      --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \
      --statediff.writing=true"
-
-    if [ -d "${CERC_PLUGINS_DIR}" ]; then
-        # With plugeth, we separate the statediff options by prefixing with ' -- '
-        STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}"
-    fi
     fi

     $START_CMD \
-      --datadir="${CERC_ETH_DATADIR}" \
+      --datadir=~/ethdata \
       --bootnodes="${ENODE}" \
       --allow-insecure-unlock \
       --http \

@@ -131,8 +125,7 @@ else
       --metrics.addr="0.0.0.0" \
       --verbosity=${CERC_GETH_VERBOSITY:-3} \
       --log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
-      --miner.etherbase="${ETHERBASE}" \
-      ${STATEDIFF_OPTS} \
+      --miner.etherbase="${ETHERBASE}" ${STATEDIFF_OPTS} \
       &

     geth_pid=$!

View File

@@ -1,25 +1,20 @@
 FROM cerc/fixturenet-eth-genesis:local as fnetgen

+# FIXME: DEV - patched build of plugeth
+FROM cerc/plugeth:local as geth

 # Using the same golang image as used to build geth: https://github.com/cerc-io/go-ethereum/blob/HEAD/Dockerfile
 FROM golang:1.20-alpine as delve
 RUN go install github.com/go-delve/delve/cmd/dlv@latest

-FROM cerc/plugeth-statediff:local as statediff
-# FIXME: fork of plugeth, use stock after upstreaming patches
-FROM cerc/plugeth:local as geth

 FROM alpine:3.17
-RUN apk add --no-cache bash wget python3 bind-tools postgresql-client
+RUN apk add --no-cache bash wget python3

 COPY run-el.sh /opt/testnet/run.sh
 COPY --from=delve /go/bin/dlv /usr/local/bin/
 COPY --from=geth /usr/local/bin/geth /usr/local/bin/
 COPY --from=fnetgen /opt/genesis /opt/testnet
-COPY --from=statediff /usr/local/lib/statediff.so /usr/local/lib/plugeth/

+# Initialize the geth db with our config
+RUN geth --datadir ~/ethdata init /opt/testnet/build/el/geth.json && rm -f ~/ethdata/geth/nodekey

 ENTRYPOINT ["/opt/testnet/run.sh"]

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Build cerc/fixturenet-eth-plugeth
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/fixturenet-eth-plugeth:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR

View File

@@ -0,0 +1,141 @@
#!/bin/bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2`
NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'`
NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'`
HOME_DIR=`pwd`
cd /opt/testnet/build/el
python3 -m http.server 9898 &
cd $HOME_DIR
START_CMD="geth"
if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --"
fi
# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script
cleanup() {
echo "Signal received, cleaning up..."
# Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process)
pkill -P ${geth_pid}
sleep 2
kill $(jobs -p)
wait
echo "Done"
}
trap 'cleanup' SIGINT SIGTERM
if [ "true" == "$RUN_BOOTNODE" ]; then
$START_CMD \
--datadir=~/ethdata \
--nodekeyhex="${BOOTNODE_KEY}" \
--nodiscover \
--ipcdisable \
--networkid=${NETWORK_ID} \
--netrestrict="${NETRESTRICT}" \
&
geth_pid=$!
else
cd /opt/testnet/accounts
./import_keys.sh
echo -n "$JWT" > /opt/testnet/build/el/jwtsecret
if [ "$CERC_RUN_STATEDIFF" == "detect" ] && [ -n "$CERC_STATEDIFF_DB_HOST" ]; then
dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short)
dig_status_code=$?
if [[ $dig_status_code = 0 && -n $dig_result ]]; then
echo "Statediff DB at $CERC_STATEDIFF_DB_HOST"
CERC_RUN_STATEDIFF="true"
else
echo "No statediff DB available."
CERC_RUN_STATEDIFF="false"
fi
fi
STATEDIFF_OPTS=""
if [ "$CERC_RUN_STATEDIFF" == "true" ]; then
ready=0
echo "Waiting for statediff DB..."
while [ $ready -eq 0 ]; do
sleep 1
export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD"
result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \
-p "$CERC_STATEDIFF_DB_PORT" \
-U "$CERC_STATEDIFF_DB_USER" \
-d "$CERC_STATEDIFF_DB_NAME" \
-t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }')
if [ -n "$result" ]; then
echo "DB ready..."
if [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then
ready=1
else
echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)"
fi
fi
done
STATEDIFF_OPTS="--statediff \
--statediff.db.host=$CERC_STATEDIFF_DB_HOST \
--statediff.db.name=$CERC_STATEDIFF_DB_NAME \
--statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \
--statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \
--statediff.db.port=$CERC_STATEDIFF_DB_PORT \
--statediff.db.user=$CERC_STATEDIFF_DB_USER \
--statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \
--statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \
--statediff.waitforsync=true \
--statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \
--statediff.writing=true"
fi
$START_CMD \
--datadir=~/ethdata \
--bootnodes="${ENODE}" \
--allow-insecure-unlock \
--http \
--http.addr="0.0.0.0" \
--http.vhosts="*" \
--http.api="${CERC_GETH_HTTP_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
--http.corsdomain="*" \
--authrpc.addr="0.0.0.0" \
--authrpc.vhosts="*" \
--authrpc.jwtsecret="/opt/testnet/build/el/jwtsecret" \
--ws \
--ws.addr="0.0.0.0" \
--ws.origins="*" \
--ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
--http.corsdomain="*" \
--networkid="${NETWORK_ID}" \
--netrestrict="${NETRESTRICT}" \
--gcmode archive \
--txlookuplimit=0 \
--cache.preimages \
--syncmode=full \
--mine \
--miner.threads=1 \
--metrics \
--metrics.addr="0.0.0.0" \
--verbosity=${CERC_GETH_VERBOSITY:-3} \
--log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
--miner.etherbase="${ETHERBASE}" \
-- ${STATEDIFF_OPTS} \
&
geth_pid=$!
fi
wait $geth_pid
if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then
while [ 1 -eq 1 ]; do
sleep 60
done
fi
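Note the bare `--` before ${STATEDIFF_OPTS} in the geth invocation above: per the comment removed from the old run-el.sh earlier in this compare ("With plugeth, we separate the statediff options by prefixing with ' -- '"), it is what hands the statediff flags to the plugeth plugin rather than to stock geth. Illustratively, the assembled command has this shape:

```bash
# Illustrative shape of the final command line only; most flags omitted and
# values are placeholders, not output captured from a real run.
geth --datadir=~/ethdata --bootnodes="${ENODE}" --miner.etherbase="${ETHERBASE}" \
  -- --statediff --statediff.db.host="$CERC_STATEDIFF_DB_HOST" --statediff.writing=true
```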

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# Build cerc/fixturenet-plugeth-plugeth
set -x
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
if [ ! -e "${SCRIPT_DIR}/run-el.sh" ]; then
cp -fp ${SCRIPT_DIR}/../cerc-fixturenet-eth-geth/run-el.sh ${SCRIPT_DIR}/
fi
docker build -t cerc/fixturenet-plugeth-plugeth:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR

View File

@@ -2,6 +2,10 @@
 # Build cerc/go-opera
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

+# Checkout appropriate release; refer to https://docs.fantom.foundation/
+OPERA_TAG=${OPERA_TAG:-release/1.1.2-rc.5}
+git -C ${CERC_REPO_BASE_DIR}/go-opera checkout ${OPERA_TAG}

 # Repo's dockerfile gives build error because it's hardcoded for go 1.17; go 1.19 is required
 sed -i 's/FROM golang:1\.[0-9]*-alpine as builder/FROM golang:1.19-alpine as builder/' ${CERC_REPO_BASE_DIR}/go-opera/docker/Dockerfile.opera

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env bash
# Build cerc/plugeth-statediff
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/plugeth-statediff:local ${build_command_args} ${CERC_REPO_BASE_DIR}/plugeth-statediff

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env bash
# Build cerc/plugeth
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/plugeth:local ${build_command_args} ${CERC_REPO_BASE_DIR}/plugeth

View File

@@ -1,5 +0,0 @@
#!/usr/bin/env bash
# Build cerc/go-opera
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/reth:local ${build_command_args} ${CERC_REPO_BASE_DIR}/reth

View File

@@ -4,7 +4,7 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
     set -x
 fi

 # Test if the container's filesystem is old (run previously) or new
-EXISTSFILENAME=/data/exists
+EXISTSFILENAME=/var/exists
 echo "Test container starting"
 if [[ -f "$EXISTSFILENAME" ]];
 then

View File

@@ -43,4 +43,3 @@ cerc/watcher-gelato
 cerc/lotus
 cerc/go-opera
 cerc/lasso
-cerc/reth

View File

@@ -5,6 +5,7 @@ go-ethereum-foundry
 ipld-eth-beacon-db
 ipld-eth-beacon-indexer
 ipld-eth-server
+lighthouse
 laconicd
 fixturenet-laconicd
 fixturenet-eth

@@ -29,4 +30,3 @@ watcher-gelato
 fixturenet-lotus
 mainnet-go-opera
 lasso
-reth

View File

@@ -36,6 +36,3 @@ github.com/filecoin-project/lotus
 git.vdb.to/cerc-io/test-project
 github.com/Fantom-foundation/go-opera
 github.com/cerc-io/lasso
-github.com/paradigmxyz/reth
-git.vdb.to/cerc-io/plugeth
-git.vdb.to/cerc-io/plugeth-statediff

View File

@@ -0,0 +1,6 @@
# fixturenet-plugeth-loaded
A "loaded" version of `fixturenet-eth` that uses PluGeth instead of go-ethereum,
with all the bells and whistles enabled.
See `stacks/fixturenet-eth/README.md` for more information.
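Deploying this stack would presumably follow the same pattern as the other stacks in this repo (a sketch; the stack name comes from the stack.yml below):

```bash
$ laconic-so --stack fixturenet-plugeth-loaded setup-repositories
$ laconic-so --stack fixturenet-plugeth-loaded build-containers
$ laconic-so --stack fixturenet-plugeth-loaded deploy up
```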

View File

@@ -0,0 +1,29 @@
version: "1.0"
name: fixturenet-plugeth-loaded
decription: "Loaded Plugeth-based Ethereum Fixturenet"
repos:
  - git.vdb.to/cerc-io/plugeth # fixme: dev
  - git.vdb.to/cerc-io/plugeth-statediff
  - github.com/cerc-io/tx-spammer
  - github.com/cerc-io/ipld-eth-server
  - github.com/cerc-io/ipld-eth-db
  - github.com/cerc-io/lighthouse
containers:
  - cerc/plugeth
  - cerc/plugeth-statediff
  - cerc/lighthouse
  - cerc/lighthouse-cli
  - cerc/fixturenet-eth-genesis
  - cerc/fixturenet-eth-plugeth
  - cerc/fixturenet-eth-lighthouse
  - cerc/ipld-eth-server
  - cerc/ipld-eth-db
  - cerc/keycloak
  - cerc/tx-spammer
pods:
  - fixturenet-plugeth
  - tx-spammer
  - fixturenet-eth-metrics
  - keycloak
  - ipld-eth-server
  - ipld-eth-db

View File

@@ -5,25 +5,15 @@ repos:
   - github.com/cerc-io/tx-spammer
   - github.com/dboreham/foundry
   - github.com/cerc-io/lighthouse
-  - github.com/cerc-io/ipld-eth-db@v5
-  - github.com/cerc-io/ipld-eth-server@v5
-  - git.vdb.to/cerc-io/plugeth@statediff-wip
-  - git.vdb.to/cerc-io/plugeth-statediff@wip
 containers:
   - cerc/lighthouse
   - cerc/lighthouse-cli
-  - cerc/plugeth-statediff
-  - cerc/plugeth
   - cerc/fixturenet-eth-genesis
-  - cerc/fixturenet-plugeth-plugeth
+  - cerc/fixturenet-eth-plugeth
   - cerc/fixturenet-eth-lighthouse
   - cerc/tx-spammer
   - cerc/foundry
-  - cerc/ipld-eth-db
-  - cerc/ipld-eth-server
 pods:
-  - ipld-eth-db
-  - ipld-eth-server
   - fixturenet-plugeth
   - foundry
   - tx-spammer

View File

@@ -2,7 +2,7 @@ version: "1.1"
 name: mainnet-opera
 decription: "Fantom mainnet node"
 repos:
-  - github.com/Fantom-foundation/go-opera@release/1.1.2-rc.5
+  - github.com/Fantom-foundation/go-opera
 containers:
   - cerc/go-opera
 pods:

View File

@@ -1,2 +0,0 @@
# Laconic Mainnet Deployment (experimental)

View File

@@ -1,57 +0,0 @@
# Copyright © 2022, 2023 Cerc

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.

import click
import os
from shutil import copyfile
import sys
from .util import get_stack_config_filename, get_parsed_deployment_spec

default_spec_file_content = """stack: mainnet-laconic
data_dir: /my/path
node_name: my-node-name
"""


def make_default_deployment_dir():
    return "deployment-001"


@click.command()
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
    with open(output, "w") as output_file:
        output_file.write(default_spec_file_content)


@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.pass_context
def create(ctx, spec_file, deployment_dir):
    # This function fails with a useful error message if the file doens't exist
    parsed_spec = get_parsed_deployment_spec(spec_file)
    if ctx.debug:
        print(f"parsed spec: {parsed_spec}")
    if deployment_dir is None:
        deployment_dir = make_default_deployment_dir()
    if os.path.exists(deployment_dir):
        print(f"Error: {deployment_dir} already exists")
        sys.exit(1)
    os.mkdir(deployment_dir)
    # Copy spec file and the stack file into the deployment dir
    copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
    stack_file = get_stack_config_filename(parsed_spec.stack)
    copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))

View File

@@ -1,31 +0,0 @@
version: "1.0"
name: mainnet-laconic
description: "Mainnet laconic node"
repos:
  - cerc-io/laconicd
  - lirewine/debug
  - lirewine/crypto
  - lirewine/gem
  - lirewine/sdk
  - cerc-io/laconic-sdk
  - cerc-io/laconic-registry-cli
  - cerc-io/laconic-console
npms:
  - laconic-sdk
  - laconic-registry-cli
  - debug
  - crypto
  - sdk
  - gem
  - laconic-console
containers:
  - cerc/laconicd
  - cerc/laconic-registry-cli
  - cerc/laconic-console-host
pods:
  - mainnet-laconicd
  - fixturenet-laconic-console
config:
  cli:
    key: laconicd.mykey
    address: laconicd.myaddress

View File

@@ -1,48 +0,0 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Dump environment variables for debugging
echo "Environment variables:"
env
# Test laconic stack
echo "Running laconic stack test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
# Test bringing the test container up and down
# with and without volume removal
$TEST_TARGET_SO --stack test setup-repositories
$TEST_TARGET_SO --stack test build-containers
$TEST_TARGET_SO --stack test deploy up
$TEST_TARGET_SO --stack test deploy down
# The next time we bring the container up the volume will be old (from the previous run above)
$TEST_TARGET_SO --stack test deploy up
log_output_1=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_1" == *"Filesystem is old"* ]]; then
echo "Retain volumes test: passed"
else
echo "Retain volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
# Now when we bring the container up the volume will be new again
$TEST_TARGET_SO --stack test deploy up
log_output_2=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
echo "Delete volumes test: passed"
else
echo "Delete volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
echo "Test passed"

View File

@@ -110,8 +110,8 @@ To list down and monitor the running containers:

 # Expected output:

 # Running containers:
-# id: 25cc3a1cbda27fcd9c2ad4c772bd753ccef1e178f901a70e6ff4191d4a8684e9, name: mobymask_v2-mobymask-watcher-db-1, ports: 127.0.0.1:15432->5432/tcp
-# id: c9806f78680d68292ffe942222af2003aa3ed5d5c69d7121b573f5028444391d, name: mobymask_v2-mobymask-watcher-server-1, ports: 127.0.0.1:3001->3001/tcp, 127.0.0.1:9001->9001/tcp, 127.0.0.1:9090->9090/tcp
+# id: 25cc3a1cbda27fcd9c2ad4c772bd753ccef1e178f901a70e6ff4191d4a8684e9, name: mobymask_v2-mobymask-watcher-db-1, ports: 0.0.0.0:15432->5432/tcp
+# id: c9806f78680d68292ffe942222af2003aa3ed5d5c69d7121b573f5028444391d, name: mobymask_v2-mobymask-watcher-server-1, ports: 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp
 # id: 6b30a1d313a88fb86f8a3b37a1b1a3bc053f238664e4b2d196c3ec74e04faf13, name: mobymask_v2-peer-tests-1, ports:

@@ -122,8 +122,8 @@ To list down and monitor the running containers:

 # CONTAINER ID   IMAGE                            COMMAND                  CREATED         STATUS                   PORTS     NAMES
 # 6b30a1d313a8   cerc/watcher-ts:local            "docker-entrypoint.s…"   5 minutes ago   Up 4 minutes                       mobymask_v2-peer-tests-1
-# c9806f78680d   cerc/watcher-mobymask-v2:local   "sh start-server.sh"     5 minutes ago   Up 5 minutes (healthy)   127.0.0.1:3001->3001/tcp, 127.0.0.1:9001->9001/tcp, 127.0.0.1:9090->9090/tcp   mobymask_v2-mobymask-watcher-server-1
-# 25cc3a1cbda2   postgres:14-alpine               "docker-entrypoint.s…"   5 minutes ago   Up 5 minutes (healthy)   127.0.0.1:15432->5432/tcp   mobymask_v2-mobymask-watcher-db-1
+# c9806f78680d   cerc/watcher-mobymask-v2:local   "sh start-server.sh"     5 minutes ago   Up 5 minutes (healthy)   0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp   mobymask_v2-mobymask-watcher-server-1
+# 25cc3a1cbda2   postgres:14-alpine               "docker-entrypoint.s…"   5 minutes ago   Up 5 minutes (healthy)   0.0.0.0:15432->5432/tcp   mobymask_v2-mobymask-watcher-db-1

 # Check logs for a container

View File

@@ -78,7 +78,7 @@ To monitor the running container:

 # Expected output:

 # CONTAINER ID   IMAGE                    COMMAND                  CREATED         STATUS                   PORTS                  NAMES
-# f1369dbae1c9   cerc/mobymask-ui:local   "docker-entrypoint.s…"   2 minutes ago   Up 2 minutes (healthy)   127.0.0.1:3004->80/tcp   mm_v2-lxdao-mobymask-app-1
+# f1369dbae1c9   cerc/mobymask-ui:local   "docker-entrypoint.s…"   2 minutes ago   Up 2 minutes (healthy)   0.0.0.0:3004->80/tcp   mm_v2-lxdao-mobymask-app-1

 # Check logs for a container
 docker logs -f mm_v2-lxdao-mobymask-app-1

View File

@@ -1,65 +0,0 @@
# Reth
Deploy a Reth API node alongside Lighthouse.
## Clone required repositories
```
$ laconic-so --stack reth setup-repositories
```
## Build the Reth stack containers
```
$ laconic-so --stack reth build-containers
```
## Deploy the stack
```
$ laconic-so --stack reth deploy up
```
## Check logs
```
$ laconic-so --stack reth deploy logs
```
Verify that your node is syncing. You should see entries similar to this from the Lighthouse container:
```
laconic-200e8f8ff7891515d777cd0f719078e3-lighthouse-1 | Jun 23 20:59:01.226 INFO New block received root: 0x9cd4a2dd9333cf802c2963c2f029deb0f94e511d2481fa0724ae8752e4c49b15, slot: 6727493
```
and entries similar to this from the Reth container:
```
laconic-200e8f8ff7891515d777cd0f719078e3-reth-1 | 2023-06-23T20:59:11.557389Z INFO reth::node::events: Stage committed progress pipeline_stages=1/13 stage=Headers block=0 checkpoint=4.9% eta=1h 3m 57s
```
## Test the API
Reth's http api is accessible on port `8545` and the websocket api is accessible on port `8546`.
```
$ curl --request POST \
--url http://localhost:8545/ \
--header 'Content-Type: application/json' \
--data '{
"jsonrpc": "2.0",
"method": "eth_blockNumber",
"params": [],
"id": 0
}'
# Response
{"jsonrpc":"2.0","result":"0x0","id":0}
```
## Clean up
Stop all services running in the background:
```bash
$ laconic-so --stack reth deploy down
```
To also delete the docker data volumes:
```bash
$ laconic-so --stack reth deploy down --delete-volumes
```

View File

@@ -1,10 +0,0 @@
version: "1.1"
name: reth
decription: "Reth node"
repos:
  - github.com/paradigmxyz/reth
containers:
  - cerc/reth
  - cerc/lighthouse
pods:
  - reth

View File

@ -21,15 +21,12 @@ import os
import sys import sys
from dataclasses import dataclass from dataclasses import dataclass
from decouple import config from decouple import config
from importlib import resources
import subprocess import subprocess
from python_on_whales import DockerClient, DockerException from python_on_whales import DockerClient, DockerException
import click import click
import importlib.resources
from pathlib import Path from pathlib import Path
from .util import _log, include_exclude_check, get_parsed_stack_config, global_options2 from .util import include_exclude_check, get_parsed_stack_config
from .deployment_create import create as deployment_create
from .deployment_create import init as deployment_init
class DeployCommandContext(object): class DeployCommandContext(object):
def __init__(self, cluster_context, docker): def __init__(self, cluster_context, docker):
@ -46,43 +43,47 @@ class DeployCommandContext(object):
def command(ctx, include, exclude, env_file, cluster): def command(ctx, include, exclude, env_file, cluster):
'''deploy a stack''' '''deploy a stack'''
if ctx.parent.obj.debug: cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster, env_file)
print(f"ctx.parent.obj: {ctx.parent.obj}")
ctx.obj = create_deploy_context(global_options2(ctx), global_options2(ctx).stack, include, exclude, cluster, env_file)
# Subcommand is executed now, by the magic of click
def create_deploy_context(global_context, stack, include, exclude, cluster, env_file):
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file) compose_env_file=cluster_context.env_file)
return DeployCommandContext(cluster_context, docker)
ctx.obj = DeployCommandContext(cluster_context, docker)
# Subcommand is executed now, by the magic of click
def up_operation(ctx, services_list): @command.command()
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
deploy_context = ctx.obj extra_args_list = list(extra_args) or None
if not global_context.dry_run: if not global_context.dry_run:
cluster_context = deploy_context.cluster_context cluster_context = ctx.obj.cluster_context
container_exec_env = _make_runtime_env(global_context) container_exec_env = _make_runtime_env(global_context)
for attr, value in container_exec_env.items(): for attr, value in container_exec_env.items():
os.environ[attr] = value os.environ[attr] = value
if global_context.verbose: if global_context.verbose:
_log(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}") print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}")
for pre_start_command in cluster_context.pre_start_commands: for pre_start_command in cluster_context.pre_start_commands:
_run_command(global_context, cluster_context.cluster, pre_start_command) _run_command(global_context, cluster_context.cluster, pre_start_command)
deploy_context.docker.compose.up(detach=True, services=services_list) ctx.obj.docker.compose.up(detach=True, services=extra_args_list)
for post_start_command in cluster_context.post_start_commands: for post_start_command in cluster_context.post_start_commands:
_run_command(global_context, cluster_context.cluster, post_start_command) _run_command(global_context, cluster_context.cluster, post_start_command)
_orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env) _orchestrate_cluster_config(global_context, cluster_context.config, ctx.obj.docker, container_exec_env)
def down_operation(ctx, delete_volumes, extra_args_list): @command.command()
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
@click.pass_context
def down(ctx, delete_volumes, extra_args):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run: if not global_context.dry_run:
if global_context.verbose: if global_context.verbose:
_log("Running compose down") print("Running compose down")
timeout_arg = None timeout_arg = None
if extra_args_list: if extra_args_list:
timeout_arg = extra_args_list[0] timeout_arg = extra_args_list[0]
@ -90,11 +91,13 @@ def down_operation(ctx, delete_volumes, extra_args_list):
ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes) ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes)
def ps_operation(ctx): @command.command()
@click.pass_context
def ps(ctx):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
if not global_context.dry_run: if not global_context.dry_run:
if global_context.verbose: if global_context.verbose:
_log("Running compose ps") print("Running compose ps")
container_list = ctx.obj.docker.compose.ps() container_list = ctx.obj.docker.compose.ps()
if len(container_list) > 0: if len(container_list) > 0:
print("Running containers:") print("Running containers:")
@ -115,91 +118,56 @@ def ps_operation(ctx):
print("No containers running") print("No containers running")
def port_operation(ctx, extra_args):
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if extra_args_list is None or len(extra_args_list) < 2:
            _log("Usage: port <service> <exposed-port>")
            sys.exit(1)
        service_name = extra_args_list[0]
        exposed_port = extra_args_list[1]
        if global_context.verbose:
            _log(f"Running compose port {service_name} {exposed_port}")
        mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port)
        print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
def exec_operation(ctx, extra_args):
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if extra_args_list is None or len(extra_args_list) < 2:
            _log("Usage: exec <service> <cmd>")
            sys.exit(1)
        service_name = extra_args_list[0]
        command_to_exec = ["sh", "-c"] + extra_args_list[1:]
        container_exec_env = _make_runtime_env(global_context)
        if global_context.verbose:
            _log(f"Running compose exec {service_name} {command_to_exec}")
        try:
            ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
        except DockerException as error:
            _log("container command returned error exit status")
def logs_operation(ctx, extra_args):
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if global_context.verbose:
            _log("Running compose logs")
        logs_output = ctx.obj.docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
        print(logs_output)
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
    extra_args_list = list(extra_args) or None
    up_operation(ctx, extra_args_list)
@command.command()
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
@click.argument('extra_args', nargs=-1)  # help: command: down <service1> <service2>
@click.pass_context
def down(ctx, delete_volumes, extra_args):
    extra_args_list = list(extra_args) or None
    down_operation(ctx, delete_volumes, extra_args_list)
@command.command()
@click.pass_context
def ps(ctx):
    ps_operation(ctx)
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: port <service1> <service2>
@click.pass_context
def port(ctx, extra_args):
    port_operation(ctx, extra_args)
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if extra_args_list is None or len(extra_args_list) < 2:
            print("Usage: port <service> <exposed-port>")
            sys.exit(1)
        service_name = extra_args_list[0]
        exposed_port = extra_args_list[1]
        if global_context.verbose:
            print(f"Running compose port {service_name} {exposed_port}")
        mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port)
        print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: exec <service> <command>
@click.pass_context
def exec(ctx, extra_args):
    exec_operation(ctx, extra_args)
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if extra_args_list is None or len(extra_args_list) < 2:
            print("Usage: exec <service> <cmd>")
            sys.exit(1)
        service_name = extra_args_list[0]
        command_to_exec = ["sh", "-c"] + extra_args_list[1:]
        container_exec_env = _make_runtime_env(global_context)
        if global_context.verbose:
            print(f"Running compose exec {service_name} {command_to_exec}")
        try:
            ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
        except DockerException as error:
            print("container command returned error exit status")
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
    logs_operation(ctx, extra_args)
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose logs")
        logs_output = ctx.obj.docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
        print(logs_output)
def get_stack_status(ctx, stack):
@@ -207,19 +175,19 @@ def get_stack_status(ctx, stack):
    ctx_copy = copy.copy(ctx)
    ctx_copy.stack = stack
    cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
    cluster_context = _make_cluster_context(ctx_copy, None, None, None, None)
    docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
    # TODO: refactor to avoid duplicating this code above
    if ctx.verbose:
        _log("Running compose ps")
        print("Running compose ps")
    container_list = docker.compose.ps()
    if len(container_list) > 0:
        if ctx.debug:
            _log(f"Container list from compose ps: {container_list}")
            print(f"Container list from compose ps: {container_list}")
        return True
    else:
        if ctx.debug:
            _log("No containers found from compose ps")
            print("No containers found from compose ps")
        return False
@@ -232,44 +200,37 @@ def _make_runtime_env(ctx):
    return container_exec_env
# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
def _make_cluster_context(ctx, include, exclude, cluster, env_file):
    if ctx.local_stack:
        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
        _log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
    else:
        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
    # TODO: huge hack, fix this
    # If the caller passed a path for the stack file, then we know that we can get the compose files
    # from the same directory
    if isinstance(stack, os.PathLike):
        compose_dir = stack.parent.joinpath("compose")
    else:
        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
        compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
    compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
    if cluster is None:
        # Create default unique, stable cluster name from config file path and stack name if provided
        # TODO: change this to the config file path
        path = os.path.realpath(sys.argv[0])
        unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
        unique_cluster_descriptor = f"{path},{ctx.stack},{include},{exclude}"
        if ctx.debug:
            _log(f"pre-hash descriptor: {unique_cluster_descriptor}")
            print(f"pre-hash descriptor: {unique_cluster_descriptor}")
        hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
        cluster = f"laconic-{hash}"
    if ctx.verbose:
        _log(f"Using cluster name: {cluster}")
        print(f"Using cluster name: {cluster}")
    # See: https://stackoverflow.com/a/20885799/1701505
    from . import data
    with resources.open_text(data, "pod-list.txt") as pod_list_file:
    with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
        all_pods = pod_list_file.read().splitlines()
    pods_in_scope = []
    if stack:
        stack_config = get_parsed_stack_config(stack)
    if ctx.stack:
        stack_config = get_parsed_stack_config(ctx.stack)
        # TODO: syntax check the input here
        pods_in_scope = stack_config['pods']
        cluster_config = stack_config['config'] if 'config' in stack_config else None
@@ -281,7 +242,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
    pods_in_scope = _convert_to_new_format(pods_in_scope)
    if ctx.verbose:
        _log(f"Pods: {pods_in_scope}")
        print(f"Pods: {pods_in_scope}")
    # Construct a docker compose command suitable for our purpose
@@ -307,10 +268,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
            compose_files.append(compose_file_name)
        else:
            if ctx.verbose:
                _log(f"Excluding: {pod_name}")
                print(f"Excluding: {pod_name}")
    if ctx.verbose:
        _log(f"files: {compose_files}")
        print(f"files: {compose_files}")
    return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
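The stable cluster-name scheme above is easy to sanity-check in isolation; a sketch assuming the same md5-of-descriptor recipe (the stack name is illustrative):

import hashlib
import os
import sys

path = os.path.realpath(sys.argv[0])  # e.g. an installed laconic-so entry point
unique_cluster_descriptor = f"{path},example-stack,None,None"
cluster = f"laconic-{hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()}"
print(cluster)  # "laconic-" + 32 hex chars; identical inputs always hash to the
                # same name, so repeated invocations reuse the same compose project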
@@ -342,7 +303,7 @@ def _convert_to_new_format(old_pod_array):
def _run_command(ctx, cluster_name, command):
    if ctx.verbose:
        _log(f"Running command: {command}")
        print(f"Running command: {command}")
    command_dir = os.path.dirname(command)
    command_file = os.path.join(".", os.path.basename(command))
    command_env = os.environ.copy()
@@ -351,7 +312,7 @@ def _run_command(ctx, cluster_name, command):
        command_env["CERC_SCRIPT_DEBUG"] = "true"
    command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir)
    if command_result.returncode != 0:
        _log(f"FATAL Error running command: {command}")
        print(f"FATAL Error running command: {command}")
        sys.exit(1)
@@ -368,7 +329,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
    for container in cluster_config:
        container_config = cluster_config[container]
        if ctx.verbose:
            _log(f"{container} config: {container_config}")
            print(f"{container} config: {container_config}")
        for directive in container_config:
            pd = ConfigDirective(
                container_config[directive].split(".")[0],
@@ -377,11 +338,10 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
                directive
            )
            if ctx.verbose:
                _log(f"Setting {pd.destination_container}.{pd.destination_variable}"
                print(f"Setting {pd.destination_container}.{pd.destination_variable}"
                     f" = {pd.source_container}.{pd.source_variable}")
            # TODO: add a timeout
            waiting_for_data = True
            destination_output = "*** no output received yet ***"
            while waiting_for_data:
                # TODO: fix the script paths so they're consistent between containers
                source_value = None
@@ -394,19 +354,19 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
                        envs=container_exec_env)
                except DockerException as error:
                    if ctx.debug:
                        _log(f"Docker exception reading config source: {error}")
                        print(f"Docker exception reading config source: {error}")
                    # If the script execution failed for some reason, we get:
                    # "It returned with code 1"
                    if "It returned with code 1" in str(error):
                        if ctx.verbose:
                            _log("Config export script returned an error, re-trying")
                            print("Config export script returned an error, re-trying")
                    # If the script failed to execute (e.g. the file is not there) then we get:
                    # "It returned with code 2"
                    if "It returned with code 2" in str(error):
                        _log(f"Fatal error reading config source: {error}")
                        print(f"Fatal error reading config source: {error}")
                if source_value:
                    if ctx.debug:
                        _log(f"fetched source value: {source_value}")
                        print(f"fetched source value: {source_value}")
                    destination_output = docker.compose.execute(pd.destination_container,
                                                                ["sh", "-c",
                                                                 f"sh /scripts/import-{pd.destination_variable}.sh"
@@ -415,8 +375,4 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
                                                                envs=container_exec_env)
                    waiting_for_data = False
                    if ctx.debug:
                        _log(f"destination output: {destination_output}")
                        print(f"destination output: {destination_output}")
command.add_command(deployment_init)
command.add_command(deployment_create)


@@ -1,140 +0,0 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
from dataclasses import dataclass
from pathlib import Path
import sys
from .deploy import up_operation, down_operation, ps_operation, port_operation, exec_operation, logs_operation, create_deploy_context
from .util import global_options
@dataclass
class DeploymentContext:
    dir: Path
@click.group()
@click.option("--dir", required=True, help="path to deployment directory")
@click.pass_context
def command(ctx, dir):
    # Check that --stack wasn't supplied
    if ctx.parent.obj.stack:
        print("Error: --stack can't be supplied with the deployment command")
        sys.exit(1)
    # Check dir is valid
    dir_path = Path(dir)
    if not dir_path.exists():
        print(f"Error: deployment directory {dir} does not exist")
        sys.exit(1)
    if not dir_path.is_dir():
        print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
        sys.exit(1)
    # Store the deployment context for subcommands
    ctx.obj = DeploymentContext(dir_path)
def make_deploy_context(ctx):
    # Get the stack config file name
    stack_file_path = ctx.obj.dir.joinpath("stack.yml")
    # TODO: add cluster name and env file here
    return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, None, None)
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
    ctx.obj = make_deploy_context(ctx)
    services_list = list(extra_args) or None
    up_operation(ctx, services_list)
# start is the preferred alias for up
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
@click.pass_context
def start(ctx, extra_args):
    ctx.obj = make_deploy_context(ctx)
    services_list = list(extra_args) or None
    up_operation(ctx, services_list)
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: down <service1> <service2>
@click.pass_context
def down(ctx, extra_args):
    # Get the stack config file name
    # TODO: add cluster name and env file here
    ctx.obj = make_deploy_context(ctx)
    down_operation(ctx, extra_args, None)
# stop is the preferred alias for down
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: down <service1> <service2>
@click.pass_context
def stop(ctx, extra_args):
    # TODO: add cluster name and env file here
    ctx.obj = make_deploy_context(ctx)
    down_operation(ctx, extra_args, None)
@command.command()
@click.pass_context
def ps(ctx):
    ctx.obj = make_deploy_context(ctx)
    ps_operation(ctx)
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: port <service1> <service2>
@click.pass_context
def port(ctx, extra_args):
    port_operation(ctx, extra_args)
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: exec <service> <command>
@click.pass_context
def exec(ctx, extra_args):
    ctx.obj = make_deploy_context(ctx)
    exec_operation(ctx, extra_args)
@command.command()
@click.argument('extra_args', nargs=-1)  # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
    ctx.obj = make_deploy_context(ctx)
    logs_operation(ctx, extra_args)
@command.command()
@click.pass_context
def status(ctx):
    print(f"Context: {ctx.parent.obj}")
#from importlib import resources, util
# TODO: figure out how to do this dynamically
#stack = "mainnet-laconic"
#module_name = "commands"
#spec = util.spec_from_file_location(module_name, "./app/data/stacks/" + stack + "/deploy/commands.py")
#imported_stack = util.module_from_spec(spec)
#spec.loader.exec_module(imported_stack)
#command.add_command(imported_stack.init)
#command.add_command(imported_stack.create)
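A runnable version of what the commented-out block sketches, assuming a stack that ships a deploy/commands.py exposing init and create click commands (the path and names are the ones in the comments, and `command` is the click group defined in this file):

from importlib import util

stack = "mainnet-laconic"
module_name = "commands"
spec = util.spec_from_file_location(module_name, "./app/data/stacks/" + stack + "/deploy/commands.py")
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)  # executes the module body, defining its commands
command.add_command(imported_stack.init)
command.add_command(imported_stack.create)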


@@ -1,155 +0,0 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import os
from pathlib import Path
from shutil import copyfile, copytree
import sys
import ruamel.yaml
from .util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options
def _get_yaml():
    # See: https://stackoverflow.com/a/45701840/1701505
    yaml = ruamel.yaml.YAML()
    yaml.preserve_quotes = True
    yaml.indent(sequence=3, offset=1)
    return yaml
def _make_default_deployment_dir():
    return "deployment-001"
def _get_compose_file_dir():
    # TODO: refactor to use common code with deploy command
    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
    data_dir = Path(__file__).absolute().parent.joinpath("data")
    source_compose_dir = data_dir.joinpath("compose")
    return source_compose_dir
def _get_named_volumes(stack):
    # Parse the compose files looking for named volumes
    named_volumes = []
    parsed_stack = get_parsed_stack_config(stack)
    pods = parsed_stack["pods"]
    yaml = _get_yaml()
    for pod in pods:
        pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
            for volume in volumes.keys():
                # Volume definition looks like:
                # 'laconicd-data': None
                named_volumes.append(volume)
    return named_volumes
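A worked example of what _get_named_volumes collects, using an illustrative compose fragment (not one of this repo's files):

import ruamel.yaml

compose_text = """
services:
  laconicd:
    image: cerc/laconicd:local
    volumes:
      - laconicd-data:/root/.laconicd
volumes:
  laconicd-data:
"""
parsed_pod_file = ruamel.yaml.YAML().load(compose_text)
print(list(parsed_pod_file["volumes"].keys()))  # ['laconicd-data']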
# If we're mounting a volume from a relative path, then we
# assume the directory doesn't exist yet and create it
# so the deployment will start
# Also warn if the path is absolute and doesn't exist
def _create_bind_dir_if_relative(volume, path_string, compose_dir):
    path = Path(path_string)
    if not path.is_absolute():
        absolute_path = Path(compose_dir).parent.joinpath(path)
        absolute_path.mkdir(parents=True, exist_ok=True)
    else:
        if not path.exists():
            print(f"WARNING: mount path for volume {volume} does not exist: {path_string}")
# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
def _fixup_pod_file(pod, spec, compose_dir):
    # Fix up volumes
    if "volumes" in spec:
        spec_volumes = spec["volumes"]
        if "volumes" in pod:
            pod_volumes = pod["volumes"]
            for volume in pod_volumes.keys():
                if volume in spec_volumes:
                    volume_spec = spec_volumes[volume]
                    volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
                    _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
                    new_volume_spec = {"driver": "local",
                                       "driver_opts": {
                                           "type": "none",
                                           "device": volume_spec_fixedup,
                                           "o": "bind"
                                           }
                                       }
                    pod["volumes"][volume] = new_volume_spec
@click.command()
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
    yaml = _get_yaml()
    stack = global_options(ctx).stack
    verbose = global_options(ctx).verbose
    spec_file_content = {"stack": stack}
    if verbose:
        print(f"Creating spec file for stack: {stack}")
    named_volumes = _get_named_volumes(stack)
    if named_volumes:
        volume_descriptors = {}
        for named_volume in named_volumes:
            volume_descriptors[named_volume] = f"../data/{named_volume}"
        spec_file_content["volumes"] = volume_descriptors
    with open(output, "w") as output_file:
        yaml.dump(spec_file_content, output_file)
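Given the code above, the generated spec is a small mapping; for a hypothetical stack with one named volume it would be (shown as the Python structure before yaml.dump):

spec_file_content = {
    "stack": "mainnet-laconic",  # example stack name
    "volumes": {
        "laconicd-data": "../data/laconicd-data",
    },
}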
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.pass_context
def create(ctx, spec_file, deployment_dir):
    # This function fails with a useful error message if the file doesn't exist
    parsed_spec = get_parsed_deployment_spec(spec_file)
    stack_name = parsed_spec['stack']
    stack_file = get_stack_file_path(stack_name)
    parsed_stack = get_parsed_stack_config(stack_name)
    if global_options(ctx).debug:
        print(f"parsed spec: {parsed_spec}")
    if deployment_dir is None:
        deployment_dir = _make_default_deployment_dir()
    if os.path.exists(deployment_dir):
        print(f"Error: {deployment_dir} already exists")
        sys.exit(1)
    os.mkdir(deployment_dir)
    # Copy spec file and the stack file into the deployment dir
    copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
    copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
    # Copy the pod files into the deployment dir, fixing up content
    pods = parsed_stack['pods']
    destination_compose_dir = os.path.join(deployment_dir, "compose")
    os.mkdir(destination_compose_dir)
    data_dir = Path(__file__).absolute().parent.joinpath("data")
    yaml = _get_yaml()
    for pod in pods:
        pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
        with open(os.path.join(destination_compose_dir, os.path.basename(pod_file_path)), "w") as output_file:
            yaml.dump(parsed_pod_file, output_file)
        # Copy the config files for the pod, if any
        source_config_dir = data_dir.joinpath("config", pod)
        if os.path.exists(source_config_dir):
            copytree(source_config_dir, os.path.join(deployment_dir, "config", pod))


@@ -25,7 +25,7 @@ import click
import importlib.resources
from pathlib import Path
import yaml
from .util import _log, include_exclude_check
from .util import include_exclude_check
class GitProgress(git.RemoteProgress):
@@ -88,7 +88,7 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
# TODO: fix the messy arg list here
def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
    if verbose:
        _log(f"Processing repo: {fully_qualified_repo}")
        print(f"Processing repo: {fully_qualified_repo}")
    repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
    git_ssh_prefix = f"git@{repo_host}:"
    git_http_prefix = f"https://{repo_host}/"
@@ -100,40 +100,40 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
    if not quiet:
        present_text = f"already exists active {'branch' if is_branch else 'tag'}: {current_repo_branch_or_tag}" if is_present \
            else 'Needs to be fetched'
        _log(f"Checking: {full_filesystem_repo_path}: {present_text}")
        print(f"Checking: {full_filesystem_repo_path}: {present_text}")
    # Quick check that it's actually a repo
    if is_present:
        if not is_git_repo(full_filesystem_repo_path):
            _log(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
            print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
            sys.exit(1)
        else:
            if pull:
                if verbose:
                    _log(f"Running git pull for {full_filesystem_repo_path}")
                    print(f"Running git pull for {full_filesystem_repo_path}")
                if not check_only:
                    if is_branch:
                        git_repo = git.Repo(full_filesystem_repo_path)
                        origin = git_repo.remotes.origin
                        origin.pull(progress=None if quiet else GitProgress())
                    else:
                        _log("skipping pull because this repo checked out a tag")
                        print("skipping pull because this repo checked out a tag")
                else:
                    _log("(git pull skipped)")
                    print("(git pull skipped)")
    if not is_present:
        # Clone
        if verbose:
            _log(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
            print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
        if not dry_run:
            git.Repo.clone_from(full_github_repo_path,
                                full_filesystem_repo_path,
                                progress=None if quiet else GitProgress())
        else:
            _log("(git clone skipped)")
            print("(git clone skipped)")
    # Checkout the requested branch, if one was specified
    branch_to_checkout = None
    if branches_array:
        # Find the current repo in the branches list
        _log("Checking")
        print("Checking")
        for repo_branch in branches_array:
            repo_branch_tuple = repo_branch.split(" ")
            if repo_branch_tuple[0] == branch_strip(fully_qualified_repo):
@@ -145,13 +145,13 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
    if branch_to_checkout:
        if current_repo_branch_or_tag is None or (current_repo_branch_or_tag and (current_repo_branch_or_tag != branch_to_checkout)):
            if not quiet:
                _log(f"switching to branch {branch_to_checkout} in repo {repo_path}")
                print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
            git_repo = git.Repo(full_filesystem_repo_path)
            # git checkout works for both branches and tags
            git_repo.git.checkout(branch_to_checkout)
        else:
            if verbose:
                _log(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")
                print(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")
def parse_branches(branches_string):
@@ -161,7 +161,7 @@ def parse_branches(branches_string):
    for branch_directive in branches_directives:
        split_directive = branch_directive.split("@")
        if len(split_directive) != 2:
            _log(f"Error: branch specified is not valid: {branch_directive}")
            print(f"Error: branch specified is not valid: {branch_directive}")
            sys.exit(1)
        result_array.append(f"{split_directive[0]} {split_directive[1]}")
    return result_array
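For reference, the directive shape parse_branches consumes, on hypothetical inputs (assuming comma-separated "repo@branch" directives, as the surrounding code suggests):

parse_branches("cerc-io/laconicd@main,cerc-io/go-ethereum@v1.11.6")
# -> ["cerc-io/laconicd main", "cerc-io/go-ethereum v1.11.6"]
# a directive without exactly one "@" is reported as invalid and the command exits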
@@ -191,39 +191,39 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
    # TODO: branches file needs to be re-worked in the context of stacks
    if branches_file:
        if branches:
            _log("Error: can't specify both --branches and --branches-file")
            print("Error: can't specify both --branches and --branches-file")
            sys.exit(1)
        else:
            if verbose:
                _log(f"loading branches from: {branches_file}")
                print(f"loading branches from: {branches_file}")
            with open(branches_file) as branches_file_open:
                branches_array = branches_file_open.read().splitlines()
    _log(f"branches: {branches}")
    print(f"branches: {branches}")
    if branches:
        if branches_file:
            _log("Error: can't specify both --branches and --branches-file")
            print("Error: can't specify both --branches and --branches-file")
            sys.exit(1)
        else:
            branches_array = parse_branches(branches)
    if branches_array and verbose:
        _log(f"Branches are: {branches_array}")
        print(f"Branches are: {branches_array}")
    local_stack = ctx.obj.local_stack
    if local_stack:
        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
        _log(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
        print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
    else:
        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
    if not quiet:
        _log(f"Dev Root is: {dev_root_path}")
        print(f"Dev Root is: {dev_root_path}")
    if not os.path.isdir(dev_root_path):
        if not quiet:
            _log('Dev root directory doesn\'t exist, creating')
            print('Dev root directory doesn\'t exist, creating')
        os.makedirs(dev_root_path)
    # See: https://stackoverflow.com/a/20885799/1701505
@@ -244,9 +244,9 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
        repos_in_scope = all_repos
    if verbose:
        _log(f"Repos: {repos_in_scope}")
        print(f"Repos: {repos_in_scope}")
    if stack:
        _log(f"Stack: {stack}")
        print(f"Stack: {stack}")
    repos = []
    for repo in repos_in_scope:
@@ -254,11 +254,11 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
            repos.append(repo)
        else:
            if verbose:
                _log(f"Excluding: {repo}")
                print(f"Excluding: {repo}")
    for repo in repos:
        try:
            process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, repo)
        except git.exc.GitCommandError as error:
            _log(f"\n******* git command returned error exit status:\n{error}")
            print(f"\n******* git command returned error exit status:\n{error}")
            sys.exit(1)
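The fully-qualified repo strings flowing through this command look like host/org/repo, optionally suffixed with @branch (host_and_path_for_repo and branch_strip are defined earlier in this file); a hypothetical split, for orientation only:

fully_qualified_repo = "github.com/cerc-io/laconicd@main"  # example directive
repo, _, branch = fully_qualified_repo.partition("@")
repo_host, _, repo_path = repo.partition("/")
print(repo_host, repo_path, branch)  # github.com cerc-io/laconicd main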


@@ -19,10 +19,6 @@ import yaml
from pathlib import Path
def _log(*args):
    print(*args, file=sys.stderr)
def include_exclude_check(s, include, exclude):
    if include is None and exclude is None:
        return True
@@ -34,16 +30,10 @@ def include_exclude_check(s, include, exclude):
    return s not in exclude_list
def get_stack_file_path(stack):
def get_parsed_stack_config(stack):
    # In order to be compatible with Python 3.8 we need to use this hack to get the path:
    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
    stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
    return stack_file_path
# Caller can pass either the name of a stack, or a path to a stack file
def get_parsed_stack_config(stack):
    stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_file_path(stack)
    try:
        with stack_file_path:
            stack_config = yaml.safe_load(open(stack_file_path, "r"))
@@ -58,27 +48,3 @@ def get_parsed_stack_config(stack):
        print(f"Error: stack: {stack} does not exist")
        print(f"Exiting, error: {error}")
        sys.exit(1)
def get_parsed_deployment_spec(spec_file):
    spec_file_path = Path(spec_file)
    try:
        with spec_file_path:
            deploy_spec = yaml.safe_load(open(spec_file_path, "r"))
            return deploy_spec
    except FileNotFoundError as error:
        # We try here to generate a useful diagnostic error
        print(f"Error: spec file: {spec_file_path} does not exist")
        print(f"Exiting, error: {error}")
        sys.exit(1)
# TODO: this is fragile wrt the subcommand depth
# See also: https://github.com/pallets/click/issues/108
def global_options(ctx):
    return ctx.parent.parent.obj
# TODO: hack
def global_options2(ctx):
    return ctx.parent.obj

cli.py

@@ -19,9 +19,8 @@ from dataclasses import dataclass
from app import setup_repositories
from app import build_containers
from app import build_npms
from app import deploy
from app import deploy_system
from app import version
from app import deployment
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@@ -55,7 +54,6 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
cli.add_command(setup_repositories.command, "setup-repositories")
cli.add_command(build_containers.command, "build-containers")
cli.add_command(build_npms.command, "build-npms")
cli.add_command(deploy.command, "deploy")  # deploy is an alias for deploy-system
cli.add_command(deploy_system.command, "deploy")  # deploy is an alias for deploy-system
cli.add_command(deploy.command, "deploy-system")
cli.add_command(deploy_system.command, "deploy-system")
cli.add_command(deployment.command, "deployment")
cli.add_command(version.command, "version")


@@ -4,4 +4,3 @@
tqdm>=4.64.0
python-on-whales>=0.58.0
click>=8.1.3
pyyaml>=6.0
ruamel.yaml>=0.17.32