Update fixturenet eth test #506

Merged
telackey merged 54 commits from dboreham/update-fixturenet-eth-test into main 2023-08-17 19:24:07 +00:00
18 changed files with 328 additions and 162 deletions
Showing only changes of commit 6ff81b2049

View File

@@ -14,12 +14,14 @@ services:
CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
CERC_BUILD_DIR: "@cerc-io/mobymask-ui/build"
CERC_RELEASE: "v0.1.5"
CERC_USE_NPM: true
CERC_CONFIG_FILE: "src/config.json"
working_dir: /scripts
command: ["sh", "mobymask-app-start.sh"]
volumes:
- ../config/network/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
- ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json
- peers_ids:/peers
- mobymask_deployment:/server
ports:
@@ -46,12 +48,14 @@ services:
CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
CERC_BUILD_DIR: "@cerc-io/mobymask-ui-lxdao/build"
CERC_RELEASE: "v0.1.5-lxdao-0.1.1"
CERC_USE_NPM: false
CERC_CONFIG_FILE: "src/utils/config.json"
working_dir: /scripts
command: ["sh", "mobymask-app-start.sh"]
volumes:
- ../config/network/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
- ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json
- peers_ids:/peers
- mobymask_deployment:/server
ports:

View File

@@ -0,0 +1,7 @@
{
"name": "MobyMask",
"relayNodes": [],
"peer": {
"enableDebugInfo": true
}
}

View File

@@ -33,11 +33,23 @@ else
echo "Taking deployed contract details from env"
fi
# Use yq to create config.yml with environment variables
yq -n ".address = env(CERC_DEPLOYED_CONTRACT)" > /config/config.yml
yq ".watcherUrl = env(CERC_APP_WATCHER_URL)" -i /config/config.yml
yq ".chainId = env(CERC_CHAIN_ID)" -i /config/config.yml
yq ".relayNodes = strenv(CERC_RELAY_NODES)" -i /config/config.yml
yq ".denyMultiaddrs = strenv(CERC_DENY_MULTIADDRS)" -i /config/config.yml
cd /app
git checkout $CERC_RELEASE
/scripts/start-serving-app.sh
# Export config values in a json file
jq --arg address "$CERC_DEPLOYED_CONTRACT" \
--argjson chainId "$CERC_CHAIN_ID" \
--argjson relayNodes "$CERC_RELAY_NODES" \
--argjson denyMultiaddrs "$CERC_DENY_MULTIADDRS" \
'.address = $address | .chainId = $chainId | .relayNodes = $relayNodes | .peer.denyMultiaddrs = $denyMultiaddrs' \
/app/src/mobymask-app-config.json > /app/${CERC_CONFIG_FILE}
if [ "${CERC_USE_NPM}" = "true" ]; then
npm install
REACT_APP_WATCHER_URI="$CERC_APP_WATCHER_URL/graphql" npm run build
else
yarn install
REACT_APP_WATCHER_URI="$CERC_APP_WATCHER_URL/graphql" yarn build
fi
http-server -p 80 /app/build
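For illustration only: a minimal Python sketch of the merge the jq invocation above performs, with hypothetical stand-ins for the CERC_* environment variables (the script itself uses jq, and the real output path comes from CERC_CONFIG_FILE):

import json

# Hypothetical example values (the real ones come from CERC_* env vars)
address = "0x0000000000000000000000000000000000000000"
chain_id = 1212
relay_nodes = []
deny_multiaddrs = []

# Start from the checked-in template, mobymask-app-config.json (shown above)
with open("mobymask-app-config.json") as f:
    config = json.load(f)

# Same assignments as the jq filter:
#   .address = $address | .chainId = $chainId
#   | .relayNodes = $relayNodes | .peer.denyMultiaddrs = $denyMultiaddrs
config["address"] = address
config["chainId"] = chain_id
config["relayNodes"] = relay_nodes
config.setdefault("peer", {})["denyMultiaddrs"] = deny_multiaddrs

with open("config.json", "w") as f:
    json.dump(config, f, indent=2)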

View File

@@ -6,9 +6,6 @@ FROM node:${VARIANT}
ARG USERNAME=node
ARG NPM_GLOBAL=/usr/local/share/npm-global
# This container pulls npm package from a registry configured via env var
ARG CERC_NPM_REGISTRY_URL
# Add NPM global to PATH.
ENV PATH=${NPM_GLOBAL}/bin:${PATH}
# Prevents npm from printing version warnings
@@ -33,28 +30,14 @@ RUN \
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install --no-install-recommends jq bash netcat
# We do this to get a yq binary from the published container, for the correct architecture we're building here
COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq
RUN mkdir -p /scripts
COPY ./apply-webapp-config.sh /scripts
COPY ./start-serving-app.sh /scripts
# Configure the local npm registry
RUN npm config set @cerc-io:registry ${CERC_NPM_REGISTRY_URL}
RUN mkdir -p /config
# Install simple web server for now (use nginx perhaps later)
RUN yarn global add http-server
# Globally install both versions of the payload web app package
# Install old version of MobyMask web app
RUN yarn global add @cerc-io/mobymask-ui@0.1.4
# Install the LXDAO version of MobyMask web app
RUN yarn global add @cerc-io/mobymask-ui-lxdao@npm:@cerc-io/mobymask-ui@0.1.4-lxdao-0.1.1
WORKDIR /app
COPY . .
RUN npm install
# Expose port for http
EXPOSE 80
CMD ["/scripts/start-serving-app.sh"]

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
if [[ $# -ne 3 ]]; then
echo "Illegal number of parameters" >&2
exit 1
fi
config_file_name=$1
webapp_files_dir=$2
config_prefix=$3
if ! [[ -f ${config_file_name} ]]; then
echo "Config file ${config_file_name} does not exist" >&2
exit 1
fi
if ! [[ -d ${webapp_files_dir} ]]; then
echo "Webapp directory ${webapp_files_dir} does not exist" >&2
exit 1
fi
# First some magic using sed to translate our yaml config file into an array of key value pairs like:
# ${config_prefix}<path-through-objects>=<value>
# sed "s/'//g" is used to remove single quote for relayNodes value
readarray -t config_kv_pair_array < <( sed -E 's/([^:]+):\s*(.*)/\1=\2/g' ${config_file_name} | sed "s/'//g" | sed "s/^/${config_prefix}_/" )
declare -p config_kv_pair_array
# Then iterate over that kv array making the template substitution in our web app files
for kv_pair_string in "${config_kv_pair_array[@]}"
do
kv_pair=(${kv_pair_string//=/ })
template_string_to_replace=${kv_pair[0]}
template_value_to_substitute=${kv_pair[1]}
# Run find and sed to do the substitution of one variable over all files
# See: https://stackoverflow.com/a/21479607/1701505
echo "Substituting: ${template_string_to_replace} = ${template_value_to_substitute}"
# TODO: Pass keys to be replaced without double quotes
if [[ "$template_string_to_replace" =~ ^${config_prefix}_(relayNodes|chainId|denyMultiaddrs)$ ]]; then
find ${webapp_files_dir} -type f -exec sed -i 's#"'"${template_string_to_replace}"'"#'"${template_value_to_substitute}"'#g' {} +
else
# Note: we do not escape our strings, on the expectation that they do not contain the '#' char.
find ${webapp_files_dir} -type f -exec sed -i 's#'${template_string_to_replace}'#'${template_value_to_substitute}'#g' {} +
fi
done
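Since this script is deleted in this commit, a short Python sketch of what its sed pipeline did, on a hypothetical one-level YAML input, may help reviewers:

import re

# Hypothetical config like the removed /config/config.yml
yaml_text = "address: 0x1234\nrelayNodes: '[]'\nchainId: 1212"
config_prefix = "MOBYMASK_HOSTED_CONFIG"

config_kv_pair_array = []
for line in yaml_text.splitlines():
    # sed -E 's/([^:]+):\s*(.*)/\1=\2/g' turns "key: value" into "key=value"
    pair = re.sub(r"([^:]+):\s*(.*)", r"\1=\2", line)
    # sed "s/'//g" strips the single quotes around the relayNodes value
    pair = pair.replace("'", "")
    # sed "s/^/${config_prefix}_/" prepends the template prefix
    config_kv_pair_array.append(f"{config_prefix}_{pair}")

print(config_kv_pair_array)
# ['MOBYMASK_HOSTED_CONFIG_address=0x1234',
#  'MOBYMASK_HOSTED_CONFIG_relayNodes=[]',
#  'MOBYMASK_HOSTED_CONFIG_chainId=1212']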

View File

@@ -8,5 +8,4 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
CERC_NPM_REGISTRY_URL="https://git.vdb.to/api/packages/cerc-io/npm/"
docker build -t cerc/mobymask-ui:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile \
--build-arg CERC_NPM_REGISTRY_URL ${SCRIPT_DIR}
docker build -t cerc/mobymask-ui:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile ${CERC_REPO_BASE_DIR}/mobymask-ui

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# TODO: Don't hard wire this:
webapp_files_dir="/usr/local/share/.config/yarn/global/node_modules/${CERC_BUILD_DIR}"
/scripts/apply-webapp-config.sh /config/config.yml ${webapp_files_dir} MOBYMASK_HOSTED_CONFIG
http-server -p 80 ${webapp_files_dir}

View File

@@ -13,9 +13,10 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from dataclasses import dataclass
from app.util import get_yaml
from app.deploy_types import DeployCommandContext, DeploymentContext
from app.stack_state import State
from app.deploy_util import VolumeMapping, run_container_command
default_spec_file_content = """config:
node_moniker: my-node-name
@@ -25,38 +26,26 @@ default_spec_file_content = """config:
init_help_text = """Add helpful text here on setting config variables.
"""
@dataclass
class VolumeMapping:
host_path: str
container_path: str
# In order to make this work, we need the ability to run the stack
# In theory we can do this the same way as we would run deploy up
def run_container_command(ctx, container, command, mounts):
deploy_context = ctx.obj
pass
def setup(ctx):
def setup(command_context: DeployCommandContext):
node_moniker = "dbdb-node"
chain_id = "laconic_81337-1"
mounts = [
VolumeMapping("./path", "~/.laconicd")
]
output, status = run_container_command(ctx, "laconicd", f"laconicd init {node_moniker} --chain-id {chain_id}", mounts)
output, status = run_container_command(command_context.cluster_context, "laconicd", f"laconicd init {node_moniker} --chain-id {chain_id}", mounts)
def init(command_context):
def init(command_context: DeployCommandContext):
print(init_help_text)
yaml = get_yaml()
return yaml.load(default_spec_file_content)
def get_state(command_context):
def get_state(command_context: DeployCommandContext):
print("Here we get state")
return State.CONFIGURED
def change_state(command_context):
def change_state(command_context: DeployCommandContext):
pass

View File

@@ -7,8 +7,9 @@ repos:
- github.com/ethereum-optimism/optimism@v1.0.4
- github.com/ethereum-optimism/op-geth@v1.101105.2
- github.com/cerc-io/watcher-ts@v0.2.43
- github.com/cerc-io/mobymask-v2-watcher-ts@v0.1.1
- github.com/cerc-io/mobymask-v2-watcher-ts@v0.1.2
- github.com/cerc-io/MobyMask@v0.1.2
- github.com/cerc-io/mobymask-ui
containers:
- cerc/go-ethereum
- cerc/lighthouse

View File

@@ -0,0 +1,61 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from app.util import get_yaml
from app.deploy_types import DeployCommandContext, DeploymentContext
from app.stack_state import State
from app.deploy_util import VolumeMapping, run_container_command
import os
from pathlib import Path
default_spec_file_content = """config:
config_variable: test-value
"""
init_help_text = """Add helpful text here on setting config variables.
"""
# Output a known string to a known file in the bind-mounted directory ./container-output-dir
# for test purposes -- the test checks that the file was written.
def setup(command_context: DeployCommandContext, extra_args):
host_directory = "./container-output-dir"
host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
host_directory_absolute.mkdir(parents=True, exist_ok=True)
mounts = [
VolumeMapping(host_directory_absolute, "/data")
]
output, status = run_container_command(command_context, "test", "echo output-data > /data/output-file && echo success", mounts)
def init(command_context: DeployCommandContext):
print(init_help_text)
yaml = get_yaml()
return yaml.load(default_spec_file_content)
def create(command_context: DeployCommandContext):
data = "create-command-output-data"
output_file_path = command_context.deployment_dir.joinpath("create-file")
with open(output_file_path, 'w+') as output_file:
output_file.write(data)
def get_state(command_context: DeployCommandContext):
print("Here we get state")
return State.CONFIGURED
def change_state(command_context: DeployCommandContext):
pass

View File

@@ -27,17 +27,12 @@ from python_on_whales import DockerClient, DockerException
import click
from pathlib import Path
from app.util import include_exclude_check, get_parsed_stack_config, global_options2
from app.deploy_types import ClusterContext, DeployCommandContext
from app.deployment_create import create as deployment_create
from app.deployment_create import init as deployment_init
from app.deployment_create import setup as deployment_setup
class DeployCommandContext(object):
def __init__(self, cluster_context, docker):
self.cluster_context = cluster_context
self.docker = docker
@click.group()
@click.option("--include", help="only start these components")
@click.option("--exclude", help="don\'t start these components")
@@ -58,7 +53,7 @@ def create_deploy_context(global_context, stack, include, exclude, cluster, env_
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
return DeployCommandContext(cluster_context, docker)
return DeployCommandContext(stack, cluster_context, docker)
def up_operation(ctx, services_list, stay_attached=False):
@@ -149,14 +144,16 @@ def exec_operation(ctx, extra_args):
print(f"container command returned error exit status")
def logs_operation(ctx, extra_args):
def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if global_context.verbose:
print("Running compose logs")
logs_output = ctx.obj.docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
print(logs_output)
services_list = extra_args_list if extra_args_list is not None else []
logs_stream = ctx.obj.docker.compose.logs(services=services_list, tail=tail, follow=follow, stream=True)
for stream_type, stream_content in logs_stream:
print(stream_content.decode("utf-8"), end="")
@command.command()
@@ -197,10 +194,12 @@ def exec(ctx, extra_args):
@command.command()
@click.option("--tail", "-n", default=None, help="number of lines to display")
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
logs_operation(ctx, extra_args)
def logs(ctx, tail, follow, extra_args):
logs_operation(ctx, tail, follow, extra_args)
def get_stack_status(ctx, stack):
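For reference, a standalone sketch of the python-on-whales streaming pattern now used in logs_operation above; the compose file name and tail value here are hypothetical:

from python_on_whales import DockerClient

docker = DockerClient(compose_files=["docker-compose-test.yml"])
# With stream=True, compose.logs yields (stream_type, bytes) tuples,
# e.g. ("stdout", b"..."), instead of one buffered string
logs_stream = docker.compose.logs(services=[], tail="10", follow=False, stream=True)
for stream_type, stream_content in logs_stream:
    print(stream_content.decode("utf-8"), end="")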
@@ -313,17 +312,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
if ctx.verbose:
print(f"files: {compose_files}")
return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
class cluster_context:
def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands, config, env_file) -> None:
self.cluster = cluster
self.compose_files = compose_files
self.pre_start_commands = pre_start_commands
self.post_start_commands = post_start_commands
self.config = config
self.env_file = env_file
return ClusterContext(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
def _convert_to_new_format(old_pod_array):

app/deploy_types.py (new file, 47 lines)
View File

@@ -0,0 +1,47 @@
# Copyright © 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import List
from dataclasses import dataclass
from pathlib import Path
from python_on_whales import DockerClient
@dataclass
class ClusterContext:
cluster: str
compose_files: List[str]
pre_start_commands: List[str]
post_start_commands: List[str]
config: str
env_file: str
@dataclass
class DeployCommandContext:
stack: str
cluster_context: ClusterContext
docker: DockerClient
@dataclass
class DeploymentContext:
deployment_dir: Path
command_context: DeployCommandContext
@dataclass
class VolumeMapping:
host_path: str
container_path: str
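A minimal sketch of how these new types nest, using placeholder values that are not from the PR (docker would normally be a python_on_whales.DockerClient; None stands in here):

from pathlib import Path
from app.deploy_types import ClusterContext, DeployCommandContext, DeploymentContext

cluster_context = ClusterContext(
    cluster="test-cluster",
    compose_files=["docker-compose-test.yml"],
    pre_start_commands=[],
    post_start_commands=[],
    config="",
    env_file="",
)
command_context = DeployCommandContext(
    stack="test", cluster_context=cluster_context, docker=None)
deployment_context = DeploymentContext(
    deployment_dir=Path("deployment-001"), command_context=command_context)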

app/deploy_util.py (new file, 56 lines)
View File

@@ -0,0 +1,56 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from typing import List
from dataclasses import dataclass
from app.deploy_types import DeployCommandContext, VolumeMapping
from app.util import get_parsed_stack_config, get_yaml, get_compose_file_dir
def _container_image_from_service(stack:str, service: str):
# Parse the compose files looking for the image name of the specified service
image_name = None
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "services" in parsed_pod_file:
services = parsed_pod_file["services"]
if service in services:
service_definition = services[service]
if "image" in service_definition:
image_name = service_definition["image"]
return image_name
def _volumes_to_docker(mounts: List[VolumeMapping]):
# Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")]
result = []
for mount in mounts:
docker_volume = (mount.host_path, mount.container_path)
result.append(docker_volume)
return result
def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]):
docker = ctx.docker
container_image = _container_image_from_service(ctx.stack, service)
docker_volumes = _volumes_to_docker(mounts)
docker_output = docker.run(container_image, ["-c", command], entrypoint="bash", volumes=docker_volumes)
# There doesn't seem to be a way to get an exit code from docker.run()
return (docker_output, 0)
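A hedged sketch of how a stack's commands.py consumes this helper (mirroring the test stack above); note that status is currently always 0 because docker.run() does not expose an exit code:

from app.deploy_types import DeployCommandContext, VolumeMapping
from app.deploy_util import run_container_command

def setup(command_context: DeployCommandContext, extra_args):
    # Bind-mount a host path and run a one-shot command in the "test" service's image
    mounts = [VolumeMapping("/tmp/data", "/data")]
    output, status = run_container_command(
        command_context, "test", "echo output-data > /data/output-file", mounts)
    print(output)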

View File

@@ -119,11 +119,13 @@ def exec(ctx, extra_args):
@command.command()
@click.option("--tail", "-n", default=None, help="number of lines to display")
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
def logs(ctx, tail, follow, extra_args):
ctx.obj = make_deploy_context(ctx)
logs_operation(ctx, extra_args)
logs_operation(ctx, tail, follow, extra_args)
@command.command()

View File

@@ -20,26 +20,14 @@ import os
from pathlib import Path
from shutil import copyfile, copytree
import sys
from app.util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml
@dataclass
class DeploymentContext:
stack: str
deployment_dir: Path
from app.util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, get_compose_file_dir
from app.deploy_types import DeploymentContext, DeployCommandContext
def _make_default_deployment_dir():
return "deployment-001"
def _get_compose_file_dir():
# TODO: refactor to use common code with deploy command
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
data_dir = Path(__file__).absolute().parent.joinpath("data")
source_compose_dir = data_dir.joinpath("compose")
return source_compose_dir
def _get_named_volumes(stack):
# Parse the compose files looking for named volumes
named_volumes = []
@@ -47,7 +35,7 @@ def _get_named_volumes(stack):
pods = parsed_stack["pods"]
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "volumes" in parsed_pod_file:
volumes = parsed_pod_file["volumes"]
@@ -94,27 +82,27 @@ def _fixup_pod_file(pod, spec, compose_dir):
pod["volumes"][volume] = new_volume_spec
def call_stack_deploy_init(stack):
def call_stack_deploy_init(deploy_command_context):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(stack).parent.joinpath("deploy", "commands.py")
python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.init(None)
return imported_stack.init(deploy_command_context)
# TODO: fold this with function above
def call_stack_deploy_setup(stack):
def call_stack_deploy_setup(deploy_command_context, extra_args):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(stack).parent.joinpath("deploy", "commands.py")
python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.setup(None)
return imported_stack.setup(deploy_command_context, extra_args)
# TODO: fold this with function above
@@ -122,7 +110,7 @@ def call_stack_deploy_create(deployment_context):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deployment_context.stack).parent.joinpath("deploy", "commands.py")
python_file_path = get_stack_file_path(deployment_context.command_context.stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
@@ -154,7 +142,7 @@ def init(ctx, output):
yaml = get_yaml()
stack = global_options(ctx).stack
verbose = global_options(ctx).verbose
default_spec_file_content = call_stack_deploy_init(stack)
default_spec_file_content = call_stack_deploy_init(ctx.obj)
spec_file_content = {"stack": stack}
if default_spec_file_content:
spec_file_content.update(default_spec_file_content)
@@ -198,7 +186,7 @@ def create(ctx, spec_file, deployment_dir):
data_dir = Path(__file__).absolute().parent.joinpath("data")
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
if global_options(ctx).debug:
@@ -217,7 +205,11 @@ def create(ctx, spec_file, deployment_dir):
if not os.path.exists(destination_config_dir):
copytree(source_config_dir, destination_config_dir)
# Delegate to the stack's Python code
deployment_context = DeploymentContext(stack_name, Path(deployment_dir))
# The deploy create command doesn't require a --stack argument so we need to insert the
# stack member here.
deployment_command_context = ctx.obj
deployment_command_context.stack = stack_name
deployment_context = DeploymentContext(Path(deployment_dir), deployment_command_context)
call_stack_deploy_create(deployment_context)
@@ -227,7 +219,7 @@ def create(ctx, spec_file, deployment_dir):
@click.option("--initialize-network", is_flag=True, default=False, help="Help goes here")
@click.option("--join-network", is_flag=True, default=False, help="Help goes here")
@click.option("--create-network", is_flag=True, default=False, help="Help goes here")
@click.argument('extra_args', nargs=-1)
@click.pass_context
def setup(ctx, node_moniker, key_name, initialize_network, join_network, create_network):
stack = global_options(ctx).stack
call_stack_deploy_setup(stack)
def setup(ctx, node_moniker, key_name, initialize_network, join_network, create_network, extra_args):
call_stack_deploy_setup(ctx.obj, extra_args)

View File

@@ -56,6 +56,14 @@ def get_parsed_stack_config(stack):
sys.exit(1)
def get_compose_file_dir():
# TODO: refactor to use common code with deploy command
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
data_dir = Path(__file__).absolute().parent.joinpath("data")
source_compose_dir = data_dir.joinpath("compose")
return source_compose_dir
def get_parsed_deployment_spec(spec_file):
spec_file_path = Path(spec_file)
try:

View File

@@ -1,7 +1,8 @@
python-decouple>=3.6
GitPython>=3.1.27
tqdm>=4.64.0
python-on-whales>=0.58.0
click>=8.1.3
pyyaml>=6.0
python-decouple>=3.8
GitPython>=3.1.32
tqdm>=4.65.0
python-on-whales>=0.64.0
click>=8.1.6
PyYAML>=6.0.1
ruamel.yaml>=0.17.32
pydantic==1.10.9

View File

@@ -23,6 +23,27 @@ mkdir -p $CERC_REPO_BASE_DIR
# with and without volume removal
$TEST_TARGET_SO --stack test setup-repositories
$TEST_TARGET_SO --stack test build-containers
# Test deploy command execution
$TEST_TARGET_SO --stack test deploy setup $CERC_REPO_BASE_DIR
# Check that we now have the expected output directory
container_output_dir=$CERC_REPO_BASE_DIR/container-output-dir
if [ ! -d "$container_output_dir" ]; then
echo "deploy setup test: output directory not present"
echo "deploy setup test: FAILED"
exit 1
fi
if [ ! -f "$container_output_dir/output-file" ]; then
echo "deploy setup test: output file not present"
echo "deploy setup test: FAILED"
exit 1
fi
output_file_content=$(<$container_output_dir/output-file)
if [ ! "$output_file_content" == "output-data" ]; then
echo "deploy setup test: output file contents not correct"
echo "deploy setup test: FAILED"
exit 1
fi
# Check that we now have the expected output file
$TEST_TARGET_SO --stack test deploy up
# Test deploy port command
deploy_port_output=$( $TEST_TARGET_SO --stack test deploy port test 80 )
@@ -53,4 +74,49 @@ else
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
# Basic test of creating a deployment
test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
$TEST_TARGET_SO --stack test deploy init --output $test_deployment_spec
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
echo "deploy init test: spec file not present"
echo "deploy init test: FAILED"
exit 1
fi
echo "deploy init test: passed"
$TEST_TARGET_SO deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
echo "deploy create test: deployment directory not present"
echo "deploy create test: FAILED"
exit 1
fi
echo "deploy create test: passed"
# Check the file written by the create command in the stack now exists
if [ ! -f "$test_deployment_dir/create-file" ]; then
echo "deploy create test: create output file not present"
echo "deploy create test: FAILED"
exit 1
fi
# And has the right content
create_file_content=$(<$test_deployment_dir/create-file)
if [ ! "$create_file_content" == "create-command-output-data" ]; then
echo "deploy create test: create output file contents not correct"
echo "deploy create test: FAILED"
exit 1
fi
echo "deploy create output file test: passed"
# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
# Check logs command works
log_output_2=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
echo "deployment logs test: passed"
else
echo "deployment logs test: FAILED"
exit 1
fi
# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"