From 789b2dd3a7ed36de83f6baa6cb75a2bde41a2897 Mon Sep 17 00:00:00 2001
From: Roy Crihfield
Date: Fri, 17 Oct 2025 23:21:23 +0800
Subject: [PATCH] Add --update option to deploy create

To allow updating an existing deployment

- Check the deployment dir exists when updating
- Write to temp dir, then safely copy tree
- Don't overwrite data dir or config.env
---
 docs/cli.md                                        |  68 ++++
 .../data/compose/docker-compose-test.yml           |   2 +
 stack_orchestrator/data/config/test/script.sh      |   3 +
 .../data/config/test/settings.env                  |   1 +
 .../cerc-test-container/Dockerfile                 |   7 +-
 .../cerc-test-container/run.sh                     |  12 +-
 .../deploy/deployment_create.py                    | 348 ++++++++++++------
 .../deploy/webapp/deploy_webapp.py                 |   2 +-
 tests/database/run-test.sh                         |   2 +-
 tests/deploy/run-deploy-test.sh                    |  97 +++++
 tests/external-stack/run-test.sh                   |  43 +++
 11 files changed, 463 insertions(+), 122 deletions(-)
 create mode 100644 stack_orchestrator/data/config/test/script.sh
 create mode 100644 stack_orchestrator/data/config/test/settings.env

diff --git a/docs/cli.md b/docs/cli.md
index 1421291e..92cf776a 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -65,3 +65,71 @@ Force full rebuild of packages:
 ```
 $ laconic-so build-npms --include <package-name> --force-rebuild
 ```
+
+## deploy
+
+The `deploy` command group manages persistent deployments. The general workflow is `deploy init` to generate a spec file, then `deploy create` to create a deployment directory from the spec, then runtime commands like `deploy up` and `deploy down`.
+
+### deploy init
+
+Generate a deployment spec file from a stack definition:
+```
+$ laconic-so --stack <stack-name> deploy init --output <spec-file>
+```
+
+Options:
+- `--output` (required): write the spec file here
+- `--config`: provide config variables for the deployment
+- `--config-file`: provide config variables in a file
+- `--kube-config`: provide a config file for a k8s deployment
+- `--image-registry`: provide a container image registry url for this k8s cluster
+- `--map-ports-to-host`: map ports to the host (`any-variable-random`, `localhost-same`, `any-same`, `localhost-fixed-random`, `any-fixed-random`)
+
+### deploy create
+
+Create a deployment directory from a spec file:
+```
+$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <deployment-dir>
+```
+
+Update an existing deployment in place, preserving data volumes and the env file:
+```
+$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <deployment-dir> --update
+```
+
+Options:
+- `--spec-file` (required): spec file to use
+- `--deployment-dir`: target directory for deployment files
+- `--update`: update an existing deployment directory, preserving data volumes. Changed files are backed up with a `.bak` suffix, and the deployment's `config.env` and `deployment.yml` are preserved.
+- `--network-dir`: network configuration supplied in this directory
+- `--initial-peers`: initial set of persistent peers
+
+### deploy up
+
+Start a deployment:
+```
+$ laconic-so deployment --dir <deployment-dir> up
+```
+
+### deploy down
+
+Stop a deployment:
+```
+$ laconic-so deployment --dir <deployment-dir> down
+```
+Use `--delete-volumes` to also remove data volumes.
+
+### deploy ps
+
+Show running services:
+```
+$ laconic-so deployment --dir <deployment-dir> ps
+```
+
+### deploy logs
+
+View service logs:
+```
+$ laconic-so deployment --dir <deployment-dir> logs
+```
+Use `-f` to follow and `-n <num-lines>` to tail.
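
As a worked example of the full update cycle the docs above describe — a minimal sketch assuming the bundled `test` stack used by the tests later in this patch; the spec/deployment paths are illustrative:
```
$ laconic-so --stack test deploy init --output test-spec.yml
$ laconic-so --stack test deploy create --spec-file test-spec.yml --deployment-dir test-deployment
$ laconic-so deployment --dir test-deployment up
# ... later, after editing test-spec.yml ...
$ laconic-so deployment --dir test-deployment down
$ laconic-so --stack test deploy create --spec-file test-spec.yml --deployment-dir test-deployment --update
$ laconic-so deployment --dir test-deployment up
```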
diff --git a/stack_orchestrator/data/compose/docker-compose-test.yml b/stack_orchestrator/data/compose/docker-compose-test.yml
index 7c8e8e95..ae11ca13 100644
--- a/stack_orchestrator/data/compose/docker-compose-test.yml
+++ b/stack_orchestrator/data/compose/docker-compose-test.yml
@@ -8,6 +8,8 @@ services:
       CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
       CERC_TEST_PARAM_3: ${CERC_TEST_PARAM_3:-FAILED}
     volumes:
+      - ../config/test/script.sh:/opt/run.sh
+      - ../config/test/settings.env:/opt/settings.env
       - test-data-bind:/data
       - test-data-auto:/data2
       - test-config:/config:ro
diff --git a/stack_orchestrator/data/config/test/script.sh b/stack_orchestrator/data/config/test/script.sh
new file mode 100644
index 00000000..34a19ab1
--- /dev/null
+++ b/stack_orchestrator/data/config/test/script.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+echo "Hello"
diff --git a/stack_orchestrator/data/config/test/settings.env b/stack_orchestrator/data/config/test/settings.env
new file mode 100644
index 00000000..813f7578
--- /dev/null
+++ b/stack_orchestrator/data/config/test/settings.env
@@ -0,0 +1 @@
+ANSWER=42
diff --git a/stack_orchestrator/data/container-build/cerc-test-container/Dockerfile b/stack_orchestrator/data/container-build/cerc-test-container/Dockerfile
index f4ef5506..46021613 100644
--- a/stack_orchestrator/data/container-build/cerc-test-container/Dockerfile
+++ b/stack_orchestrator/data/container-build/cerc-test-container/Dockerfile
@@ -1,9 +1,6 @@
-FROM ubuntu:latest
+FROM alpine:latest

-RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \
-    apt-get install -y software-properties-common && \
-    apt-get install -y nginx && \
-    apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+RUN apk add --no-cache nginx

 EXPOSE 80
diff --git a/stack_orchestrator/data/container-build/cerc-test-container/run.sh b/stack_orchestrator/data/container-build/cerc-test-container/run.sh
index d06f4df4..b4da05ed 100755
--- a/stack_orchestrator/data/container-build/cerc-test-container/run.sh
+++ b/stack_orchestrator/data/container-build/cerc-test-container/run.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/usr/bin/env sh
 set -e

 if [ -n "$CERC_SCRIPT_DEBUG" ]; then
@@ -8,14 +8,14 @@ fi
 echo "Test container starting"

 DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
-if [[ -n "$DATA_DEVICE" ]]; then
+if [ -n "$DATA_DEVICE" ]; then
    echo "/data: MOUNTED dev=${DATA_DEVICE}"
 else
    echo "/data: not mounted"
 fi

 DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
-if [[ -n "$DATA_DEVICE" ]]; then
+if [ -n "$DATA2_DEVICE" ]; then
    echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
 else
    echo "/data2: not mounted"
@@ -23,7 +23,7 @@ fi

 # Test if the container's filesystem is old (run previously) or new
 for d in /data /data2; do
-    if [[ -f "$d/exists" ]];
+    if [ -f "$d/exists" ];
     then
         TIMESTAMP=`cat $d/exists`
         echo "$d filesystem is old, created: $TIMESTAMP"
@@ -52,7 +52,7 @@ fi
 if [ -d "/config" ]; then
     echo "/config: EXISTS"
     for f in /config/*; do
-        if [[ -f "$f" ]] || [[ -L "$f" ]]; then
+        if [ -f "$f" ] || [ -L "$f" ]; then
             echo "$f:"
             cat "$f"
             echo ""
@@ -64,4 +64,4 @@ else
 fi

 # Run nginx which will block here forever
-/usr/sbin/nginx -g "daemon off;"
+nginx -g "daemon off;"
diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py
index 601f6c77..e7586059 100644
--- a/stack_orchestrator/deploy/deployment_create.py
+++ b/stack_orchestrator/deploy/deployment_create.py
@@ -19,9 +19,12 @@ import os
 from pathlib import Path
 from typing import List
 import random
-from shutil import copy, copyfile, copytree
+from shutil import copy, copyfile, copytree, rmtree
 from secrets import token_hex
 import sys
+import filecmp
+import tempfile
+
 from stack_orchestrator import constants
 from stack_orchestrator.opts import opts
 from stack_orchestrator.util import (
@@ -524,6 +527,12 @@ def _check_volume_definitions(spec):
     "--spec-file", required=True, help="Spec file to use to create this deployment"
 )
 @click.option("--deployment-dir", help="Create deployment files in this directory")
+@click.option(
+    "--update",
+    is_flag=True,
+    default=False,
+    help="Update existing deployment directory, preserving data volumes and env file",
+)
 @click.option(
     "--helm-chart",
     is_flag=True,
@@ -536,13 +545,21 @@ def _check_volume_definitions(spec):
 @click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
 @click.pass_context
 def create(
-    ctx, spec_file, deployment_dir, helm_chart, network_dir, initial_peers, extra_args
+    ctx,
+    spec_file,
+    deployment_dir,
+    update,
+    helm_chart,
+    network_dir,
+    initial_peers,
+    extra_args,
 ):
     deployment_command_context = ctx.obj
     return create_operation(
         deployment_command_context,
         spec_file,
         deployment_dir,
+        update,
         helm_chart,
         network_dir,
         initial_peers,
@@ -556,6 +573,7 @@ def create_operation(
     deployment_command_context,
     spec_file,
     deployment_dir,
+    update=False,
     helm_chart=False,
     network_dir=None,
     initial_peers=None,
@@ -568,23 +586,23 @@ def create_operation(
     stack_name = parsed_spec["stack"]
     deployment_type = parsed_spec[constants.deploy_to_key]
-    stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
-    parsed_stack = get_parsed_stack_config(stack_name)
     if opts.o.debug:
         print(f"parsed spec: {parsed_spec}")
+
     if deployment_dir is None:
         deployment_dir_path = _make_default_deployment_dir()
     else:
         deployment_dir_path = Path(deployment_dir)
-    if deployment_dir_path.exists():
-        error_exit(f"{deployment_dir_path} already exists")
-    os.mkdir(deployment_dir_path)
-    # Copy spec file and the stack file into the deployment dir
-    copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
-    copyfile(stack_file, deployment_dir_path.joinpath(constants.stack_file_name))
-    # Create deployment.yml with cluster-id
-    _create_deployment_file(deployment_dir_path)
+
+    if deployment_dir_path.exists():
+        if not update:
+            error_exit(f"{deployment_dir_path} already exists")
+        if opts.o.debug:
+            print(f"Updating existing deployment at {deployment_dir_path}")
+    else:
+        if update:
+            error_exit(f"--update requires that {deployment_dir_path} already exists")
+        os.mkdir(deployment_dir_path)

     # Branch to Helm chart generation flow if --helm-chart flag is set
     if deployment_type == "k8s" and helm_chart:
@@ -595,104 +613,41 @@ def create_operation(
         generate_helm_chart(stack_name, spec_file, deployment_dir_path)
         return  # Exit early for helm chart generation

-    # Existing deployment flow continues unchanged
-    # Copy any config varibles from the spec file into an env file suitable for compose
-    _write_config_file(
-        spec_file, deployment_dir_path.joinpath(constants.config_file_name)
-    )
-    # Copy any k8s config file into the deployment dir
-    if deployment_type == "k8s":
-        _write_kube_config_file(
-            Path(parsed_spec[constants.kube_config_key]),
-            deployment_dir_path.joinpath(constants.kube_config_filename),
-        )
-    # Copy the pod files into the deployment dir, fixing up content
-    pods = get_pod_list(parsed_stack)
-    destination_compose_dir = deployment_dir_path.joinpath("compose")
-    os.mkdir(destination_compose_dir)
-    destination_pods_dir = deployment_dir_path.joinpath("pods")
-    os.mkdir(destination_pods_dir)
-    yaml = get_yaml()
-    for pod in pods:
-        pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
-        if pod_file_path is None:
-            continue
-        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
-        extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
-        destination_pod_dir = destination_pods_dir.joinpath(pod)
-        os.mkdir(destination_pod_dir)
-        if opts.o.debug:
-            print(f"extra config dirs: {extra_config_dirs}")
-        _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
-        with open(
-            destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w"
-        ) as output_file:
-            yaml.dump(parsed_pod_file, output_file)
-        # Copy the config files for the pod, if any
-        config_dirs = {pod}
-        config_dirs = config_dirs.union(extra_config_dirs)
-        for config_dir in config_dirs:
-            source_config_dir = resolve_config_dir(stack_name, config_dir)
-            if os.path.exists(source_config_dir):
-                destination_config_dir = deployment_dir_path.joinpath(
-                    "config", config_dir
-                )
-                # If the same config dir appears in multiple pods, it may already have
-                # been copied
-                if not os.path.exists(destination_config_dir):
-                    copytree(source_config_dir, destination_config_dir)
-        # Copy the script files for the pod, if any
-        if pod_has_scripts(parsed_stack, pod):
-            destination_script_dir = destination_pod_dir.joinpath("scripts")
-            os.mkdir(destination_script_dir)
-            script_paths = get_pod_script_paths(parsed_stack, pod)
-            _copy_files_to_directory(script_paths, destination_script_dir)
-    if parsed_spec.is_kubernetes_deployment():
-        for configmap in parsed_spec.get_configmaps():
-            source_config_dir = resolve_config_dir(stack_name, configmap)
-            if os.path.exists(source_config_dir):
-                destination_config_dir = deployment_dir_path.joinpath(
-                    "configmaps", configmap
-                )
-                copytree(
-                    source_config_dir, destination_config_dir, dirs_exist_ok=True
-                )
-    else:
-        # TODO: We should probably only do this if the volume is marked :ro.
-        for volume_name, volume_path in parsed_spec.get_volumes().items():
-            source_config_dir = resolve_config_dir(stack_name, volume_name)
-            # Only copy if the source exists and is _not_ empty.
-            if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
-                destination_config_dir = deployment_dir_path.joinpath(volume_path)
-                # Only copy if the destination exists and _is_ empty.
-                if os.path.exists(destination_config_dir) and not os.listdir(
-                    destination_config_dir
-                ):
-                    copytree(
-                        source_config_dir,
-                        destination_config_dir,
-                        dirs_exist_ok=True,
-                    )
+    if update:
+        # Sync mode: write to temp dir, then copy to deployment dir with backups
+        temp_dir = Path(tempfile.mkdtemp(prefix="deployment-sync-"))
+        try:
+            # Write deployment files to temp dir (skip deployment.yml to preserve cluster ID)
+            _write_deployment_files(
+                temp_dir,
+                Path(spec_file),
+                parsed_spec,
+                stack_name,
+                deployment_type,
+                include_deployment_file=False,
+            )

-    # Copy the job files into the deployment dir (for Docker deployments)
-    jobs = get_job_list(parsed_stack)
-    if jobs and not parsed_spec.is_kubernetes_deployment():
-        destination_compose_jobs_dir = deployment_dir_path.joinpath("compose-jobs")
-        os.mkdir(destination_compose_jobs_dir)
-        for job in jobs:
-            job_file_path = get_job_file_path(stack_name, parsed_stack, job)
-            if job_file_path and job_file_path.exists():
-                parsed_job_file = yaml.load(open(job_file_path, "r"))
-                _fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
-                with open(
-                    destination_compose_jobs_dir.joinpath(
-                        "docker-compose-%s.yml" % job
-                    ),
-                    "w",
-                ) as output_file:
-                    yaml.dump(parsed_job_file, output_file)
-                if opts.o.debug:
-                    print(f"Copied job compose file: {job}")
+            # Copy from temp to deployment dir, excluding data volumes and backing up changed files
+            # Exclude data/* to avoid touching user data volumes
+            # Exclude config file to preserve deployment settings (XXX breaks passing config vars
+            # from spec. could warn about this or not exclude...)
+            exclude_patterns = ["data", "data/*", constants.config_file_name]
+            _safe_copy_tree(
+                temp_dir, deployment_dir_path, exclude_patterns=exclude_patterns
+            )
+        finally:
+            # Clean up temp dir
+            rmtree(temp_dir)
+    else:
+        # Normal mode: write directly to deployment dir
+        _write_deployment_files(
+            deployment_dir_path,
+            Path(spec_file),
+            parsed_spec,
+            stack_name,
+            deployment_type,
+            include_deployment_file=True,
+        )

     # Delegate to the stack's Python code
     # The deploy create command doesn't require a --stack argument so we need
@@ -712,6 +667,181 @@ def create_operation(
     )


+def _safe_copy_tree(src: Path, dst: Path, exclude_patterns: List[str] = None):
+    """
+    Recursively copy a directory tree, backing up changed files with .bak suffix.
+
+    :param src: Source directory
+    :param dst: Destination directory
+    :param exclude_patterns: List of path patterns to exclude (relative to src)
+    """
+    if exclude_patterns is None:
+        exclude_patterns = []
+
+    def should_exclude(path: Path) -> bool:
+        """Check if path matches any exclude pattern."""
+        rel_path = path.relative_to(src)
+        for pattern in exclude_patterns:
+            if rel_path.match(pattern):
+                return True
+        return False
+
+    def safe_copy_file(src_file: Path, dst_file: Path):
+        """Copy file, backing up destination if it differs."""
+        if (
+            dst_file.exists()
+            and not dst_file.is_dir()
+            and not filecmp.cmp(src_file, dst_file)
+        ):
+            os.rename(dst_file, f"{dst_file}.bak")
+        copy(src_file, dst_file)
+
+    # Walk the source tree
+    for src_path in src.rglob("*"):
+        if should_exclude(src_path):
+            continue
+
+        rel_path = src_path.relative_to(src)
+        dst_path = dst / rel_path
+
+        if src_path.is_dir():
+            dst_path.mkdir(parents=True, exist_ok=True)
+        else:
+            dst_path.parent.mkdir(parents=True, exist_ok=True)
+            safe_copy_file(src_path, dst_path)
+
+
+def _write_deployment_files(
+    target_dir: Path,
+    spec_file: Path,
+    parsed_spec: Spec,
+    stack_name: str,
+    deployment_type: str,
+    include_deployment_file: bool = True,
+):
+    """
+    Write deployment files to target directory.
+
+    :param target_dir: Directory to write files to
+    :param spec_file: Path to spec file
+    :param parsed_spec: Parsed spec object
+    :param stack_name: Name of stack
+    :param deployment_type: Type of deployment
+    :param include_deployment_file: Whether to create deployment.yml file (skip for update)
+    """
+    stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
+    parsed_stack = get_parsed_stack_config(stack_name)
+
+    # Copy spec file and the stack file into the target dir
+    copyfile(spec_file, target_dir.joinpath(constants.spec_file_name))
+    copyfile(stack_file, target_dir.joinpath(constants.stack_file_name))
+
+    # Create deployment file if requested
+    if include_deployment_file:
+        _create_deployment_file(target_dir)
+
+    # Copy any config variables from the spec file into an env file suitable for compose
+    _write_config_file(spec_file, target_dir.joinpath(constants.config_file_name))
+
+    # Copy any k8s config file into the target dir
+    if deployment_type == "k8s":
+        _write_kube_config_file(
+            Path(parsed_spec[constants.kube_config_key]),
+            target_dir.joinpath(constants.kube_config_filename),
+        )
+
+    # Copy the pod files into the target dir, fixing up content
+    pods = get_pod_list(parsed_stack)
+    destination_compose_dir = target_dir.joinpath("compose")
+    os.makedirs(destination_compose_dir, exist_ok=True)
+    destination_pods_dir = target_dir.joinpath("pods")
+    os.makedirs(destination_pods_dir, exist_ok=True)
+    yaml = get_yaml()
+
+    for pod in pods:
+        pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
+        if pod_file_path is None:
+            continue
+        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
+        extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
+        destination_pod_dir = destination_pods_dir.joinpath(pod)
+        os.makedirs(destination_pod_dir, exist_ok=True)
+        if opts.o.debug:
+            print(f"extra config dirs: {extra_config_dirs}")
+        _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
+        with open(
+            destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w"
+        ) as output_file:
+            yaml.dump(parsed_pod_file, output_file)
+
+        # Copy the config files for the pod, if any
+        config_dirs = {pod}
+        config_dirs = config_dirs.union(extra_config_dirs)
+        for config_dir in config_dirs:
+            source_config_dir = resolve_config_dir(stack_name, config_dir)
+            if os.path.exists(source_config_dir):
+                destination_config_dir = target_dir.joinpath("config", config_dir)
+                copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
+
+        # Copy the script files for the pod, if any
+        if pod_has_scripts(parsed_stack, pod):
+            destination_script_dir = destination_pod_dir.joinpath("scripts")
+            os.makedirs(destination_script_dir, exist_ok=True)
+            script_paths = get_pod_script_paths(parsed_stack, pod)
+            _copy_files_to_directory(script_paths, destination_script_dir)
+
+    if parsed_spec.is_kubernetes_deployment():
+        for configmap in parsed_spec.get_configmaps():
+            source_config_dir = resolve_config_dir(stack_name, configmap)
+            if os.path.exists(source_config_dir):
+                destination_config_dir = target_dir.joinpath(
+                    "configmaps", configmap
+                )
+                copytree(
+                    source_config_dir, destination_config_dir, dirs_exist_ok=True
+                )
+    else:
+        # TODO:
+        # this is odd - looks up a config dir that matches a volume name, then copies it as a mount dir?
+        # AFAICT this is not used by or relevant to any existing stack - roy
+
+        # TODO: We should probably only do this if the volume is marked :ro.
+        for volume_name, volume_path in parsed_spec.get_volumes().items():
+            source_config_dir = resolve_config_dir(stack_name, volume_name)
+            # Only copy if the source exists and is _not_ empty.
+            if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
+                destination_config_dir = target_dir.joinpath(volume_path)
+                # Only copy if the destination exists and _is_ empty.
+                if os.path.exists(destination_config_dir) and not os.listdir(
+                    destination_config_dir
+                ):
+                    copytree(
+                        source_config_dir,
+                        destination_config_dir,
+                        dirs_exist_ok=True,
+                    )
+
+    # Copy the job files into the target dir (for Docker deployments)
+    jobs = get_job_list(parsed_stack)
+    if jobs and not parsed_spec.is_kubernetes_deployment():
+        destination_compose_jobs_dir = target_dir.joinpath("compose-jobs")
+        os.makedirs(destination_compose_jobs_dir, exist_ok=True)
+        for job in jobs:
+            job_file_path = get_job_file_path(stack_name, parsed_stack, job)
+            if job_file_path and job_file_path.exists():
+                parsed_job_file = yaml.load(open(job_file_path, "r"))
+                _fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
+                with open(
+                    destination_compose_jobs_dir.joinpath(
+                        "docker-compose-%s.yml" % job
+                    ),
+                    "w",
+                ) as output_file:
+                    yaml.dump(parsed_job_file, output_file)
+                if opts.o.debug:
+                    print(f"Copied job compose file: {job}")
+
+
 # TODO: this code should be in the stack .py files but
 # we haven't yet figured out how to integrate click across
 # the plugin boundary
diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp.py b/stack_orchestrator/deploy/webapp/deploy_webapp.py
index 6d5ea6c2..6170dbe3 100644
--- a/stack_orchestrator/deploy/webapp/deploy_webapp.py
+++ b/stack_orchestrator/deploy/webapp/deploy_webapp.py
@@ -94,7 +94,7 @@ def create_deployment(
     # Add the TLS and DNS spec
     _fixup_url_spec(spec_file_name, url)
     create_operation(
-        deploy_command_context, spec_file_name, deployment_dir, False, None, None
+        deploy_command_context, spec_file_name, deployment_dir, False, False, None, None
     )
     # Fix up the container tag inside the deployment compose file
     _fixup_container_tag(deployment_dir, image)
diff --git a/tests/database/run-test.sh b/tests/database/run-test.sh
index 405f6d34..2b68cb2c 100755
--- a/tests/database/run-test.sh
+++ b/tests/database/run-test.sh
@@ -86,7 +86,7 @@ fi
 echo "deploy init test: passed"

 # Switch to a full path for the data dir so it gets provisioned as a host bind mounted volume and preserved beyond cluster lifetime
-sed -i "s|^\(\s*db-data:$\)$|\1 ${test_deployment_dir}/data/db-data|" $test_deployment_spec
+sed -i.bak "s|^\(\s*db-data:$\)$|\1 ${test_deployment_dir}/data/db-data|" $test_deployment_spec
 $TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
 # Check the deployment dir exists
diff --git a/tests/deploy/run-deploy-test.sh b/tests/deploy/run-deploy-test.sh
index c18c5cb0..73f26da5 100755
--- a/tests/deploy/run-deploy-test.sh
+++ b/tests/deploy/run-deploy-test.sh
@@ -34,6 +34,7 @@ mkdir -p $CERC_REPO_BASE_DIR
 # with and without volume removal
 $TEST_TARGET_SO --stack test setup-repositories
 $TEST_TARGET_SO --stack test build-containers
+
 # Test deploy command execution
 $TEST_TARGET_SO --stack test deploy setup $CERC_REPO_BASE_DIR
 # Check that we now have the expected output directory
@@ -85,6 +86,7 @@ else
   exit 1
 fi
 $TEST_TARGET_SO --stack test deploy down --delete-volumes
+
 # Basic test of creating a deployment
 test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
 test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
@@ -122,6 +124,101 @@ fi
 echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
 echo "deploy create output file test: passed"

+
+# Test sync functionality: update deployment without destroying data
+# First, create a marker file in the data directory to verify it's preserved
+test_data_marker="$test_deployment_dir/data/test-data-bind/sync-test-marker.txt"
+echo "original-data-$(date +%s)" > "$test_data_marker"
+original_marker_content=$(<$test_data_marker)
+
+# Modify a config file in the deployment to differ from source (to test backup)
+test_config_file="$test_deployment_dir/config/test/settings.env"
+test_config_file_original_content=$(<$test_config_file)
+test_config_file_changed_content="ANSWER=69"
+echo "$test_config_file_changed_content" > "$test_config_file"
+
+# Check a config file that matches the source (to test no backup for unchanged files)
+test_unchanged_config="$test_deployment_dir/config/test/script.sh"
+
+# Modify the spec file to simulate an update
+sed -i.bak 's/CERC_TEST_PARAM_3:/CERC_TEST_PARAM_3: FASTER/' $test_deployment_spec
+
+# Create/modify config.env to test that it isn't overwritten during sync
+config_env_file="$test_deployment_dir/config.env"
+config_env_persistent_content="PERSISTENT_VALUE=should-not-be-overwritten-$(date +%s)"
+echo "$config_env_persistent_content" >> "$config_env_file"
+original_config_env_content=$(<$config_env_file)
+
+# Run sync to update deployment files without destroying data
+$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir --update
+
+# Verify config.env was not overwritten
+synced_config_env_content=$(<$config_env_file)
+if [ "$synced_config_env_content" == "$original_config_env_content" ]; then
+  echo "deployment update test: config.env preserved - passed"
+else
+  echo "deployment update test: config.env was overwritten - FAILED"
+  echo "Expected: $original_config_env_content"
+  echo "Got: $synced_config_env_content"
+  exit 1
+fi
+
+# Verify the spec file was updated in the deployment dir
+updated_deployed_spec=$(<$test_deployment_dir/spec.yml)
+if [[ "$updated_deployed_spec" == *"FASTER"* ]]; then
+  echo "deployment update test: spec file updated"
+else
+  echo "deployment update test: spec file not updated - FAILED"
+  exit 1
+fi
+
+# Verify changed config file was backed up
+test_config_backup="${test_config_file}.bak"
+if [ -f "$test_config_backup" ]; then
+  backup_content=$(<$test_config_backup)
+  if [ "$backup_content" == "$test_config_file_changed_content" ]; then
+    echo "deployment update test: changed config file backed up - passed"
+  else
+    echo "deployment update test: backup content incorrect - FAILED"
+    exit 1
+  fi
+else
+  echo "deployment update test: backup file not created for changed file - FAILED"
+  exit 1
+fi
+
+# Verify unchanged config file was NOT backed up
+test_unchanged_backup="$test_unchanged_config.bak"
+if [ -f "$test_unchanged_backup" ]; then
+  echo "deployment update test: backup created for unchanged file - FAILED"
+  exit 1
+else
+  echo "deployment update test: no backup for unchanged file - passed"
+fi
+
+# Verify the config file was updated from source
+updated_config_content=$(<$test_config_file)
+if [ "$updated_config_content" == "$test_config_file_original_content" ]; then
+  echo "deployment update test: config file updated from source - passed"
+else
+  echo "deployment update test: config file not updated correctly - FAILED"
+  exit 1
+fi
+
+# Verify the data marker file still exists with original content
+if [ ! -f "$test_data_marker" ]; then
+  echo "deployment update test: data file deleted - FAILED"
+  exit 1
+fi
+synced_marker_content=$(<$test_data_marker)
+if [ "$synced_marker_content" == "$original_marker_content" ]; then
+  echo "deployment update test: data preserved - passed"
+else
+  echo "deployment update test: data corrupted - FAILED"
+  exit 1
+fi
+echo "deployment update test: passed"
+
 # Try to start the deployment
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start
 # Check logs command works
diff --git a/tests/external-stack/run-test.sh b/tests/external-stack/run-test.sh
index 084f3b9d..54c6f18f 100755
--- a/tests/external-stack/run-test.sh
+++ b/tests/external-stack/run-test.sh
@@ -125,6 +125,49 @@ fi
 echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
 echo "deploy create output file test: passed"

+
+# Test sync functionality: update deployment without destroying data
+# First, create a marker file in the data directory to verify it's preserved
+test_data_marker="$test_deployment_dir/data/test-data/sync-test-marker.txt"
+mkdir -p "$test_deployment_dir/data/test-data"
+echo "external-stack-data-$(date +%s)" > "$test_data_marker"
+original_marker_content=$(<$test_data_marker)
+# Verify deployment file exists and preserve its cluster ID
+original_cluster_id=$(grep "cluster-id:" "$test_deployment_dir/deployment.yml" 2>/dev/null || echo "")
+# Modify the spec file to simulate an update
+sed -i.bak 's/CERC_TEST_PARAM_1=PASSED/CERC_TEST_PARAM_1=UPDATED/' $test_deployment_spec
+# Run sync to update deployment files without destroying data
+$TEST_TARGET_SO_STACK deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir --update
+# Verify the spec file was updated in the deployment dir
+updated_deployed_spec=$(<$test_deployment_dir/spec.yml)
+if [[ "$updated_deployed_spec" == *"UPDATED"* ]]; then
+  echo "deploy sync test: spec file updated"
+else
+  echo "deploy sync test: spec file not updated - FAILED"
+  exit 1
+fi
+# Verify the data marker file still exists with original content
+if [ ! -f "$test_data_marker" ]; then
-f "$test_data_marker" ]; then + echo "deploy sync test: data file deleted - FAILED" + exit 1 +fi +synced_marker_content=$(<$test_data_marker) +if [ "$synced_marker_content" == "$original_marker_content" ]; then + echo "deploy sync test: data preserved - passed" +else + echo "deploy sync test: data corrupted - FAILED" + exit 1 +fi +# Verify cluster ID was preserved (not regenerated) +new_cluster_id=$(grep "cluster-id:" "$test_deployment_dir/deployment.yml" 2>/dev/null || echo "") +if [ -n "$original_cluster_id" ] && [ "$original_cluster_id" == "$new_cluster_id" ]; then + echo "deploy sync test: cluster ID preserved - passed" +else + echo "deploy sync test: cluster ID not preserved - FAILED" + exit 1 +fi +echo "deploy sync test: passed" + # Try to start the deployment $TEST_TARGET_SO deployment --dir $test_deployment_dir start # Check logs command works