Add --update option to deploy create

To allow updating an existing deployment

- Check the deployment dir exists when updating
- Write to temp dir, then safely copy tree
- Don't overwrite data dir or config.env
Roy Crihfield 2025-10-17 23:21:23 +08:00
parent 55b76b9b57
commit 789b2dd3a7
11 changed files with 463 additions and 122 deletions
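
In practice the new flag makes `deploy create` re-runnable against an existing deployment; a minimal session sketch (the `test` stack and paths here are illustrative):

```
$ laconic-so --stack test deploy init --output test-spec.yml
$ laconic-so --stack test deploy create --spec-file test-spec.yml --deployment-dir test-deployment
# ... later, edit test-spec.yml or pull an updated stack definition ...
$ laconic-so --stack test deploy create --spec-file test-spec.yml --deployment-dir test-deployment --update
# data/, config.env and deployment.yml are left untouched;
# any other file that changed is first renamed to <name>.bak
```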

View File

@ -65,3 +65,71 @@ Force full rebuild of packages:
 ```
 $ laconic-so build-npms --include <package-name> --force-rebuild
 ```
+
+## deploy
+
+The `deploy` command group manages persistent deployments. The typical workflow is: `deploy init` to generate a spec file, `deploy create` to create a deployment directory from that spec, then runtime commands such as `deploy up` and `deploy down`.
+
+### deploy init
+
+Generate a deployment spec file from a stack definition:
+```
+$ laconic-so --stack <stack-name> deploy init --output <spec-file>
+```
+Options:
+- `--output` (required): write the generated spec file to this path
+- `--config`: provide config variables for the deployment
+- `--config-file`: provide config variables in a file
+- `--kube-config`: provide a config file for a k8s deployment
+- `--image-registry`: provide a container image registry url for this k8s cluster
+- `--map-ports-to-host`: map ports to the host (`any-variable-random`, `localhost-same`, `any-same`, `localhost-fixed-random`, `any-fixed-random`)
+
+### deploy create
+
+Create a deployment directory from a spec file:
+```
+$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <dir>
+```
+Update an existing deployment in place, preserving data volumes and the env file:
+```
+$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <dir> --update
+```
+Options:
+- `--spec-file` (required): spec file to use
+- `--deployment-dir`: target directory for deployment files
+- `--update`: update an existing deployment directory in place. Data volumes, `config.env`, and `deployment.yml` are preserved; any other changed file is backed up with a `.bak` suffix before being replaced.
+- `--network-dir`: directory supplying the network configuration
+- `--initial-peers`: initial set of persistent peers
+
+### deploy up
+
+Start a deployment:
+```
+$ laconic-so deployment --dir <deployment-dir> up
+```
+
+### deploy down
+
+Stop a deployment:
+```
+$ laconic-so deployment --dir <deployment-dir> down
+```
+Use `--delete-volumes` to also remove data volumes.
+
+### deploy ps
+
+Show running services:
+```
+$ laconic-so deployment --dir <deployment-dir> ps
+```
+
+### deploy logs
+
+View service logs:
+```
+$ laconic-so deployment --dir <deployment-dir> logs
+```
+Use `-f` to follow the log output and `-n <count>` to show only the most recent `<count>` lines.
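
Putting the command group together, a full lifecycle might look like the following sketch (the `test` stack name and config variable are borrowed from this commit's test scripts, for illustration only):

```
$ laconic-so --stack test deploy init --output test-spec.yml \
    --config CERC_TEST_PARAM_1=PASSED --map-ports-to-host localhost-same
$ laconic-so --stack test deploy create --spec-file test-spec.yml --deployment-dir test-deployment
$ laconic-so deployment --dir test-deployment up
$ laconic-so deployment --dir test-deployment logs -f
$ laconic-so deployment --dir test-deployment down --delete-volumes
```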

View File

@ -8,6 +8,8 @@ services:
       CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
       CERC_TEST_PARAM_3: ${CERC_TEST_PARAM_3:-FAILED}
     volumes:
+      - ../config/test/script.sh:/opt/run.sh
+      - ../config/test/settings.env:/opt/settings.env
       - test-data-bind:/data
       - test-data-auto:/data2
       - test-config:/config:ro

View File

@ -0,0 +1,3 @@
+#!/bin/sh
+echo "Hello"

View File

@ -0,0 +1 @@
+ANSWER=42

View File

@ -1,9 +1,6 @@
-FROM ubuntu:latest
-RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \
-    apt-get install -y software-properties-common && \
-    apt-get install -y nginx && \
-    apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+FROM alpine:latest
+RUN apk add --no-cache nginx
 EXPOSE 80

View File

@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/usr/bin/env sh
 set -e
 if [ -n "$CERC_SCRIPT_DEBUG" ]; then
@ -8,14 +8,14 @@ fi
 echo "Test container starting"
 DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
-if [[ -n "$DATA_DEVICE" ]]; then
+if [ -n "$DATA_DEVICE" ]; then
     echo "/data: MOUNTED dev=${DATA_DEVICE}"
 else
     echo "/data: not mounted"
 fi
 DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
-if [[ -n "$DATA_DEVICE" ]]; then
+if [ -n "$DATA_DEVICE" ]; then
     echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
 else
     echo "/data2: not mounted"
@ -23,7 +23,7 @@ fi
 # Test if the container's filesystem is old (run previously) or new
 for d in /data /data2; do
-    if [[ -f "$d/exists" ]];
+    if [ -f "$d/exists" ];
     then
         TIMESTAMP=`cat $d/exists`
         echo "$d filesystem is old, created: $TIMESTAMP"
@ -52,7 +52,7 @@ fi
 if [ -d "/config" ]; then
     echo "/config: EXISTS"
     for f in /config/*; do
-        if [[ -f "$f" ]] || [[ -L "$f" ]]; then
+        if [ -f "$f" ] || [ -L "$f" ]; then
             echo "$f:"
             cat "$f"
             echo ""
@ -64,4 +64,4 @@ else
 fi
 # Run nginx which will block here forever
-/usr/sbin/nginx -g "daemon off;"
+nginx -g "daemon off;"
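
A note on the script changes above: Alpine's `/bin/sh` is BusyBox ash rather than bash, so the script drops bash-only constructs. `[[ ... ]]` is a bash/ksh extension, while `[ ... ]` is the POSIX `test` utility; the script also now finds `nginx` via `PATH` instead of a hard-coded location. For illustration (not part of the commit):

```
# POSIX test - works in BusyBox sh, dash, and bash alike:
[ -n "$DATA_DEVICE" ] && echo "mounted"

# bash-only test - fails under a strict POSIX shell such as dash:
[[ -n "$DATA_DEVICE" ]] && echo "mounted"
```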

View File

@ -19,9 +19,12 @@ import os
 from pathlib import Path
 from typing import List
 import random
-from shutil import copy, copyfile, copytree
+from shutil import copy, copyfile, copytree, rmtree
 from secrets import token_hex
 import sys
+import filecmp
+import tempfile
 from stack_orchestrator import constants
 from stack_orchestrator.opts import opts
 from stack_orchestrator.util import (
@ -524,6 +527,12 @@ def _check_volume_definitions(spec):
"--spec-file", required=True, help="Spec file to use to create this deployment" "--spec-file", required=True, help="Spec file to use to create this deployment"
) )
@click.option("--deployment-dir", help="Create deployment files in this directory") @click.option("--deployment-dir", help="Create deployment files in this directory")
@click.option(
"--update",
is_flag=True,
default=False,
help="Update existing deployment directory, preserving data volumes and env file",
)
@click.option( @click.option(
"--helm-chart", "--helm-chart",
is_flag=True, is_flag=True,
@ -536,13 +545,21 @@ def _check_volume_definitions(spec):
 @click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
 @click.pass_context
 def create(
-    ctx, spec_file, deployment_dir, helm_chart, network_dir, initial_peers, extra_args
+    ctx,
+    spec_file,
+    deployment_dir,
+    update,
+    helm_chart,
+    network_dir,
+    initial_peers,
+    extra_args,
 ):
     deployment_command_context = ctx.obj
     return create_operation(
         deployment_command_context,
         spec_file,
         deployment_dir,
+        update,
         helm_chart,
         network_dir,
         initial_peers,
@ -556,6 +573,7 @@ def create_operation(
     deployment_command_context,
     spec_file,
     deployment_dir,
+    update=False,
     helm_chart=False,
     network_dir=None,
     initial_peers=None,
@ -568,23 +586,23 @@ def create_operation(
     stack_name = parsed_spec["stack"]
     deployment_type = parsed_spec[constants.deploy_to_key]
-    stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
-    parsed_stack = get_parsed_stack_config(stack_name)
     if opts.o.debug:
         print(f"parsed spec: {parsed_spec}")
     if deployment_dir is None:
         deployment_dir_path = _make_default_deployment_dir()
     else:
         deployment_dir_path = Path(deployment_dir)
-    if deployment_dir_path.exists():
-        error_exit(f"{deployment_dir_path} already exists")
-    os.mkdir(deployment_dir_path)
-    # Copy spec file and the stack file into the deployment dir
-    copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
-    copyfile(stack_file, deployment_dir_path.joinpath(constants.stack_file_name))
-    # Create deployment.yml with cluster-id
-    _create_deployment_file(deployment_dir_path)
+    if deployment_dir_path.exists():
+        if not update:
+            error_exit(f"{deployment_dir_path} already exists")
+        if opts.o.debug:
+            print(f"Updating existing deployment at {deployment_dir_path}")
+    else:
+        if update:
+            error_exit(f"--update requires that {deployment_dir_path} already exists")
+        os.mkdir(deployment_dir_path)
     # Branch to Helm chart generation flow if --helm-chart flag is set
     if deployment_type == "k8s" and helm_chart:
@ -595,104 +613,41 @@ def create_operation(
         generate_helm_chart(stack_name, spec_file, deployment_dir_path)
         return  # Exit early for helm chart generation
-    # Existing deployment flow continues unchanged
-    # Copy any config varibles from the spec file into an env file suitable for compose
-    _write_config_file(
-        spec_file, deployment_dir_path.joinpath(constants.config_file_name)
-    )
-    # Copy any k8s config file into the deployment dir
-    if deployment_type == "k8s":
-        _write_kube_config_file(
-            Path(parsed_spec[constants.kube_config_key]),
-            deployment_dir_path.joinpath(constants.kube_config_filename),
-        )
-    # Copy the pod files into the deployment dir, fixing up content
-    pods = get_pod_list(parsed_stack)
-    destination_compose_dir = deployment_dir_path.joinpath("compose")
-    os.mkdir(destination_compose_dir)
-    destination_pods_dir = deployment_dir_path.joinpath("pods")
-    os.mkdir(destination_pods_dir)
-    yaml = get_yaml()
-    for pod in pods:
-        pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
-        if pod_file_path is None:
-            continue
-        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
-        extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
-        destination_pod_dir = destination_pods_dir.joinpath(pod)
-        os.mkdir(destination_pod_dir)
-        if opts.o.debug:
-            print(f"extra config dirs: {extra_config_dirs}")
-        _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
-        with open(
-            destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w"
-        ) as output_file:
-            yaml.dump(parsed_pod_file, output_file)
-        # Copy the config files for the pod, if any
-        config_dirs = {pod}
-        config_dirs = config_dirs.union(extra_config_dirs)
-        for config_dir in config_dirs:
-            source_config_dir = resolve_config_dir(stack_name, config_dir)
-            if os.path.exists(source_config_dir):
-                destination_config_dir = deployment_dir_path.joinpath(
-                    "config", config_dir
-                )
-                # If the same config dir appears in multiple pods, it may already have
-                # been copied
-                if not os.path.exists(destination_config_dir):
-                    copytree(source_config_dir, destination_config_dir)
-        # Copy the script files for the pod, if any
-        if pod_has_scripts(parsed_stack, pod):
-            destination_script_dir = destination_pod_dir.joinpath("scripts")
-            os.mkdir(destination_script_dir)
-            script_paths = get_pod_script_paths(parsed_stack, pod)
-            _copy_files_to_directory(script_paths, destination_script_dir)
-    if parsed_spec.is_kubernetes_deployment():
-        for configmap in parsed_spec.get_configmaps():
-            source_config_dir = resolve_config_dir(stack_name, configmap)
-            if os.path.exists(source_config_dir):
-                destination_config_dir = deployment_dir_path.joinpath(
-                    "configmaps", configmap
-                )
-                copytree(
-                    source_config_dir, destination_config_dir, dirs_exist_ok=True
-                )
-    else:
-        # TODO: We should probably only do this if the volume is marked :ro.
-        for volume_name, volume_path in parsed_spec.get_volumes().items():
-            source_config_dir = resolve_config_dir(stack_name, volume_name)
-            # Only copy if the source exists and is _not_ empty.
-            if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
-                destination_config_dir = deployment_dir_path.joinpath(volume_path)
-                # Only copy if the destination exists and _is_ empty.
-                if os.path.exists(destination_config_dir) and not os.listdir(
-                    destination_config_dir
-                ):
-                    copytree(
-                        source_config_dir,
-                        destination_config_dir,
-                        dirs_exist_ok=True,
-                    )
-    # Copy the job files into the deployment dir (for Docker deployments)
-    jobs = get_job_list(parsed_stack)
-    if jobs and not parsed_spec.is_kubernetes_deployment():
-        destination_compose_jobs_dir = deployment_dir_path.joinpath("compose-jobs")
-        os.mkdir(destination_compose_jobs_dir)
-        for job in jobs:
-            job_file_path = get_job_file_path(stack_name, parsed_stack, job)
-            if job_file_path and job_file_path.exists():
-                parsed_job_file = yaml.load(open(job_file_path, "r"))
-                _fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
-                with open(
-                    destination_compose_jobs_dir.joinpath(
-                        "docker-compose-%s.yml" % job
-                    ),
-                    "w",
-                ) as output_file:
-                    yaml.dump(parsed_job_file, output_file)
-                if opts.o.debug:
-                    print(f"Copied job compose file: {job}")
+    if update:
+        # Sync mode: write to temp dir, then copy to deployment dir with backups
+        temp_dir = Path(tempfile.mkdtemp(prefix="deployment-sync-"))
+        try:
+            # Write deployment files to temp dir (skip deployment.yml to preserve cluster ID)
+            _write_deployment_files(
+                temp_dir,
+                Path(spec_file),
+                parsed_spec,
+                stack_name,
+                deployment_type,
+                include_deployment_file=False,
+            )
+            # Copy from temp to deployment dir, excluding data volumes and backing up changed files
+            # Exclude data/* to avoid touching user data volumes
+            # Exclude config file to preserve deployment settings (XXX breaks passing config vars
+            # from spec. could warn about this or not exclude...)
+            exclude_patterns = ["data", "data/*", constants.config_file_name]
+            _safe_copy_tree(
+                temp_dir, deployment_dir_path, exclude_patterns=exclude_patterns
+            )
+        finally:
+            # Clean up temp dir
+            rmtree(temp_dir)
+    else:
+        # Normal mode: write directly to deployment dir
+        _write_deployment_files(
+            deployment_dir_path,
+            Path(spec_file),
+            parsed_spec,
+            stack_name,
+            deployment_type,
+            include_deployment_file=True,
+        )
     # Delegate to the stack's Python code
     # The deploy create command doesn't require a --stack argument so we need
@ -712,6 +667,181 @@ def create_operation(
     )
+
+
+def _safe_copy_tree(src: Path, dst: Path, exclude_patterns: List[str] = None):
+    """
+    Recursively copy a directory tree, backing up changed files with .bak suffix.
+
+    :param src: Source directory
+    :param dst: Destination directory
+    :param exclude_patterns: List of path patterns to exclude (relative to src)
+    """
+    if exclude_patterns is None:
+        exclude_patterns = []
+
+    def should_exclude(path: Path) -> bool:
+        """Check if path matches any exclude pattern."""
+        rel_path = path.relative_to(src)
+        for pattern in exclude_patterns:
+            if rel_path.match(pattern):
+                return True
+        return False
+
+    def safe_copy_file(src_file: Path, dst_file: Path):
+        """Copy file, backing up destination if it differs."""
+        if (
+            dst_file.exists()
+            and not dst_file.is_dir()
+            and not filecmp.cmp(src_file, dst_file)
+        ):
+            os.rename(dst_file, f"{dst_file}.bak")
+        copy(src_file, dst_file)
+
+    # Walk the source tree
+    for src_path in src.rglob("*"):
+        if should_exclude(src_path):
+            continue
+        rel_path = src_path.relative_to(src)
+        dst_path = dst / rel_path
+        if src_path.is_dir():
+            dst_path.mkdir(parents=True, exist_ok=True)
+        else:
+            dst_path.parent.mkdir(parents=True, exist_ok=True)
+            safe_copy_file(src_path, dst_path)
+
+
+def _write_deployment_files(
+    target_dir: Path,
+    spec_file: Path,
+    parsed_spec: Spec,
+    stack_name: str,
+    deployment_type: str,
+    include_deployment_file: bool = True,
+):
+    """
+    Write deployment files to target directory.
+
+    :param target_dir: Directory to write files to
+    :param spec_file: Path to spec file
+    :param parsed_spec: Parsed spec object
+    :param stack_name: Name of stack
+    :param deployment_type: Type of deployment
+    :param include_deployment_file: Whether to create deployment.yml file (skip for update)
+    """
+    stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
+    parsed_stack = get_parsed_stack_config(stack_name)
+    # Copy spec file and the stack file into the target dir
+    copyfile(spec_file, target_dir.joinpath(constants.spec_file_name))
+    copyfile(stack_file, target_dir.joinpath(constants.stack_file_name))
+    # Create deployment file if requested
+    if include_deployment_file:
+        _create_deployment_file(target_dir)
+    # Copy any config variables from the spec file into an env file suitable for compose
+    _write_config_file(spec_file, target_dir.joinpath(constants.config_file_name))
+    # Copy any k8s config file into the target dir
+    if deployment_type == "k8s":
+        _write_kube_config_file(
+            Path(parsed_spec[constants.kube_config_key]),
+            target_dir.joinpath(constants.kube_config_filename),
+        )
+    # Copy the pod files into the target dir, fixing up content
+    pods = get_pod_list(parsed_stack)
+    destination_compose_dir = target_dir.joinpath("compose")
+    os.makedirs(destination_compose_dir, exist_ok=True)
+    destination_pods_dir = target_dir.joinpath("pods")
+    os.makedirs(destination_pods_dir, exist_ok=True)
+    yaml = get_yaml()
+    for pod in pods:
+        pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
+        if pod_file_path is None:
+            continue
+        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
+        extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
+        destination_pod_dir = destination_pods_dir.joinpath(pod)
+        os.makedirs(destination_pod_dir, exist_ok=True)
+        if opts.o.debug:
+            print(f"extra config dirs: {extra_config_dirs}")
+        _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
+        with open(
+            destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w"
+        ) as output_file:
+            yaml.dump(parsed_pod_file, output_file)
+        # Copy the config files for the pod, if any
+        config_dirs = {pod}
+        config_dirs = config_dirs.union(extra_config_dirs)
+        for config_dir in config_dirs:
+            source_config_dir = resolve_config_dir(stack_name, config_dir)
+            if os.path.exists(source_config_dir):
+                destination_config_dir = target_dir.joinpath("config", config_dir)
+                copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
+        # Copy the script files for the pod, if any
+        if pod_has_scripts(parsed_stack, pod):
+            destination_script_dir = destination_pod_dir.joinpath("scripts")
+            os.makedirs(destination_script_dir, exist_ok=True)
+            script_paths = get_pod_script_paths(parsed_stack, pod)
+            _copy_files_to_directory(script_paths, destination_script_dir)
+    if parsed_spec.is_kubernetes_deployment():
+        for configmap in parsed_spec.get_configmaps():
+            source_config_dir = resolve_config_dir(stack_name, configmap)
+            if os.path.exists(source_config_dir):
+                destination_config_dir = target_dir.joinpath(
+                    "configmaps", configmap
+                )
+                copytree(
+                    source_config_dir, destination_config_dir, dirs_exist_ok=True
+                )
+    else:
+        # TODO:
+        # this is odd - looks up config dir that matches a volume name, then copies as a mount dir?
+        # AFAICT this is not used by or relevant to any existing stack - roy
+        # TODO: We should probably only do this if the volume is marked :ro.
+        for volume_name, volume_path in parsed_spec.get_volumes().items():
+            source_config_dir = resolve_config_dir(stack_name, volume_name)
+            # Only copy if the source exists and is _not_ empty.
+            if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
+                destination_config_dir = target_dir.joinpath(volume_path)
+                # Only copy if the destination exists and _is_ empty.
+                if os.path.exists(destination_config_dir) and not os.listdir(
+                    destination_config_dir
+                ):
+                    copytree(
+                        source_config_dir,
+                        destination_config_dir,
+                        dirs_exist_ok=True,
+                    )
+    # Copy the job files into the target dir (for Docker deployments)
+    jobs = get_job_list(parsed_stack)
+    if jobs and not parsed_spec.is_kubernetes_deployment():
+        destination_compose_jobs_dir = target_dir.joinpath("compose-jobs")
+        os.makedirs(destination_compose_jobs_dir, exist_ok=True)
+        for job in jobs:
+            job_file_path = get_job_file_path(stack_name, parsed_stack, job)
+            if job_file_path and job_file_path.exists():
+                parsed_job_file = yaml.load(open(job_file_path, "r"))
+                _fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
+                with open(
+                    destination_compose_jobs_dir.joinpath(
+                        "docker-compose-%s.yml" % job
+                    ),
+                    "w",
+                ) as output_file:
+                    yaml.dump(parsed_job_file, output_file)
+                if opts.o.debug:
+                    print(f"Copied job compose file: {job}")
+
+
 # TODO: this code should be in the stack .py files but
 # we haven't yet figured out how to integrate click across
 # the plugin boundary
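
The heart of the update path is `_safe_copy_tree`: render a fresh deployment into a temp dir, then overlay it onto the live one, backing up anything that changed and skipping excluded paths. A condensed, standalone sketch of that pattern (the `overlay_with_backups` function and the `my-deployment` path are illustrative, not the module's API):

```
import filecmp
import os
import tempfile
from pathlib import Path
from shutil import copy, rmtree
from typing import List


def overlay_with_backups(src: Path, dst: Path, exclude: List[str]) -> None:
    """Copy src over dst; back up differing files as <name>.bak, skip excluded paths."""
    for src_path in src.rglob("*"):
        rel = src_path.relative_to(src)
        if any(rel.match(pattern) for pattern in exclude):
            continue  # e.g. data volumes and config.env are never touched
        dst_path = dst / rel
        if src_path.is_dir():
            dst_path.mkdir(parents=True, exist_ok=True)
            continue
        dst_path.parent.mkdir(parents=True, exist_ok=True)
        if dst_path.is_file() and not filecmp.cmp(src_path, dst_path):
            os.rename(dst_path, f"{dst_path}.bak")  # keep the previous version
        copy(src_path, dst_path)


# Render into a temp dir, overlay, then clean up - mirroring the update flow
staging = Path(tempfile.mkdtemp(prefix="deployment-sync-"))
try:
    (staging / "spec.yml").write_text("stack: test\n")  # stand-in for the real render
    overlay_with_backups(staging, Path("my-deployment"), ["data", "data/*", "config.env"])
finally:
    rmtree(staging)
```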

View File

@ -94,7 +94,7 @@ def create_deployment(
     # Add the TLS and DNS spec
     _fixup_url_spec(spec_file_name, url)
     create_operation(
-        deploy_command_context, spec_file_name, deployment_dir, False, None, None
+        deploy_command_context, spec_file_name, deployment_dir, False, False, None, None
     )
     # Fix up the container tag inside the deployment compose file
     _fixup_container_tag(deployment_dir, image)

View File

@ -86,7 +86,7 @@ fi
echo "deploy init test: passed" echo "deploy init test: passed"
# Switch to a full path for the data dir so it gets provisioned as a host bind mounted volume and preserved beyond cluster lifetime # Switch to a full path for the data dir so it gets provisioned as a host bind mounted volume and preserved beyond cluster lifetime
sed -i "s|^\(\s*db-data:$\)$|\1 ${test_deployment_dir}/data/db-data|" $test_deployment_spec sed -i.bak "s|^\(\s*db-data:$\)$|\1 ${test_deployment_dir}/data/db-data|" $test_deployment_spec
$TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir $TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists # Check the deployment dir exists
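
An aside on the `sed -i` to `sed -i.bak` change: GNU sed accepts a bare `-i`, but BSD/macOS sed requires an explicit suffix argument, so always supplying one keeps the test portable across both (at the cost of a stray `.bak` file). For example:

```
# In-place edit that works with both GNU and BSD sed; original saved as file.bak
sed -i.bak 's/old/new/' file
```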

View File

@ -34,6 +34,7 @@ mkdir -p $CERC_REPO_BASE_DIR
 # with and without volume removal
 $TEST_TARGET_SO --stack test setup-repositories
 $TEST_TARGET_SO --stack test build-containers
+
 # Test deploy command execution
 $TEST_TARGET_SO --stack test deploy setup $CERC_REPO_BASE_DIR
 # Check that we now have the expected output directory
@ -85,6 +86,7 @@ else
     exit 1
 fi
 $TEST_TARGET_SO --stack test deploy down --delete-volumes
+
 # Basic test of creating a deployment
 test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
 test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
@ -122,6 +124,101 @@ fi
 echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
 echo "deploy create output file test: passed"
+
+# Test sync functionality: update deployment without destroying data
+# First, create a marker file in the data directory to verify it's preserved
+test_data_marker="$test_deployment_dir/data/test-data-bind/sync-test-marker.txt"
+echo "original-data-$(date +%s)" > "$test_data_marker"
+original_marker_content=$(<$test_data_marker)
+
+# Modify a config file in the deployment to differ from source (to test backup)
+test_config_file="$test_deployment_dir/config/test/settings.env"
+test_config_file_original_content=$(<$test_config_file)
+test_config_file_changed_content="ANSWER=69"
+echo "$test_config_file_changed_content" > "$test_config_file"
+
+# Check a config file that matches the source (to test no backup for unchanged files)
+test_unchanged_config="$test_deployment_dir/config/test/script.sh"
+
+# Modify spec file to simulate an update
+sed -i.bak 's/CERC_TEST_PARAM_3:/CERC_TEST_PARAM_3: FASTER/' $test_deployment_spec
+
+# Create/modify config.env to test it isn't overwritten during sync
+config_env_file="$test_deployment_dir/config.env"
+config_env_persistent_content="PERSISTENT_VALUE=should-not-be-overwritten-$(date +%s)"
+echo "$config_env_persistent_content" >> "$config_env_file"
+original_config_env_content=$(<$config_env_file)
+
+# Run sync to update deployment files without destroying data
+$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir --update
+
+# Verify config.env was not overwritten
+synced_config_env_content=$(<$config_env_file)
+if [ "$synced_config_env_content" == "$original_config_env_content" ]; then
+    echo "deployment update test: config.env preserved - passed"
+else
+    echo "deployment update test: config.env was overwritten - FAILED"
+    echo "Expected: $original_config_env_content"
+    echo "Got: $synced_config_env_content"
+    exit 1
+fi
+
+# Verify the spec file was updated in deployment dir
+updated_deployed_spec=$(<$test_deployment_dir/spec.yml)
+if [[ "$updated_deployed_spec" == *"FASTER"* ]]; then
+    echo "deployment update test: spec file updated"
+else
+    echo "deployment update test: spec file not updated - FAILED"
+    exit 1
+fi
+
+# Verify changed config file was backed up
+test_config_backup="${test_config_file}.bak"
+if [ -f "$test_config_backup" ]; then
+    backup_content=$(<$test_config_backup)
+    if [ "$backup_content" == "$test_config_file_changed_content" ]; then
+        echo "deployment update test: changed config file backed up - passed"
+    else
+        echo "deployment update test: backup content incorrect - FAILED"
+        exit 1
+    fi
+else
+    echo "deployment update test: backup file not created for changed file - FAILED"
+    exit 1
+fi
+
+# Verify unchanged config file was NOT backed up
+test_unchanged_backup="$test_unchanged_config.bak"
+if [ -f "$test_unchanged_backup" ]; then
+    echo "deployment update test: backup created for unchanged file - FAILED"
+    exit 1
+else
+    echo "deployment update test: no backup for unchanged file - passed"
+fi
+
+# Verify the config file was updated from source
+updated_config_content=$(<$test_config_file)
+if [ "$updated_config_content" == "$test_config_file_original_content" ]; then
+    echo "deployment update test: config file updated from source - passed"
+else
+    echo "deployment update test: config file not updated correctly - FAILED"
+    exit 1
+fi
+
+# Verify the data marker file still exists with original content
+if [ ! -f "$test_data_marker" ]; then
+    echo "deployment update test: data file deleted - FAILED"
+    exit 1
+fi
+synced_marker_content=$(<$test_data_marker)
+if [ "$synced_marker_content" == "$original_marker_content" ]; then
+    echo "deployment update test: data preserved - passed"
+else
+    echo "deployment update test: data corrupted - FAILED"
+    exit 1
+fi
+echo "deployment update test: passed"
+
 # Try to start the deployment
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start
 # Check logs command works

View File

@ -125,6 +125,49 @@ fi
echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
echo "deploy create output file test: passed" echo "deploy create output file test: passed"
# Test sync functionality: update deployment without destroying data
# First, create a marker file in the data directory to verify it's preserved
test_data_marker="$test_deployment_dir/data/test-data/sync-test-marker.txt"
mkdir -p "$test_deployment_dir/data/test-data"
echo "external-stack-data-$(date +%s)" > "$test_data_marker"
original_marker_content=$(<$test_data_marker)
# Verify deployment file exists and preserve its cluster ID
original_cluster_id=$(grep "cluster-id:" "$test_deployment_dir/deployment.yml" 2>/dev/null || echo "")
# Modify spec file to simulate an update
sed -i.bak 's/CERC_TEST_PARAM_1=PASSED/CERC_TEST_PARAM_1=UPDATED/' $test_deployment_spec
# Run sync to update deployment files without destroying data
$TEST_TARGET_SO_STACK deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir --update
# Verify the spec file was updated in deployment dir
updated_deployed_spec=$(<$test_deployment_dir/spec.yml)
if [[ "$updated_deployed_spec" == *"UPDATED"* ]]; then
echo "deploy sync test: spec file updated"
else
echo "deploy sync test: spec file not updated - FAILED"
exit 1
fi
# Verify the data marker file still exists with original content
if [ ! -f "$test_data_marker" ]; then
echo "deploy sync test: data file deleted - FAILED"
exit 1
fi
synced_marker_content=$(<$test_data_marker)
if [ "$synced_marker_content" == "$original_marker_content" ]; then
echo "deploy sync test: data preserved - passed"
else
echo "deploy sync test: data corrupted - FAILED"
exit 1
fi
# Verify cluster ID was preserved (not regenerated)
new_cluster_id=$(grep "cluster-id:" "$test_deployment_dir/deployment.yml" 2>/dev/null || echo "")
if [ -n "$original_cluster_id" ] && [ "$original_cluster_id" == "$new_cluster_id" ]; then
echo "deploy sync test: cluster ID preserved - passed"
else
echo "deploy sync test: cluster ID not preserved - FAILED"
exit 1
fi
echo "deploy sync test: passed"
# Try to start the deployment # Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start $TEST_TARGET_SO deployment --dir $test_deployment_dir start
# Check logs command works # Check logs command works