cv-c3c: add --image flag to deployment restart command
Allows callers to override container images during restart, e.g.: `laconic-so deployment restart --image backend=ghcr.io/org/app:sha123`. The override is applied to the k8s Deployment spec before create-or-patch. Docker/compose deployers accept the parameter but ignore it. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
0e4ecc3602
commit
4b1fc27a1e
@ -48,7 +48,7 @@ class DockerDeployer(Deployer):
|
||||
self.compose_project_name = compose_project_name
|
||||
self.compose_env_file = compose_env_file
|
||||
|
||||
def up(self, detach, skip_cluster_management, services):
|
||||
def up(self, detach, skip_cluster_management, services, image_overrides=None):
|
||||
if not opts.o.dry_run:
|
||||
try:
|
||||
return self.docker.compose.up(detach=detach, services=services)
|
||||
|
||||
@ -137,7 +137,11 @@ def create_deploy_context(
|
||||
|
||||
|
||||
def up_operation(
|
||||
ctx, services_list, stay_attached=False, skip_cluster_management=False
|
||||
ctx,
|
||||
services_list,
|
||||
stay_attached=False,
|
||||
skip_cluster_management=False,
|
||||
image_overrides=None,
|
||||
):
|
||||
global_context = ctx.parent.parent.obj
|
||||
deploy_context = ctx.obj
|
||||
@ -156,6 +160,7 @@ def up_operation(
|
||||
detach=not stay_attached,
|
||||
skip_cluster_management=skip_cluster_management,
|
||||
services=services_list,
|
||||
image_overrides=image_overrides,
|
||||
)
|
||||
for post_start_command in cluster_context.post_start_commands:
|
||||
_run_command(global_context, cluster_context.cluster, post_start_command)
|
||||
|
||||
@ -20,7 +20,7 @@ from typing import Optional
|
||||
|
||||
class Deployer(ABC):
|
||||
@abstractmethod
|
||||
def up(self, detach, skip_cluster_management, services):
|
||||
def up(self, detach, skip_cluster_management, services, image_overrides=None):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
|
||||
@ -248,8 +248,13 @@ def run_job(ctx, job_name, helm_release):
|
||||
"--expected-ip",
|
||||
help="Expected IP for DNS verification (if different from egress)",
|
||||
)
|
||||
@click.option(
|
||||
"--image",
|
||||
multiple=True,
|
||||
help="Override container image: container=image",
|
||||
)
|
||||
@click.pass_context
|
||||
def restart(ctx, stack_path, spec_file, config_file, force, expected_ip):
|
||||
def restart(ctx, stack_path, spec_file, config_file, force, expected_ip, image):
|
||||
"""Pull latest code and restart deployment using git-tracked spec.
|
||||
|
||||
GitOps workflow:
|
||||
@ -276,6 +281,17 @@ def restart(ctx, stack_path, spec_file, config_file, force, expected_ip):
|
||||
|
||||
deployment_context: DeploymentContext = ctx.obj
|
||||
|
||||
# Parse --image flags into a dict of container_name -> image
|
||||
image_overrides = {}
|
||||
for entry in image:
|
||||
if "=" not in entry:
|
||||
raise click.BadParameter(
|
||||
f"Invalid --image format '{entry}', expected container=image",
|
||||
param_hint="'--image'",
|
||||
)
|
||||
container_name, image_ref = entry.split("=", 1)
|
||||
image_overrides[container_name] = image_ref
|
||||
|
||||
# Get current spec info (before git pull)
|
||||
current_spec = deployment_context.spec
|
||||
current_http_proxy = current_spec.get_http_proxy()
|
||||
@ -389,7 +405,11 @@ def restart(ctx, stack_path, spec_file, config_file, force, expected_ip):
|
||||
print("\n[4/4] Applying deployment update...")
|
||||
ctx.obj = make_deploy_context(ctx)
|
||||
up_operation(
|
||||
ctx, services_list=None, stay_attached=False, skip_cluster_management=True
|
||||
ctx,
|
||||
services_list=None,
|
||||
stay_attached=False,
|
||||
skip_cluster_management=True,
|
||||
image_overrides=image_overrides or None,
|
||||
)
|
||||
|
||||
print("\n=== Restart Complete ===")
|
||||
|
||||
@ -115,6 +115,7 @@ class K8sDeployer(Deployer):
|
||||
) -> None:
|
||||
self.type = type
|
||||
self.skip_cluster_management = False
|
||||
self.image_overrides = None
|
||||
self.k8s_namespace = "default" # Will be overridden below if context exists
|
||||
# TODO: workaround pending refactoring above to cope with being
|
||||
# created with a null deployment_context
|
||||
@ -122,9 +123,13 @@ class K8sDeployer(Deployer):
|
||||
return
|
||||
self.deployment_dir = deployment_context.deployment_dir
|
||||
self.deployment_context = deployment_context
|
||||
self.kind_cluster_name = deployment_context.spec.get_kind_cluster_name() or compose_project_name
|
||||
self.kind_cluster_name = (
|
||||
deployment_context.spec.get_kind_cluster_name() or compose_project_name
|
||||
)
|
||||
# Use spec namespace if provided, otherwise derive from cluster-id
|
||||
self.k8s_namespace = deployment_context.spec.get_namespace() or f"laconic-{compose_project_name}"
|
||||
self.k8s_namespace = (
|
||||
deployment_context.spec.get_namespace() or f"laconic-{compose_project_name}"
|
||||
)
|
||||
self.cluster_info = ClusterInfo()
|
||||
# stack.name may be an absolute path (from spec "stack:" key after
|
||||
# path resolution). Extract just the directory basename for labels.
|
||||
@ -269,7 +274,8 @@ class K8sDeployer(Deployer):
|
||||
for job in jobs.items:
|
||||
print(f"Deleting Job {job.metadata.name}")
|
||||
self.batch_api.delete_namespaced_job(
|
||||
name=job.metadata.name, namespace=ns,
|
||||
name=job.metadata.name,
|
||||
namespace=ns,
|
||||
body=client.V1DeleteOptions(propagation_policy="Background"),
|
||||
)
|
||||
except ApiException as e:
|
||||
@ -406,9 +412,16 @@ class K8sDeployer(Deployer):
|
||||
print("No pods defined, skipping Deployment creation")
|
||||
return
|
||||
# Process compose files into a Deployment
|
||||
deployment = self.cluster_info.get_deployment(
|
||||
image_pull_policy="Always"
|
||||
)
|
||||
deployment = self.cluster_info.get_deployment(image_pull_policy="Always")
|
||||
# Apply image overrides if provided
|
||||
if self.image_overrides:
|
||||
for container in deployment.spec.template.spec.containers:
|
||||
if container.name in self.image_overrides:
|
||||
container.image = self.image_overrides[container.name]
|
||||
if opts.o.debug:
|
||||
print(
|
||||
f"Overriding image for {container.name}: {container.image}"
|
||||
)
|
||||
# Create or update the k8s Deployment
|
||||
if opts.o.debug:
|
||||
print(f"Sending this deployment: {deployment}")
|
||||
@ -470,9 +483,7 @@ class K8sDeployer(Deployer):
|
||||
|
||||
def _create_jobs(self):
|
||||
# Process job compose files into k8s Jobs
|
||||
jobs = self.cluster_info.get_jobs(
|
||||
image_pull_policy="Always"
|
||||
)
|
||||
jobs = self.cluster_info.get_jobs(image_pull_policy="Always")
|
||||
for job in jobs:
|
||||
if opts.o.debug:
|
||||
print(f"Sending this job: {job}")
|
||||
@ -524,7 +535,8 @@ class K8sDeployer(Deployer):
|
||||
return cert
|
||||
return None
|
||||
|
||||
def up(self, detach, skip_cluster_management, services):
|
||||
def up(self, detach, skip_cluster_management, services, image_overrides=None):
|
||||
self.image_overrides = image_overrides
|
||||
self.skip_cluster_management = skip_cluster_management
|
||||
if not opts.o.dry_run:
|
||||
if self.is_kind() and not self.skip_cluster_management:
|
||||
@ -646,7 +658,10 @@ class K8sDeployer(Deployer):
|
||||
|
||||
# Call start() hooks — stacks can create additional k8s resources
|
||||
if self.deployment_context:
|
||||
from stack_orchestrator.deploy.deployment_create import call_stack_deploy_start
|
||||
from stack_orchestrator.deploy.deployment_create import (
|
||||
call_stack_deploy_start,
|
||||
)
|
||||
|
||||
call_stack_deploy_start(self.deployment_context)
|
||||
|
||||
def down(self, timeout, volumes, skip_cluster_management):
|
||||
@ -658,9 +673,7 @@ class K8sDeployer(Deployer):
|
||||
# PersistentVolumes are cluster-scoped (not namespaced), so delete by label
|
||||
if volumes:
|
||||
try:
|
||||
pvs = self.core_api.list_persistent_volume(
|
||||
label_selector=app_label
|
||||
)
|
||||
pvs = self.core_api.list_persistent_volume(label_selector=app_label)
|
||||
for pv in pvs.items:
|
||||
if opts.o.debug:
|
||||
print(f"Deleting PV: {pv.metadata.name}")
|
||||
@ -804,14 +817,18 @@ class K8sDeployer(Deployer):
|
||||
|
||||
def logs(self, services, tail, follow, stream):
|
||||
self.connect_api()
|
||||
pods = pods_in_deployment(self.core_api, self.cluster_info.app_name, namespace=self.k8s_namespace)
|
||||
pods = pods_in_deployment(
|
||||
self.core_api, self.cluster_info.app_name, namespace=self.k8s_namespace
|
||||
)
|
||||
if len(pods) > 1:
|
||||
print("Warning: more than one pod in the deployment")
|
||||
if len(pods) == 0:
|
||||
log_data = "******* Pods not running ********\n"
|
||||
else:
|
||||
k8s_pod_name = pods[0]
|
||||
containers = containers_in_pod(self.core_api, k8s_pod_name, namespace=self.k8s_namespace)
|
||||
containers = containers_in_pod(
|
||||
self.core_api, k8s_pod_name, namespace=self.k8s_namespace
|
||||
)
|
||||
# If pod not started, logs request below will throw an exception
|
||||
try:
|
||||
log_data = ""
|
||||
@ -910,9 +927,7 @@ class K8sDeployer(Deployer):
|
||||
else:
|
||||
# Non-Helm path: create job from ClusterInfo
|
||||
self.connect_api()
|
||||
jobs = self.cluster_info.get_jobs(
|
||||
image_pull_policy="Always"
|
||||
)
|
||||
jobs = self.cluster_info.get_jobs(image_pull_policy="Always")
|
||||
# Find the matching job by name
|
||||
target_name = f"{self.cluster_info.app_name}-job-{job_name}"
|
||||
matched_job = None
|
||||
|
||||
Loading…
Reference in New Issue
Block a user