Compare commits
No commits in common. "main" and "fix-down-cleanup-by-label" have entirely different histories.
main...fix-down-cleanup-by-label
@@ -31,7 +31,6 @@ from stack_orchestrator.deploy.k8s.helpers import (
     envs_from_environment_variables_map,
     envs_from_compose_file,
     merge_envs,
-    translate_sidecar_service_names,
 )
 from stack_orchestrator.deploy.deploy_util import (
     parsed_pod_files_map_from_file_names,
@@ -440,12 +439,6 @@ class ClusterInfo:
                 if "environment" in service_info
                 else self.environment_variables.map
             )
-            # Translate docker-compose service names to localhost for sidecars
-            # All services in the same pod share the network namespace
-            sibling_services = [s for s in services.keys() if s != service_name]
-            merged_envs = translate_sidecar_service_names(
-                merged_envs, sibling_services
-            )
             envs = envs_from_environment_variables_map(merged_envs)
             if opts.o.debug:
                 print(f"Merged envs: {envs}")
@@ -96,7 +96,7 @@ class K8sDeployer(Deployer):
     core_api: client.CoreV1Api
     apps_api: client.AppsV1Api
     networking_api: client.NetworkingV1Api
-    k8s_namespace: str
+    k8s_namespace: str = "default"
     kind_cluster_name: str
     skip_cluster_management: bool
     cluster_info: ClusterInfo
@@ -113,7 +113,6 @@ class K8sDeployer(Deployer):
     ) -> None:
         self.type = type
         self.skip_cluster_management = False
-        self.k8s_namespace = "default"  # Will be overridden below if context exists
         # TODO: workaround pending refactoring above to cope with being
         # created with a null deployment_context
         if deployment_context is None:
@@ -121,8 +120,6 @@ class K8sDeployer(Deployer):
             self.deployment_dir = deployment_context.deployment_dir
             self.deployment_context = deployment_context
             self.kind_cluster_name = compose_project_name
-            # Use deployment-specific namespace for resource isolation and easy cleanup
-            self.k8s_namespace = f"laconic-{compose_project_name}"
             self.cluster_info = ClusterInfo()
             self.cluster_info.int(
                 compose_files,
@@ -152,46 +149,6 @@ class K8sDeployer(Deployer):
         self.apps_api = client.AppsV1Api()
         self.custom_obj_api = client.CustomObjectsApi()
 
-    def _ensure_namespace(self):
-        """Create the deployment namespace if it doesn't exist."""
-        if opts.o.dry_run:
-            print(f"Dry run: would create namespace {self.k8s_namespace}")
-            return
-        try:
-            self.core_api.read_namespace(name=self.k8s_namespace)
-            if opts.o.debug:
-                print(f"Namespace {self.k8s_namespace} already exists")
-        except ApiException as e:
-            if e.status == 404:
-                # Create the namespace
-                ns = client.V1Namespace(
-                    metadata=client.V1ObjectMeta(
-                        name=self.k8s_namespace,
-                        labels={"app": self.cluster_info.app_name},
-                    )
-                )
-                self.core_api.create_namespace(body=ns)
-                if opts.o.debug:
-                    print(f"Created namespace {self.k8s_namespace}")
-            else:
-                raise
-
-    def _delete_namespace(self):
-        """Delete the deployment namespace and all resources within it."""
-        if opts.o.dry_run:
-            print(f"Dry run: would delete namespace {self.k8s_namespace}")
-            return
-        try:
-            self.core_api.delete_namespace(name=self.k8s_namespace)
-            if opts.o.debug:
-                print(f"Deleted namespace {self.k8s_namespace}")
-        except ApiException as e:
-            if e.status == 404:
-                if opts.o.debug:
-                    print(f"Namespace {self.k8s_namespace} not found")
-            else:
-                raise
-
     def _create_volume_data(self):
         # Create the host-path-mounted PVs for this deployment
         pvs = self.cluster_info.get_pvs()
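
Note: the removed _ensure_namespace follows the common read-then-create idiom for
idempotent namespace creation. For reference, a minimal standalone sketch of that
idiom with the Kubernetes Python client (the function name and the 409 handling
are illustrative additions, not code from either branch):

from kubernetes import client
from kubernetes.client.rest import ApiException

def ensure_namespace(core_api: client.CoreV1Api, name: str, labels: dict) -> None:
    # Read first: a 404 means the namespace does not exist yet.
    try:
        core_api.read_namespace(name=name)
        return  # already present
    except ApiException as e:
        if e.status != 404:
            raise
    ns = client.V1Namespace(metadata=client.V1ObjectMeta(name=name, labels=labels))
    try:
        core_api.create_namespace(body=ns)
    except ApiException as e:
        # 409 Conflict: another caller created it between our read and create.
        if e.status != 409:
            raise
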
@@ -357,8 +314,6 @@ class K8sDeployer(Deployer):
             load_images_into_kind(self.kind_cluster_name, local_images)
         # Note: if no local containers defined, all images come from registries
         self.connect_api()
-        # Create deployment-specific namespace for resource isolation
-        self._ensure_namespace()
         if self.is_kind() and not self.skip_cluster_management:
             # Configure ingress controller (not installed by default in kind)
             # Skip if already running (idempotent for shared cluster)
@@ -426,12 +381,17 @@ class K8sDeployer(Deployer):
                     print("NodePort created:")
                     print(f"{nodeport_resp}")
 
-    def down(self, timeout, volumes, skip_cluster_management):
+    def down(self, timeout, volumes, skip_cluster_management):  # noqa: C901
         self.skip_cluster_management = skip_cluster_management
         self.connect_api()
 
-        # PersistentVolumes are cluster-scoped (not namespaced), so delete by label
+        # Query K8s for resources by label selector instead of generating names
+        # from config. This ensures we clean up orphaned resources when deployment
+        # IDs change (e.g., after force_redeploy).
+        label_selector = f"app={self.cluster_info.app_name}"
+
         if volumes:
+            # Delete PVs for this deployment (PVs use volume-label pattern)
             try:
                 pvs = self.core_api.list_persistent_volume(
                     label_selector=f"app={self.cluster_info.app_name}"
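
Note: the label_selector built above uses standard Kubernetes label-selector
syntax, the same form kubectl -l accepts. A minimal sketch of how such a selector
drives the list calls (the deployment name is an illustrative placeholder):

from kubernetes import client, config

config.load_kube_config()
core = client.CoreV1Api()
# Equality-based selector, matching the f"app={...}" pattern in the diff:
pvs = core.list_persistent_volume(label_selector="app=laconic-mydeploy")
# PVs are cluster-scoped; namespaced resources also take a namespace argument:
pvcs = core.list_namespaced_persistent_volume_claim(
    namespace="default", label_selector="app=laconic-mydeploy"
)
print(f"{len(pvs.items)} PVs, {len(pvcs.items)} PVCs match")
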
@@ -447,9 +407,97 @@ class K8sDeployer(Deployer):
                 if opts.o.debug:
                     print(f"Error listing PVs: {e}")
 
-        # Delete the deployment namespace - this cascades to all namespaced resources
-        # (PVCs, ConfigMaps, Deployments, Services, Ingresses, etc.)
-        self._delete_namespace()
+            # Delete PVCs for this deployment
+            try:
+                pvcs = self.core_api.list_namespaced_persistent_volume_claim(
+                    namespace=self.k8s_namespace, label_selector=label_selector
+                )
+                for pvc in pvcs.items:
+                    if opts.o.debug:
+                        print(f"Deleting PVC: {pvc.metadata.name}")
+                    try:
+                        self.core_api.delete_namespaced_persistent_volume_claim(
+                            name=pvc.metadata.name, namespace=self.k8s_namespace
+                        )
+                    except ApiException as e:
+                        _check_delete_exception(e)
+            except ApiException as e:
+                if opts.o.debug:
+                    print(f"Error listing PVCs: {e}")
+
+        # Delete ConfigMaps for this deployment
+        try:
+            cfg_maps = self.core_api.list_namespaced_config_map(
+                namespace=self.k8s_namespace, label_selector=label_selector
+            )
+            for cfg_map in cfg_maps.items:
+                if opts.o.debug:
+                    print(f"Deleting ConfigMap: {cfg_map.metadata.name}")
+                try:
+                    self.core_api.delete_namespaced_config_map(
+                        name=cfg_map.metadata.name, namespace=self.k8s_namespace
+                    )
+                except ApiException as e:
+                    _check_delete_exception(e)
+        except ApiException as e:
+            if opts.o.debug:
+                print(f"Error listing ConfigMaps: {e}")
+
+        # Delete Deployments for this deployment
+        try:
+            deployments = self.apps_api.list_namespaced_deployment(
+                namespace=self.k8s_namespace, label_selector=label_selector
+            )
+            for deployment in deployments.items:
+                if opts.o.debug:
+                    print(f"Deleting Deployment: {deployment.metadata.name}")
+                try:
+                    self.apps_api.delete_namespaced_deployment(
+                        name=deployment.metadata.name, namespace=self.k8s_namespace
+                    )
+                except ApiException as e:
+                    _check_delete_exception(e)
+        except ApiException as e:
+            if opts.o.debug:
+                print(f"Error listing Deployments: {e}")
+
+        # Delete Services for this deployment (includes both ClusterIP and NodePort)
+        try:
+            services = self.core_api.list_namespaced_service(
+                namespace=self.k8s_namespace, label_selector=label_selector
+            )
+            for service in services.items:
+                if opts.o.debug:
+                    print(f"Deleting Service: {service.metadata.name}")
+                try:
+                    self.core_api.delete_namespaced_service(
+                        namespace=self.k8s_namespace, name=service.metadata.name
+                    )
+                except ApiException as e:
+                    _check_delete_exception(e)
+        except ApiException as e:
+            if opts.o.debug:
+                print(f"Error listing Services: {e}")
+
+        # Delete Ingresses for this deployment
+        try:
+            ingresses = self.networking_api.list_namespaced_ingress(
+                namespace=self.k8s_namespace, label_selector=label_selector
+            )
+            for ingress in ingresses.items:
+                if opts.o.debug:
+                    print(f"Deleting Ingress: {ingress.metadata.name}")
+                try:
+                    self.networking_api.delete_namespaced_ingress(
+                        name=ingress.metadata.name, namespace=self.k8s_namespace
+                    )
+                except ApiException as e:
+                    _check_delete_exception(e)
+            if not ingresses.items and opts.o.debug:
+                print("No ingress to delete")
+        except ApiException as e:
+            if opts.o.debug:
+                print(f"Error listing Ingresses: {e}")
 
         if self.is_kind() and not self.skip_cluster_management:
             # Destroy the kind cluster
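
Note: the five blocks added above share one list-then-delete shape. A hypothetical
helper (not in either branch) that factors the pattern for namespaced resource
kinds, assuming _check_delete_exception tolerates 404s as the diff implies:

from kubernetes.client.rest import ApiException

def _delete_by_label(list_fn, delete_fn, namespace, label_selector, kind, debug=False):
    # list_fn / delete_fn are bound client methods, e.g.
    # core_api.list_namespaced_service / core_api.delete_namespaced_service.
    try:
        items = list_fn(namespace=namespace, label_selector=label_selector).items
    except ApiException as e:
        if debug:
            print(f"Error listing {kind}s: {e}")
        return
    for item in items:
        if debug:
            print(f"Deleting {kind}: {item.metadata.name}")
        try:
            delete_fn(name=item.metadata.name, namespace=namespace)
        except ApiException as e:
            if e.status != 404:  # assumed _check_delete_exception behavior
                raise

Each block would then reduce to a single call, e.g.
_delete_by_label(self.core_api.list_namespaced_config_map,
self.core_api.delete_namespaced_config_map, self.k8s_namespace, label_selector,
"ConfigMap", opts.o.debug).
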
@@ -587,7 +635,7 @@ class K8sDeployer(Deployer):
         log_data = ""
         for container in containers:
             container_log = self.core_api.read_namespaced_pod_log(
-                k8s_pod_name, namespace=self.k8s_namespace, container=container
+                k8s_pod_name, namespace="default", container=container
             )
             container_log_lines = container_log.splitlines()
             for line in container_log_lines:
@@ -942,41 +942,6 @@ def envs_from_compose_file(
     return result
 
 
-def translate_sidecar_service_names(
-    envs: Mapping[str, str], sibling_service_names: List[str]
-) -> Mapping[str, str]:
-    """Translate docker-compose service names to localhost for sidecar containers.
-
-    In docker-compose, services can reference each other by name (e.g., 'db:5432').
-    In Kubernetes, when multiple containers are in the same pod (sidecars), they
-    share the same network namespace and must use 'localhost' instead.
-
-    This function replaces service name references with 'localhost' in env values.
-    """
-    import re
-
-    if not sibling_service_names:
-        return envs
-
-    result = {}
-    for env_var, env_val in envs.items():
-        if env_val is None:
-            result[env_var] = env_val
-            continue
-
-        new_val = str(env_val)
-        for service_name in sibling_service_names:
-            # Match service name followed by optional port (e.g., 'db:5432', 'db')
-            # Handle URLs like: postgres://user:pass@db:5432/dbname
-            # and simple refs like: db:5432 or just db
-            pattern = rf"\b{re.escape(service_name)}(:\d+)?\b"
-            new_val = re.sub(pattern, lambda m: f'localhost{m.group(1) or ""}', new_val)
-
-        result[env_var] = new_val
-
-    return result
-
-
 def envs_from_environment_variables_map(
     map: Mapping[str, str]
 ) -> List[client.V1EnvVar]:
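
Note: the removed function's behavior is easiest to see on sample data. Under its
pattern \b<service>(:\d+)?\b, word boundaries keep partial names from matching
(the env values below are illustrative):

import re

envs = {
    "DATABASE_URL": "postgres://user:pass@db:5432/app",
    "CACHE_HOST": "redis",
    "UNRELATED": "dbx:5432",  # no word boundary splits 'db' from 'x', so no match
}
for name, val in envs.items():
    for svc in ["db", "redis"]:  # sibling services in the same pod
        pattern = rf"\b{re.escape(svc)}(:\d+)?\b"
        val = re.sub(pattern, lambda m: f'localhost{m.group(1) or ""}', val)
    print(f"{name} -> {val}")
# DATABASE_URL -> postgres://user:pass@localhost:5432/app
# CACHE_HOST -> localhost
# UNRELATED -> dbx:5432
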
@@ -128,6 +128,9 @@ class Spec:
     def get_http_proxy(self):
         return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])
 
+    def get_acme_email(self):
+        return self.obj.get(constants.network_key, {}).get("acme-email", "")
+
     def get_annotations(self):
         return self.obj.get(constants.annotations_key, {})
 
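
Note: the new accessor follows the same chained .get pattern as get_http_proxy,
so a missing network section or key degrades to the empty-string default (the
spec fragment below is illustrative):

obj = {"network": {"acme-email": "ops@example.com"}}
print(obj.get("network", {}).get("acme-email", ""))  # -> ops@example.com
print({}.get("network", {}).get("acme-email", ""))   # -> "" (safe default)
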