Compare commits

6 commits: v1.1.0-21d ... main

| Author | SHA1 | Date |
|---|---|---|
| | 4a1b5d86fd | |
| | 019225ca18 | |
| | 0296da6f64 | |
| | d913926144 | |
| | b41e0cb2f5 | |
| | 47d3d10ead | |

```diff
@@ -31,6 +31,7 @@ from stack_orchestrator.deploy.k8s.helpers import (
     envs_from_environment_variables_map,
     envs_from_compose_file,
     merge_envs,
+    translate_sidecar_service_names,
 )
 from stack_orchestrator.deploy.deploy_util import (
     parsed_pod_files_map_from_file_names,
```

```diff
@@ -125,7 +126,8 @@ class ClusterInfo:
                 name=(
                     f"{self.app_name}-nodeport-"
                     f"{pod_port}-{protocol.lower()}"
-                )
+                ),
+                labels={"app": self.app_name},
             ),
             spec=client.V1ServiceSpec(
                 type="NodePort",
```

```diff
@@ -208,7 +210,9 @@ class ClusterInfo:
 
         ingress = client.V1Ingress(
             metadata=client.V1ObjectMeta(
-                name=f"{self.app_name}-ingress", annotations=ingress_annotations
+                name=f"{self.app_name}-ingress",
+                labels={"app": self.app_name},
+                annotations=ingress_annotations,
             ),
             spec=spec,
         )
```

```diff
@@ -238,7 +242,10 @@ class ClusterInfo:
         ]
 
         service = client.V1Service(
-            metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
+            metadata=client.V1ObjectMeta(
+                name=f"{self.app_name}-service",
+                labels={"app": self.app_name},
+            ),
             spec=client.V1ServiceSpec(
                 type="ClusterIP",
                 ports=service_ports,
```

```diff
@@ -320,7 +327,7 @@ class ClusterInfo:
         spec = client.V1ConfigMap(
             metadata=client.V1ObjectMeta(
                 name=f"{self.app_name}-{cfg_map_name}",
-                labels={"configmap-label": cfg_map_name},
+                labels={"app": self.app_name, "configmap-label": cfg_map_name},
             ),
             binary_data=data,
         )
```

```diff
@@ -377,7 +384,10 @@ class ClusterInfo:
         pv = client.V1PersistentVolume(
             metadata=client.V1ObjectMeta(
                 name=f"{self.app_name}-{volume_name}",
-                labels={"volume-label": f"{self.app_name}-{volume_name}"},
+                labels={
+                    "app": self.app_name,
+                    "volume-label": f"{self.app_name}-{volume_name}",
+                },
             ),
             spec=spec,
         )
```

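The `app` label added across these objects (nodeport Service, Ingress, ClusterIP Service, ConfigMap, PersistentVolume) is what lets the deployer find everything belonging to one deployment with a single selector; the reworked `down()` below relies on it for cluster-scoped PVs. A minimal sketch of such a query, assuming a kubeconfig is available and using a hypothetical app name `my-app`:

```python
from kubernetes import client, config

# Assumes a reachable cluster via kubeconfig; "my-app" is a hypothetical name.
config.load_kube_config()
core_api = client.CoreV1Api()

# PersistentVolumes are cluster-scoped, so a label selector is the practical
# way to find the ones belonging to a single deployment.
pvs = core_api.list_persistent_volume(label_selector="app=my-app")
for pv in pvs.items:
    print(pv.metadata.name)
```
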
```diff
@@ -430,6 +440,12 @@ class ClusterInfo:
                 if "environment" in service_info
                 else self.environment_variables.map
             )
+            # Translate docker-compose service names to localhost for sidecars
+            # All services in the same pod share the network namespace
+            sibling_services = [s for s in services.keys() if s != service_name]
+            merged_envs = translate_sidecar_service_names(
+                merged_envs, sibling_services
+            )
             envs = envs_from_environment_variables_map(merged_envs)
             if opts.o.debug:
                 print(f"Merged envs: {envs}")
```

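As a toy illustration of the sibling computation (hypothetical service names, not taken from this change): for a pod built from three compose services, each container's env values are rewritten against the other two.

```python
# Hypothetical compose services that end up in one pod; the container being
# configured is "app", so its siblings are the other services in the pod.
services = {"app": {}, "db": {}, "cache": {}}
service_name = "app"

sibling_services = [s for s in services.keys() if s != service_name]
print(sibling_services)  # ['db', 'cache']
# A value like "db:5432" in app's environment would become "localhost:5432".
```
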
```diff
@@ -96,7 +96,7 @@ class K8sDeployer(Deployer):
     core_api: client.CoreV1Api
     apps_api: client.AppsV1Api
     networking_api: client.NetworkingV1Api
-    k8s_namespace: str = "default"
+    k8s_namespace: str
     kind_cluster_name: str
     skip_cluster_management: bool
     cluster_info: ClusterInfo
```

```diff
@@ -113,6 +113,7 @@ class K8sDeployer(Deployer):
     ) -> None:
         self.type = type
         self.skip_cluster_management = False
+        self.k8s_namespace = "default"  # Will be overridden below if context exists
         # TODO: workaround pending refactoring above to cope with being
         # created with a null deployment_context
         if deployment_context is None:
```

```diff
@@ -120,6 +121,8 @@ class K8sDeployer(Deployer):
             self.deployment_dir = deployment_context.deployment_dir
             self.deployment_context = deployment_context
             self.kind_cluster_name = compose_project_name
+            # Use deployment-specific namespace for resource isolation and easy cleanup
+            self.k8s_namespace = f"laconic-{compose_project_name}"
             self.cluster_info = ClusterInfo()
             self.cluster_info.int(
                 compose_files,
```

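For a hypothetical compose project name, the derived namespace looks like:

```python
compose_project_name = "example-project"  # hypothetical project name
k8s_namespace = f"laconic-{compose_project_name}"
print(k8s_namespace)  # laconic-example-project
```
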
```diff
@@ -149,6 +152,46 @@ class K8sDeployer(Deployer):
         self.apps_api = client.AppsV1Api()
         self.custom_obj_api = client.CustomObjectsApi()
 
+    def _ensure_namespace(self):
+        """Create the deployment namespace if it doesn't exist."""
+        if opts.o.dry_run:
+            print(f"Dry run: would create namespace {self.k8s_namespace}")
+            return
+        try:
+            self.core_api.read_namespace(name=self.k8s_namespace)
+            if opts.o.debug:
+                print(f"Namespace {self.k8s_namespace} already exists")
+        except ApiException as e:
+            if e.status == 404:
+                # Create the namespace
+                ns = client.V1Namespace(
+                    metadata=client.V1ObjectMeta(
+                        name=self.k8s_namespace,
+                        labels={"app": self.cluster_info.app_name},
+                    )
+                )
+                self.core_api.create_namespace(body=ns)
+                if opts.o.debug:
+                    print(f"Created namespace {self.k8s_namespace}")
+            else:
+                raise
+
+    def _delete_namespace(self):
+        """Delete the deployment namespace and all resources within it."""
+        if opts.o.dry_run:
+            print(f"Dry run: would delete namespace {self.k8s_namespace}")
+            return
+        try:
+            self.core_api.delete_namespace(name=self.k8s_namespace)
+            if opts.o.debug:
+                print(f"Deleted namespace {self.k8s_namespace}")
+        except ApiException as e:
+            if e.status == 404:
+                if opts.o.debug:
+                    print(f"Namespace {self.k8s_namespace} not found")
+            else:
+                raise
+
     def _create_volume_data(self):
         # Create the host-path-mounted PVs for this deployment
         pvs = self.cluster_info.get_pvs()
```

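The create path keys off the API's 404 to stay idempotent. A standalone sketch of the same pattern, assuming a kubeconfig and a hypothetical namespace name:

```python
from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()
core_api = client.CoreV1Api()
namespace = "laconic-example"  # hypothetical namespace name

try:
    core_api.read_namespace(name=namespace)
    print(f"Namespace {namespace} already exists")
except ApiException as e:
    if e.status != 404:
        raise
    core_api.create_namespace(
        body=client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace))
    )
    print(f"Created namespace {namespace}")
```
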
```diff
@@ -314,6 +357,8 @@ class K8sDeployer(Deployer):
             load_images_into_kind(self.kind_cluster_name, local_images)
         # Note: if no local containers defined, all images come from registries
         self.connect_api()
+        # Create deployment-specific namespace for resource isolation
+        self._ensure_namespace()
         if self.is_kind() and not self.skip_cluster_management:
             # Configure ingress controller (not installed by default in kind)
             # Skip if already running (idempotent for shared cluster)
```

```diff
@@ -381,107 +426,30 @@ class K8sDeployer(Deployer):
                 print("NodePort created:")
                 print(f"{nodeport_resp}")
 
-    def down(self, timeout, volumes, skip_cluster_management):  # noqa: C901
+    def down(self, timeout, volumes, skip_cluster_management):
         self.skip_cluster_management = skip_cluster_management
         self.connect_api()
-        # Delete the k8s objects
+
+        # PersistentVolumes are cluster-scoped (not namespaced), so delete by label
         if volumes:
-            # Create the host-path-mounted PVs for this deployment
-            pvs = self.cluster_info.get_pvs()
-            for pv in pvs:
-                if opts.o.debug:
-                    print(f"Deleting this pv: {pv}")
-                try:
-                    pv_resp = self.core_api.delete_persistent_volume(
-                        name=pv.metadata.name
-                    )
-                    if opts.o.debug:
-                        print("PV deleted:")
-                        print(f"{pv_resp}")
-                except ApiException as e:
-                    _check_delete_exception(e)
-
-        # Figure out the PVCs for this deployment
-        pvcs = self.cluster_info.get_pvcs()
-        for pvc in pvcs:
-            if opts.o.debug:
-                print(f"Deleting this pvc: {pvc}")
-            try:
-                pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(
-                    name=pvc.metadata.name, namespace=self.k8s_namespace
-                )
-                if opts.o.debug:
-                    print("PVCs deleted:")
-                    print(f"{pvc_resp}")
-            except ApiException as e:
-                _check_delete_exception(e)
-
-        # Figure out the ConfigMaps for this deployment
-        cfg_maps = self.cluster_info.get_configmaps()
-        for cfg_map in cfg_maps:
-            if opts.o.debug:
-                print(f"Deleting this ConfigMap: {cfg_map}")
-            try:
-                cfg_map_resp = self.core_api.delete_namespaced_config_map(
-                    name=cfg_map.metadata.name, namespace=self.k8s_namespace
-                )
-                if opts.o.debug:
-                    print("ConfigMap deleted:")
-                    print(f"{cfg_map_resp}")
-            except ApiException as e:
-                _check_delete_exception(e)
-
-        deployment = self.cluster_info.get_deployment()
-        if opts.o.debug:
-            print(f"Deleting this deployment: {deployment}")
-        if deployment and deployment.metadata and deployment.metadata.name:
-            try:
-                self.apps_api.delete_namespaced_deployment(
-                    name=deployment.metadata.name, namespace=self.k8s_namespace
-                )
-            except ApiException as e:
-                _check_delete_exception(e)
-
-        service = self.cluster_info.get_service()
-        if opts.o.debug:
-            print(f"Deleting service: {service}")
-        if service and service.metadata and service.metadata.name:
-            try:
-                self.core_api.delete_namespaced_service(
-                    namespace=self.k8s_namespace, name=service.metadata.name
-                )
-            except ApiException as e:
-                _check_delete_exception(e)
-
-        ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
-        if ingress and ingress.metadata and ingress.metadata.name:
-            if opts.o.debug:
-                print(f"Deleting this ingress: {ingress}")
-            try:
-                self.networking_api.delete_namespaced_ingress(
-                    name=ingress.metadata.name, namespace=self.k8s_namespace
-                )
-            except ApiException as e:
-                _check_delete_exception(e)
-        else:
-            if opts.o.debug:
-                print("No ingress to delete")
-
-        nodeports: List[client.V1Service] = self.cluster_info.get_nodeports()
-        for nodeport in nodeports:
-            if opts.o.debug:
-                print(f"Deleting this nodeport: {nodeport}")
-            if nodeport.metadata and nodeport.metadata.name:
-                try:
-                    self.core_api.delete_namespaced_service(
-                        namespace=self.k8s_namespace, name=nodeport.metadata.name
-                    )
-                except ApiException as e:
-                    _check_delete_exception(e)
-            else:
-                if opts.o.debug:
-                    print("No nodeport to delete")
+            try:
+                pvs = self.core_api.list_persistent_volume(
+                    label_selector=f"app={self.cluster_info.app_name}"
+                )
+                for pv in pvs.items:
+                    if opts.o.debug:
+                        print(f"Deleting PV: {pv.metadata.name}")
+                    try:
+                        self.core_api.delete_persistent_volume(name=pv.metadata.name)
+                    except ApiException as e:
+                        _check_delete_exception(e)
+            except ApiException as e:
+                print(f"Error listing PVs: {e}")
+
+        # Delete the deployment namespace - this cascades to all namespaced resources
+        # (PVCs, ConfigMaps, Deployments, Services, Ingresses, etc.)
+        self._delete_namespace()
 
         if self.is_kind() and not self.skip_cluster_management:
             # Destroy the kind cluster
```

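One caveat worth noting: namespace deletion is asynchronous, so the namespace sits in a Terminating phase while the cascade removes the resources inside it. A sketch of polling until it is gone, assuming a kubeconfig and a hypothetical namespace name:

```python
import time

from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()
core_api = client.CoreV1Api()
namespace = "laconic-example"  # hypothetical namespace name

core_api.delete_namespace(name=namespace)
while True:
    try:
        ns = core_api.read_namespace(name=namespace)
        print(f"Waiting, namespace phase: {ns.status.phase}")  # "Terminating"
        time.sleep(2)
    except ApiException as e:
        if e.status == 404:
            print("Namespace fully deleted")
            break
        raise
```
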
```diff
@@ -619,7 +587,7 @@ class K8sDeployer(Deployer):
             log_data = ""
             for container in containers:
                 container_log = self.core_api.read_namespaced_pod_log(
-                    k8s_pod_name, namespace="default", container=container
+                    k8s_pod_name, namespace=self.k8s_namespace, container=container
                 )
                 container_log_lines = container_log.splitlines()
                 for line in container_log_lines:
```

```diff
@@ -942,6 +942,41 @@ def envs_from_compose_file(
     return result
 
 
+def translate_sidecar_service_names(
+    envs: Mapping[str, str], sibling_service_names: List[str]
+) -> Mapping[str, str]:
+    """Translate docker-compose service names to localhost for sidecar containers.
+
+    In docker-compose, services can reference each other by name (e.g., 'db:5432').
+    In Kubernetes, when multiple containers are in the same pod (sidecars), they
+    share the same network namespace and must use 'localhost' instead.
+
+    This function replaces service name references with 'localhost' in env values.
+    """
+    import re
+
+    if not sibling_service_names:
+        return envs
+
+    result = {}
+    for env_var, env_val in envs.items():
+        if env_val is None:
+            result[env_var] = env_val
+            continue
+
+        new_val = str(env_val)
+        for service_name in sibling_service_names:
+            # Match service name followed by optional port (e.g., 'db:5432', 'db')
+            # Handle URLs like: postgres://user:pass@db:5432/dbname
+            # and simple refs like: db:5432 or just db
+            pattern = rf"\b{re.escape(service_name)}(:\d+)?\b"
+            new_val = re.sub(pattern, lambda m: f'localhost{m.group(1) or ""}', new_val)
+
+        result[env_var] = new_val
+
+    return result
+
+
 def envs_from_environment_variables_map(
     map: Mapping[str, str]
 ) -> List[client.V1EnvVar]:
```

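For example, given a hypothetical env map and the function defined above:

```python
envs = {
    "DATABASE_URL": "postgres://user:pass@db:5432/app",
    "CACHE_HOST": "redis",
    "API_URL": "api:8080",
}
translated = translate_sidecar_service_names(envs, ["db", "redis"])
# DATABASE_URL -> postgres://user:pass@localhost:5432/app
# CACHE_HOST   -> localhost
# API_URL      -> api:8080 (unchanged: "api" is not a sibling service)
print(translated)
```
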
```diff
@@ -128,9 +128,6 @@ class Spec:
     def get_http_proxy(self):
         return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])
 
-    def get_acme_email(self):
-        return self.obj.get(constants.network_key, {}).get("acme-email", "")
-
     def get_annotations(self):
         return self.obj.get(constants.annotations_key, {})
 
```
