Compare commits


5 Commits

Author SHA1 Message Date
4a1b5d86fd Merge pull request 'fix(k8s): translate service names to localhost for sidecar containers' (#989) from fix-sidecar-localhost into main
Some checks failed
Lint Checks / Run linter (push) Successful in 16s
Publish / Build and publish (push) Successful in 29s
Deploy Test / Run deploy test suite (push) Successful in 2m10s
Webapp Test / Run webapp test suite (push) Successful in 3m51s
Smoke Test / Run basic test suite (push) Successful in 3m51s
Database Test / Run database hosting test on kind/k8s (push) Failing after 2m6s
Container Registry Test / Run container registry hosting test on kind/k8s (push) Failing after 2m36s
External Stack Test / Run external stack test suite (push) Failing after 1m59s
Fixturenet-Laconicd-Test / Run Laconicd fixturenet and Laconic CLI tests (push) Successful in 20m41s
K8s Deploy Test / Run deploy test suite on kind/k8s (push) Failing after 5m57s
Reviewed-on: #989
2026-02-03 23:13:27 +00:00
A. F. Dudley
019225ca18 fix(k8s): translate service names to localhost for sidecar containers
Some checks failed
Lint Checks / Run linter (push) Failing after 3s
Lint Checks / Run linter (pull_request) Failing after 4s
Deploy Test / Run deploy test suite (pull_request) Failing after 4s
K8s Deploy Test / Run deploy test suite on kind/k8s (pull_request) Failing after 5s
K8s Deployment Control Test / Run deployment control suite on kind/k8s (pull_request) Failing after 4s
Webapp Test / Run webapp test suite (pull_request) Failing after 5s
Smoke Test / Run basic test suite (pull_request) Failing after 5s
In docker-compose, services can reference each other by name (e.g., 'db:5432').
In Kubernetes, when multiple containers are in the same pod (sidecars), they
share the same network namespace and must use 'localhost' instead.

This fix adds translate_sidecar_service_names() which replaces docker-compose
service name references with 'localhost' in environment variable values for
containers that share the same pod.

Fixes an issue where multi-container pods fail because one container tries to
connect to a sibling using the compose service name instead of localhost.
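For illustration, a minimal sketch of the translation (the import path is taken from the diff below; the env var names and values here are hypothetical):

from stack_orchestrator.deploy.k8s.helpers import translate_sidecar_service_names

# Hypothetical env map for a container whose sibling service in the same pod is 'db'.
envs = {
    "DATABASE_URL": "postgres://user:pass@db:5432/app",
    "CACHE_HOST": "db",
}
# Sibling service references are rewritten to localhost, keeping any port suffix:
#   DATABASE_URL -> postgres://user:pass@localhost:5432/app
#   CACHE_HOST   -> localhost
translated = translate_sidecar_service_names(envs, ["db"])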

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-03 18:10:32 -05:00
0296da6f64 Merge pull request 'feat(k8s): namespace-per-deployment for resource isolation and cleanup' (#988) from feat-namespace-per-deployment into main
Some checks failed
Lint Checks / Run linter (push) Failing after 5s
Deploy Test / Run deploy test suite (push) Failing after 5s
Publish / Build and publish (push) Failing after 6s
Webapp Test / Run webapp test suite (push) Failing after 5s
Smoke Test / Run basic test suite (push) Failing after 5s
Reviewed-on: #988
2026-02-03 23:09:16 +00:00
A. F. Dudley
d913926144 feat(k8s): namespace-per-deployment for resource isolation and cleanup
Some checks failed
Lint Checks / Run linter (push) Failing after 4s
Deploy Test / Run deploy test suite (pull_request) Failing after 5s
K8s Deploy Test / Run deploy test suite on kind/k8s (pull_request) Failing after 5s
K8s Deployment Control Test / Run deployment control suite on kind/k8s (pull_request) Failing after 5s
Webapp Test / Run webapp test suite (pull_request) Failing after 5s
Smoke Test / Run basic test suite (pull_request) Failing after 4s
Lint Checks / Run linter (pull_request) Failing after 3s
Each deployment now gets its own Kubernetes namespace (laconic-{deployment_id}).
This provides:
- Resource isolation between deployments on the same cluster
- Simplified cleanup: deleting the namespace cascades to all namespaced resources (see the sketch below)
- No orphaned resources possible when deployment IDs change

Changes:
- Set k8s_namespace based on deployment name in __init__
- Add _ensure_namespace() to create namespace before deploying resources
- Add _delete_namespace() for cleanup
- Simplify down() to just delete PVs (cluster-scoped) and the namespace
- Fix hardcoded "default" namespace in logs function
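As a minimal sketch of the cleanup path this enables, using the kubernetes Python client directly (the deployment id, label value, and namespace name below are hypothetical; in the deployer they derive from the deployment/project name):

from kubernetes import client, config

config.load_kube_config()
core_api = client.CoreV1Api()

label_selector = "app=1234abcd"   # hypothetical; the deployer labels resources app=<app_name>
namespace = "laconic-1234abcd"    # hypothetical; namespaces are named laconic-<deployment id>

# PersistentVolumes are cluster-scoped, so they are deleted individually by label...
for pv in core_api.list_persistent_volume(label_selector=label_selector).items:
    core_api.delete_persistent_volume(name=pv.metadata.name)

# ...then deleting the namespace cascades to every namespaced resource
# (Deployments, Services, PVCs, ConfigMaps, Ingresses, and so on).
core_api.delete_namespace(name=namespace)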

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-03 18:04:52 -05:00
b41e0cb2f5 Merge pull request 'fix(k8s): query resources by label in down() for proper cleanup' (#987) from fix-down-cleanup-by-label into main
Some checks failed
Lint Checks / Run linter (push) Failing after 17s
Publish / Build and publish (push) Successful in 27s
Deploy Test / Run deploy test suite (push) Successful in 2m13s
Smoke Test / Run basic test suite (push) Successful in 3m54s
Webapp Test / Run webapp test suite (push) Successful in 4m13s
Reviewed-on: #987
2026-02-03 22:57:52 +00:00
4 changed files with 94 additions and 103 deletions

View File

@@ -31,6 +31,7 @@ from stack_orchestrator.deploy.k8s.helpers import (
    envs_from_environment_variables_map,
    envs_from_compose_file,
    merge_envs,
    translate_sidecar_service_names,
)
from stack_orchestrator.deploy.deploy_util import (
    parsed_pod_files_map_from_file_names,
@@ -439,6 +440,12 @@ class ClusterInfo:
                    if "environment" in service_info
                    else self.environment_variables.map
                )
                # Translate docker-compose service names to localhost for sidecars
                # All services in the same pod share the network namespace
                sibling_services = [s for s in services.keys() if s != service_name]
                merged_envs = translate_sidecar_service_names(
                    merged_envs, sibling_services
                )
                envs = envs_from_environment_variables_map(merged_envs)
                if opts.o.debug:
                    print(f"Merged envs: {envs}")

View File

@@ -96,7 +96,7 @@ class K8sDeployer(Deployer):
    core_api: client.CoreV1Api
    apps_api: client.AppsV1Api
    networking_api: client.NetworkingV1Api
    k8s_namespace: str = "default"
    k8s_namespace: str
    kind_cluster_name: str
    skip_cluster_management: bool
    cluster_info: ClusterInfo
@@ -113,6 +113,7 @@
    ) -> None:
        self.type = type
        self.skip_cluster_management = False
        self.k8s_namespace = "default"  # Will be overridden below if context exists
        # TODO: workaround pending refactoring above to cope with being
        # created with a null deployment_context
        if deployment_context is None:
@@ -120,6 +121,8 @@
        self.deployment_dir = deployment_context.deployment_dir
        self.deployment_context = deployment_context
        self.kind_cluster_name = compose_project_name
        # Use deployment-specific namespace for resource isolation and easy cleanup
        self.k8s_namespace = f"laconic-{compose_project_name}"
        self.cluster_info = ClusterInfo()
        self.cluster_info.int(
            compose_files,
@@ -149,6 +152,46 @@
        self.apps_api = client.AppsV1Api()
        self.custom_obj_api = client.CustomObjectsApi()

    def _ensure_namespace(self):
        """Create the deployment namespace if it doesn't exist."""
        if opts.o.dry_run:
            print(f"Dry run: would create namespace {self.k8s_namespace}")
            return
        try:
            self.core_api.read_namespace(name=self.k8s_namespace)
            if opts.o.debug:
                print(f"Namespace {self.k8s_namespace} already exists")
        except ApiException as e:
            if e.status == 404:
                # Create the namespace
                ns = client.V1Namespace(
                    metadata=client.V1ObjectMeta(
                        name=self.k8s_namespace,
                        labels={"app": self.cluster_info.app_name},
                    )
                )
                self.core_api.create_namespace(body=ns)
                if opts.o.debug:
                    print(f"Created namespace {self.k8s_namespace}")
            else:
                raise

    def _delete_namespace(self):
        """Delete the deployment namespace and all resources within it."""
        if opts.o.dry_run:
            print(f"Dry run: would delete namespace {self.k8s_namespace}")
            return
        try:
            self.core_api.delete_namespace(name=self.k8s_namespace)
            if opts.o.debug:
                print(f"Deleted namespace {self.k8s_namespace}")
        except ApiException as e:
            if e.status == 404:
                if opts.o.debug:
                    print(f"Namespace {self.k8s_namespace} not found")
            else:
                raise

    def _create_volume_data(self):
        # Create the host-path-mounted PVs for this deployment
        pvs = self.cluster_info.get_pvs()
@@ -314,6 +357,8 @@
load_images_into_kind(self.kind_cluster_name, local_images)
# Note: if no local containers defined, all images come from registries
self.connect_api()
# Create deployment-specific namespace for resource isolation
self._ensure_namespace()
if self.is_kind() and not self.skip_cluster_management:
# Configure ingress controller (not installed by default in kind)
# Skip if already running (idempotent for shared cluster)
@@ -381,17 +426,12 @@
print("NodePort created:")
print(f"{nodeport_resp}")
def down(self, timeout, volumes, skip_cluster_management): # noqa: C901
def down(self, timeout, volumes, skip_cluster_management):
self.skip_cluster_management = skip_cluster_management
self.connect_api()
# Query K8s for resources by label selector instead of generating names
# from config. This ensures we clean up orphaned resources when deployment
# IDs change (e.g., after force_redeploy).
label_selector = f"app={self.cluster_info.app_name}"
# PersistentVolumes are cluster-scoped (not namespaced), so delete by label
if volumes:
# Delete PVs for this deployment (PVs use volume-label pattern)
try:
pvs = self.core_api.list_persistent_volume(
label_selector=f"app={self.cluster_info.app_name}"
@@ -407,97 +447,9 @@
if opts.o.debug:
print(f"Error listing PVs: {e}")
# Delete PVCs for this deployment
try:
pvcs = self.core_api.list_namespaced_persistent_volume_claim(
namespace=self.k8s_namespace, label_selector=label_selector
)
for pvc in pvcs.items:
if opts.o.debug:
print(f"Deleting PVC: {pvc.metadata.name}")
try:
self.core_api.delete_namespaced_persistent_volume_claim(
name=pvc.metadata.name, namespace=self.k8s_namespace
)
except ApiException as e:
_check_delete_exception(e)
except ApiException as e:
if opts.o.debug:
print(f"Error listing PVCs: {e}")
# Delete ConfigMaps for this deployment
try:
cfg_maps = self.core_api.list_namespaced_config_map(
namespace=self.k8s_namespace, label_selector=label_selector
)
for cfg_map in cfg_maps.items:
if opts.o.debug:
print(f"Deleting ConfigMap: {cfg_map.metadata.name}")
try:
self.core_api.delete_namespaced_config_map(
name=cfg_map.metadata.name, namespace=self.k8s_namespace
)
except ApiException as e:
_check_delete_exception(e)
except ApiException as e:
if opts.o.debug:
print(f"Error listing ConfigMaps: {e}")
# Delete Deployments for this deployment
try:
deployments = self.apps_api.list_namespaced_deployment(
namespace=self.k8s_namespace, label_selector=label_selector
)
for deployment in deployments.items:
if opts.o.debug:
print(f"Deleting Deployment: {deployment.metadata.name}")
try:
self.apps_api.delete_namespaced_deployment(
name=deployment.metadata.name, namespace=self.k8s_namespace
)
except ApiException as e:
_check_delete_exception(e)
except ApiException as e:
if opts.o.debug:
print(f"Error listing Deployments: {e}")
# Delete Services for this deployment (includes both ClusterIP and NodePort)
try:
services = self.core_api.list_namespaced_service(
namespace=self.k8s_namespace, label_selector=label_selector
)
for service in services.items:
if opts.o.debug:
print(f"Deleting Service: {service.metadata.name}")
try:
self.core_api.delete_namespaced_service(
namespace=self.k8s_namespace, name=service.metadata.name
)
except ApiException as e:
_check_delete_exception(e)
except ApiException as e:
if opts.o.debug:
print(f"Error listing Services: {e}")
# Delete Ingresses for this deployment
try:
ingresses = self.networking_api.list_namespaced_ingress(
namespace=self.k8s_namespace, label_selector=label_selector
)
for ingress in ingresses.items:
if opts.o.debug:
print(f"Deleting Ingress: {ingress.metadata.name}")
try:
self.networking_api.delete_namespaced_ingress(
name=ingress.metadata.name, namespace=self.k8s_namespace
)
except ApiException as e:
_check_delete_exception(e)
if not ingresses.items and opts.o.debug:
print("No ingress to delete")
except ApiException as e:
if opts.o.debug:
print(f"Error listing Ingresses: {e}")
# Delete the deployment namespace - this cascades to all namespaced resources
# (PVCs, ConfigMaps, Deployments, Services, Ingresses, etc.)
self._delete_namespace()
if self.is_kind() and not self.skip_cluster_management:
# Destroy the kind cluster
@@ -635,7 +587,7 @@ class K8sDeployer(Deployer):
log_data = ""
for container in containers:
container_log = self.core_api.read_namespaced_pod_log(
k8s_pod_name, namespace="default", container=container
k8s_pod_name, namespace=self.k8s_namespace, container=container
)
container_log_lines = container_log.splitlines()
for line in container_log_lines:
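Since logs are now read from the deployment namespace rather than "default" (last hunk above), a hedged sketch of fetching per-container logs for a sidecar pod with the kubernetes client (the namespace name below is hypothetical):

from kubernetes import client, config

config.load_kube_config()
core_api = client.CoreV1Api()

namespace = "laconic-1234abcd"   # hypothetical deployment namespace
for pod in core_api.list_namespaced_pod(namespace=namespace).items:
    for container in pod.spec.containers:
        log = core_api.read_namespaced_pod_log(
            name=pod.metadata.name, namespace=namespace, container=container.name
        )
        print(f"--- {pod.metadata.name}/{container.name} ---")
        print(log)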

View File

@@ -942,6 +942,41 @@ def envs_from_compose_file(
    return result


def translate_sidecar_service_names(
    envs: Mapping[str, str], sibling_service_names: List[str]
) -> Mapping[str, str]:
    """Translate docker-compose service names to localhost for sidecar containers.

    In docker-compose, services can reference each other by name (e.g., 'db:5432').
    In Kubernetes, when multiple containers are in the same pod (sidecars), they
    share the same network namespace and must use 'localhost' instead.

    This function replaces service name references with 'localhost' in env values.
    """
    import re

    if not sibling_service_names:
        return envs
    result = {}
    for env_var, env_val in envs.items():
        if env_val is None:
            result[env_var] = env_val
            continue
        new_val = str(env_val)
        for service_name in sibling_service_names:
            # Match service name followed by optional port (e.g., 'db:5432', 'db')
            # Handle URLs like: postgres://user:pass@db:5432/dbname
            # and simple refs like: db:5432 or just db
            pattern = rf"\b{re.escape(service_name)}(:\d+)?\b"
            new_val = re.sub(pattern, lambda m: f'localhost{m.group(1) or ""}', new_val)
        result[env_var] = new_val
    return result


def envs_from_environment_variables_map(
    map: Mapping[str, str]
) -> List[client.V1EnvVar]:

View File

@@ -128,9 +128,6 @@ class Spec:
    def get_http_proxy(self):
        return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])

    def get_acme_email(self):
        return self.obj.get(constants.network_key, {}).get("acme-email", "")

    def get_annotations(self):
        return self.obj.get(constants.annotations_key, {})