so-m3m: add credentials-files spec key for on-disk credential injection

_write_config_file() now reads each file listed under the credentials-files
top-level spec key and appends its contents to config.env after config vars.
Paths support ~ expansion. Missing files fail hard with sys.exit(1).

Also adds get_credentials_files() to Spec class following the same pattern
as get_image_registry_config().

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
A. F. Dudley 2026-03-18 21:55:28 +00:00
parent 0e4ecc3602
commit 25e5ff09d9
6 changed files with 97 additions and 59 deletions

View File

@ -1 +1 @@
{"project": "stack-orchestrator", "prefix": "so"}
{"project": "stack-orchestrator", "prefix": "so"}

View File

@ -695,6 +695,19 @@ def _write_config_file(
continue
output_file.write(f"{variable_name}={variable_value}\n")
# Append contents of credentials files listed in spec
credentials_files = spec_content.get("credentials-files", []) or []
for cred_path_str in credentials_files:
cred_path = Path(cred_path_str).expanduser()
if not cred_path.exists():
print(f"Error: credentials file does not exist: {cred_path}")
sys.exit(1)
output_file.write(f"# From credentials file: {cred_path_str}\n")
contents = cred_path.read_text()
output_file.write(contents)
if not contents.endswith("\n"):
output_file.write("\n")
def _write_kube_config_file(external_path: Path, internal_path: Path):
if not external_path.exists():
@ -1041,12 +1054,8 @@ def _write_deployment_files(
for configmap in parsed_spec.get_configmaps():
source_config_dir = resolve_config_dir(stack_name, configmap)
if os.path.exists(source_config_dir):
destination_config_dir = target_dir.joinpath(
"configmaps", configmap
)
copytree(
source_config_dir, destination_config_dir, dirs_exist_ok=True
)
destination_config_dir = target_dir.joinpath("configmaps", configmap)
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
# Copy the job files into the target dir
jobs = get_job_list(parsed_stack)

View File

@ -82,7 +82,14 @@ class ClusterInfo:
def __init__(self) -> None:
self.parsed_job_yaml_map = {}
def int(self, pod_files: List[str], compose_env_file, deployment_name, spec: Spec, stack_name=""):
def int(
self,
pod_files: List[str],
compose_env_file,
deployment_name,
spec: Spec,
stack_name="",
):
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
# Find the set of images in the pods
self.image_set = images_for_deployment(pod_files)
@ -314,8 +321,7 @@ class ClusterInfo:
# Per-volume resources override global, which overrides default.
vol_resources = (
self.spec.get_volume_resources_for(volume_name)
or global_resources
self.spec.get_volume_resources_for(volume_name) or global_resources
)
labels = {
@ -417,8 +423,7 @@ class ClusterInfo:
continue
vol_resources = (
self.spec.get_volume_resources_for(volume_name)
or global_resources
self.spec.get_volume_resources_for(volume_name) or global_resources
)
if self.spec.is_kind_deployment():
host_path = client.V1HostPathVolumeSource(
@ -554,9 +559,7 @@ class ClusterInfo:
if self.spec.get_image_registry() is not None
else image
)
volume_mounts = volume_mounts_for_service(
parsed_yaml_map, service_name
)
volume_mounts = volume_mounts_for_service(parsed_yaml_map, service_name)
# Handle command/entrypoint from compose file
# In docker-compose: entrypoint -> k8s command, command -> k8s args
container_command = None
@ -615,7 +618,9 @@ class ClusterInfo:
readiness_probe=readiness_probe,
security_context=client.V1SecurityContext(
privileged=self.spec.get_privileged(),
run_as_user=int(service_info["user"]) if "user" in service_info else None,
run_as_user=int(service_info["user"])
if "user" in service_info
else None,
capabilities=client.V1Capabilities(
add=self.spec.get_capabilities()
)
@ -629,19 +634,17 @@ class ClusterInfo:
svc_labels = service_info.get("labels", {})
if isinstance(svc_labels, list):
# docker-compose labels can be a list of "key=value"
svc_labels = dict(
item.split("=", 1) for item in svc_labels
)
is_init = str(
svc_labels.get("laconic.init-container", "")
).lower() in ("true", "1", "yes")
svc_labels = dict(item.split("=", 1) for item in svc_labels)
is_init = str(svc_labels.get("laconic.init-container", "")).lower() in (
"true",
"1",
"yes",
)
if is_init:
init_containers.append(container)
else:
containers.append(container)
volumes = volumes_for_pod_files(
parsed_yaml_map, self.spec, self.app_name
)
volumes = volumes_for_pod_files(parsed_yaml_map, self.spec, self.app_name)
return containers, init_containers, services, volumes
# TODO: put things like image pull policy into an object-scope struct
@ -738,7 +741,14 @@ class ClusterInfo:
kind="Deployment",
metadata=client.V1ObjectMeta(
name=f"{self.app_name}-deployment",
labels={"app": self.app_name, **({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {})},
labels={
"app": self.app_name,
**(
{"app.kubernetes.io/stack": self.stack_name}
if self.stack_name
else {}
),
},
),
spec=spec,
)
@ -766,8 +776,8 @@ class ClusterInfo:
for job_file in self.parsed_job_yaml_map:
# Build containers for this single job file
single_job_map = {job_file: self.parsed_job_yaml_map[job_file]}
containers, init_containers, _services, volumes = (
self._build_containers(single_job_map, image_pull_policy)
containers, init_containers, _services, volumes = self._build_containers(
single_job_map, image_pull_policy
)
# Derive job name from file path: docker-compose-<name>.yml -> <name>
@ -775,7 +785,7 @@ class ClusterInfo:
# Strip docker-compose- prefix and .yml suffix
job_name = base
if job_name.startswith("docker-compose-"):
job_name = job_name[len("docker-compose-"):]
job_name = job_name[len("docker-compose-") :]
if job_name.endswith(".yml"):
job_name = job_name[: -len(".yml")]
elif job_name.endswith(".yaml"):
@ -785,12 +795,14 @@ class ClusterInfo:
# picked up by pods_in_deployment() which queries app={app_name}.
pod_labels = {
"app": f"{self.app_name}-job",
**({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {}),
**(
{"app.kubernetes.io/stack": self.stack_name}
if self.stack_name
else {}
),
}
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels=pod_labels
),
metadata=client.V1ObjectMeta(labels=pod_labels),
spec=client.V1PodSpec(
containers=containers,
init_containers=init_containers or None,
@ -803,7 +815,14 @@ class ClusterInfo:
template=template,
backoff_limit=0,
)
job_labels = {"app": self.app_name, **({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {})}
job_labels = {
"app": self.app_name,
**(
{"app.kubernetes.io/stack": self.stack_name}
if self.stack_name
else {}
),
}
job = client.V1Job(
api_version="batch/v1",
kind="Job",

View File

@ -122,9 +122,13 @@ class K8sDeployer(Deployer):
return
self.deployment_dir = deployment_context.deployment_dir
self.deployment_context = deployment_context
self.kind_cluster_name = deployment_context.spec.get_kind_cluster_name() or compose_project_name
self.kind_cluster_name = (
deployment_context.spec.get_kind_cluster_name() or compose_project_name
)
# Use spec namespace if provided, otherwise derive from cluster-id
self.k8s_namespace = deployment_context.spec.get_namespace() or f"laconic-{compose_project_name}"
self.k8s_namespace = (
deployment_context.spec.get_namespace() or f"laconic-{compose_project_name}"
)
self.cluster_info = ClusterInfo()
# stack.name may be an absolute path (from spec "stack:" key after
# path resolution). Extract just the directory basename for labels.
@ -269,7 +273,8 @@ class K8sDeployer(Deployer):
for job in jobs.items:
print(f"Deleting Job {job.metadata.name}")
self.batch_api.delete_namespaced_job(
name=job.metadata.name, namespace=ns,
name=job.metadata.name,
namespace=ns,
body=client.V1DeleteOptions(propagation_policy="Background"),
)
except ApiException as e:
@ -406,9 +411,7 @@ class K8sDeployer(Deployer):
print("No pods defined, skipping Deployment creation")
return
# Process compose files into a Deployment
deployment = self.cluster_info.get_deployment(
image_pull_policy="Always"
)
deployment = self.cluster_info.get_deployment(image_pull_policy="Always")
# Create or update the k8s Deployment
if opts.o.debug:
print(f"Sending this deployment: {deployment}")
@ -470,9 +473,7 @@ class K8sDeployer(Deployer):
def _create_jobs(self):
# Process job compose files into k8s Jobs
jobs = self.cluster_info.get_jobs(
image_pull_policy="Always"
)
jobs = self.cluster_info.get_jobs(image_pull_policy="Always")
for job in jobs:
if opts.o.debug:
print(f"Sending this job: {job}")
@ -646,7 +647,10 @@ class K8sDeployer(Deployer):
# Call start() hooks — stacks can create additional k8s resources
if self.deployment_context:
from stack_orchestrator.deploy.deployment_create import call_stack_deploy_start
from stack_orchestrator.deploy.deployment_create import (
call_stack_deploy_start,
)
call_stack_deploy_start(self.deployment_context)
def down(self, timeout, volumes, skip_cluster_management):
@ -658,9 +662,7 @@ class K8sDeployer(Deployer):
# PersistentVolumes are cluster-scoped (not namespaced), so delete by label
if volumes:
try:
pvs = self.core_api.list_persistent_volume(
label_selector=app_label
)
pvs = self.core_api.list_persistent_volume(label_selector=app_label)
for pv in pvs.items:
if opts.o.debug:
print(f"Deleting PV: {pv.metadata.name}")
@ -804,14 +806,18 @@ class K8sDeployer(Deployer):
def logs(self, services, tail, follow, stream):
self.connect_api()
pods = pods_in_deployment(self.core_api, self.cluster_info.app_name, namespace=self.k8s_namespace)
pods = pods_in_deployment(
self.core_api, self.cluster_info.app_name, namespace=self.k8s_namespace
)
if len(pods) > 1:
print("Warning: more than one pod in the deployment")
if len(pods) == 0:
log_data = "******* Pods not running ********\n"
else:
k8s_pod_name = pods[0]
containers = containers_in_pod(self.core_api, k8s_pod_name, namespace=self.k8s_namespace)
containers = containers_in_pod(
self.core_api, k8s_pod_name, namespace=self.k8s_namespace
)
# If pod not started, logs request below will throw an exception
try:
log_data = ""
@ -910,9 +916,7 @@ class K8sDeployer(Deployer):
else:
# Non-Helm path: create job from ClusterInfo
self.connect_api()
jobs = self.cluster_info.get_jobs(
image_pull_policy="Always"
)
jobs = self.cluster_info.get_jobs(image_pull_policy="Always")
# Find the matching job by name
target_name = f"{self.cluster_info.app_name}-job-{job_name}"
matched_job = None

View File

@ -393,7 +393,9 @@ def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
raise DeployerException(f"kind load docker-image failed: {result}")
def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str, namespace: str = "default"):
def pods_in_deployment(
core_api: client.CoreV1Api, deployment_name: str, namespace: str = "default"
):
pods = []
pod_response = core_api.list_namespaced_pod(
namespace=namespace, label_selector=f"app={deployment_name}"
@ -406,7 +408,9 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str, namespa
return pods
def containers_in_pod(core_api: client.CoreV1Api, pod_name: str, namespace: str = "default") -> List[str]:
def containers_in_pod(
core_api: client.CoreV1Api, pod_name: str, namespace: str = "default"
) -> List[str]:
containers: List[str] = []
pod_response = cast(
client.V1Pod, core_api.read_namespaced_pod(pod_name, namespace=namespace)

View File

@ -98,6 +98,10 @@ class Spec:
def get_image_registry(self):
    """Return the image registry configured in the spec, or None when absent."""
    try:
        return self.obj[constants.image_registry_key]
    except KeyError:
        return None
def get_credentials_files(self) -> typing.List[str]:
"""Returns list of credential file paths to append to config.env."""
return self.obj.get("credentials-files", [])
def get_image_registry_config(self) -> typing.Optional[typing.Dict]:
"""Returns registry auth config: {server, username, token-env}.
@ -167,15 +171,13 @@ class Spec:
Returns the per-volume Resources if found, otherwise None.
The caller should fall back to get_volume_resources() then the default.
"""
vol_section = (
self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {})
vol_section = self.obj.get(constants.resources_key, {}).get(
constants.volumes_key, {}
)
if volume_name not in vol_section:
return None
entry = vol_section[volume_name]
if isinstance(entry, dict) and (
"reservations" in entry or "limits" in entry
):
if isinstance(entry, dict) and ("reservations" in entry or "limits" in entry):
return Resources(entry)
return None