Add ConfigMap support for k8s. (#714)

* Minor fixes for deploying with k8s and podman.

* ConfigMap support
Author: Thomas E Lackey, 2024-01-30 23:09:48 -06:00 (committed by GitHub)
parent 62af03077f
commit 12ec1bec43
7 changed files with 208 additions and 54 deletions
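For orientation before the per-file diffs: this commit teaches the deployment spec to carry a configmaps section alongside the existing volumes section, so read-only config directories can become Kubernetes ConfigMaps instead of PersistentVolumeClaims. A minimal sketch of the resulting spec content is below; the volume names and paths are illustrative, not taken from the commit.

    # Illustrative only: approximate shape of the generated spec content after
    # this change, with hypothetical volume names.
    spec_file_content = {
        "volumes": {
            "db-data": "./data/db-data",        # read-write named volume -> PVC on k8s
        },
        "configmaps": {
            "app-config": "./data/app-config",  # read-only "config" volume -> ConfigMap on k8s
        },
    }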


@@ -33,6 +33,7 @@ from stack_orchestrator.base import get_npm_registry_url
 # TODO: find a place for this
 # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
 def make_container_build_env(dev_root_path: str,
                              container_build_dir: str,
                              debug: bool,
@@ -104,6 +105,9 @@ def process_container(stack: str,
         build_command = os.path.join(container_build_dir,
                                      "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
     if not dry_run:
+        # No PATH at all causes failures with podman.
+        if "PATH" not in container_build_env:
+            container_build_env["PATH"] = os.environ["PATH"]
         if verbose:
             print(f"Executing: {build_command} with environment: {container_build_env}")
         build_result = subprocess.run(build_command, shell=True, env=container_build_env)
@@ -119,6 +123,7 @@ def process_container(stack: str,
     else:
         print("Skipped")
 
 @click.command()
 @click.option('--include', help="only build these containers")
 @click.option('--exclude', help="don\'t build these containers")
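The hunk above copies the caller's PATH into the container build environment when it is missing, because podman fails when invoked with no PATH at all. A minimal, self-contained sketch of that pattern, with a hypothetical helper name:

    # Hypothetical helper illustrating the PATH fallback above.
    import os
    import subprocess

    def run_build(build_command: str, container_build_env: dict):
        # podman fails if the child process has no PATH at all, so inherit the caller's.
        if "PATH" not in container_build_env:
            container_build_env["PATH"] = os.environ["PATH"]
        return subprocess.run(build_command, shell=True, env=container_build_env)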


@@ -54,19 +54,44 @@ def _get_ports(stack):
 def _get_named_volumes(stack):
     # Parse the compose files looking for named volumes
-    named_volumes = []
+    named_volumes = {
+        "rw": [],
+        "ro": []
+    }
     parsed_stack = get_parsed_stack_config(stack)
     pods = get_pod_list(parsed_stack)
     yaml = get_yaml()
+
+    def find_vol_usage(parsed_pod_file, vol):
+        ret = {}
+        if "services" in parsed_pod_file:
+            for svc_name, svc in parsed_pod_file["services"].items():
+                if "volumes" in svc:
+                    for svc_volume in svc["volumes"]:
+                        parts = svc_volume.split(":")
+                        if parts[0] == vol:
+                            ret[svc_name] = {
+                                "volume": parts[0],
+                                "mount": parts[1],
+                                "options": parts[2] if len(parts) == 3 else None
+                            }
+        return ret
+
     for pod in pods:
         pod_file_path = get_pod_file_path(parsed_stack, pod)
         parsed_pod_file = yaml.load(open(pod_file_path, "r"))
         if "volumes" in parsed_pod_file:
             volumes = parsed_pod_file["volumes"]
             for volume in volumes.keys():
-                # Volume definition looks like:
-                # 'laconicd-data': None
-                named_volumes.append(volume)
+                for vu in find_vol_usage(parsed_pod_file, volume).values():
+                    read_only = vu["options"] == "ro"
+                    if read_only:
+                        if vu["volume"] not in named_volumes["rw"] and vu["volume"] not in named_volumes["ro"]:
+                            named_volumes["ro"].append(vu["volume"])
+                    else:
+                        if vu["volume"] not in named_volumes["rw"]:
+                            named_volumes["rw"].append(vu["volume"])
+
     return named_volumes
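_get_named_volumes now records how each named volume is actually mounted by the services, splitting volumes into read-write and read-only buckets based on a trailing ":ro" in the compose mount string. A worked example of that classification with hypothetical compose mounts:

    # Hypothetical mount strings, classified the same way as above.
    mounts = ["app-config:/config:ro", "db-data:/var/lib/db"]
    named_volumes = {"rw": [], "ro": []}
    for mount_string in mounts:
        parts = mount_string.split(":")
        volume = parts[0]
        options = parts[2] if len(parts) == 3 else None
        bucket = "ro" if options == "ro" else "rw"
        if volume not in named_volumes["rw"] and volume not in named_volumes["ro"]:
            named_volumes[bucket].append(volume)
    print(named_volumes)  # {'rw': ['db-data'], 'ro': ['app-config']}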
@@ -98,12 +123,24 @@ def _fixup_pod_file(pod, spec, compose_dir):
                     _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
                     new_volume_spec = {"driver": "local",
                                        "driver_opts": {
                                            "type": "none",
                                            "device": volume_spec_fixedup,
                                            "o": "bind"
                                        }
                                        }
                     pod["volumes"][volume] = new_volume_spec
+    # Fix up configmaps
+    if "configmaps" in spec:
+        spec_cfgmaps = spec["configmaps"]
+        if "volumes" in pod:
+            pod_volumes = pod["volumes"]
+            for volume in pod_volumes.keys():
+                if volume in spec_cfgmaps:
+                    volume_cfg = spec_cfgmaps[volume]
+                    # Just make the dir (if necessary)
+                    _create_bind_dir_if_relative(volume, volume_cfg, compose_dir)
     # Fix up ports
     if "network" in spec and "ports" in spec["network"]:
         spec_ports = spec["network"]["ports"]
@@ -319,9 +356,18 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
     named_volumes = _get_named_volumes(stack)
     if named_volumes:
         volume_descriptors = {}
-        for named_volume in named_volumes:
+        configmap_descriptors = {}
+        for named_volume in named_volumes["rw"]:
             volume_descriptors[named_volume] = f"./data/{named_volume}"
+        for named_volume in named_volumes["ro"]:
+            if "k8s" in deployer_type:
+                if "config" in named_volume:
+                    configmap_descriptors[named_volume] = f"./data/{named_volume}"
+                else:
+                    volume_descriptors[named_volume] = f"./data/{named_volume}"
         spec_file_content["volumes"] = volume_descriptors
+        if configmap_descriptors:
+            spec_file_content["configmaps"] = configmap_descriptors
     if opts.o.debug:
         print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")


@@ -31,7 +31,8 @@ def _image_needs_pushed(image: str):
 def remote_tag_for_image(image: str, remote_repo_url: str):
     # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
-    (org, image_name_with_version) = image.split("/")
+    major_parts = image.split("/", 2)
+    image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
     (image_name, image_version) = image_name_with_version.split(":")
     if image_version == "local":
         return f"{remote_repo_url}/{image_name}:deploy"


@@ -13,6 +13,8 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
+import os
+
 from kubernetes import client
 from typing import Any, List, Set
@@ -112,9 +114,10 @@ class ClusterInfo:
             services = pod["services"]
             for service_name in services:
                 service_info = services[service_name]
-                port = int(service_info["ports"][0])
-                if opts.o.debug:
-                    print(f"service port: {port}")
+                if "ports" in service_info:
+                    port = int(service_info["ports"][0])
+                    if opts.o.debug:
+                        print(f"service port: {port}")
         service = client.V1Service(
             metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
             spec=client.V1ServiceSpec(
@@ -130,30 +133,70 @@ class ClusterInfo:
     def get_pvcs(self):
         result = []
-        volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
+        spec_volumes = self.spec.get_volumes()
+        named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
         if opts.o.debug:
-            print(f"Volumes: {volumes}")
-        for volume_name in volumes:
+            print(f"Spec Volumes: {spec_volumes}")
+            print(f"Named Volumes: {named_volumes}")
+        for volume_name in spec_volumes:
+            if volume_name not in named_volumes:
+                if opts.o.debug:
+                    print(f"{volume_name} not in pod files")
+                continue
             spec = client.V1PersistentVolumeClaimSpec(
                 access_modes=["ReadWriteOnce"],
                 storage_class_name="manual",
                 resources=client.V1ResourceRequirements(
                     requests={"storage": "2Gi"}
                 ),
-                volume_name=volume_name
+                volume_name=f"{self.app_name}-{volume_name}"
             )
             pvc = client.V1PersistentVolumeClaim(
-                metadata=client.V1ObjectMeta(name=volume_name,
-                                             labels={"volume-label": volume_name}),
+                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
+                                             labels={"volume-label": f"{self.app_name}-{volume_name}"}),
                 spec=spec,
             )
             result.append(pvc)
         return result
 
+    def get_configmaps(self):
+        result = []
+        spec_configmaps = self.spec.get_configmaps()
+        named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
+        for cfg_map_name, cfg_map_path in spec_configmaps.items():
+            if cfg_map_name not in named_volumes:
+                if opts.o.debug:
+                    print(f"{cfg_map_name} not in pod files")
+                continue
+            if not cfg_map_path.startswith("/"):
+                cfg_map_path = os.path.join(os.path.dirname(self.spec.file_path), cfg_map_path)
+            # Read in all the files at a single-level of the directory. This mimics the behavior
+            # of `kubectl create configmap foo --from-file=/path/to/dir`
+            data = {}
+            for f in os.listdir(cfg_map_path):
+                full_path = os.path.join(cfg_map_path, f)
+                if os.path.isfile(full_path):
+                    data[f] = open(full_path, 'rt').read()
+            spec = client.V1ConfigMap(
+                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}",
+                                             labels={"configmap-label": cfg_map_name}),
+                data=data
+            )
+            result.append(spec)
+        return result
+
     def get_pvs(self):
         result = []
-        volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
-        for volume_name in volumes:
+        spec_volumes = self.spec.get_volumes()
+        named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
+        for volume_name in spec_volumes:
+            if volume_name not in named_volumes:
+                if opts.o.debug:
+                    print(f"{volume_name} not in pod files")
+                continue
             spec = client.V1PersistentVolumeSpec(
                 storage_class_name="manual",
                 access_modes=["ReadWriteOnce"],
@@ -161,8 +204,8 @@ class ClusterInfo:
                 host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name))
             )
             pv = client.V1PersistentVolume(
-                metadata=client.V1ObjectMeta(name=volume_name,
-                                             labels={"volume-label": volume_name}),
+                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
+                                             labels={"volume-label": f"{self.app_name}-{volume_name}"}),
                 spec=spec,
             )
             result.append(pv)
@@ -178,10 +221,11 @@ class ClusterInfo:
             container_name = service_name
             service_info = services[service_name]
             image = service_info["image"]
-            port = int(service_info["ports"][0])
-            if opts.o.debug:
-                print(f"image: {image}")
-                print(f"service port: {port}")
+            if "ports" in service_info:
+                port = int(service_info["ports"][0])
+                if opts.o.debug:
+                    print(f"image: {image}")
+                    print(f"service port: {port}")
             # Re-write the image tag for remote deployment
             image_to_use = remote_tag_for_image(
                 image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image
@@ -199,7 +243,7 @@ class ClusterInfo:
                 ),
             )
             containers.append(container)
-        volumes = volumes_for_pod_files(self.parsed_pod_yaml_map)
+        volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec)
         image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")]
         template = client.V1PodTemplateSpec(
             metadata=client.V1ObjectMeta(labels={"app": self.app_name}),
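get_configmaps builds each ConfigMap's data from the files found at the top level of the configured directory, mimicking `kubectl create configmap NAME --from-file=DIR`. A minimal standalone sketch of that construction; configmap_from_dir is a hypothetical helper, not part of the commit:

    import os
    from kubernetes import client

    def configmap_from_dir(name: str, dir_path: str) -> client.V1ConfigMap:
        # One key per regular file at the top level of dir_path; file contents are the values.
        data = {}
        for entry in os.listdir(dir_path):
            full_path = os.path.join(dir_path, entry)
            if os.path.isfile(full_path):
                with open(full_path, "rt") as f:
                    data[entry] = f.read()
        return client.V1ConfigMap(
            metadata=client.V1ObjectMeta(name=name),
            data=data,
        )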


@@ -1,5 +1,4 @@
-# Copyright © 2023 Vulcanize
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -110,6 +109,20 @@ class K8sDeployer(Deployer):
             if opts.o.debug:
                 print("PVCs created:")
                 print(f"{pvc_resp}")
+
+        # Figure out the ConfigMaps for this deployment
+        config_maps = self.cluster_info.get_configmaps()
+        for cfg_map in config_maps:
+            if opts.o.debug:
+                print(f"Sending this ConfigMap: {cfg_map}")
+            cfg_rsp = self.core_api.create_namespaced_config_map(
+                body=cfg_map,
+                namespace=self.k8s_namespace
+            )
+            if opts.o.debug:
+                print("ConfigMap created:")
+                print(f"{cfg_rsp}")
+
         # Process compose files into a Deployment
         deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
         # Create the k8s objects
@@ -135,17 +148,21 @@ class K8sDeployer(Deployer):
         if not self.is_kind():
             ingress: client.V1Ingress = self.cluster_info.get_ingress()
-            if opts.o.debug:
-                print(f"Sending this ingress: {ingress}")
-            ingress_resp = self.networking_api.create_namespaced_ingress(
-                namespace=self.k8s_namespace,
-                body=ingress
-            )
-            if opts.o.debug:
-                print("Ingress created:")
-                print(f"{ingress_resp}")
+            if ingress:
+                if opts.o.debug:
+                    print(f"Sending this ingress: {ingress}")
+                ingress_resp = self.networking_api.create_namespaced_ingress(
+                    namespace=self.k8s_namespace,
+                    body=ingress
+                )
+                if opts.o.debug:
+                    print("Ingress created:")
+                    print(f"{ingress_resp}")
+            else:
+                if opts.o.debug:
+                    print("No ingress configured")
 
-    def down(self, timeout, volumes):
+    def down(self, timeout, volumes):   # noqa: C901
         self.connect_api()
         # Delete the k8s objects
         # Create the host-path-mounted PVs for this deployment
@@ -175,6 +192,22 @@ class K8sDeployer(Deployer):
                     print(f"{pvc_resp}")
             except client.exceptions.ApiException as e:
                 _check_delete_exception(e)
+
+        # Figure out the ConfigMaps for this deployment
+        cfg_maps = self.cluster_info.get_configmaps()
+        for cfg_map in cfg_maps:
+            if opts.o.debug:
+                print(f"Deleting this ConfigMap: {cfg_map}")
+            try:
+                cfg_map_resp = self.core_api.delete_namespaced_config_map(
+                    name=cfg_map.metadata.name, namespace=self.k8s_namespace
+                )
+                if opts.o.debug:
+                    print("ConfigMap deleted:")
+                    print(f"{cfg_map_resp}")
+            except client.exceptions.ApiException as e:
+                _check_delete_exception(e)
+
         deployment = self.cluster_info.get_deployment()
         if opts.o.debug:
             print(f"Deleting this deployment: {deployment}")
@@ -198,14 +231,18 @@ class K8sDeployer(Deployer):
         if not self.is_kind():
             ingress: client.V1Ingress = self.cluster_info.get_ingress()
-            if opts.o.debug:
-                print(f"Deleting this ingress: {ingress}")
-            try:
-                self.networking_api.delete_namespaced_ingress(
-                    name=ingress.metadata.name, namespace=self.k8s_namespace
-                )
-            except client.exceptions.ApiException as e:
-                _check_delete_exception(e)
+            if ingress:
+                if opts.o.debug:
+                    print(f"Deleting this ingress: {ingress}")
+                try:
+                    self.networking_api.delete_namespaced_ingress(
+                        name=ingress.metadata.name, namespace=self.k8s_namespace
+                    )
+                except client.exceptions.ApiException as e:
+                    _check_delete_exception(e)
+            else:
+                if opts.o.debug:
+                    print("No ingress to delete")
 
         if self.is_kind():
             # Destroy the kind cluster
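The deployer now sends each ConfigMap to the cluster through the CoreV1Api when the deployment is brought up, and deletes it again in down(). A hedged usage sketch of those two API calls, with a hypothetical namespace and ConfigMap:

    from kubernetes import client, config

    config.load_kube_config()  # or config.load_incluster_config() inside a cluster
    core_api = client.CoreV1Api()
    namespace = "default"  # hypothetical

    cfg_map = client.V1ConfigMap(
        metadata=client.V1ObjectMeta(name="example-app-config"),
        data={"settings.ini": "key = value\n"},
    )
    core_api.create_namespaced_config_map(namespace=namespace, body=cfg_map)
    # ...later, on tear-down:
    core_api.delete_namespaced_config_map(name="example-app-config", namespace=namespace)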


@@ -73,7 +73,7 @@ def named_volumes_from_pod_files(parsed_pod_files):
         parsed_pod_file = parsed_pod_files[pod]
         if "volumes" in parsed_pod_file:
             volumes = parsed_pod_file["volumes"]
-            for volume in volumes.keys():
+            for volume, value in volumes.items():
                 # Volume definition looks like:
                 # 'laconicd-data': None
                 named_volumes.append(volume)
@@ -98,22 +98,31 @@ def volume_mounts_for_service(parsed_pod_files, service):
                         volumes = service_obj["volumes"]
                         for mount_string in volumes:
                             # Looks like: test-data:/data
-                            (volume_name, mount_path) = mount_string.split(":")
-                            volume_device = client.V1VolumeMount(mount_path=mount_path, name=volume_name)
+                            parts = mount_string.split(":")
+                            volume_name = parts[0]
+                            mount_path = parts[1]
+                            mount_options = parts[2] if len(parts) == 3 else None
+                            volume_device = client.V1VolumeMount(
+                                mount_path=mount_path, name=volume_name, read_only="ro" == mount_options)
                             result.append(volume_device)
     return result
 
 
-def volumes_for_pod_files(parsed_pod_files):
+def volumes_for_pod_files(parsed_pod_files, spec):
     result = []
     for pod in parsed_pod_files:
         parsed_pod_file = parsed_pod_files[pod]
         if "volumes" in parsed_pod_file:
             volumes = parsed_pod_file["volumes"]
             for volume_name in volumes.keys():
-                claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=volume_name)
-                volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim)
-                result.append(volume)
+                if volume_name in spec.get_configmaps():
+                    config_map = client.V1ConfigMapVolumeSource(name=volume_name)
+                    volume = client.V1Volume(name=volume_name, config_map=config_map)
+                    result.append(volume)
+                else:
+                    claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=volume_name)
+                    volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim)
+                    result.append(volume)
     return result
@@ -158,7 +167,7 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir):
                     volume_definitions.append(
                         f"  - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
                         f"    containerPath: {get_node_pv_mount_path(volume_name)}"
-                        )
+                    )
     return (
         "" if len(volume_definitions) == 0 else (
             "  extraMounts:\n"


@@ -22,6 +22,7 @@ from stack_orchestrator import constants
 class Spec:
 
     obj: typing.Any
+    file_path: Path
 
     def __init__(self) -> None:
         pass
@@ -29,12 +30,23 @@ class Spec:
     def init_from_file(self, file_path: Path):
         with file_path:
             self.obj = get_yaml().load(open(file_path, "r"))
+            self.file_path = file_path
 
     def get_image_registry(self):
         return (self.obj[constants.image_resigtry_key]
                 if self.obj and constants.image_resigtry_key in self.obj
                 else None)
 
+    def get_volumes(self):
+        return (self.obj["volumes"]
+                if self.obj and "volumes" in self.obj
+                else {})
+
+    def get_configmaps(self):
+        return (self.obj["configmaps"]
+                if self.obj and "configmaps" in self.obj
+                else {})
+
     def get_http_proxy(self):
         return (self.obj[constants.network_key][constants.http_proxy_key]
                 if self.obj and constants.network_key in self.obj