# Copyright © 2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from kubernetes import client
import os
from pathlib import Path
import subprocess
import re
from typing import Set, Mapping, List

from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names
from stack_orchestrator.deploy.deployer import DeployerException


def _run_command(command: str):
    if opts.o.debug:
        print(f"Running: {command}")
    result = subprocess.run(command, shell=True)
    if opts.o.debug:
        print(f"Result: {result}")
    return result


def create_cluster(name: str, config_file: str):
    result = _run_command(f"kind create cluster --name {name} --config {config_file}")
    if result.returncode != 0:
        raise DeployerException(f"kind create cluster failed: {result}")


def destroy_cluster(name: str):
    _run_command(f"kind delete cluster --name {name}")


def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
    for image in image_set:
        result = _run_command(f"kind load docker-image {image} --name {kind_cluster_name}")
        if result.returncode != 0:
            raise DeployerException(f"kind load docker-image failed: {result}")


def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
    pods = []
    pod_response = core_api.list_namespaced_pod(namespace="default", label_selector=f"app={deployment_name}")
    if opts.o.debug:
        print(f"pod_response: {pod_response}")
    for pod_info in pod_response.items:
        pod_name = pod_info.metadata.name
        pods.append(pod_name)
    return pods
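
# Illustrative usage of pods_in_deployment (a sketch; the deployment name is
# hypothetical and a kubeconfig is assumed to be loaded already, e.g. via
# kubernetes.config.load_kube_config()):
#     core_api = client.CoreV1Api()
#     pod_names = pods_in_deployment(core_api, "my-deployment")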


def containers_in_pod(core_api: client.CoreV1Api, pod_name: str):
    containers = []
    pod_response = core_api.read_namespaced_pod(pod_name, namespace="default")
    if opts.o.debug:
        print(f"pod_response: {pod_response}")
    pod_containers = pod_response.spec.containers
    for pod_container in pod_containers:
        containers.append(pod_container.name)
    return containers


def log_stream_from_string(s: str):
    # Note response has to be UTF-8 encoded because the caller expects to decode it
    yield ("ignore", s.encode())
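
# Illustrative usage (a sketch): the generator mimics the (source, bytes) tuples
# of a real log stream, so a caller can do e.g.:
#     for _source, chunk in log_stream_from_string("no logs available"):
#         print(chunk.decode())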


def named_volumes_from_pod_files(parsed_pod_files):
    # Parse the compose files looking for named volumes
    named_volumes = []
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
            for volume, value in volumes.items():
                # Volume definition looks like:
                # 'laconicd-data': None
                named_volumes.append(volume)
    return named_volumes
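
# Illustrative example (hypothetical compose file content): a top-level section
#     volumes:
#       laconicd-data:
#       test-data:
# parses to {"laconicd-data": None, "test-data": None}, so the function above
# returns ["laconicd-data", "test-data"].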


def get_kind_pv_bind_mount_path(volume_name: str):
    return f"/mnt/{volume_name}"


def volume_mounts_for_service(parsed_pod_files, service):
    result = []
    # Find the service
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
        if "services" in parsed_pod_file:
            services = parsed_pod_file["services"]
            for service_name in services:
                if service_name == service:
                    service_obj = services[service_name]
                    if "volumes" in service_obj:
                        volumes = service_obj["volumes"]
                        for mount_string in volumes:
                            # Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw
                            if opts.o.debug:
                                print(f"mount_string: {mount_string}")
                            mount_split = mount_string.split(":")
                            volume_name = mount_split[0]
                            mount_path = mount_split[1]
                            mount_options = mount_split[2] if len(mount_split) == 3 else None
                            if opts.o.debug:
                                print(f"volume_name: {volume_name}")
                                print(f"mount path: {mount_path}")
                                print(f"mount options: {mount_options}")
                            volume_device = client.V1VolumeMount(
                                mount_path=mount_path,
                                name=volume_name,
                                read_only="ro" == mount_options
                            )
                            result.append(volume_device)
    return result
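
# Illustrative example: for a service entry containing
#     volumes:
#       - test-data:/data:ro
# the function above yields
#     client.V1VolumeMount(name="test-data", mount_path="/data", read_only=True)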


def volumes_for_pod_files(parsed_pod_files, spec, app_name):
    result = []
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
            for volume_name in volumes.keys():
                if volume_name in spec.get_configmaps():
                    config_map = client.V1ConfigMapVolumeSource(name=f"{app_name}-{volume_name}")
                    volume = client.V1Volume(name=volume_name, config_map=config_map)
                    result.append(volume)
                else:
                    claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=f"{app_name}-{volume_name}")
                    volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim)
                    result.append(volume)
    return result
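
# Illustrative example (hypothetical names): with app_name "test-app" and a named
# volume "db-data" that is not listed in spec.get_configmaps(), the function above
# produces a V1Volume backed by a PersistentVolumeClaim named "test-app-db-data";
# volumes that are listed as configmaps get a V1ConfigMapVolumeSource named the
# same way.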


def _get_host_paths_for_volumes(deployment_context):
    return deployment_context.spec.get_volumes()


def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
    if os.path.isabs(data_mount_path):
        return data_mount_path
    else:
        # Python Path voodoo that looks pretty odd:
        return Path.cwd().joinpath(deployment_dir.joinpath(data_mount_path)).resolve()
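
# Illustrative example (hypothetical paths): with deployment_dir=Path("my-deployment")
# and data_mount_path=Path("data/test-data"), the result is
# <cwd>/my-deployment/data/test-data; an absolute data_mount_path is returned unchanged.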


def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
    volume_definitions = []
    volume_host_path_map = _get_host_paths_for_volumes(deployment_context)
    # Note these paths are relative to the location of the pod files (at present),
    # so we need to fix them up into absolute paths, because kind resolves
    # relative paths against the cwd.
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
        if "services" in parsed_pod_file:
            services = parsed_pod_file["services"]
            for service_name in services:
                service_obj = services[service_name]
                if "volumes" in service_obj:
                    volumes = service_obj["volumes"]
                    for mount_string in volumes:
                        # Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw
                        if opts.o.debug:
                            print(f"mount_string: {mount_string}")
                        mount_split = mount_string.split(":")
                        volume_name = mount_split[0]
                        mount_path = mount_split[1]
                        if opts.o.debug:
                            print(f"volume_name: {volume_name}")
                            print(f"map: {volume_host_path_map}")
                            print(f"mount path: {mount_path}")
                        if volume_name not in deployment_context.spec.get_configmaps():
                            if volume_host_path_map[volume_name]:
                                volume_definitions.append(
                                    f"  - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
                                    f"    containerPath: {get_kind_pv_bind_mount_path(volume_name)}\n"
                                )
    return (
        "" if len(volume_definitions) == 0 else (
            "  extraMounts:\n"
            f"{''.join(volume_definitions)}"
        )
    )
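
# Illustrative output of _generate_kind_mounts for a single volume "test-data"
# mapped to a host path (a sketch; the exact host path depends on the deployment):
#   extraMounts:
#   - hostPath: /abs/path/to/my-deployment/data/test-data
#     containerPath: /mnt/test-data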


def _generate_kind_port_mappings(parsed_pod_files):
    port_definitions = []
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
        if "services" in parsed_pod_file:
            services = parsed_pod_file["services"]
            for service_name in services:
                service_obj = services[service_name]
                if "ports" in service_obj:
                    ports = service_obj["ports"]
                    for port_string in ports:
                        # TODO handle the complex cases
                        # Looks like: 80 or something more complicated
                        port_definitions.append(f"  - containerPort: {port_string}\n    hostPort: {port_string}\n")
    return (
        "" if len(port_definitions) == 0 else (
            "  extraPortMappings:\n"
            f"{''.join(port_definitions)}"
        )
    )
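
# Illustrative example: a compose service with "ports: ['80']" contributes
#   extraPortMappings:
#   - containerPort: 80
#     hostPort: 80
# to the kind config. Compose also allows forms like "8080:80" or
# "127.0.0.1:8080:80", which presumably are the "complex cases" the TODO above
# does not yet handle.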


# Note: this makes any duplicate definition in b overwrite a
def merge_envs(a: Mapping[str, str], b: Mapping[str, str]) -> Mapping[str, str]:
    result = {**a, **b}
    return result
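
# Illustrative example of the overwrite behaviour:
#     merge_envs({"A": "1", "B": "2"}, {"B": "3"}) == {"A": "1", "B": "3"}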


def _expand_shell_vars(raw_val: str) -> str:
    # could be: <string> or ${<env-var-name>} or ${<env-var-name>:-<default-value>}
    # TODO: implement support for variable substitution and default values
    # if raw_val is like ${<something>} print a warning and substitute an empty string
    # otherwise return raw_val
    match = re.search(r"^\$\{(.*)\}$", raw_val)
    if match:
        print(f"WARNING: found unimplemented environment variable substitution: {raw_val}")
        return ""  # substitute an empty string, per the note above
    else:
        return raw_val
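
# Illustrative behaviour of _expand_shell_vars:
#     _expand_shell_vars("plain-value")  -> "plain-value"
#     _expand_shell_vars("${FOO}")       -> prints a warning, returns ""
# One possible way to implement the TODO (a sketch only, not used above) would be
# to look the variable up in os.environ with an optional default, e.g.:
#     name, _, default = match.group(1).partition(":-")
#     return os.environ.get(name, default)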


# TODO: handle the case where the same env var is defined in multiple places
def envs_from_compose_file(compose_file_envs: Mapping[str, str]) -> Mapping[str, str]:
    result = {}
    for env_var, env_val in compose_file_envs.items():
        expanded_env_val = _expand_shell_vars(env_val)
        result.update({env_var: expanded_env_val})
    return result


def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]:
    result = []
    for env_var, env_val in map.items():
        result.append(client.V1EnvVar(env_var, env_val))
    return result


# This needs to know:
#   The service ports for the cluster
#   The bind mounted volumes for the cluster
#
# Make ports like this:
#  extraPortMappings:
#  - containerPort: 80
#    hostPort: 80
#    # optional: set the bind address on the host
#    # 0.0.0.0 is the current default
#    listenAddress: "127.0.0.1"
#    # optional: set the protocol to one of TCP, UDP, SCTP.
#    # TCP is the default
#    protocol: TCP
# Make bind mounts like this:
#  extraMounts:
#  - hostPath: /path/to/my/files
#    containerPath: /files
def generate_kind_config(deployment_dir: Path, deployment_context):
    compose_file_dir = deployment_dir.joinpath("compose")
    # TODO: this should come from the stack file, not this way
    pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
    parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
    port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
    mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir, deployment_context)
    return (
        "kind: Cluster\n"
        "apiVersion: kind.x-k8s.io/v1alpha4\n"
        "nodes:\n"
        "- role: control-plane\n"
        f"{port_mappings_yml}\n"
        f"{mounts_yml}\n"
    )
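
# Illustrative usage (a sketch; the deployment directory and cluster name are
# hypothetical): write the generated config to a file and pass it to create_cluster():
#     config = generate_kind_config(Path("my-deployment"), deployment_context)
#     Path("kind-config.yml").write_text(config)
#     create_cluster("laconic-test", "kind-config.yml")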