forked from cerc-io/stack-orchestrator

commit 01ab438c7e: Merge main

@@ -1 +1,2 @@
 Change this file to trigger running the test-k8s-deploy CI job
+Trigger test on PR branch

@@ -7,8 +7,10 @@ services:
       CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
     volumes:
       - test-data:/data
+      - test-config:/config:ro
     ports:
       - "80"

 volumes:
   test-data:
+  test-config:

@@ -17,5 +17,20 @@ fi
 if [ -n "$CERC_TEST_PARAM_1" ]; then
     echo "Test-param-1: ${CERC_TEST_PARAM_1}"
 fi
+
+if [ -d "/config" ]; then
+    echo "/config: EXISTS"
+    for f in /config/*; do
+        if [[ -f "$f" ]] || [[ -L "$f" ]]; then
+            echo "$f:"
+            cat "$f"
+            echo ""
+            echo ""
+        fi
+    done
+else
+    echo "/config: does NOT EXIST"
+fi
+
 # Run nginx which will block here forever
 /usr/sbin/nginx -g "daemon off;"

@@ -17,6 +17,7 @@ from pathlib import Path
 from python_on_whales import DockerClient, DockerException
 from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
+from stack_orchestrator.opts import opts


 class DockerDeployer(Deployer):

@@ -29,60 +30,69 @@ class DockerDeployer(Deployer):
         self.type = type

     def up(self, detach, services):
-        try:
-            return self.docker.compose.up(detach=detach, services=services)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.up(detach=detach, services=services)
+            except DockerException as e:
+                raise DeployerException(e)

     def down(self, timeout, volumes):
-        try:
-            return self.docker.compose.down(timeout=timeout, volumes=volumes)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.down(timeout=timeout, volumes=volumes)
+            except DockerException as e:
+                raise DeployerException(e)

     def update(self):
-        try:
-            return self.docker.compose.restart()
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.restart()
+            except DockerException as e:
+                raise DeployerException(e)

     def status(self):
-        try:
-            for p in self.docker.compose.ps():
-                print(f"{p.name}\t{p.state.status}")
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                for p in self.docker.compose.ps():
+                    print(f"{p.name}\t{p.state.status}")
+            except DockerException as e:
+                raise DeployerException(e)

     def ps(self):
-        try:
-            return self.docker.compose.ps()
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.ps()
+            except DockerException as e:
+                raise DeployerException(e)

     def port(self, service, private_port):
-        try:
-            return self.docker.compose.port(service=service, private_port=private_port)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.port(service=service, private_port=private_port)
+            except DockerException as e:
+                raise DeployerException(e)

     def execute(self, service, command, tty, envs):
-        try:
-            return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
+            except DockerException as e:
+                raise DeployerException(e)

     def logs(self, services, tail, follow, stream):
-        try:
-            return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
+            except DockerException as e:
+                raise DeployerException(e)

     def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
-        try:
-            return self.docker.run(image=image, command=command, user=user, volumes=volumes,
-                                   entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.run(image=image, command=command, user=user, volumes=volumes,
+                                       entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
+            except DockerException as e:
+                raise DeployerException(e)


 class DockerDeployerConfigGenerator(DeployerConfigGenerator):

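Note: the pattern in this hunk is mechanical — every DockerDeployer method now wraps its python_on_whales call in the same guard, so a dry run returns before any Docker API traffic happens. A minimal sketch of the idiom (the Opts class below is a hypothetical stand-in; in the real code the flag is opts.o.dry_run from stack_orchestrator.opts):

    # Sketch of the dry-run guard used throughout DockerDeployer.
    class Opts:
        def __init__(self, dry_run: bool):
            self.dry_run = dry_run

    class GuardedDeployer:
        def __init__(self, opts: Opts):
            self.opts = opts

        def up(self):
            if not self.opts.dry_run:
                # The real method calls self.docker.compose.up(...) here and
                # converts DockerException into DeployerException.
                return "compose up executed"
            # In dry-run mode the method falls through and returns None.

    print(GuardedDeployer(Opts(dry_run=True)).up())   # None - nothing ran
    print(GuardedDeployer(Opts(dry_run=False)).up())  # compose up executed
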
@@ -85,54 +85,39 @@ def create_deploy_context(
 def up_operation(ctx, services_list, stay_attached=False):
     global_context = ctx.parent.parent.obj
     deploy_context = ctx.obj
-    if not global_context.dry_run:
-        cluster_context = deploy_context.cluster_context
-        container_exec_env = _make_runtime_env(global_context)
-        for attr, value in container_exec_env.items():
-            os.environ[attr] = value
-        if global_context.verbose:
-            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
-        for pre_start_command in cluster_context.pre_start_commands:
-            _run_command(global_context, cluster_context.cluster, pre_start_command)
-        deploy_context.deployer.up(detach=not stay_attached, services=services_list)
-        for post_start_command in cluster_context.post_start_commands:
-            _run_command(global_context, cluster_context.cluster, post_start_command)
-        _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
+    cluster_context = deploy_context.cluster_context
+    container_exec_env = _make_runtime_env(global_context)
+    for attr, value in container_exec_env.items():
+        os.environ[attr] = value
+    if global_context.verbose:
+        print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
+    for pre_start_command in cluster_context.pre_start_commands:
+        _run_command(global_context, cluster_context.cluster, pre_start_command)
+    deploy_context.deployer.up(detach=not stay_attached, services=services_list)
+    for post_start_command in cluster_context.post_start_commands:
+        _run_command(global_context, cluster_context.cluster, post_start_command)
+    _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)


 def down_operation(ctx, delete_volumes, extra_args_list):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose down")
-        timeout_arg = None
-        if extra_args_list:
-            timeout_arg = extra_args_list[0]
-        # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
-        ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
+    timeout_arg = None
+    if extra_args_list:
+        timeout_arg = extra_args_list[0]
+    # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
+    ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)


 def status_operation(ctx):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose status")
-        ctx.obj.deployer.status()
+    ctx.obj.deployer.status()


 def update_operation(ctx):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose update")
-        ctx.obj.deployer.update()
+    ctx.obj.deployer.update()


 def ps_operation(ctx):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose ps")
         container_list = ctx.obj.deployer.ps()
         if len(container_list) > 0:
             print("Running containers:")

@@ -187,15 +172,11 @@ def exec_operation(ctx, extra_args):


 def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
-    global_context = ctx.parent.parent.obj
     extra_args_list = list(extra_args) or None
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose logs")
-        services_list = extra_args_list if extra_args_list is not None else []
-        logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
-        for stream_type, stream_content in logs_stream:
-            print(stream_content.decode("utf-8"), end="")
+    services_list = extra_args_list if extra_args_list is not None else []
+    logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
+    for stream_type, stream_content in logs_stream:
+        print(stream_content.decode("utf-8"), end="")


 @command.command()

@@ -463,7 +444,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
                                               tty=False,
                                               envs=container_exec_env)
             waiting_for_data = False
-        if ctx.debug:
+        if ctx.debug and not waiting_for_data:
             print(f"destination output: {destination_output}")

@@ -18,11 +18,11 @@ from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator
 from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator


-def getDeployerConfigGenerator(type: str):
+def getDeployerConfigGenerator(type: str, deployment_context):
     if type == "compose" or type is None:
         return DockerDeployerConfigGenerator(type)
     elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
-        return K8sDeployerConfigGenerator(type)
+        return K8sDeployerConfigGenerator(type, deployment_context)
     else:
         print(f"ERROR: deploy-to {type} is not valid")

@@ -487,7 +487,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers):
     deployment_context = DeploymentContext()
     deployment_context.init(deployment_dir_path)
     # Call the deployer to generate any deployer-specific files (e.g. for kind)
-    deployer_config_generator = getDeployerConfigGenerator(deployment_type)
+    deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
     # TODO: make deployment_dir_path a Path above
     deployer_config_generator.generate(deployment_dir_path)
     call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context])

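Note: taken together, the two hunks above thread the DeploymentContext created in create_operation through getDeployerConfigGenerator and into the generator's constructor, so the k8s generator can consult the deployment spec later. A simplified sketch of the flow (type dispatch reduced to plain strings; the real code compares against constants.k8s_deploy_type and constants.k8s_kind_deploy_type):

    # Simplified flow of deployment_context from create_operation to the
    # config generator; class bodies are stand-ins for the real ones.
    class DeploymentContext:
        pass

    class K8sDeployerConfigGenerator:
        def __init__(self, type: str, deployment_context) -> None:
            self.type = type
            self.deployment_context = deployment_context

    def getDeployerConfigGenerator(type: str, deployment_context):
        if type in ("k8s", "k8s-kind"):
            return K8sDeployerConfigGenerator(type, deployment_context)
        print(f"ERROR: deploy-to {type} is not valid")

    context = DeploymentContext()
    generator = getDeployerConfigGenerator("k8s-kind", context)
    assert generator.deployment_context is context
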
@@ -81,69 +81,84 @@ class K8sDeployer(Deployer):
         self.apps_api = client.AppsV1Api()
         self.custom_obj_api = client.CustomObjectsApi()

-    def up(self, detach, services):
-        if self.is_kind():
-            # Create the kind cluster
-            create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
-            # Ensure the referenced containers are copied into kind
-            load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
-        self.connect_api()
-
+    def _create_volume_data(self):
         # Create the host-path-mounted PVs for this deployment
         pvs = self.cluster_info.get_pvs()
         for pv in pvs:
             if opts.o.debug:
                 print(f"Sending this pv: {pv}")
-            pv_resp = self.core_api.create_persistent_volume(body=pv)
-            if opts.o.debug:
-                print("PVs created:")
-                print(f"{pv_resp}")
+            if not opts.o.dry_run:
+                pv_resp = self.core_api.create_persistent_volume(body=pv)
+                if opts.o.debug:
+                    print("PVs created:")
+                    print(f"{pv_resp}")

         # Figure out the PVCs for this deployment
         pvcs = self.cluster_info.get_pvcs()
         for pvc in pvcs:
             if opts.o.debug:
                 print(f"Sending this pvc: {pvc}")
-            pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
-            if opts.o.debug:
-                print("PVCs created:")
-                print(f"{pvc_resp}")
+            if not opts.o.dry_run:
+                pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
+                if opts.o.debug:
+                    print("PVCs created:")
+                    print(f"{pvc_resp}")

         # Figure out the ConfigMaps for this deployment
         config_maps = self.cluster_info.get_configmaps()
         for cfg_map in config_maps:
             if opts.o.debug:
                 print(f"Sending this ConfigMap: {cfg_map}")
-            cfg_rsp = self.core_api.create_namespaced_config_map(
-                body=cfg_map,
-                namespace=self.k8s_namespace
-            )
-            if opts.o.debug:
-                print("ConfigMap created:")
-                print(f"{cfg_rsp}")
+            if not opts.o.dry_run:
+                cfg_rsp = self.core_api.create_namespaced_config_map(
+                    body=cfg_map,
+                    namespace=self.k8s_namespace
+                )
+                if opts.o.debug:
+                    print("ConfigMap created:")
+                    print(f"{cfg_rsp}")

+    def _create_deployment(self):
         # Process compose files into a Deployment
         deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
         # Create the k8s objects
         if opts.o.debug:
             print(f"Sending this deployment: {deployment}")
-        deployment_resp = self.apps_api.create_namespaced_deployment(
-            body=deployment, namespace=self.k8s_namespace
-        )
-        if opts.o.debug:
-            print("Deployment created:")
-            print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
-                {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
+        if not opts.o.dry_run:
+            deployment_resp = self.apps_api.create_namespaced_deployment(
+                body=deployment, namespace=self.k8s_namespace
+            )
+            if opts.o.debug:
+                print("Deployment created:")
+                print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
+                    {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")

         service: client.V1Service = self.cluster_info.get_service()
-        service_resp = self.core_api.create_namespaced_service(
-            namespace=self.k8s_namespace,
-            body=service
-        )
         if opts.o.debug:
-            print("Service created:")
-            print(f"{service_resp}")
+            print(f"Sending this service: {service}")
+        if not opts.o.dry_run:
+            service_resp = self.core_api.create_namespaced_service(
+                namespace=self.k8s_namespace,
+                body=service
+            )
+            if opts.o.debug:
+                print("Service created:")
+                print(f"{service_resp}")

+    def up(self, detach, services):
+        if not opts.o.dry_run:
+            if self.is_kind():
+                # Create the kind cluster
+                create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
+                # Ensure the referenced containers are copied into kind
+                load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
+            self.connect_api()
+        else:
+            print("Dry run mode enabled, skipping k8s API connect")
+
+        self._create_volume_data()
+        self._create_deployment()
+
         if not self.is_kind():
             ingress: client.V1Ingress = self.cluster_info.get_ingress()

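Note: after this refactor, up() is a thin driver: cluster creation and the API connect are skipped entirely under dry run, while the extracted helpers still execute so debug output can show what would be sent. A condensed sketch of the resulting control flow (print stand-ins replace the kubernetes client calls):

    # Condensed control flow of the refactored K8sDeployer.up; the real
    # helpers call the kubernetes Python client instead of printing.
    class SketchK8sDeployer:
        def __init__(self, dry_run: bool, kind: bool):
            self.dry_run = dry_run
            self.kind = kind

        def _create_volume_data(self):
            print("PVs/PVCs/ConfigMaps:", "skipped (dry run)" if self.dry_run else "created")

        def _create_deployment(self):
            print("Deployment/Service:", "skipped (dry run)" if self.dry_run else "created")

        def up(self):
            if not self.dry_run:
                if self.kind:
                    print("creating kind cluster and loading images")
                print("connecting to k8s API")
            else:
                print("Dry run mode enabled, skipping k8s API connect")
            self._create_volume_data()
            self._create_deployment()

    SketchK8sDeployer(dry_run=True, kind=True).up()
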
@@ -151,13 +166,14 @@ class K8sDeployer(Deployer):
             if ingress:
                 if opts.o.debug:
                     print(f"Sending this ingress: {ingress}")
-                ingress_resp = self.networking_api.create_namespaced_ingress(
-                    namespace=self.k8s_namespace,
-                    body=ingress
-                )
-                if opts.o.debug:
-                    print("Ingress created:")
-                    print(f"{ingress_resp}")
+                if not opts.o.dry_run:
+                    ingress_resp = self.networking_api.create_namespaced_ingress(
+                        namespace=self.k8s_namespace,
+                        body=ingress
+                    )
+                    if opts.o.debug:
+                        print("Ingress created:")
+                        print(f"{ingress_resp}")
             else:
                 if opts.o.debug:
                     print("No ingress configured")

@@ -390,8 +406,9 @@ class K8sDeployer(Deployer):
 class K8sDeployerConfigGenerator(DeployerConfigGenerator):
     type: str

-    def __init__(self, type: str) -> None:
+    def __init__(self, type: str, deployment_context) -> None:
         self.type = type
+        self.deployment_context = deployment_context
         super().__init__()

     def generate(self, deployment_dir: Path):

@@ -399,7 +416,7 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator):
         if self.type == "k8s-kind":
             # Check the file isn't already there
             # Get the config file contents
-            content = generate_kind_config(deployment_dir)
+            content = generate_kind_config(deployment_dir, self.deployment_context)
             if opts.o.debug:
                 print(f"kind config is: {content}")
             config_file = deployment_dir.joinpath(constants.kind_config_filename)

@@ -140,8 +140,9 @@ def _get_host_paths_for_volumes(parsed_pod_files):
         volumes = parsed_pod_file["volumes"]
         for volume_name in volumes.keys():
             volume_definition = volumes[volume_name]
-            host_path = volume_definition["driver_opts"]["device"]
-            result[volume_name] = host_path
+            if volume_definition and "driver_opts" in volume_definition:
+                host_path = volume_definition["driver_opts"]["device"]
+                result[volume_name] = host_path
     return result

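Note: the guard matters because, with ConfigMap support, a compose volumes: entry can now be empty (no driver_opts), and the old unconditional ["driver_opts"]["device"] lookup would raise on it. Behavior on such input (hypothetical data shaped like a parsed pod file):

    # Hypothetical parsed compose volumes: one host-path volume with
    # driver_opts, one bare entry such as a ConfigMap-backed volume.
    volumes = {
        "test-data": {"driver_opts": {"device": "/srv/data"}},
        "test-config": None,
    }

    result = {}
    for volume_name in volumes.keys():
        volume_definition = volumes[volume_name]
        # The added guard skips entries with no driver_opts instead of
        # raising TypeError (None value) or KeyError (missing key).
        if volume_definition and "driver_opts" in volume_definition:
            result[volume_name] = volume_definition["driver_opts"]["device"]

    print(result)  # {'test-data': '/srv/data'}
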
@@ -153,7 +154,7 @@ def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
     return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve()


-def _generate_kind_mounts(parsed_pod_files, deployment_dir):
+def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
     volume_definitions = []
     volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files)
     # Note these paths are relative to the location of the pod files (at present)

@@ -178,10 +179,11 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir):
                         print(f"volumne_name: {volume_name}")
                         print(f"map: {volume_host_path_map}")
                         print(f"mount path: {mount_path}")
-                    volume_definitions.append(
-                        f"  - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
-                        f"    containerPath: {get_node_pv_mount_path(volume_name)}\n"
-                    )
+                    if volume_name not in deployment_context.spec.get_configmaps():
+                        volume_definitions.append(
+                            f"  - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
+                            f"    containerPath: {get_node_pv_mount_path(volume_name)}\n"
+                        )
     return (
         "" if len(volume_definitions) == 0 else (
             "  extraMounts:\n"

@@ -237,13 +239,13 @@ def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]:
 # extraMounts:
 # - hostPath: /path/to/my/files
 #   containerPath: /files
-def generate_kind_config(deployment_dir: Path):
+def generate_kind_config(deployment_dir: Path, deployment_context):
     compose_file_dir = deployment_dir.joinpath("compose")
     # TODO: this should come from the stack file, not this way
     pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
     parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
     port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
-    mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir)
+    mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir, deployment_context)
     return (
         "kind: Cluster\n"
         "apiVersion: kind.x-k8s.io/v1alpha4\n"

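Note: with deployment_context now reaching _generate_kind_mounts, volumes the spec declares as ConfigMaps are excluded from the kind extraMounts, since their content is delivered through the k8s API rather than a host path. A sketch of the filter (Spec and the /mnt path are hypothetical stand-ins; the real code calls deployment_context.spec.get_configmaps() and get_node_pv_mount_path()):

    # Sketch of the ConfigMap filter applied when emitting kind extraMounts.
    class Spec:
        def get_configmaps(self):
            return ["test-config"]  # volumes delivered as ConfigMaps

    spec = Spec()
    volume_host_path_map = {"test-data": "/srv/data", "test-config": "/srv/cfg"}

    volume_definitions = []
    for volume_name, host_path in volume_host_path_map.items():
        if volume_name not in spec.get_configmaps():
            volume_definitions.append(
                f"  - hostPath: {host_path}\n"
                f"    containerPath: /mnt/{volume_name}\n"
            )

    print("".join(volume_definitions))  # only test-data gets an extraMount
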
@@ -97,6 +97,10 @@ if [ ! "$create_file_content" == "create-command-output-data" ]; then
     echo "deploy create test: FAILED"
     exit 1
 fi
+
+# Add a config file to be picked up by the ConfigMap before starting.
+echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
+
 echo "deploy create output file test: passed"
 # Try to start the deployment
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start

@@ -117,6 +121,16 @@ else
     echo "deployment config test: FAILED"
     delete_cluster_exit
 fi
+
+# Check that the ConfigMap is mounted and contains the expected content.
+log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
+    echo "deployment ConfigMap test: passed"
+else
+    echo "deployment ConfigMap test: FAILED"
+    delete_cluster_exit
+fi
+
 # Stop then start again and check the volume was preserved
 $TEST_TARGET_SO deployment --dir $test_deployment_dir stop
 # Sleep a bit just in case

@@ -125,8 +139,8 @@ sleep 20
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start
 wait_for_pods_started
 wait_for_log_output
-log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
-if [[ "$log_output_4" == *"Filesystem is old"* ]]; then
+log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_5" == *"Filesystem is old"* ]]; then
     echo "Retain volumes test: passed"
 else
     echo "Retain volumes test: FAILED"