From f8159c51fa88c718e8643c9e8548cd78ea9456d7 Mon Sep 17 00:00:00 2001
From: David Boreham
Date: Tue, 6 Feb 2024 06:58:42 -0700
Subject: [PATCH] Implement dry run support for k8s deploy

---
 .../deploy/compose/deploy_docker.py         |  86 ++++++++-------
 stack_orchestrator/deploy/deploy.py         |  67 ++++-------
 stack_orchestrator/deploy/k8s/deploy_k8s.py | 104 ++++++++++--------
 3 files changed, 132 insertions(+), 125 deletions(-)

diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py
index b2622820..ffde91c2 100644
--- a/stack_orchestrator/deploy/compose/deploy_docker.py
+++ b/stack_orchestrator/deploy/compose/deploy_docker.py
@@ -17,6 +17,7 @@ from pathlib import Path
 from python_on_whales import DockerClient, DockerException
 from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
+from stack_orchestrator.opts import opts
 
 
 class DockerDeployer(Deployer):
@@ -29,60 +30,69 @@ class DockerDeployer(Deployer):
         self.type = type
 
     def up(self, detach, services):
-        try:
-            return self.docker.compose.up(detach=detach, services=services)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.up(detach=detach, services=services)
+            except DockerException as e:
+                raise DeployerException(e)
 
     def down(self, timeout, volumes):
-        try:
-            return self.docker.compose.down(timeout=timeout, volumes=volumes)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.down(timeout=timeout, volumes=volumes)
+            except DockerException as e:
+                raise DeployerException(e)
 
     def update(self):
-        try:
-            return self.docker.compose.restart()
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.restart()
+            except DockerException as e:
+                raise DeployerException(e)
 
     def status(self):
-        try:
-            for p in self.docker.compose.ps():
-                print(f"{p.name}\t{p.state.status}")
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                for p in self.docker.compose.ps():
+                    print(f"{p.name}\t{p.state.status}")
+            except DockerException as e:
+                raise DeployerException(e)
 
     def ps(self):
-        try:
-            return self.docker.compose.ps()
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.ps()
+            except DockerException as e:
+                raise DeployerException(e)
 
     def port(self, service, private_port):
-        try:
-            return self.docker.compose.port(service=service, private_port=private_port)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.port(service=service, private_port=private_port)
+            except DockerException as e:
+                raise DeployerException(e)
 
     def execute(self, service, command, tty, envs):
-        try:
-            return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
+            except DockerException as e:
+                raise DeployerException(e)
 
     def logs(self, services, tail, follow, stream):
-        try:
-            return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
+            except DockerException as e:
+                raise DeployerException(e)
 
     def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
-        try:
-            return self.docker.run(image=image, command=command, user=user, volumes=volumes,
-                                   entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.run(image=image, command=command, user=user, volumes=volumes,
+                                       entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
+            except DockerException as e:
+                raise DeployerException(e)
 
 
 class DockerDeployerConfigGenerator(DeployerConfigGenerator):
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index de68154b..29afcf13 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -85,54 +85,39 @@ def create_deploy_context(
 def up_operation(ctx, services_list, stay_attached=False):
     global_context = ctx.parent.parent.obj
     deploy_context = ctx.obj
-    if not global_context.dry_run:
-        cluster_context = deploy_context.cluster_context
-        container_exec_env = _make_runtime_env(global_context)
-        for attr, value in container_exec_env.items():
-            os.environ[attr] = value
-        if global_context.verbose:
-            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
-        for pre_start_command in cluster_context.pre_start_commands:
-            _run_command(global_context, cluster_context.cluster, pre_start_command)
-        deploy_context.deployer.up(detach=not stay_attached, services=services_list)
-        for post_start_command in cluster_context.post_start_commands:
-            _run_command(global_context, cluster_context.cluster, post_start_command)
-        _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
+    cluster_context = deploy_context.cluster_context
+    container_exec_env = _make_runtime_env(global_context)
+    for attr, value in container_exec_env.items():
+        os.environ[attr] = value
+    if global_context.verbose:
+        print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
+    for pre_start_command in cluster_context.pre_start_commands:
+        _run_command(global_context, cluster_context.cluster, pre_start_command)
+    deploy_context.deployer.up(detach=not stay_attached, services=services_list)
+    for post_start_command in cluster_context.post_start_commands:
+        _run_command(global_context, cluster_context.cluster, post_start_command)
+    _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
 
 
 def down_operation(ctx, delete_volumes, extra_args_list):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose down")
-        timeout_arg = None
-        if extra_args_list:
-            timeout_arg = extra_args_list[0]
-        # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
-        ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
+    timeout_arg = None
+    if extra_args_list:
+        timeout_arg = extra_args_list[0]
+    # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
+    ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
 
 
 def status_operation(ctx):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose status")
-        ctx.obj.deployer.status()
+    ctx.obj.deployer.status()
 
 
 def update_operation(ctx):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose update")
-        ctx.obj.deployer.update()
+    ctx.obj.deployer.update()
 
 
 def ps_operation(ctx):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose ps")
         container_list = ctx.obj.deployer.ps()
         if len(container_list) > 0:
             print("Running containers:")
@@ -187,15 +172,11 @@ def exec_operation(ctx, extra_args):
 
 
 def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
-    global_context = ctx.parent.parent.obj
     extra_args_list = list(extra_args) or None
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose logs")
-        services_list = extra_args_list if extra_args_list is not None else []
-        logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
-        for stream_type, stream_content in logs_stream:
-            print(stream_content.decode("utf-8"), end="")
+    services_list = extra_args_list if extra_args_list is not None else []
+    logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
+    for stream_type, stream_content in logs_stream:
+        print(stream_content.decode("utf-8"), end="")
 
 
 @command.command()
@@ -463,7 +444,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
                                                       tty=False, envs=container_exec_env)
                     waiting_for_data = False
-                if ctx.debug:
+                if ctx.debug and not waiting_for_data:
                     print(f"destination output: {destination_output}")
diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py
index 045d1893..929547d0 100644
--- a/stack_orchestrator/deploy/k8s/deploy_k8s.py
+++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py
@@ -81,69 +81,84 @@ class K8sDeployer(Deployer):
         self.apps_api = client.AppsV1Api()
         self.custom_obj_api = client.CustomObjectsApi()
 
-    def up(self, detach, services):
-
-        if self.is_kind():
-            # Create the kind cluster
-            create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
-            # Ensure the referenced containers are copied into kind
-            load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
-        self.connect_api()
-
+    def _create_volume_data(self):
         # Create the host-path-mounted PVs for this deployment
         pvs = self.cluster_info.get_pvs()
         for pv in pvs:
             if opts.o.debug:
                 print(f"Sending this pv: {pv}")
-            pv_resp = self.core_api.create_persistent_volume(body=pv)
-            if opts.o.debug:
-                print("PVs created:")
-                print(f"{pv_resp}")
+            if not opts.o.dry_run:
+                pv_resp = self.core_api.create_persistent_volume(body=pv)
+                if opts.o.debug:
+                    print("PVs created:")
+                    print(f"{pv_resp}")
 
         # Figure out the PVCs for this deployment
         pvcs = self.cluster_info.get_pvcs()
         for pvc in pvcs:
             if opts.o.debug:
                 print(f"Sending this pvc: {pvc}")
-            pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
-            if opts.o.debug:
-                print("PVCs created:")
-                print(f"{pvc_resp}")
+
+            if not opts.o.dry_run:
+                pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
+                if opts.o.debug:
+                    print("PVCs created:")
+                    print(f"{pvc_resp}")
 
         # Figure out the ConfigMaps for this deployment
         config_maps = self.cluster_info.get_configmaps()
         for cfg_map in config_maps:
             if opts.o.debug:
                 print(f"Sending this ConfigMap: {cfg_map}")
-            cfg_rsp = self.core_api.create_namespaced_config_map(
-                body=cfg_map,
-                namespace=self.k8s_namespace
-            )
-            if opts.o.debug:
-                print("ConfigMap created:")
-                print(f"{cfg_rsp}")
+            if not opts.o.dry_run:
+                cfg_rsp = self.core_api.create_namespaced_config_map(
+                    body=cfg_map,
+                    namespace=self.k8s_namespace
+                )
+                if opts.o.debug:
+                    print("ConfigMap created:")
+                    print(f"{cfg_rsp}")
 
+    def _create_deployment(self):
         # Process compose files into a Deployment
         deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
         # Create the k8s objects
         if opts.o.debug:
             print(f"Sending this deployment: {deployment}")
-        deployment_resp = self.apps_api.create_namespaced_deployment(
-            body=deployment, namespace=self.k8s_namespace
-        )
-        if opts.o.debug:
-            print("Deployment created:")
-            print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
-            {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
+        if not opts.o.dry_run:
+            deployment_resp = self.apps_api.create_namespaced_deployment(
+                body=deployment, namespace=self.k8s_namespace
+            )
+            if opts.o.debug:
+                print("Deployment created:")
+                print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
+                {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
 
         service: client.V1Service = self.cluster_info.get_service()
-        service_resp = self.core_api.create_namespaced_service(
-            namespace=self.k8s_namespace,
-            body=service
-        )
         if opts.o.debug:
-            print("Service created:")
-            print(f"{service_resp}")
+            print(f"Sending this service: {service}")
+        if not opts.o.dry_run:
+            service_resp = self.core_api.create_namespaced_service(
+                namespace=self.k8s_namespace,
+                body=service
+            )
+            if opts.o.debug:
+                print("Service created:")
+                print(f"{service_resp}")
+
+    def up(self, detach, services):
+        if not opts.o.dry_run:
+            if self.is_kind():
+                # Create the kind cluster
+                create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
+                # Ensure the referenced containers are copied into kind
+                load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
+            self.connect_api()
+        else:
+            print("Dry run mode enabled, skipping k8s API connect")
+
+        self._create_volume_data()
+        self._create_deployment()
 
         if not self.is_kind():
             ingress: client.V1Ingress = self.cluster_info.get_ingress()
@@ -151,13 +166,14 @@ class K8sDeployer(Deployer):
             if ingress:
                 if opts.o.debug:
                     print(f"Sending this ingress: {ingress}")
-                ingress_resp = self.networking_api.create_namespaced_ingress(
-                    namespace=self.k8s_namespace,
-                    body=ingress
-                )
-                if opts.o.debug:
-                    print("Ingress created:")
-                    print(f"{ingress_resp}")
+                if not opts.o.dry_run:
+                    ingress_resp = self.networking_api.create_namespaced_ingress(
+                        namespace=self.k8s_namespace,
+                        body=ingress
+                    )
+                    if opts.o.debug:
+                        print("Ingress created:")
+                        print(f"{ingress_resp}")
             else:
                 if opts.o.debug:
                     print("No ingress configured")
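
Note (illustrative, not part of the patch): with this change each Deployer method becomes a no-op that returns None when dry-run mode is active, and the k8s deployer prints the objects it would have sent instead of creating them. A minimal sketch of the calling contract, assuming opts.o.dry_run is populated from the global dry-run option as in the patch; show_ports is a hypothetical caller used only for illustration:

    from stack_orchestrator.opts import opts

    def show_ports(deployer, service, private_port):
        # Guarded deployer methods return None in dry-run mode, so a caller
        # that consumes the return value needs its own dry-run check, as
        # ps_operation in deploy.py keeps.
        if opts.o.dry_run:
            print(f"dry run: would look up port {private_port} on {service}")
            return
        mapping = deployer.port(service, private_port)
        print(f"{service}:{private_port} -> {mapping}")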