forked from cerc-io/stack-orchestrator
Implement dry run support for k8s deploy (#727)
parent 36bb068983
commit 6848fc33cf
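The commit threads the existing global dry-run option (read through `stack_orchestrator.opts.opts` as `opts.o.dry_run`) into the deployer implementations: every `DockerDeployer` compose call and every `K8sDeployer` API call is wrapped in an `if not opts.o.dry_run:` guard, while the per-command `dry_run`/`verbose` checks are removed from the operation wrappers in deploy.py. The sketch below is only an illustration of that guard pattern, not code from the commit; the `Opts`/`OptsHolder` classes are stand-ins for the real `opts` singleton, of which the diff only shows the `dry_run` and `debug` attributes.

# Minimal, self-contained sketch of the dry-run guard pattern used throughout this commit.
from dataclasses import dataclass


@dataclass
class Opts:
    dry_run: bool = False
    debug: bool = False


class OptsHolder:
    # Stand-in for stack_orchestrator.opts.opts, which exposes the options as .o
    o = Opts()


opts = OptsHolder()


def create_resource(body):
    # Debug output is emitted whether or not this is a dry run...
    if opts.o.debug:
        print(f"Sending this resource: {body}")
    # ...but the side-effecting call only happens on a real run.
    if not opts.o.dry_run:
        print(f"creating {body!r} for real")


opts.o.dry_run = True
opts.o.debug = True
create_resource({"kind": "Deployment"})  # shows the object, skips the real create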
@@ -17,6 +17,7 @@ from pathlib import Path
 from python_on_whales import DockerClient, DockerException
 from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
+from stack_orchestrator.opts import opts


 class DockerDeployer(Deployer):
@@ -29,60 +30,69 @@ class DockerDeployer(Deployer):
         self.type = type

     def up(self, detach, services):
-        try:
-            return self.docker.compose.up(detach=detach, services=services)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.up(detach=detach, services=services)
+            except DockerException as e:
+                raise DeployerException(e)

     def down(self, timeout, volumes):
-        try:
-            return self.docker.compose.down(timeout=timeout, volumes=volumes)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.down(timeout=timeout, volumes=volumes)
+            except DockerException as e:
+                raise DeployerException(e)

     def update(self):
-        try:
-            return self.docker.compose.restart()
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.restart()
+            except DockerException as e:
+                raise DeployerException(e)

     def status(self):
-        try:
-            for p in self.docker.compose.ps():
-                print(f"{p.name}\t{p.state.status}")
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                for p in self.docker.compose.ps():
+                    print(f"{p.name}\t{p.state.status}")
+            except DockerException as e:
+                raise DeployerException(e)

     def ps(self):
-        try:
-            return self.docker.compose.ps()
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.ps()
+            except DockerException as e:
+                raise DeployerException(e)

     def port(self, service, private_port):
-        try:
-            return self.docker.compose.port(service=service, private_port=private_port)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.port(service=service, private_port=private_port)
+            except DockerException as e:
+                raise DeployerException(e)

     def execute(self, service, command, tty, envs):
-        try:
-            return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
+            except DockerException as e:
+                raise DeployerException(e)

     def logs(self, services, tail, follow, stream):
-        try:
-            return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
+            except DockerException as e:
+                raise DeployerException(e)

     def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
-        try:
-            return self.docker.run(image=image, command=command, user=user, volumes=volumes,
-                                   entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
-        except DockerException as e:
-            raise DeployerException(e)
+        if not opts.o.dry_run:
+            try:
+                return self.docker.run(image=image, command=command, user=user, volumes=volumes,
+                                       entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
+            except DockerException as e:
+                raise DeployerException(e)


 class DockerDeployerConfigGenerator(DeployerConfigGenerator):
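A side effect of the new guards worth flagging: in dry-run mode every guarded method falls through without a `return`, so callers of `ps()`, `port()`, `logs()` and friends receive `None` instead of a `python_on_whales` result. A small caller-side sketch of defensive handling; the helper is illustrative, not part of the commit, and `deployer` is assumed to be an already-constructed `DockerDeployer`:

def print_running_containers(deployer) -> None:
    # DockerDeployer.ps() returns None when opts.o.dry_run is set,
    # and a (possibly empty) container list otherwise.
    container_list = deployer.ps()
    if not container_list:
        print("No running containers (or dry-run mode)")
        return
    for container in container_list:
        print(container.name)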
@@ -85,54 +85,39 @@ def create_deploy_context(
 def up_operation(ctx, services_list, stay_attached=False):
     global_context = ctx.parent.parent.obj
     deploy_context = ctx.obj
-    if not global_context.dry_run:
-        cluster_context = deploy_context.cluster_context
-        container_exec_env = _make_runtime_env(global_context)
-        for attr, value in container_exec_env.items():
-            os.environ[attr] = value
-        if global_context.verbose:
-            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
-        for pre_start_command in cluster_context.pre_start_commands:
-            _run_command(global_context, cluster_context.cluster, pre_start_command)
-        deploy_context.deployer.up(detach=not stay_attached, services=services_list)
-        for post_start_command in cluster_context.post_start_commands:
-            _run_command(global_context, cluster_context.cluster, post_start_command)
-        _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
+    cluster_context = deploy_context.cluster_context
+    container_exec_env = _make_runtime_env(global_context)
+    for attr, value in container_exec_env.items():
+        os.environ[attr] = value
+    if global_context.verbose:
+        print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
+    for pre_start_command in cluster_context.pre_start_commands:
+        _run_command(global_context, cluster_context.cluster, pre_start_command)
+    deploy_context.deployer.up(detach=not stay_attached, services=services_list)
+    for post_start_command in cluster_context.post_start_commands:
+        _run_command(global_context, cluster_context.cluster, post_start_command)
+    _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)


 def down_operation(ctx, delete_volumes, extra_args_list):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose down")
-        timeout_arg = None
-        if extra_args_list:
-            timeout_arg = extra_args_list[0]
-        # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
-        ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
+    timeout_arg = None
+    if extra_args_list:
+        timeout_arg = extra_args_list[0]
+    # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
+    ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)


 def status_operation(ctx):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose status")
-        ctx.obj.deployer.status()
+    ctx.obj.deployer.status()


 def update_operation(ctx):
-    global_context = ctx.parent.parent.obj
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose update")
-        ctx.obj.deployer.update()
+    ctx.obj.deployer.update()


 def ps_operation(ctx):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose ps")
         container_list = ctx.obj.deployer.ps()
         if len(container_list) > 0:
             print("Running containers:")
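Most of the wrappers above simply drop their `dry_run`/`verbose` bookkeeping because the deployers now handle dry-run themselves, but `ps_operation` keeps its guard: it calls `len()` on the result, and (as noted after the DockerDeployer hunk) a dry-run `ps()` returns `None`. A minimal sketch of the interaction that guard avoids, with simplified names rather than the commit's code:

def guarded_ps(dry_run: bool):
    # Mirrors the shape of the guarded DockerDeployer.ps().
    if not dry_run:
        return []  # stands in for self.docker.compose.ps()
    # falls through -> returns None in dry-run mode


container_list = guarded_ps(dry_run=True)
# Without a caller-side dry_run check, len(container_list) would raise
# TypeError here; ps_operation therefore only touches the result when
# global_context.dry_run is false.
if container_list is not None and len(container_list) > 0:
    print("Running containers:")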
@@ -187,15 +172,11 @@ def exec_operation(ctx, extra_args):


 def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
-    global_context = ctx.parent.parent.obj
     extra_args_list = list(extra_args) or None
-    if not global_context.dry_run:
-        if global_context.verbose:
-            print("Running compose logs")
-        services_list = extra_args_list if extra_args_list is not None else []
-        logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
-        for stream_type, stream_content in logs_stream:
-            print(stream_content.decode("utf-8"), end="")
+    services_list = extra_args_list if extra_args_list is not None else []
+    logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
+    for stream_type, stream_content in logs_stream:
+        print(stream_content.decode("utf-8"), end="")


 @command.command()
@@ -463,7 +444,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
                                              tty=False,
                                              envs=container_exec_env)
             waiting_for_data = False
-            if ctx.debug:
+            if ctx.debug and not waiting_for_data:
                 print(f"destination output: {destination_output}")


@@ -81,69 +81,84 @@ class K8sDeployer(Deployer):
         self.apps_api = client.AppsV1Api()
         self.custom_obj_api = client.CustomObjectsApi()

-    def up(self, detach, services):
-        if self.is_kind():
-            # Create the kind cluster
-            create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
-            # Ensure the referenced containers are copied into kind
-            load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
-        self.connect_api()
-
+    def _create_volume_data(self):
         # Create the host-path-mounted PVs for this deployment
         pvs = self.cluster_info.get_pvs()
         for pv in pvs:
             if opts.o.debug:
                 print(f"Sending this pv: {pv}")
-            pv_resp = self.core_api.create_persistent_volume(body=pv)
-            if opts.o.debug:
-                print("PVs created:")
-                print(f"{pv_resp}")
+            if not opts.o.dry_run:
+                pv_resp = self.core_api.create_persistent_volume(body=pv)
+                if opts.o.debug:
+                    print("PVs created:")
+                    print(f"{pv_resp}")

         # Figure out the PVCs for this deployment
         pvcs = self.cluster_info.get_pvcs()
         for pvc in pvcs:
             if opts.o.debug:
                 print(f"Sending this pvc: {pvc}")
-            pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
-            if opts.o.debug:
-                print("PVCs created:")
-                print(f"{pvc_resp}")
+            if not opts.o.dry_run:
+                pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
+                if opts.o.debug:
+                    print("PVCs created:")
+                    print(f"{pvc_resp}")

         # Figure out the ConfigMaps for this deployment
         config_maps = self.cluster_info.get_configmaps()
         for cfg_map in config_maps:
             if opts.o.debug:
                 print(f"Sending this ConfigMap: {cfg_map}")
-            cfg_rsp = self.core_api.create_namespaced_config_map(
-                body=cfg_map,
-                namespace=self.k8s_namespace
-            )
-            if opts.o.debug:
-                print("ConfigMap created:")
-                print(f"{cfg_rsp}")
+            if not opts.o.dry_run:
+                cfg_rsp = self.core_api.create_namespaced_config_map(
+                    body=cfg_map,
+                    namespace=self.k8s_namespace
+                )
+                if opts.o.debug:
+                    print("ConfigMap created:")
+                    print(f"{cfg_rsp}")

+    def _create_deployment(self):
         # Process compose files into a Deployment
         deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
         # Create the k8s objects
         if opts.o.debug:
             print(f"Sending this deployment: {deployment}")
-        deployment_resp = self.apps_api.create_namespaced_deployment(
-            body=deployment, namespace=self.k8s_namespace
-        )
-        if opts.o.debug:
-            print("Deployment created:")
-            print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
-{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
+        if not opts.o.dry_run:
+            deployment_resp = self.apps_api.create_namespaced_deployment(
+                body=deployment, namespace=self.k8s_namespace
+            )
+            if opts.o.debug:
+                print("Deployment created:")
+                print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
+{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")

         service: client.V1Service = self.cluster_info.get_service()
-        service_resp = self.core_api.create_namespaced_service(
-            namespace=self.k8s_namespace,
-            body=service
-        )
         if opts.o.debug:
-            print("Service created:")
-            print(f"{service_resp}")
+            print(f"Sending this service: {service}")
+        if not opts.o.dry_run:
+            service_resp = self.core_api.create_namespaced_service(
+                namespace=self.k8s_namespace,
+                body=service
+            )
+            if opts.o.debug:
+                print("Service created:")
+                print(f"{service_resp}")
+
+    def up(self, detach, services):
+        if not opts.o.dry_run:
+            if self.is_kind():
+                # Create the kind cluster
+                create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
+                # Ensure the referenced containers are copied into kind
+                load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
+            self.connect_api()
+        else:
+            print("Dry run mode enabled, skipping k8s API connect")
+
+        self._create_volume_data()
+        self._create_deployment()

         if not self.is_kind():
             ingress: client.V1Ingress = self.cluster_info.get_ingress()
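In the k8s deployer the old monolithic `up()` is split into `_create_volume_data()` and `_create_deployment()`, the kind-cluster setup and `connect_api()` are skipped in dry-run mode, and each API create call is individually guarded while the `Sending this ...` debug prints stay outside the guard, so a debug dry run still shows the generated objects without contacting the cluster. A minimal sketch of that "print the object, skip the API call" shape for one resource type; `DRY_RUN`/`DEBUG` are stand-ins for `opts.o.dry_run`/`opts.o.debug`, and this is illustrative rather than the commit's code:

from kubernetes import client

DRY_RUN = True   # stands in for opts.o.dry_run
DEBUG = True     # stands in for opts.o.debug


def create_config_map(core_api, cfg_map: client.V1ConfigMap, namespace: str):
    if DEBUG:
        # Debug output is printed whether or not this is a dry run.
        print(f"Sending this ConfigMap: {cfg_map}")
    if not DRY_RUN:
        # Only a real run touches the cluster.
        return core_api.create_namespaced_config_map(body=cfg_map, namespace=namespace)


# In dry-run mode no API client is needed at all:
create_config_map(
    core_api=None,
    cfg_map=client.V1ConfigMap(metadata=client.V1ObjectMeta(name="example-config")),
    namespace="default",
)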
@@ -151,13 +166,14 @@ class K8sDeployer(Deployer):
             if ingress:
                 if opts.o.debug:
                     print(f"Sending this ingress: {ingress}")
-                ingress_resp = self.networking_api.create_namespaced_ingress(
-                    namespace=self.k8s_namespace,
-                    body=ingress
-                )
-                if opts.o.debug:
-                    print("Ingress created:")
-                    print(f"{ingress_resp}")
+                if not opts.o.dry_run:
+                    ingress_resp = self.networking_api.create_namespaced_ingress(
+                        namespace=self.k8s_namespace,
+                        body=ingress
+                    )
+                    if opts.o.debug:
+                        print("Ingress created:")
+                        print(f"{ingress_resp}")
             else:
                 if opts.o.debug:
                     print("No ingress configured")