forked from cerc-io/stack-orchestrator
Implement dry run support for k8s deploy (#727)
commit 6848fc33cf
parent 36bb068983
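
The changes below revolve around one pattern: side-effecting calls are guarded by the global dry-run flag (`opts.o.dry_run` in the deployer classes), so a dry run reports what would happen without invoking Docker or the Kubernetes API. The following is a minimal, self-contained sketch of that guard pattern; the Options and ExampleDeployer names are invented for illustration and are not part of the project.

# Illustrative sketch of the dry-run guard used throughout this commit.
# The Options and ExampleDeployer names are invented for this example; only
# the "if not dry_run" guard mirrors the real code.
from dataclasses import dataclass


@dataclass
class Options:
    dry_run: bool = False
    debug: bool = False


class ExampleDeployer:
    def __init__(self, opts: Options):
        self.opts = opts

    def up(self):
        # Describe the intended action when debugging...
        if self.opts.debug:
            print("Would start services: web, db")
        # ...but skip the side-effecting call when dry_run is set.
        if not self.opts.dry_run:
            print("starting services for real")


if __name__ == "__main__":
    ExampleDeployer(Options(dry_run=True, debug=True)).up()   # description only
    ExampleDeployer(Options(dry_run=False, debug=True)).up()  # description + action
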
@@ -17,6 +17,7 @@ from pathlib import Path
from python_on_whales import DockerClient, DockerException
from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.opts import opts


class DockerDeployer(Deployer):
@@ -29,24 +30,28 @@ class DockerDeployer(Deployer):
        self.type = type

    def up(self, detach, services):
        if not opts.o.dry_run:
            try:
                return self.docker.compose.up(detach=detach, services=services)
            except DockerException as e:
                raise DeployerException(e)

    def down(self, timeout, volumes):
        if not opts.o.dry_run:
            try:
                return self.docker.compose.down(timeout=timeout, volumes=volumes)
            except DockerException as e:
                raise DeployerException(e)

    def update(self):
        if not opts.o.dry_run:
            try:
                return self.docker.compose.restart()
            except DockerException as e:
                raise DeployerException(e)

    def status(self):
        if not opts.o.dry_run:
            try:
                for p in self.docker.compose.ps():
                    print(f"{p.name}\t{p.state.status}")
@@ -54,30 +59,35 @@ class DockerDeployer(Deployer):
                raise DeployerException(e)

    def ps(self):
        if not opts.o.dry_run:
            try:
                return self.docker.compose.ps()
            except DockerException as e:
                raise DeployerException(e)

    def port(self, service, private_port):
        if not opts.o.dry_run:
            try:
                return self.docker.compose.port(service=service, private_port=private_port)
            except DockerException as e:
                raise DeployerException(e)

    def execute(self, service, command, tty, envs):
        if not opts.o.dry_run:
            try:
                return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
            except DockerException as e:
                raise DeployerException(e)

    def logs(self, services, tail, follow, stream):
        if not opts.o.dry_run:
            try:
                return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
            except DockerException as e:
                raise DeployerException(e)

    def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
        if not opts.o.dry_run:
            try:
                return self.docker.run(image=image, command=command, user=user, volumes=volumes,
                                       entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)

@@ -85,7 +85,6 @@ def create_deploy_context(
def up_operation(ctx, services_list, stay_attached=False):
    global_context = ctx.parent.parent.obj
    deploy_context = ctx.obj
    if not global_context.dry_run:
        cluster_context = deploy_context.cluster_context
        container_exec_env = _make_runtime_env(global_context)
        for attr, value in container_exec_env.items():
@@ -101,10 +100,6 @@ def up_operation(ctx, services_list, stay_attached=False):


def down_operation(ctx, delete_volumes, extra_args_list):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose down")
        timeout_arg = None
        if extra_args_list:
            timeout_arg = extra_args_list[0]
@@ -113,26 +108,16 @@ def down_operation(ctx, delete_volumes, extra_args_list):


def status_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose status")
        ctx.obj.deployer.status()


def update_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose update")
        ctx.obj.deployer.update()


def ps_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose ps")
        container_list = ctx.obj.deployer.ps()
        if len(container_list) > 0:
            print("Running containers:")
@@ -187,11 +172,7 @@ def exec_operation(ctx, extra_args):


def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose logs")
        services_list = extra_args_list if extra_args_list is not None else []
        logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
        for stream_type, stream_content in logs_stream:
@@ -463,7 +444,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
                                              tty=False,
                                              envs=container_exec_env)
            waiting_for_data = False
            if ctx.debug:
            if ctx.debug and not waiting_for_data:
                print(f"destination output: {destination_output}")

@@ -81,20 +81,13 @@ class K8sDeployer(Deployer):
        self.apps_api = client.AppsV1Api()
        self.custom_obj_api = client.CustomObjectsApi()

    def up(self, detach, services):

        if self.is_kind():
            # Create the kind cluster
            create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
            # Ensure the referenced containers are copied into kind
            load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
        self.connect_api()

    def _create_volume_data(self):
        # Create the host-path-mounted PVs for this deployment
        pvs = self.cluster_info.get_pvs()
        for pv in pvs:
            if opts.o.debug:
                print(f"Sending this pv: {pv}")
            if not opts.o.dry_run:
                pv_resp = self.core_api.create_persistent_volume(body=pv)
                if opts.o.debug:
                    print("PVs created:")
@@ -105,6 +98,8 @@ class K8sDeployer(Deployer):
        for pvc in pvcs:
            if opts.o.debug:
                print(f"Sending this pvc: {pvc}")

            if not opts.o.dry_run:
                pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
                if opts.o.debug:
                    print("PVCs created:")
@@ -115,6 +110,7 @@ class K8sDeployer(Deployer):
        for cfg_map in config_maps:
            if opts.o.debug:
                print(f"Sending this ConfigMap: {cfg_map}")
            if not opts.o.dry_run:
                cfg_rsp = self.core_api.create_namespaced_config_map(
                    body=cfg_map,
                    namespace=self.k8s_namespace
@@ -123,11 +119,13 @@ class K8sDeployer(Deployer):
                print("ConfigMap created:")
                print(f"{cfg_rsp}")

    def _create_deployment(self):
        # Process compose files into a Deployment
        deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
        # Create the k8s objects
        if opts.o.debug:
            print(f"Sending this deployment: {deployment}")
        if not opts.o.dry_run:
            deployment_resp = self.apps_api.create_namespaced_deployment(
                body=deployment, namespace=self.k8s_namespace
            )
@@ -137,6 +135,9 @@ class K8sDeployer(Deployer):
                    {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")

        service: client.V1Service = self.cluster_info.get_service()
        if opts.o.debug:
            print(f"Sending this service: {service}")
        if not opts.o.dry_run:
            service_resp = self.core_api.create_namespaced_service(
                namespace=self.k8s_namespace,
                body=service
@@ -145,12 +146,27 @@ class K8sDeployer(Deployer):
            print("Service created:")
            print(f"{service_resp}")

    def up(self, detach, services):
        if not opts.o.dry_run:
            if self.is_kind():
                # Create the kind cluster
                create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
                # Ensure the referenced containers are copied into kind
                load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
            self.connect_api()
        else:
            print("Dry run mode enabled, skipping k8s API connect")

        self._create_volume_data()
        self._create_deployment()

        if not self.is_kind():
            ingress: client.V1Ingress = self.cluster_info.get_ingress()

            if ingress:
                if opts.o.debug:
                    print(f"Sending this ingress: {ingress}")
                if not opts.o.dry_run:
                    ingress_resp = self.networking_api.create_namespaced_ingress(
                        namespace=self.k8s_namespace,
                        body=ingress