Refactor deploy into click subcommands (#399)

Former-commit-id: cb58fdb58ce1686f4638946745830f391d820f4b
David Boreham 2023-05-19 07:01:46 +08:00 committed by GitHub
parent 87c25dfb5e
commit 1ffc6b1687
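
For orientation, this is the click group/subcommand pattern the refactor adopts: the group callback runs first, builds the shared deployment state once, and stashes it on ctx.obj; each subcommand then retrieves that state via @click.pass_context. A minimal standalone sketch (illustrative only, not the project's real wiring or option set):

import click


class DeployCommandContext:
    # Shared state built once by the group callback and read by every subcommand.
    def __init__(self, cluster, compose_files):
        self.cluster = cluster
        self.compose_files = compose_files


@click.group()
@click.option("--cluster", help="specify a non-default cluster name")
@click.pass_context
def command(ctx, cluster):
    '''deploy a stack'''
    # Runs before any subcommand; click hands this ctx.obj down to child contexts.
    ctx.obj = DeployCommandContext(cluster, compose_files=["docker-compose.yml"])  # assumed file name


@command.command()
@click.argument("extra_args", nargs=-1)
@click.pass_context
def up(ctx, extra_args):
    # ctx.obj here is the DeployCommandContext created by the group callback.
    click.echo(f"up {list(extra_args)} on cluster {ctx.obj.cluster}")


if __name__ == "__main__":
    command()

In the real module below, the deploy group itself sits under the tool's top-level CLI group, which is why its subcommands reach the global options through ctx.parent.parent.obj rather than ctx.obj.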


@@ -28,108 +28,146 @@ import importlib.resources
 from pathlib import Path
 from .util import include_exclude_check, get_parsed_stack_config


+class DeployCommandContext(object):
+    def __init__(self, cluster_context, docker):
+        self.cluster_context = cluster_context
+        self.docker = docker
+
+
-@click.command()
+@click.group()
 @click.option("--include", help="only start these components")
 @click.option("--exclude", help="don\'t start these components")
 @click.option("--env-file", help="env file to be used")
 @click.option("--cluster", help="specify a non-default cluster name")
-@click.argument('command', required=True) # help: command: up|down|ps
-@click.argument('extra_args', nargs=-1) # help: command: up|down|ps <service1> <service2>
 @click.pass_context
-def command(ctx, include, exclude, env_file, cluster, command, extra_args):
+def command(ctx, include, exclude, env_file, cluster):
     '''deploy a stack'''
-    # TODO: implement option exclusion and command value constraint lost with the move from argparse to click
-    debug = ctx.obj.debug
-    quiet = ctx.obj.quiet
-    verbose = ctx.obj.verbose
-    local_stack = ctx.obj.local_stack
-    dry_run = ctx.obj.dry_run
-    stack = ctx.obj.stack
-    cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster)
+    cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster, env_file)
     # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
-    docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, compose_env_file=env_file)
-    extra_args_list = list(extra_args) or None
-    if not dry_run:
-        if command == "up":
-            container_exec_env = _make_runtime_env(ctx.obj)
-            for attr, value in container_exec_env.items():
-                os.environ[attr] = value
-            if verbose:
-                print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}")
-            for pre_start_command in cluster_context.pre_start_commands:
-                _run_command(ctx.obj, cluster_context.cluster, pre_start_command)
-            docker.compose.up(detach=True, services=extra_args_list)
-            for post_start_command in cluster_context.post_start_commands:
-                _run_command(ctx.obj, cluster_context.cluster, post_start_command)
-            _orchestrate_cluster_config(ctx.obj, cluster_context.config, docker, container_exec_env)
-        elif command == "down":
-            if verbose:
-                print("Running compose down")
-            timeout_arg = None
-            if extra_args_list:
-                timeout_arg=extra_args_list[0]
-            # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
-            docker.compose.down(timeout=timeout_arg)
-        elif command == "exec":
-            if extra_args_list is None or len(extra_args_list) < 2:
-                print("Usage: exec <service> <cmd>")
-                sys.exit(1)
-            service_name = extra_args_list[0]
-            command_to_exec = ["sh", "-c"] + extra_args_list[1:]
-            container_exec_env = _make_runtime_env(ctx.obj)
-            if verbose:
-                print(f"Running compose exec {service_name} {command_to_exec}")
-            try:
-                docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
-            except DockerException as error:
-                print(f"container command returned error exit status")
-        elif command == "port":
-            if extra_args_list is None or len(extra_args_list) < 2:
-                print("Usage: port <service> <exposed-port>")
-                sys.exit(1)
-            service_name = extra_args_list[0]
-            exposed_port = extra_args_list[1]
-            if verbose:
-                print(f"Running compose port {service_name} {exposed_port}")
-            mapped_port_data = docker.compose.port(service_name, exposed_port)
-            print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
-        elif command == "ps":
-            if verbose:
-                print("Running compose ps")
-            container_list = docker.compose.ps()
-            if len(container_list) > 0:
-                print("Running containers:")
-                for container in container_list:
-                    print(f"id: {container.id}, name: {container.name}, ports: ", end="")
-                    ports = container.network_settings.ports
-                    comma = ""
-                    for port_mapping in ports.keys():
-                        mapping = ports[port_mapping]
-                        print(comma, end="")
-                        if mapping is None:
-                            print(f"{port_mapping}", end="")
-                        else:
-                            print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="")
-                        comma = ", "
-                    print()
-            else:
-                print("No containers running")
-        elif command == "logs":
-            if verbose:
-                print("Running compose logs")
-            logs_output = docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
-            print(logs_output)
+    docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
+                          compose_env_file=cluster_context.env_file)
+    ctx.obj = DeployCommandContext(cluster_context, docker)
+    # Subcommand is executed now, by the magic of click
+
+
+@command.command()
+@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
+@click.pass_context
+def up(ctx, extra_args):
+    global_context = ctx.parent.parent.obj
+    extra_args_list = list(extra_args) or None
+    if not global_context.dry_run:
+        cluster_context = ctx.obj.cluster_context
+        container_exec_env = _make_runtime_env(global_context)
+        for attr, value in container_exec_env.items():
+            os.environ[attr] = value
+        if global_context.verbose:
+            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}")
+        for pre_start_command in cluster_context.pre_start_commands:
+            _run_command(ctx.obj, cluster_context.cluster, pre_start_command)
+        ctx.obj.docker.compose.up(detach=True, services=extra_args_list)
+        for post_start_command in cluster_context.post_start_commands:
+            _run_command(ctx.obj, cluster_context.cluster, post_start_command)
+        _orchestrate_cluster_config(ctx.obj, cluster_context.config, ctx.obj.docker, container_exec_env)
+
+
+@command.command()
+@click.option("--delete-volumes", default=False, help="delete data volumes")
+@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
+@click.pass_context
+def down(ctx, delete_volumes, extra_args):
+    global_context = ctx.parent.parent.obj
+    extra_args_list = list(extra_args) or None
+    if not global_context.dry_run:
+        if global_context.verbose:
+            print("Running compose down")
+        timeout_arg = None
+        if extra_args_list:
+            timeout_arg = extra_args_list[0]
+        # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
+        ctx.obj.docker.compose.down(timeout=timeout_arg)
+
+
+@command.command()
+@click.pass_context
+def ps(ctx):
+    global_context = ctx.parent.parent.obj
+    if not global_context.dry_run:
+        if global_context.verbose:
+            print("Running compose ps")
+        container_list = ctx.obj.docker.compose.ps()
+        if len(container_list) > 0:
+            print("Running containers:")
+            for container in container_list:
+                print(f"id: {container.id}, name: {container.name}, ports: ", end="")
+                ports = container.network_settings.ports
+                comma = ""
+                for port_mapping in ports.keys():
+                    mapping = ports[port_mapping]
+                    print(comma, end="")
+                    if mapping is None:
+                        print(f"{port_mapping}", end="")
+                    else:
+                        print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="")
+                    comma = ", "
+                print()
+        else:
+            print("No containers running")
+
+
+@command.command()
+@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
+@click.pass_context
+def port(ctx, extra_args):
+    global_context = ctx.parent.parent.obj
+    extra_args_list = list(extra_args) or None
+    if not global_context.dry_run:
+        if extra_args_list is None or len(extra_args_list) < 2:
+            print("Usage: port <service> <exposed-port>")
+            sys.exit(1)
+        service_name = extra_args_list[0]
+        exposed_port = extra_args_list[1]
+        if ctx.parent.obj.verbose:
+            print(f"Running compose port {service_name} {exposed_port}")
+        mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port)
+        print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
+
+
+@command.command()
+@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
+@click.pass_context
+def exec(ctx, extra_args):
+    global_context = ctx.parent.parent.obj
+    extra_args_list = list(extra_args) or None
+    if not global_context.dry_run:
+        if extra_args_list is None or len(extra_args_list) < 2:
+            print("Usage: exec <service> <cmd>")
+            sys.exit(1)
+        service_name = extra_args_list[0]
+        command_to_exec = ["sh", "-c"] + extra_args_list[1:]
+        container_exec_env = _make_runtime_env(global_context)
+        if global_context.verbose:
+            print(f"Running compose exec {service_name} {command_to_exec}")
+        try:
+            ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
+        except DockerException as error:
+            print(f"container command returned error exit status")
+
+
+@command.command()
+@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
+@click.pass_context
+def logs(ctx, extra_args):
+    global_context = ctx.parent.parent.obj
+    extra_args_list = list(extra_args) or None
+    if not global_context.dry_run:
+        if global_context.verbose:
+            print("Running compose logs")
+        logs_output = ctx.obj.docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
+        print(logs_output)


 def get_stack_status(ctx, stack):
@@ -137,7 +175,7 @@ def get_stack_status(ctx, stack):

     ctx_copy = copy.copy(ctx)
     ctx_copy.stack = stack
-    cluster_context = _make_cluster_context(ctx_copy, None, None, None)
+    cluster_context = _make_cluster_context(ctx_copy, None, None, None, None)
     docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
     # TODO: refactor to avoid duplicating this code above
     if ctx.verbose:
@@ -162,7 +200,7 @@ def _make_runtime_env(ctx):
     return container_exec_env


-def _make_cluster_context(ctx, include, exclude, cluster):
+def _make_cluster_context(ctx, include, exclude, cluster, env_file):

     if ctx.local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
@@ -235,16 +273,17 @@ def _make_cluster_context(ctx, include, exclude, cluster):

     if ctx.verbose:
         print(f"files: {compose_files}")

-    return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config)
+    return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)


 class cluster_context:
-    def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands, config) -> None:
+    def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands, config, env_file) -> None:
         self.cluster = cluster
         self.compose_files = compose_files
         self.pre_start_commands = pre_start_commands
         self.post_start_commands = post_start_commands
         self.config = config
+        self.env_file = env_file


 def _convert_to_new_format(old_pod_array):
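
Assuming the group above is mounted as deploy under the tool's top-level CLI (the entry-point name is not part of this diff), the former positional command argument becomes real subcommands; illustrative invocations, with <cli> standing in for the installed executable:

<cli> deploy --cluster my-cluster up
<cli> deploy down 30                       # optional graceful-shutdown timeout, in seconds
<cli> deploy ps
<cli> deploy port <service> <exposed-port>
<cli> deploy exec <service> "<cmd>"
<cli> deploy logs <service>

Only the subcommand names, options, and arguments here come from the diff; the executable and group names are placeholders.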