k8s refactor (#595)

This commit is contained in:
David Boreham 2023-10-24 14:44:48 -06:00 committed by GitHub
parent fc051265d8
commit 573a19a3b7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 320 additions and 46 deletions

View File

@ -13,7 +13,7 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
# Deploys the system components using docker-compose # Deploys the system components using a deployer (either docker-compose or k8s)
import hashlib import hashlib
import copy import copy
@ -22,10 +22,11 @@ import sys
from dataclasses import dataclass from dataclasses import dataclass
from importlib import resources from importlib import resources
import subprocess import subprocess
from python_on_whales import DockerClient, DockerException
import click import click
from pathlib import Path from pathlib import Path
from app.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path from app.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
from app.deployer import Deployer, DeployerException
from app.deployer_factory import getDeployer
from app.deploy_types import ClusterContext, DeployCommandContext from app.deploy_types import ClusterContext, DeployCommandContext
from app.deployment_create import create as deployment_create from app.deployment_create import create as deployment_create
from app.deployment_create import init as deployment_init from app.deployment_create import init as deployment_init
@ -37,8 +38,9 @@ from app.deployment_create import setup as deployment_setup
@click.option("--exclude", help="don\'t start these components") @click.option("--exclude", help="don\'t start these components")
@click.option("--env-file", help="env file to be used") @click.option("--env-file", help="env file to be used")
@click.option("--cluster", help="specify a non-default cluster name") @click.option("--cluster", help="specify a non-default cluster name")
@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s)")
@click.pass_context @click.pass_context
def command(ctx, include, exclude, env_file, cluster): def command(ctx, include, exclude, env_file, cluster, deploy_to):
'''deploy a stack''' '''deploy a stack'''
# Although in theory for some subcommands (e.g. deploy create) the stack can be inferred, # Although in theory for some subcommands (e.g. deploy create) the stack can be inferred,
@ -50,16 +52,20 @@ def command(ctx, include, exclude, env_file, cluster):
if ctx.parent.obj.debug: if ctx.parent.obj.debug:
print(f"ctx.parent.obj: {ctx.parent.obj}") print(f"ctx.parent.obj: {ctx.parent.obj}")
ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file)
if deploy_to is None:
deploy_to = "compose"
ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file, deploy_to)
# Subcommand is executed now, by the magic of click # Subcommand is executed now, by the magic of click
def create_deploy_context(global_context, stack, include, exclude, cluster, env_file): def create_deploy_context(global_context, stack, include, exclude, cluster, env_file, deployer):
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file) cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, deployer = getDeployer(deployer, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file) compose_env_file=cluster_context.env_file)
return DeployCommandContext(stack, cluster_context, docker) return DeployCommandContext(stack, cluster_context, deployer)
def up_operation(ctx, services_list, stay_attached=False): def up_operation(ctx, services_list, stay_attached=False):
@ -74,10 +80,10 @@ def up_operation(ctx, services_list, stay_attached=False):
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}") print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
for pre_start_command in cluster_context.pre_start_commands: for pre_start_command in cluster_context.pre_start_commands:
_run_command(global_context, cluster_context.cluster, pre_start_command) _run_command(global_context, cluster_context.cluster, pre_start_command)
deploy_context.docker.compose.up(detach=not stay_attached, services=services_list) deploy_context.deployer.compose_up(detach=not stay_attached, services=services_list)
for post_start_command in cluster_context.post_start_commands: for post_start_command in cluster_context.post_start_commands:
_run_command(global_context, cluster_context.cluster, post_start_command) _run_command(global_context, cluster_context.cluster, post_start_command)
_orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env) _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
def down_operation(ctx, delete_volumes, extra_args_list): def down_operation(ctx, delete_volumes, extra_args_list):
@ -89,7 +95,7 @@ def down_operation(ctx, delete_volumes, extra_args_list):
if extra_args_list: if extra_args_list:
timeout_arg = extra_args_list[0] timeout_arg = extra_args_list[0]
# Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes) ctx.obj.deployer.compose_down(timeout=timeout_arg, volumes=delete_volumes)
def ps_operation(ctx): def ps_operation(ctx):
@ -97,7 +103,7 @@ def ps_operation(ctx):
if not global_context.dry_run: if not global_context.dry_run:
if global_context.verbose: if global_context.verbose:
print("Running compose ps") print("Running compose ps")
container_list = ctx.obj.docker.compose.ps() container_list = ctx.obj.deployer.compose_ps()
if len(container_list) > 0: if len(container_list) > 0:
print("Running containers:") print("Running containers:")
for container in container_list: for container in container_list:
@ -128,7 +134,7 @@ def port_operation(ctx, extra_args):
exposed_port = extra_args_list[1] exposed_port = extra_args_list[1]
if global_context.verbose: if global_context.verbose:
print(f"Running compose port {service_name} {exposed_port}") print(f"Running compose port {service_name} {exposed_port}")
mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port) mapped_port_data = ctx.obj.deployer.compose_port(service_name, exposed_port)
print(f"{mapped_port_data[0]}:{mapped_port_data[1]}") print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
@ -145,8 +151,8 @@ def exec_operation(ctx, extra_args):
if global_context.verbose: if global_context.verbose:
print(f"Running compose exec {service_name} {command_to_exec}") print(f"Running compose exec {service_name} {command_to_exec}")
try: try:
ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env) ctx.obj.deployer.compose_execute(service_name, command_to_exec, envs=container_exec_env)
except DockerException: except DeployerException:
print("container command returned error exit status") print("container command returned error exit status")
@ -157,7 +163,7 @@ def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
if global_context.verbose: if global_context.verbose:
print("Running compose logs") print("Running compose logs")
services_list = extra_args_list if extra_args_list is not None else [] services_list = extra_args_list if extra_args_list is not None else []
logs_stream = ctx.obj.docker.compose.logs(services=services_list, tail=tail, follow=follow, stream=True) logs_stream = ctx.obj.deployer.compose_logs(services=services_list, tail=tail, follow=follow, stream=True)
for stream_type, stream_content in logs_stream: for stream_type, stream_content in logs_stream:
print(stream_content.decode("utf-8"), end="") print(stream_content.decode("utf-8"), end="")
@ -214,11 +220,11 @@ def get_stack_status(ctx, stack):
ctx_copy.stack = stack ctx_copy.stack = stack
cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None) cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster) deployer = Deployer(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
# TODO: refactor to avoid duplicating this code above # TODO: refactor to avoid duplicating this code above
if ctx.verbose: if ctx.verbose:
print("Running compose ps") print("Running compose ps")
container_list = docker.compose.ps() container_list = deployer.compose_ps()
if len(container_list) > 0: if len(container_list) > 0:
if ctx.debug: if ctx.debug:
print(f"Container list from compose ps: {container_list}") print(f"Container list from compose ps: {container_list}")
@ -359,7 +365,7 @@ def _run_command(ctx, cluster_name, command):
sys.exit(1) sys.exit(1)
def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env): def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
@dataclass @dataclass
class ConfigDirective: class ConfigDirective:
@ -390,13 +396,13 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
# TODO: fix the script paths so they're consistent between containers # TODO: fix the script paths so they're consistent between containers
source_value = None source_value = None
try: try:
source_value = docker.compose.execute(pd.source_container, source_value = deployer.compose_execute(pd.source_container,
["sh", "-c", ["sh", "-c",
"sh /docker-entrypoint-scripts.d/export-" "sh /docker-entrypoint-scripts.d/export-"
f"{pd.source_variable}.sh"], f"{pd.source_variable}.sh"],
tty=False, tty=False,
envs=container_exec_env) envs=container_exec_env)
except DockerException as error: except DeployerException as error:
if ctx.debug: if ctx.debug:
print(f"Docker exception reading config source: {error}") print(f"Docker exception reading config source: {error}")
# If the script executed failed for some reason, we get: # If the script executed failed for some reason, we get:
@ -411,12 +417,12 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
if source_value: if source_value:
if ctx.debug: if ctx.debug:
print(f"fetched source value: {source_value}") print(f"fetched source value: {source_value}")
destination_output = docker.compose.execute(pd.destination_container, destination_output = deployer.compose_execute(pd.destination_container,
["sh", "-c", ["sh", "-c",
f"sh /scripts/import-{pd.destination_variable}.sh" f"sh /scripts/import-{pd.destination_variable}.sh"
f" {source_value}"], f" {source_value}"],
tty=False, tty=False,
envs=container_exec_env) envs=container_exec_env)
waiting_for_data = False waiting_for_data = False
if ctx.debug: if ctx.debug:
print(f"destination output: {destination_output}") print(f"destination output: {destination_output}")

67
app/deploy_docker.py Normal file
View File

@ -0,0 +1,67 @@
# Copyright © 2022, 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from python_on_whales import DockerClient, DockerException
from app.deployer import Deployer, DeployerException
class DockerDeployer(Deployer):
    """Deployer implementation backed by docker compose (via python_on_whales).

    Translates the generic Deployer interface into DockerClient.compose calls,
    converting DockerException into DeployerException so callers only need to
    handle one exception type regardless of backend.
    """
    name: str = "compose"

    def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
        self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
                                   compose_env_file=compose_env_file)

    def compose_up(self, detach, services):
        """Bring up the given services; detach controls foreground/background."""
        try:
            return self.docker.compose.up(detach=detach, services=services)
        except DockerException as e:
            # Chain with "from e" so the docker-level traceback is preserved
            # instead of being reported as "during handling of the above...".
            raise DeployerException(e) from e

    def compose_down(self, timeout, volumes):
        """Stop the cluster with the given shutdown timeout, optionally deleting volumes."""
        try:
            return self.docker.compose.down(timeout=timeout, volumes=volumes)
        except DockerException as e:
            raise DeployerException(e) from e

    def compose_ps(self):
        """Return the list of running containers."""
        try:
            return self.docker.compose.ps()
        except DockerException as e:
            raise DeployerException(e) from e

    def compose_port(self, service, private_port):
        """Return the host mapping for a service's private port."""
        try:
            return self.docker.compose.port(service=service, private_port=private_port)
        except DockerException as e:
            raise DeployerException(e) from e

    def compose_execute(self, service_name, command, envs):
        """Execute a command inside a running service container."""
        try:
            return self.docker.compose.execute(service_name=service_name, command=command, envs=envs)
        except DockerException as e:
            raise DeployerException(e) from e

    def compose_logs(self, services, tail, follow, stream):
        """Return (or stream) logs for the given services."""
        try:
            return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
        except DockerException as e:
            raise DeployerException(e) from e

    def run(self, image, command, user, volumes, entrypoint=None):
        """Run a one-shot container from image with the given command."""
        try:
            return self.docker.run(image=image, command=command, user=user, volumes=volumes, entrypoint=entrypoint)
        except DockerException as e:
            raise DeployerException(e) from e

46
app/deploy_k8s.py Normal file
View File

@ -0,0 +1,46 @@
# Copyright © 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from kubernetes import client, config
from app.deployer import Deployer
class K8sDeployer(Deployer):
    """Deployer implementation targeting a Kubernetes cluster.

    Currently a skeleton: the constructor connects to the cluster, but the
    deploy operations are not yet implemented and silently do nothing.
    """
    name: str = "k8s"

    def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
        # Load credentials from the default kubeconfig (e.g. ~/.kube/config)
        # and build a core-API client. The compose_* arguments are accepted
        # for interface compatibility with DockerDeployer but are unused here
        # so far.
        config.load_kube_config()
        self.client = client.CoreV1Api()

    def compose_up(self, detach, services):
        """Not yet implemented; no-op."""

    def compose_down(self, timeout, volumes):
        """Not yet implemented; no-op."""

    def compose_ps(self):
        """Not yet implemented; returns None (callers expecting a list must cope)."""

    def compose_port(self, service, private_port):
        """Not yet implemented; no-op."""

    def compose_execute(self, service_name, command, envs):
        """Not yet implemented; no-op."""

    def compose_logs(self, services, tail, follow, stream):
        """Not yet implemented; no-op."""

    def run(self, image, command, user, volumes, entrypoint=None):
        """Not yet implemented; no-op."""

View File

@ -16,8 +16,8 @@
from typing import List from typing import List
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from pathlib import Path
from python_on_whales import DockerClient
from app.command_types import CommandOptions from app.command_types import CommandOptions
from app.deployer import Deployer
@dataclass @dataclass
@ -35,7 +35,7 @@ class ClusterContext:
class DeployCommandContext: class DeployCommandContext:
stack: str stack: str
cluster_context: ClusterContext cluster_context: ClusterContext
docker: DockerClient deployer: Deployer
@dataclass @dataclass

View File

@ -47,12 +47,12 @@ def _volumes_to_docker(mounts: List[VolumeMapping]):
def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]): def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]):
docker = ctx.docker deployer = ctx.deployer
container_image = _container_image_from_service(ctx.stack, service) container_image = _container_image_from_service(ctx.stack, service)
docker_volumes = _volumes_to_docker(mounts) docker_volumes = _volumes_to_docker(mounts)
if ctx.cluster_context.options.debug: if ctx.cluster_context.options.debug:
print(f"Running this command in {service} container: {command}") print(f"Running this command in {service} container: {command}")
docker_output = docker.run( docker_output = deployer.run(
container_image, container_image,
["-c", command], entrypoint="sh", ["-c", command], entrypoint="sh",
user=f"{os.getuid()}:{os.getgid()}", user=f"{os.getuid()}:{os.getgid()}",

52
app/deployer.py Normal file
View File

@ -0,0 +1,52 @@
# Copyright © 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from abc import ABC, abstractmethod
class Deployer(ABC):
    """Abstract interface for a cluster deployment backend.

    Concrete implementations (e.g. docker compose, k8s) supply the mechanics;
    the deploy commands program against this interface only.
    """

    @abstractmethod
    def compose_up(self, detach, services):
        """Bring up the given services; detach controls foreground/background."""

    @abstractmethod
    def compose_down(self, timeout, volumes):
        """Stop the cluster with the given shutdown timeout, optionally deleting volumes."""

    @abstractmethod
    def compose_ps(self):
        """Return the list of running containers."""

    @abstractmethod
    def compose_port(self, service, private_port):
        """Return the host mapping for a service's private port."""

    @abstractmethod
    def compose_execute(self, service_name, command, envs):
        """Execute a command inside a running service container."""

    @abstractmethod
    def compose_logs(self, services, tail, follow, stream):
        """Return (or stream) logs for the given services."""

    @abstractmethod
    def run(self, image, command, user, volumes, entrypoint):
        """Run a one-shot container from image with the given command."""
class DeployerException(Exception):
    """Raised when an underlying deployer backend operation fails.

    Wraps backend-specific errors (e.g. DockerException) so callers can catch
    a single exception type regardless of which deployer is in use.
    """
    # No __init__ override needed: Exception already stores *args; the
    # previous override only forwarded to super() verbatim.

26
app/deployer_factory.py Normal file
View File

@ -0,0 +1,26 @@
# Copyright © 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from app.deploy_k8s import K8sDeployer
from app.deploy_docker import DockerDeployer
def getDeployer(type, compose_files, compose_project_name, compose_env_file):
    """Factory: return the Deployer implementation for the given cluster system.

    type: "compose" (or None, the default) selects docker-compose; "k8s"
    selects Kubernetes. Any other value raises ValueError after printing an
    error, rather than silently returning None (which previously surfaced
    later as an opaque AttributeError in the caller).
    """
    if type == "compose" or type is None:
        return DockerDeployer(compose_files, compose_project_name, compose_env_file)
    elif type == "k8s":
        return K8sDeployer(compose_files, compose_project_name, compose_env_file)
    else:
        print(f"ERROR: deploy-to {type} is not valid")
        raise ValueError(f"Invalid deploy-to type: {type}")

View File

@ -14,20 +14,25 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>. # along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click import click
from dataclasses import dataclass
from pathlib import Path from pathlib import Path
import sys import sys
from app.deploy import up_operation, down_operation, ps_operation, port_operation from app.deploy import up_operation, down_operation, ps_operation, port_operation
from app.deploy import exec_operation, logs_operation, create_deploy_context from app.deploy import exec_operation, logs_operation, create_deploy_context
from app.stack import Stack
from app.spec import Spec
@dataclass
class DeploymentContext: class DeploymentContext:
dir: Path dir: Path
spec: Spec
stack: Stack
def get_stack_file(self): def get_stack_file(self):
return self.dir.joinpath("stack.yml") return self.dir.joinpath("stack.yml")
def get_spec_file(self):
return self.dir.joinpath("spec.yml")
def get_env_file(self): def get_env_file(self):
return self.dir.joinpath("config.env") return self.dir.joinpath("config.env")
@ -35,12 +40,19 @@ class DeploymentContext:
def get_cluster_name(self): def get_cluster_name(self):
return None return None
def init(self, dir):
self.dir = dir
self.stack = Stack()
self.stack.init_from_file(self.get_stack_file())
self.spec = Spec()
self.spec.init_from_file(self.get_spec_file())
@click.group() @click.group()
@click.option("--dir", required=True, help="path to deployment directory") @click.option("--dir", required=True, help="path to deployment directory")
@click.pass_context @click.pass_context
def command(ctx, dir): def command(ctx, dir):
'''create a deployment''' '''manage a deployment'''
# Check that --stack wasn't supplied # Check that --stack wasn't supplied
if ctx.parent.obj.stack: if ctx.parent.obj.stack:
@ -55,14 +67,18 @@ def command(ctx, dir):
print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory") print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
sys.exit(1) sys.exit(1)
# Store the deployment context for subcommands # Store the deployment context for subcommands
ctx.obj = DeploymentContext(dir_path) deployment_context = DeploymentContext()
deployment_context.init(dir_path)
ctx.obj = deployment_context
def make_deploy_context(ctx): def make_deploy_context(ctx):
stack_file_path = ctx.obj.get_stack_file() context: DeploymentContext = ctx.obj
env_file = ctx.obj.get_env_file() stack_file_path = context.get_stack_file()
cluster_name = ctx.obj.get_cluster_name() env_file = context.get_env_file()
return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file) cluster_name = context.get_cluster_name()
return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file,
context.spec.obj["deploy-to"])
@command.command() @command.command()

View File

@ -249,7 +249,7 @@ def init(ctx, config, output, map_ports_to_host):
stack = global_options(ctx).stack stack = global_options(ctx).stack
debug = global_options(ctx).debug debug = global_options(ctx).debug
default_spec_file_content = call_stack_deploy_init(ctx.obj) default_spec_file_content = call_stack_deploy_init(ctx.obj)
spec_file_content = {"stack": stack} spec_file_content = {"stack": stack, "deploy-to": ctx.obj.deployer.name}
if default_spec_file_content: if default_spec_file_content:
spec_file_content.update(default_spec_file_content) spec_file_content.update(default_spec_file_content)
config_variables = _parse_config_variables(config) config_variables = _parse_config_variables(config)
@ -315,7 +315,7 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
sys.exit(1) sys.exit(1)
os.mkdir(deployment_dir) os.mkdir(deployment_dir)
# Copy spec file and the stack file into the deployment dir # Copy spec file and the stack file into the deployment dir
copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file))) copyfile(spec_file, os.path.join(deployment_dir, "spec.yml"))
copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file))) copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
# Copy any config variables from the spec file into an env file suitable for compose # Copy any config variables from the spec file into an env file suitable for compose
_write_config_file(spec_file, os.path.join(deployment_dir, "config.env")) _write_config_file(spec_file, os.path.join(deployment_dir, "config.env"))

30
app/spec.py Normal file
View File

@ -0,0 +1,30 @@
# Copyright © 2022, 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pathlib import Path
import typing
from app.util import get_yaml
class Spec:
    """In-memory representation of a deployment spec file (spec.yml)."""

    # Parsed YAML content of the spec file.
    obj: typing.Any

    def __init__(self) -> None:
        pass

    def init_from_file(self, file_path: Path):
        """Load and parse the YAML file at file_path into self.obj."""
        # Context-manage the file object so the handle is closed
        # deterministically. The original context-managed the Path object
        # (deprecated, removed in Python 3.13) and leaked the open handle.
        with open(file_path, "r") as spec_file:
            self.obj = get_yaml().load(spec_file)

30
app/stack.py Normal file
View File

@ -0,0 +1,30 @@
# Copyright © 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pathlib import Path
import typing
from app.util import get_yaml
class Stack:
    """In-memory representation of a stack definition file (stack.yml)."""

    # Parsed YAML content of the stack file.
    obj: typing.Any

    def __init__(self) -> None:
        pass

    def init_from_file(self, file_path: Path):
        """Load and parse the YAML file at file_path into self.obj."""
        # Context-manage the file object so the handle is closed
        # deterministically. The original context-managed the Path object
        # (deprecated, removed in Python 3.13) and leaked the open handle.
        with open(file_path, "r") as stack_file:
            self.obj = get_yaml().load(stack_file)

View File

@ -8,3 +8,4 @@ ruamel.yaml>=0.17.32
pydantic==1.10.9 pydantic==1.10.9
tomli==2.0.1 tomli==2.0.1
validators==0.22.0 validators==0.22.0
kubernetes>=28.1.0