diff --git a/app/data/stacks/fixturenet-laconic-loaded/stack.yml b/app/data/stacks/fixturenet-laconic-loaded/stack.yml
index 6c8aae4a..523a7091 100644
--- a/app/data/stacks/fixturenet-laconic-loaded/stack.yml
+++ b/app/data/stacks/fixturenet-laconic-loaded/stack.yml
@@ -2,14 +2,14 @@ version: "1.1"
name: fixturenet-laconic-loaded
description: "A full featured laconic fixturenet"
repos:
- - git.vdb.to/cerc-io/laconicd
+ - github.com/cerc-io/laconicd
- github.com/lirewine/debug
- github.com/lirewine/crypto
- github.com/lirewine/gem
- github.com/lirewine/sdk
- - git.vdb.to/cerc-io/laconic-sdk
- - git.vdb.to/cerc-io/laconic-registry-cli
- - git.vdb.to/cerc-io/laconic-console
+ - github.com/cerc-io/laconic-sdk
+ - github.com/cerc-io/laconic-registry-cli
+ - github.com/cerc-io/laconic-console
npms:
- laconic-sdk
- laconic-registry-cli
diff --git a/app/data/stacks/fixturenet-laconicd/stack.yml b/app/data/stacks/fixturenet-laconicd/stack.yml
index adab9e3e..ce0b1946 100644
--- a/app/data/stacks/fixturenet-laconicd/stack.yml
+++ b/app/data/stacks/fixturenet-laconicd/stack.yml
@@ -2,9 +2,9 @@ version: "1.0"
name: fixturenet-laconicd
description: "A laconicd fixturenet"
repos:
- - git.vdb.to/cerc-io/laconicd
- - git.vdb.to/cerc-io/laconic-sdk
- - git.vdb.to/cerc-io/laconic-registry-cli
+ - github.com/cerc-io/laconicd
+ - github.com/cerc-io/laconic-sdk
+ - github.com/cerc-io/laconic-registry-cli
npms:
- laconic-sdk
- laconic-registry-cli
diff --git a/app/data/stacks/test/stack.yml b/app/data/stacks/test/stack.yml
index ac724c89..a62da193 100644
--- a/app/data/stacks/test/stack.yml
+++ b/app/data/stacks/test/stack.yml
@@ -2,7 +2,7 @@ version: "1.0"
name: test
description: "A test stack"
repos:
- - git.vdb.to/cerc-io/laconicd
+ - github.com/cerc-io/laconicd
- git.vdb.to/cerc-io/test-project@test-branch
containers:
- cerc/test-container
diff --git a/app/deploy.py b/app/deploy.py
index 0ebcfe18..d11efa00 100644
--- a/app/deploy.py
+++ b/app/deploy.py
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-# Deploys the system components using docker-compose
+# Deploys the system components using a deployer (either docker-compose or k8s)
import hashlib
import copy
@@ -22,10 +22,11 @@ import sys
from dataclasses import dataclass
from importlib import resources
import subprocess
-from python_on_whales import DockerClient, DockerException
import click
from pathlib import Path
from app.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
+from app.deployer import Deployer, DeployerException
+from app.deployer_factory import getDeployer
from app.deploy_types import ClusterContext, DeployCommandContext
from app.deployment_create import create as deployment_create
from app.deployment_create import init as deployment_init
@@ -37,8 +38,9 @@ from app.deployment_create import setup as deployment_setup
@click.option("--exclude", help="don\'t start these components")
@click.option("--env-file", help="env file to be used")
@click.option("--cluster", help="specify a non-default cluster name")
+@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s)")
@click.pass_context
-def command(ctx, include, exclude, env_file, cluster):
+def command(ctx, include, exclude, env_file, cluster, deploy_to):
'''deploy a stack'''
# Although in theory for some subcommands (e.g. deploy create) the stack can be inferred,
@@ -50,16 +52,20 @@ def command(ctx, include, exclude, env_file, cluster):
if ctx.parent.obj.debug:
print(f"ctx.parent.obj: {ctx.parent.obj}")
- ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file)
+
+ if deploy_to is None:
+ deploy_to = "compose"
+
+ ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file, deploy_to)
# Subcommand is executed now, by the magic of click
-def create_deploy_context(global_context, stack, include, exclude, cluster, env_file):
+def create_deploy_context(global_context, stack, include, exclude, cluster, env_file, deployer):
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
- docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
- compose_env_file=cluster_context.env_file)
- return DeployCommandContext(stack, cluster_context, docker)
+ deployer = getDeployer(deployer, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
+ compose_env_file=cluster_context.env_file)
+ return DeployCommandContext(stack, cluster_context, deployer)
def up_operation(ctx, services_list, stay_attached=False):
@@ -74,10 +80,10 @@ def up_operation(ctx, services_list, stay_attached=False):
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
for pre_start_command in cluster_context.pre_start_commands:
_run_command(global_context, cluster_context.cluster, pre_start_command)
- deploy_context.docker.compose.up(detach=not stay_attached, services=services_list)
+ deploy_context.deployer.compose_up(detach=not stay_attached, services=services_list)
for post_start_command in cluster_context.post_start_commands:
_run_command(global_context, cluster_context.cluster, post_start_command)
- _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env)
+ _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
def down_operation(ctx, delete_volumes, extra_args_list):
@@ -89,7 +95,7 @@ def down_operation(ctx, delete_volumes, extra_args_list):
if extra_args_list:
timeout_arg = extra_args_list[0]
# Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
- ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes)
+ ctx.obj.deployer.compose_down(timeout=timeout_arg, volumes=delete_volumes)
def ps_operation(ctx):
@@ -97,7 +103,7 @@ def ps_operation(ctx):
if not global_context.dry_run:
if global_context.verbose:
print("Running compose ps")
- container_list = ctx.obj.docker.compose.ps()
+ container_list = ctx.obj.deployer.compose_ps()
if len(container_list) > 0:
print("Running containers:")
for container in container_list:
@@ -128,7 +134,7 @@ def port_operation(ctx, extra_args):
exposed_port = extra_args_list[1]
if global_context.verbose:
print(f"Running compose port {service_name} {exposed_port}")
- mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port)
+ mapped_port_data = ctx.obj.deployer.compose_port(service_name, exposed_port)
print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
@@ -145,8 +151,8 @@ def exec_operation(ctx, extra_args):
if global_context.verbose:
print(f"Running compose exec {service_name} {command_to_exec}")
try:
- ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
- except DockerException:
+ ctx.obj.deployer.compose_execute(service_name, command_to_exec, envs=container_exec_env)
+ except DeployerException:
print("container command returned error exit status")
@@ -157,7 +163,7 @@ def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
if global_context.verbose:
print("Running compose logs")
services_list = extra_args_list if extra_args_list is not None else []
- logs_stream = ctx.obj.docker.compose.logs(services=services_list, tail=tail, follow=follow, stream=True)
+ logs_stream = ctx.obj.deployer.compose_logs(services=services_list, tail=tail, follow=follow, stream=True)
for stream_type, stream_content in logs_stream:
print(stream_content.decode("utf-8"), end="")
@@ -214,11 +220,11 @@ def get_stack_status(ctx, stack):
ctx_copy.stack = stack
cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
- docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
+ deployer = Deployer(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
# TODO: refactor to avoid duplicating this code above
if ctx.verbose:
print("Running compose ps")
- container_list = docker.compose.ps()
+ container_list = deployer.compose_ps()
if len(container_list) > 0:
if ctx.debug:
print(f"Container list from compose ps: {container_list}")
@@ -359,7 +365,7 @@ def _run_command(ctx, cluster_name, command):
sys.exit(1)
-def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env):
+def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
@dataclass
class ConfigDirective:
@@ -390,13 +396,13 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
# TODO: fix the script paths so they're consistent between containers
source_value = None
try:
- source_value = docker.compose.execute(pd.source_container,
- ["sh", "-c",
- "sh /docker-entrypoint-scripts.d/export-"
- f"{pd.source_variable}.sh"],
- tty=False,
- envs=container_exec_env)
- except DockerException as error:
+ source_value = deployer.compose_execute(pd.source_container,
+ ["sh", "-c",
+ "sh /docker-entrypoint-scripts.d/export-"
+ f"{pd.source_variable}.sh"],
+ tty=False,
+ envs=container_exec_env)
+ except DeployerException as error:
if ctx.debug:
print(f"Docker exception reading config source: {error}")
# If the script executed failed for some reason, we get:
@@ -411,12 +417,12 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
if source_value:
if ctx.debug:
print(f"fetched source value: {source_value}")
- destination_output = docker.compose.execute(pd.destination_container,
- ["sh", "-c",
- f"sh /scripts/import-{pd.destination_variable}.sh"
- f" {source_value}"],
- tty=False,
- envs=container_exec_env)
+ destination_output = deployer.compose_execute(pd.destination_container,
+ ["sh", "-c",
+ f"sh /scripts/import-{pd.destination_variable}.sh"
+ f" {source_value}"],
+ tty=False,
+ envs=container_exec_env)
waiting_for_data = False
if ctx.debug:
print(f"destination output: {destination_output}")
diff --git a/app/deploy_docker.py b/app/deploy_docker.py
new file mode 100644
index 00000000..a40c4754
--- /dev/null
+++ b/app/deploy_docker.py
@@ -0,0 +1,67 @@
+# Copyright © 2022, 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+from python_on_whales import DockerClient, DockerException
+from app.deployer import Deployer, DeployerException
+
+
+class DockerDeployer(Deployer):
+ name: str = "compose"
+
+ def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
+ self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
+ compose_env_file=compose_env_file)
+
+ def compose_up(self, detach, services):
+ try:
+ return self.docker.compose.up(detach=detach, services=services)
+ except DockerException as e:
+ raise DeployerException(e)
+
+ def compose_down(self, timeout, volumes):
+ try:
+ return self.docker.compose.down(timeout=timeout, volumes=volumes)
+ except DockerException as e:
+ raise DeployerException(e)
+
+ def compose_ps(self):
+ try:
+ return self.docker.compose.ps()
+ except DockerException as e:
+ raise DeployerException(e)
+
+ def compose_port(self, service, private_port):
+ try:
+ return self.docker.compose.port(service=service, private_port=private_port)
+ except DockerException as e:
+ raise DeployerException(e)
+
+ def compose_execute(self, service_name, command, envs):
+ try:
+ return self.docker.compose.execute(service_name=service_name, command=command, envs=envs)
+ except DockerException as e:
+ raise DeployerException(e)
+
+ def compose_logs(self, services, tail, follow, stream):
+ try:
+ return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
+ except DockerException as e:
+ raise DeployerException(e)
+
+ def run(self, image, command, user, volumes, entrypoint=None):
+ try:
+ return self.docker.run(image=image, command=command, user=user, volumes=volumes, entrypoint=entrypoint)
+ except DockerException as e:
+ raise DeployerException(e)
diff --git a/app/deploy_k8s.py b/app/deploy_k8s.py
new file mode 100644
index 00000000..2614f725
--- /dev/null
+++ b/app/deploy_k8s.py
@@ -0,0 +1,46 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+from kubernetes import client, config
+from app.deployer import Deployer
+
+
+class K8sDeployer(Deployer):
+ name: str = "k8s"
+
+ def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
+ config.load_kube_config()
+ self.client = client.CoreV1Api()
+
+ def compose_up(self, detach, services):
+ pass
+
+ def compose_down(self, timeout, volumes):
+ pass
+
+ def compose_ps(self):
+ pass
+
+ def compose_port(self, service, private_port):
+ pass
+
+ def compose_execute(self, service_name, command, envs):
+ pass
+
+ def compose_logs(self, services, tail, follow, stream):
+ pass
+
+ def run(self, image, command, user, volumes, entrypoint=None):
+ pass
diff --git a/app/deploy_types.py b/app/deploy_types.py
index 63f32762..ecc0d5a3 100644
--- a/app/deploy_types.py
+++ b/app/deploy_types.py
@@ -16,8 +16,8 @@
from typing import List
from dataclasses import dataclass
from pathlib import Path
-from python_on_whales import DockerClient
from app.command_types import CommandOptions
+from app.deployer import Deployer
@dataclass
@@ -35,7 +35,7 @@ class ClusterContext:
class DeployCommandContext:
stack: str
cluster_context: ClusterContext
- docker: DockerClient
+ deployer: Deployer
@dataclass
diff --git a/app/deploy_util.py b/app/deploy_util.py
index 498e3dfd..b492d6e1 100644
--- a/app/deploy_util.py
+++ b/app/deploy_util.py
@@ -47,12 +47,12 @@ def _volumes_to_docker(mounts: List[VolumeMapping]):
def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]):
- docker = ctx.docker
+ deployer = ctx.deployer
container_image = _container_image_from_service(ctx.stack, service)
docker_volumes = _volumes_to_docker(mounts)
if ctx.cluster_context.options.debug:
print(f"Running this command in {service} container: {command}")
- docker_output = docker.run(
+ docker_output = deployer.run(
container_image,
["-c", command], entrypoint="sh",
user=f"{os.getuid()}:{os.getgid()}",
diff --git a/app/deployer.py b/app/deployer.py
new file mode 100644
index 00000000..97595db7
--- /dev/null
+++ b/app/deployer.py
@@ -0,0 +1,52 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+from abc import ABC, abstractmethod
+
+
+class Deployer(ABC):
+
+ @abstractmethod
+ def compose_up(self, detach, services):
+ pass
+
+ @abstractmethod
+ def compose_down(self, timeout, volumes):
+ pass
+
+ @abstractmethod
+ def compose_ps(self):
+ pass
+
+ @abstractmethod
+ def compose_port(self, service, private_port):
+ pass
+
+ @abstractmethod
+ def compose_execute(self, service_name, command, envs):
+ pass
+
+ @abstractmethod
+ def compose_logs(self, services, tail, follow, stream):
+ pass
+
+ @abstractmethod
+ def run(self, image, command, user, volumes, entrypoint):
+ pass
+
+
+class DeployerException(Exception):
+ def __init__(self, *args: object) -> None:
+ super().__init__(*args)
diff --git a/app/deployer_factory.py b/app/deployer_factory.py
new file mode 100644
index 00000000..45c54ecd
--- /dev/null
+++ b/app/deployer_factory.py
@@ -0,0 +1,29 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+from app.deployer import DeployerException
+from app.deploy_k8s import K8sDeployer
+from app.deploy_docker import DockerDeployer
+
+
+def getDeployer(type, compose_files, compose_project_name, compose_env_file):
+    # None means "use the default deployer", which is docker compose.
+    if type == "compose" or type is None:
+        return DockerDeployer(compose_files, compose_project_name, compose_env_file)
+    elif type == "k8s":
+        return K8sDeployer(compose_files, compose_project_name, compose_env_file)
+    else:
+        # Fail fast: returning None here would surface later as an AttributeError.
+        raise DeployerException(f"ERROR: deploy-to {type} is not valid")
diff --git a/app/deployment.py b/app/deployment.py
index aeabf61c..36cdecac 100644
--- a/app/deployment.py
+++ b/app/deployment.py
@@ -14,20 +14,25 @@
# along with this program. If not, see .
import click
-from dataclasses import dataclass
from pathlib import Path
import sys
from app.deploy import up_operation, down_operation, ps_operation, port_operation
from app.deploy import exec_operation, logs_operation, create_deploy_context
+from app.stack import Stack
+from app.spec import Spec
-@dataclass
class DeploymentContext:
dir: Path
+ spec: Spec
+ stack: Stack
def get_stack_file(self):
return self.dir.joinpath("stack.yml")
+ def get_spec_file(self):
+ return self.dir.joinpath("spec.yml")
+
def get_env_file(self):
return self.dir.joinpath("config.env")
@@ -35,12 +40,19 @@ class DeploymentContext:
def get_cluster_name(self):
return None
+ def init(self, dir):
+ self.dir = dir
+ self.stack = Stack()
+ self.stack.init_from_file(self.get_stack_file())
+ self.spec = Spec()
+ self.spec.init_from_file(self.get_spec_file())
+
@click.group()
@click.option("--dir", required=True, help="path to deployment directory")
@click.pass_context
def command(ctx, dir):
- '''create a deployment'''
+ '''manage a deployment'''
# Check that --stack wasn't supplied
if ctx.parent.obj.stack:
@@ -55,14 +67,18 @@ def command(ctx, dir):
print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
sys.exit(1)
# Store the deployment context for subcommands
- ctx.obj = DeploymentContext(dir_path)
+ deployment_context = DeploymentContext()
+ deployment_context.init(dir_path)
+ ctx.obj = deployment_context
def make_deploy_context(ctx):
- stack_file_path = ctx.obj.get_stack_file()
- env_file = ctx.obj.get_env_file()
- cluster_name = ctx.obj.get_cluster_name()
- return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file)
+ context: DeploymentContext = ctx.obj
+ stack_file_path = context.get_stack_file()
+ env_file = context.get_env_file()
+ cluster_name = context.get_cluster_name()
+    return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file,
+                                 context.spec.obj.get("deploy-to"))
@command.command()
diff --git a/app/deployment_create.py b/app/deployment_create.py
index 7f416300..81259795 100644
--- a/app/deployment_create.py
+++ b/app/deployment_create.py
@@ -249,7 +249,7 @@ def init(ctx, config, output, map_ports_to_host):
stack = global_options(ctx).stack
debug = global_options(ctx).debug
default_spec_file_content = call_stack_deploy_init(ctx.obj)
- spec_file_content = {"stack": stack}
+ spec_file_content = {"stack": stack, "deploy-to": ctx.obj.deployer.name}
if default_spec_file_content:
spec_file_content.update(default_spec_file_content)
config_variables = _parse_config_variables(config)
@@ -315,7 +315,7 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
sys.exit(1)
os.mkdir(deployment_dir)
# Copy spec file and the stack file into the deployment dir
- copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
+ copyfile(spec_file, os.path.join(deployment_dir, "spec.yml"))
copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
# Copy any config varibles from the spec file into an env file suitable for compose
_write_config_file(spec_file, os.path.join(deployment_dir, "config.env"))
diff --git a/app/spec.py b/app/spec.py
new file mode 100644
index 00000000..a23bc167
--- /dev/null
+++ b/app/spec.py
@@ -0,0 +1,30 @@
+# Copyright © 2022, 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+from pathlib import Path
+import typing
+from app.util import get_yaml
+
+
+class Spec:
+
+ obj: typing.Any
+
+ def __init__(self) -> None:
+ pass
+
+    def init_from_file(self, file_path: Path):
+        with open(file_path, "r") as file:
+            self.obj = get_yaml().load(file)
diff --git a/app/stack.py b/app/stack.py
new file mode 100644
index 00000000..1f94acdf
--- /dev/null
+++ b/app/stack.py
@@ -0,0 +1,30 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+from pathlib import Path
+import typing
+from app.util import get_yaml
+
+
+class Stack:
+
+ obj: typing.Any
+
+ def __init__(self) -> None:
+ pass
+
+    def init_from_file(self, file_path: Path):
+        with open(file_path, "r") as file:
+            self.obj = get_yaml().load(file)
diff --git a/requirements.txt b/requirements.txt
index 7f60f9dd..bf4845a1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,3 +8,4 @@ ruamel.yaml>=0.17.32
pydantic==1.10.9
tomli==2.0.1
validators==0.22.0
+kubernetes>=28.1.0