diff --git a/app/deploy/k8s/cluster_info.py b/app/deploy/k8s/cluster_info.py
new file mode 100644
index 00000000..540f5f8c
--- /dev/null
+++ b/app/deploy/k8s/cluster_info.py
@@ -0,0 +1,89 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from kubernetes import client
+from typing import Any, List, Set
+
+from app.opts import opts
+from app.util import get_yaml
+
+
+class ClusterInfo:
+    # Per-instance state is created in __init__ (class-level mutable defaults
+    # would be shared by every ClusterInfo instance).
+    parsed_pod_yaml_map: Any
+    image_set: Set[str]
+    app_name: str = "test-app"
+    deployment_name: str = "test-deployment"
+
+    def __init__(self) -> None:
+        self.parsed_pod_yaml_map = {}
+        self.image_set = set()
+
+    def int_from_pod_files(self, pod_files: List[str]):  # TODO: rename to init_from_pod_files (together with callers)
+        # Parse each pod (compose) file and record the container images it references
+        for pod_file in pod_files:
+            with open(pod_file, "r") as pod_file_descriptor:
+                parsed_pod_file = get_yaml().load(pod_file_descriptor)
+                self.parsed_pod_yaml_map[pod_file] = parsed_pod_file
+        if opts.o.debug:
+            print(f"parsed_pod_yaml_map: {self.parsed_pod_yaml_map}")
+        # Find the set of images in the pods
+        for pod_name in self.parsed_pod_yaml_map:
+            pod = self.parsed_pod_yaml_map[pod_name]
+            services = pod["services"]
+            for service_name in services:
+                service_info = services[service_name]
+                image = service_info["image"]
+                self.image_set.add(image)
+        if opts.o.debug:
+            print(f"image_set: {self.image_set}")
+
+    def get_deployment(self):
+        # Build a k8s Deployment with one container per compose service
+        containers = []
+        for pod_name in self.parsed_pod_yaml_map:
+            pod = self.parsed_pod_yaml_map[pod_name]
+            services = pod["services"]
+            for service_name in services:
+                container_name = service_name
+                service_info = services[service_name]
+                image = service_info["image"]
+                container = client.V1Container(
+                    name=container_name,
+                    image=image,
+                    ports=[client.V1ContainerPort(container_port=80)],
+                    resources=client.V1ResourceRequirements(
+                        requests={"cpu": "100m", "memory": "200Mi"},
+                        limits={"cpu": "500m", "memory": "500Mi"},
+                    ),
+                )
+                containers.append(container)
+        template = client.V1PodTemplateSpec(
+            metadata=client.V1ObjectMeta(labels={"app": self.app_name}),
+            spec=client.V1PodSpec(containers=containers),
+        )
+        spec = client.V1DeploymentSpec(
+            replicas=1, template=template, selector={
+                "matchLabels":
+                    {"app": self.app_name}})
+
+        deployment = client.V1Deployment(
+            api_version="apps/v1",
+            kind="Deployment",
+            metadata=client.V1ObjectMeta(name=self.deployment_name),
+            spec=spec,
+        )
+        return deployment
diff --git a/app/deploy/k8s/deploy_k8s.py b/app/deploy/k8s/deploy_k8s.py
index 7cf0261d..e67f3974 100644
--- a/app/deploy/k8s/deploy_k8s.py
+++ b/app/deploy/k8s/deploy_k8s.py
@@ -14,33 +14,86 @@
# along with this program. If not, see .
from kubernetes import client, config
+
from app.deploy.deployer import Deployer
+from app.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind
+from app.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string
+from app.deploy.k8s.cluster_info import ClusterInfo
+from app.opts import opts
 class K8sDeployer(Deployer):
     name: str = "k8s"
+    core_api: client.CoreV1Api    # k8s core API client, set by connect_api()
+    apps_api: client.AppsV1Api    # k8s apps API client, set by connect_api()
+    kind_cluster_name: str        # kind cluster name, taken from the compose project name
+    cluster_info : ClusterInfo    # pods/services/images parsed from the compose files
     def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
-        config.load_kube_config()
-        self.client = client.CoreV1Api()
+        if (opts.o.debug):
+            print(f"Compose files: {compose_files}")
+            print(f"Project name: {compose_project_name}")
+            print(f"Env file: {compose_env_file}")
+        self.kind_cluster_name = compose_project_name
+        self.cluster_info = ClusterInfo()
+        self.cluster_info.int_from_pod_files(compose_files)
+
+    def connect_api(self):  # builds the API clients; call after the kind cluster exists
+        config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
+        self.core_api = client.CoreV1Api()
+        self.apps_api = client.AppsV1Api()
     def up(self, detach, services):
-        pass
+        # Create the kind cluster
+        create_cluster(self.kind_cluster_name)
+        self.connect_api()
+        # Ensure the referenced containers are copied into kind
+        load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
+        # Process compose files into a Deployment
+        deployment = self.cluster_info.get_deployment()
+        # Create the k8s objects
+        resp = self.apps_api.create_namespaced_deployment(
+            body=deployment, namespace="default"
+        )
+
+        if opts.o.debug:
+            print("Deployment created.\n")
+            print(f"{resp.metadata.namespace} {resp.metadata.name} \
+{resp.metadata.generation} {resp.spec.template.spec.containers[0].image}")
     def down(self, timeout, volumes):
-        pass
+        # Delete the k8s objects -- NOTE(review): not implemented; presumably destroying the kind cluster removes them -- confirm
+        # Destroy the kind cluster
+        destroy_cluster(self.kind_cluster_name)
     def ps(self):
-        pass
+        self.connect_api()
+        # Call whatever API we need to get the running container list
+        ret = self.core_api.list_pod_for_all_namespaces(watch=False)
+        if ret.items:
+            for i in ret.items:
+                print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
+        ret = self.core_api.list_node(pretty=True, watch=False)  # NOTE(review): result unused -- never printed or returned
+        return []
     def port(self, service, private_port):
+        # TODO: figure out where the port mapping comes from, since we handle it ourselves
+        # Also look into whether it makes sense to get ports for k8s
         pass
     def execute(self, service_name, command, envs):
+        # TODO: call the API to execute a command in a running container
         pass
def logs(self, services, tail, follow, stream):
- pass
+ self.connect_api()
+ pods = pods_in_deployment(self.core_api, "test-deployment")
+ if len(pods) > 1:
+ print("Warning: more than one pod in the deployment")
+ k8s_pod_name = pods[0]
+ log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test")
+ return log_stream_from_string(log_data)
     def run(self, image, command, user, volumes, entrypoint=None):
+        # TODO: figure out how to do this -- check why we're being called first
         pass
diff --git a/app/deploy/k8s/helpers.py b/app/deploy/k8s/helpers.py
new file mode 100644
index 00000000..731d667d
--- /dev/null
+++ b/app/deploy/k8s/helpers.py
@@ -0,0 +1,61 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from kubernetes import client
+import subprocess
+from typing import Set
+
+from app.opts import opts
+
+
+def _run_command(command: str):
+    # Run a shell command, returning the CompletedProcess so callers can check
+    # returncode. shell=True is acceptable only for project-controlled strings.
+    if opts.o.debug:
+        print(f"Running: {command}")
+    result = subprocess.run(command, shell=True)
+    if opts.o.debug:
+        print(f"Result: {result}")
+    return result
+
+
+def create_cluster(name: str):
+    _run_command(f"kind create cluster --name {name}")
+
+
+def destroy_cluster(name: str):
+    _run_command(f"kind delete cluster --name {name}")
+
+
+def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
+    for image in image_set:
+        _run_command(f"kind load docker-image {image} --name {kind_cluster_name}")
+
+
+def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str, app_name: str = "test-app"):
+    # TODO: select pods by deployment_name (currently unused) rather than the app label
+    pods = []
+    pod_response = core_api.list_namespaced_pod(namespace="default", label_selector=f"app={app_name}")
+    if opts.o.debug:
+        print(f"pod_response: {pod_response}")
+    for pod_info in pod_response.items:
+        pod_name = pod_info.metadata.name
+        pods.append(pod_name)
+    return pods
+
+
+def log_stream_from_string(s: str):
+    # Note response has to be UTF-8 encoded because the caller expects to decode it
+    yield ("ignore", s.encode())
diff --git a/app/opts.py b/app/opts.py
new file mode 100644
index 00000000..193637c2
--- /dev/null
+++ b/app/opts.py
@@ -0,0 +1,23 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from typing import Optional
+
+from app.command_types import CommandOptions
+
+
+class opts:
+    # Global command options; None until set once by cli() at startup
+    o: Optional[CommandOptions] = None
diff --git a/cli.py b/cli.py
index 5dea43ca..38bdddd9 100644
--- a/cli.py
+++ b/cli.py
@@ -22,6 +22,7 @@ from app.build import build_npms
from app.deploy import deploy
from app import version
from app.deploy import deployment
+from app import opts
from app import update
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@@ -39,7 +40,9 @@ CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
 @click.pass_context
 def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
     """Laconic Stack Orchestrator"""
-    ctx.obj = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
+    command_options = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
+    opts.opts.o = command_options  # publish the options globally (module-level slot in app/opts.py)
+    ctx.obj = command_options
 cli.add_command(setup_repositories.command, "setup-repositories")