kind test stack (#629)

David Boreham 2023-11-08 01:11:00 -07:00 committed by GitHub
parent 36e13f7199
commit 5e91c2224e
11 changed files with 134 additions and 64 deletions

View File

@@ -21,7 +21,7 @@ from stack_orchestrator.deploy.deployer import Deployer, DeployerException, Depl
class DockerDeployer(Deployer):
name: str = "compose"
def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
compose_env_file=compose_env_file)
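Illustrative sketch (not part of this commit; the class name is hypothetical) of the revised constructor contract: the compose deployer now accepts a deployment_dir purely for parity with the k8s deployer and does not use it yet.

```python
from python_on_whales import DockerClient


class DockerDeployerSketch:
    """Hypothetical stand-in mirroring the new DockerDeployer signature."""
    name: str = "compose"

    def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
        # deployment_dir is accepted for interface parity with K8sDeployer but is
        # currently ignored by the compose backend.
        self.docker = DockerClient(compose_files=compose_files,
                                   compose_project_name=compose_project_name,
                                   compose_env_file=compose_env_file)
```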

View File

@@ -28,6 +28,7 @@ from stack_orchestrator.util import include_exclude_check, get_parsed_stack_conf
from stack_orchestrator.deploy.deployer import Deployer, DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer
from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.deployment_create import create as deployment_create
from stack_orchestrator.deploy.deployment_create import init as deployment_init
from stack_orchestrator.deploy.deployment_create import setup as deployment_setup
@@ -56,14 +57,17 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
if deploy_to is None:
deploy_to = "compose"
ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file, deploy_to)
ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to)
# Subcommand is executed now, by the magic of click
def create_deploy_context(global_context, stack, include, exclude, cluster, env_file, deployer):
def create_deploy_context(
global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deployer):
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
deployment_dir = deployment_context.deployment_dir if deployment_context else None
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
deployer = getDeployer(deployer, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
deployer = getDeployer(deployer, deployment_dir, compose_files=cluster_context.compose_files,
compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
return DeployCommandContext(stack, cluster_context, deployer)
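A hedged sketch of the ad-hoc call path after this change (the helper name is hypothetical): the plain deploy command has no deployment directory, so it passes None for the deployment context and deployment_dir resolves to None inside create_deploy_context, preserving the previous compose behaviour.

```python
from stack_orchestrator.deploy.deploy import create_deploy_context


def adhoc_deploy_context(global_context, stack, include, exclude, cluster, env_file, deploy_to):
    # Hypothetical helper: no DeploymentContext is available here, so the second
    # argument is None and the deployer is constructed with deployment_dir=None.
    return create_deploy_context(global_context, None, stack, include, exclude,
                                 cluster, env_file, deploy_to)
```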

View File

@@ -15,7 +15,6 @@
from typing import List
from dataclasses import dataclass
from pathlib import Path
from stack_orchestrator.command_types import CommandOptions
from stack_orchestrator.deploy.deployer import Deployer
@@ -38,12 +37,6 @@ class DeployCommandContext:
deployer: Deployer
@dataclass
class DeploymentContext:
deployment_dir: Path
command_context: DeployCommandContext
@dataclass
class VolumeMapping:
host_path: str

View File

@@ -26,10 +26,10 @@ def getDeployerConfigGenerator(type: str):
print(f"ERROR: deploy-to {type} is not valid")
def getDeployer(type: str, compose_files, compose_project_name, compose_env_file):
def getDeployer(type: str, deployment_dir, compose_files, compose_project_name, compose_env_file):
if type == "compose" or type is None:
return DockerDeployer(compose_files, compose_project_name, compose_env_file)
return DockerDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file)
elif type == "k8s":
return K8sDeployer(compose_files, compose_project_name, compose_env_file)
return K8sDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file)
else:
print(f"ERROR: deploy-to {type} is not valid")

View File

@@ -18,34 +18,7 @@ from pathlib import Path
import sys
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context
from stack_orchestrator.deploy.stack import Stack
from stack_orchestrator.deploy.spec import Spec
class DeploymentContext:
dir: Path
spec: Spec
stack: Stack
def get_stack_file(self):
return self.dir.joinpath("stack.yml")
def get_spec_file(self):
return self.dir.joinpath("spec.yml")
def get_env_file(self):
return self.dir.joinpath("config.env")
# TODO: implement me
def get_cluster_name(self):
return None
def init(self, dir):
self.dir = dir
self.stack = Stack()
self.stack.init_from_file(self.get_stack_file())
self.spec = Spec()
self.spec.init_from_file(self.get_spec_file())
from stack_orchestrator.deploy.deployment_context import DeploymentContext
@click.group()
@@ -77,7 +50,7 @@ def make_deploy_context(ctx):
stack_file_path = context.get_stack_file()
env_file = context.get_env_file()
cluster_name = context.get_cluster_name()
return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file,
return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file,
context.spec.obj["deploy-to"])
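A hedged sketch of the deployment-backed path (the helper name is hypothetical and the directory must already contain stack.yml, spec.yml and config.env): a DeploymentContext loaded from the deployment directory is handed to create_deploy_context, so the selected deployer receives deployment_dir and can locate files such as kind-config.yml.

```python
from pathlib import Path

from stack_orchestrator.deploy.deploy import create_deploy_context
from stack_orchestrator.deploy.deployment_context import DeploymentContext


def deploy_context_for_dir(global_context, deployment_dir: Path):
    # Hypothetical helper mirroring make_deploy_context above.
    context = DeploymentContext()
    context.init(deployment_dir)
    return create_deploy_context(global_context,
                                 context,
                                 context.get_stack_file(),
                                 None,                        # include
                                 None,                        # exclude
                                 context.get_cluster_name(),  # currently returns None (see TODO)
                                 context.get_env_file(),
                                 context.spec.obj["deploy-to"])
```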

View File

@@ -0,0 +1,46 @@
# Copyright © 2022, 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pathlib import Path
from stack_orchestrator.deploy.stack import Stack
from stack_orchestrator.deploy.spec import Spec
class DeploymentContext:
deployment_dir: Path
spec: Spec
stack: Stack
def get_stack_file(self):
return self.deployment_dir.joinpath("stack.yml")
def get_spec_file(self):
return self.deployment_dir.joinpath("spec.yml")
def get_env_file(self):
return self.deployment_dir.joinpath("config.env")
# TODO: implement me
def get_cluster_name(self):
return None
def init(self, dir):
self.deployment_dir = dir
self.spec = Spec()
self.spec.init_from_file(self.get_spec_file())
self.stack = Stack(self.spec.obj["stack"])
self.stack.init_from_file(self.get_stack_file())
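Hypothetical usage of the relocated class (the directory name is made up and must already contain spec.yml and stack.yml): init() reads spec.yml first so the stack name recorded there can be passed to Stack(), then loads the stack file.

```python
from pathlib import Path

from stack_orchestrator.deploy.deployment_context import DeploymentContext

ctx = DeploymentContext()
ctx.init(Path("my-deployment"))        # hypothetical deployment directory
print(ctx.get_stack_file())            # my-deployment/stack.yml
print(ctx.get_spec_file())             # my-deployment/spec.yml
print(ctx.spec.obj["deploy-to"])       # e.g. "k8s", if the spec requests it
print(ctx.stack.name)                  # stack name taken from spec.yml
```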

View File

@@ -24,8 +24,9 @@ import sys
from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
get_pod_script_paths, get_plugin_code_paths)
from stack_orchestrator.deploy.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext
def _make_default_deployment_dir():
@@ -108,8 +109,8 @@ def _fixup_pod_file(pod, spec, compose_dir):
pod["services"][container_name]["ports"] = container_ports
def _commands_plugin_paths(ctx: DeployCommandContext):
plugin_paths = get_plugin_code_paths(ctx.stack)
def _commands_plugin_paths(stack_name: str):
plugin_paths = get_plugin_code_paths(stack_name)
ret = [p.joinpath("deploy", "commands.py") for p in plugin_paths]
return ret
@@ -123,7 +124,7 @@ def call_stack_deploy_init(deploy_command_context):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_paths = _commands_plugin_paths(deploy_command_context)
python_file_paths = _commands_plugin_paths(deploy_command_context.stack)
ret = None
init_done = False
@@ -147,7 +148,7 @@ def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetu
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_paths = _commands_plugin_paths(deploy_command_context)
python_file_paths = _commands_plugin_paths(deploy_command_context.stack)
for python_file_path in python_file_paths:
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
@@ -162,7 +163,7 @@ def call_stack_deploy_create(deployment_context, extra_args):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_paths = _commands_plugin_paths(deployment_context.command_context)
python_file_paths = _commands_plugin_paths(deployment_context.stack.name)
for python_file_path in python_file_paths:
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
@@ -311,7 +312,7 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path):
def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
# This function fails with a useful error message if the file doesn't exist
parsed_spec = get_parsed_deployment_spec(spec_file)
stack_name = parsed_spec['stack']
stack_name = parsed_spec["stack"]
stack_file = get_stack_file_path(stack_name)
parsed_stack = get_parsed_stack_config(stack_name)
if global_options(ctx).debug:
@@ -367,7 +368,8 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
# stack member here.
deployment_command_context = ctx.obj
deployment_command_context.stack = stack_name
deployment_context = DeploymentContext(Path(deployment_dir), deployment_command_context)
deployment_context = DeploymentContext()
deployment_context.init(Path(deployment_dir))
# Call the deployer to generate any deployer-specific files (e.g. for kind)
deployer_config_generator = getDeployerConfigGenerator(parsed_spec["deploy-to"])
# TODO: make deployment_dir a Path above
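To show why keying the plugin lookup on the stack name helps, here is a hedged sketch (the helper name is hypothetical) of the lookup-and-load pattern the three call_stack_deploy_* functions share; it reuses _commands_plugin_paths from this module.

```python
from importlib import util

from stack_orchestrator.deploy.deployment_create import _commands_plugin_paths


def load_stack_commands(stack_name: str):
    # Hypothetical helper: _commands_plugin_paths() yields candidate
    # <plugin>/deploy/commands.py paths for the named stack; load the first
    # one that exists, or return None if the stack ships no commands plugin.
    for python_file_path in _commands_plugin_paths(stack_name):
        if python_file_path.exists():
            spec = util.spec_from_file_location("commands", python_file_path)
            commands_module = util.module_from_spec(spec)
            spec.loader.exec_module(commands_module)
            return commands_module
    return None
```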

View File

@@ -18,7 +18,7 @@ from typing import Any, List, Set
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names
from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names, get_node_pv_mount_path
class ClusterInfo:
@@ -50,11 +50,12 @@ class ClusterInfo:
print(f"Volumes: {volumes}")
for volume_name in volumes:
spec = client.V1PersistentVolumeClaimSpec(
storage_class_name="standard",
access_modes=["ReadWriteOnce"],
storage_class_name="manual",
resources=client.V1ResourceRequirements(
requests={"storage": "2Gi"}
)
),
volume_name=volume_name
)
pvc = client.V1PersistentVolumeClaim(
metadata=client.V1ObjectMeta(name=volume_name,
@@ -64,6 +65,24 @@ class ClusterInfo:
result.append(pvc)
return result
def get_pvs(self):
result = []
volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
for volume_name in volumes:
spec = client.V1PersistentVolumeSpec(
storage_class_name="manual",
access_modes=["ReadWriteOnce"],
capacity={"storage": "2Gi"},
host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name))
)
pv = client.V1PersistentVolume(
metadata=client.V1ObjectMeta(name=volume_name,
labels={"volume-label": volume_name}),
spec=spec,
)
result.append(pv)
return result
# to suit the deployment, and also annotate the container specs to point at said volumes
def get_deployment(self):
containers = []
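A minimal sketch (the helper name is hypothetical) of the PV/PVC pairing introduced here: a host-path PersistentVolume named after the compose volume, and a PersistentVolumeClaim that binds to it explicitly via volume_name and the "manual" storage class.

```python
from kubernetes import client


def host_path_pv_and_pvc(volume_name: str, size: str = "2Gi"):
    # Hypothetical helper pairing the objects built by get_pvs() and get_pvcs().
    pv = client.V1PersistentVolume(
        metadata=client.V1ObjectMeta(name=volume_name,
                                     labels={"volume-label": volume_name}),
        spec=client.V1PersistentVolumeSpec(
            storage_class_name="manual",
            access_modes=["ReadWriteOnce"],
            capacity={"storage": size},
            # Backed by the kind node's filesystem at /mnt/<volume-name>
            host_path=client.V1HostPathVolumeSource(path=f"/mnt/{volume_name}"),
        ),
    )
    pvc = client.V1PersistentVolumeClaim(
        metadata=client.V1ObjectMeta(name=volume_name),
        spec=client.V1PersistentVolumeClaimSpec(
            storage_class_name="manual",
            access_modes=["ReadWriteOnce"],
            resources=client.V1ResourceRequirements(requests={"storage": size}),
            volume_name=volume_name,  # bind the claim to the matching PV by name
        ),
    )
    return pv, pvc
```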

View File

@@ -30,12 +30,15 @@ class K8sDeployer(Deployer):
k8s_namespace: str = "default"
kind_cluster_name: str
cluster_info : ClusterInfo
deployment_dir: Path
def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
if (opts.o.debug):
print(f"Deployment dir: {deployment_dir}")
print(f"Compose files: {compose_files}")
print(f"Project name: {compose_project_name}")
print(f"Env file: {compose_env_file}")
self.deployment_dir = deployment_dir
self.kind_cluster_name = compose_project_name
self.cluster_info = ClusterInfo()
self.cluster_info.int_from_pod_files(compose_files)
@@ -47,16 +50,26 @@ class K8sDeployer(Deployer):
def up(self, detach, services):
# Create the kind cluster
# HACK: pass in the config file path here
create_cluster(self.kind_cluster_name, "./test-deployment-dir/kind-config.yml")
create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath("kind-config.yml"))
self.connect_api()
# Ensure the referenced containers are copied into kind
load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
# Create the host-path-mounted PVs for this deployment
pvs = self.cluster_info.get_pvs()
for pv in pvs:
if opts.o.debug:
print(f"Sending this pv: {pv}")
pv_resp = self.core_api.create_persistent_volume(body=pv)
if opts.o.debug:
print("PVs created:")
print(f"{pv_resp}")
# Figure out the PVCs for this deployment
pvcs = self.cluster_info.get_pvcs()
for pvc in pvcs:
if opts.o.debug:
print(f"Sending this: {pvc}")
print(f"Sending this pvc: {pvc}")
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
if opts.o.debug:
print("PVCs created:")
@@ -65,7 +78,7 @@ class K8sDeployer(Deployer):
deployment = self.cluster_info.get_deployment()
# Create the k8s objects
if opts.o.debug:
print(f"Sending this: {deployment}")
print(f"Sending this deployment: {deployment}")
deployment_resp = self.apps_api.create_namespaced_deployment(
body=deployment, namespace=self.k8s_namespace
)
@@ -122,6 +135,8 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator):
# Check the file isn't already there
# Get the config file contents
content = generate_kind_config(deployment_dir)
if opts.o.debug:
print(f"kind config is: {content}")
config_file = deployment_dir.joinpath(self.config_file_name)
# Write the file
with open(config_file, "w") as output_file:
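A hedged end-to-end sketch of the kind lifecycle this change completes. The paths are hypothetical, the directory is assumed to already hold the copied compose files, and the generate() entry point of K8sDeployerConfigGenerator is assumed since it is not shown in this hunk: the config generator writes kind-config.yml into the deployment directory at create time, and up() later feeds that same file to kind, then creates the PVs, PVCs and Deployment.

```python
from pathlib import Path

from stack_orchestrator.deploy.deployer_factory import getDeployer, getDeployerConfigGenerator

deployment_dir = Path("my-deployment")  # hypothetical deployment directory

# At `deployment create` time: write my-deployment/kind-config.yml
getDeployerConfigGenerator("k8s").generate(deployment_dir)  # generate() is assumed here

# At `deployment up` time: the deployer reads the kind config it finds in deployment_dir
deployer = getDeployer("k8s",
                       deployment_dir,
                       compose_files=[deployment_dir / "compose" / "docker-compose-test.yml"],  # hypothetical
                       compose_project_name="laconic-test-cluster",                             # hypothetical
                       compose_env_file=deployment_dir / "config.env")
deployer.up(detach=True, services=[])  # creates the kind cluster, loads images, then PVs/PVCs/Deployment
```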

View File

@@ -14,6 +14,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from kubernetes import client
import os
from pathlib import Path
import subprocess
from typing import Any, Set
@@ -73,6 +74,10 @@ def named_volumes_from_pod_files(parsed_pod_files):
return named_volumes
def get_node_pv_mount_path(volume_name: str):
return f"/mnt/{volume_name}"
def volume_mounts_for_service(parsed_pod_files, service):
result = []
# Find the service
@@ -119,6 +124,14 @@ def _get_host_paths_for_volumes(parsed_pod_files):
return result
def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
if os.path.isabs(data_mount_path):
return data_mount_path
else:
# Python Path voodoo that looks pretty odd:
return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve()
def parsed_pod_files_map_from_file_names(pod_files):
parsed_pod_yaml_map : Any = {}
for pod_file in pod_files:
@@ -130,9 +143,12 @@ def parsed_pod_files_map_from_file_names(pod_files):
return parsed_pod_yaml_map
def _generate_kind_mounts(parsed_pod_files):
def _generate_kind_mounts(parsed_pod_files, deployment_dir):
volume_definitions = []
volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files)
# Note these paths are relative to the location of the pod files (at present)
# So we need to fix up to make them correct and absolute because kind assumes
# relative to the cwd.
for pod in parsed_pod_files:
parsed_pod_file = parsed_pod_files[pod]
if "services" in parsed_pod_file:
@@ -145,7 +161,8 @@ def _generate_kind_mounts(parsed_pod_files):
# Looks like: test-data:/data
(volume_name, mount_path) = mount_string.split(":")
volume_definitions.append(
f" - hostPath: {volume_host_path_map[volume_name]}\n containerPath: /var/local-path-provisioner"
f" - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
f" containerPath: {get_node_pv_mount_path(volume_name)}"
)
return (
"" if len(volume_definitions) == 0 else (
@@ -201,7 +218,7 @@ def generate_kind_config(deployment_dir: Path):
pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
mounts_yml = _generate_kind_mounts(parsed_pod_files_map)
mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir)
return (
"kind: Cluster\n"
"apiVersion: kind.x-k8s.io/v1alpha4\n"

View File

@@ -20,10 +20,11 @@ from stack_orchestrator.util import get_yaml
class Stack:
name: str
obj: typing.Any
def __init__(self) -> None:
pass
def __init__(self, name: str) -> None:
self.name = name
def init_from_file(self, file_path: Path):
with file_path:
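Hypothetical usage of the revised Stack class (the name and path are illustrative): the stack name now comes from the deployment's spec.yml and is passed to the constructor, after which init_from_file() loads the stack definition itself.

```python
from pathlib import Path

from stack_orchestrator.deploy.stack import Stack

stack = Stack("test")                                    # in practice the name comes from spec.yml
stack.init_from_file(Path("my-deployment/stack.yml"))    # hypothetical path
print(stack.name, stack.obj)
```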