Merge branch 'main' into dboreham/test-database-stack
Some checks failed
Lint Checks / Run linter (pull_request) Failing after 40s
Webapp Test / Run webapp test suite (pull_request) Successful in 3m45s
Deploy Test / Run deploy test suite (pull_request) Successful in 6m4s
K8s Deploy Test / Run deploy test suite on kind/k8s (pull_request) Successful in 7m45s
Smoke Test / Run basic test suite (pull_request) Successful in 5m35s

David Boreham 2024-02-14 21:54:19 -07:00
commit e8b8f112aa
12 changed files with 268 additions and 106 deletions

View File

@@ -1,6 +1,8 @@
 name: K8s Deploy Test
 on:
+  pull_request:
+    branches: '*'
   push:
     branches: '*'
     paths:

View File

@@ -27,6 +27,12 @@ kube_config_key = "kube-config"
 deploy_to_key = "deploy-to"
 network_key = "network"
 http_proxy_key = "http-proxy"
-image_resigtry_key = "image-registry"
+image_registry_key = "image-registry"
+configmaps_key = "configmaps"
+resources_key = "resources"
+volumes_key = "volumes"
+security_key = "security"
+annotations_key = "annotations"
+labels_key = "labels"
 kind_config_filename = "kind-config.yml"
 kube_config_filename = "kubeconfig.yml"

View File

@@ -7,11 +7,13 @@ services:
       CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
       CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
     volumes:
-      - test-data:/data
+      - test-data-bind:/data
+      - test-data-auto:/data2
       - test-config:/config:ro
     ports:
       - "80"
 volumes:
-  test-data:
+  test-data-bind:
+  test-data-auto:
   test-config:

View File

@@ -1,19 +1,38 @@
 #!/usr/bin/env bash
 set -e
 if [ -n "$CERC_SCRIPT_DEBUG" ]; then
   set -x
 fi
-# Test if the container's filesystem is old (run previously) or new
-EXISTSFILENAME=/data/exists
 echo "Test container starting"
-if [[ -f "$EXISTSFILENAME" ]];
-then
-  TIMESTAMP=`cat $EXISTSFILENAME`
-  echo "Filesystem is old, created: $TIMESTAMP"
-else
-  echo "Filesystem is fresh"
-  echo `date` > $EXISTSFILENAME
-fi
+
+# Report whether each data mount point is backed by a mounted device
+DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
+if [[ -n "$DATA_DEVICE" ]]; then
+  echo "/data: MOUNTED dev=${DATA_DEVICE}"
+else
+  echo "/data: not mounted"
+fi
+DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
+if [[ -n "$DATA2_DEVICE" ]]; then
+  echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
+else
+  echo "/data2: not mounted"
+fi
+
+# Test if the container's filesystem is old (run previously) or new
+for d in /data /data2; do
+  if [[ -f "$d/exists" ]];
+  then
+    TIMESTAMP=`cat $d/exists`
+    echo "$d filesystem is old, created: $TIMESTAMP"
+  else
+    echo "$d filesystem is fresh"
+    echo `date` > $d/exists
+  fi
+done
 if [ -n "$CERC_TEST_PARAM_1" ]; then
   echo "Test-param-1: ${CERC_TEST_PARAM_1}"
 fi

View File

@@ -27,6 +27,7 @@ from stack_orchestrator.opts import opts
 from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
                                      global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
                                      get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file)
+from stack_orchestrator.deploy.spec import Spec
 from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
 from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
@@ -111,6 +112,7 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
 # See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
 def _fixup_pod_file(pod, spec, compose_dir):
+    deployment_type = spec[constants.deploy_to_key]
     # Fix up volumes
     if "volumes" in spec:
         spec_volumes = spec["volumes"]
@@ -119,9 +121,13 @@ def _fixup_pod_file(pod, spec, compose_dir):
             for volume in pod_volumes.keys():
                 if volume in spec_volumes:
                     volume_spec = spec_volumes[volume]
+                    if volume_spec:
                         volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
                         _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
-                    new_volume_spec = {"driver": "local",
+                        # this is Docker specific
+                        if spec.is_docker_deployment():
+                            new_volume_spec = {
+                                "driver": "local",
                                 "driver_opts": {
                                     "type": "none",
                                     "device": volume_spec_fixedup,
@@ -131,15 +137,18 @@ def _fixup_pod_file(pod, spec, compose_dir):
                             pod["volumes"][volume] = new_volume_spec
     # Fix up configmaps
-    if "configmaps" in spec:
-        spec_cfgmaps = spec["configmaps"]
+    if constants.configmaps_key in spec:
+        if spec.is_kubernetes_deployment():
+            spec_cfgmaps = spec[constants.configmaps_key]
         if "volumes" in pod:
-            pod_volumes = pod["volumes"]
+            pod_volumes = pod[constants.volumes_key]
             for volume in pod_volumes.keys():
                 if volume in spec_cfgmaps:
                     volume_cfg = spec_cfgmaps[volume]
                     # Just make the dir (if necessary)
                     _create_bind_dir_if_relative(volume, volume_cfg, compose_dir)
+        else:
+            print(f"Warning: ConfigMaps not supported for {deployment_type}")
     # Fix up ports
     if "network" in spec and "ports" in spec["network"]:
@@ -323,7 +332,7 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
         if image_registry is None:
             error_exit("--image-registry must be supplied with --deploy-to k8s")
         spec_file_content.update({constants.kube_config_key: kube_config})
-        spec_file_content.update({constants.image_resigtry_key: image_registry})
+        spec_file_content.update({constants.image_registry_key: image_registry})
     else:
         # Check for --kube-config supplied for non-relevant deployer types
         if kube_config is not None:
@@ -358,10 +367,16 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
     volume_descriptors = {}
     configmap_descriptors = {}
     for named_volume in named_volumes["rw"]:
+        if "k8s" in deployer_type:
+            volume_descriptors[named_volume] = None
+        else:
             volume_descriptors[named_volume] = f"./data/{named_volume}"
     for named_volume in named_volumes["ro"]:
-        if "k8s" in deployer_type and "config" in named_volume:
-            configmap_descriptors[named_volume] = f"./data/{named_volume}"
+        if "k8s" in deployer_type:
+            if "config" in named_volume:
+                configmap_descriptors[named_volume] = f"./configmaps/{named_volume}"
+            else:
+                volume_descriptors[named_volume] = None
         else:
             volume_descriptors[named_volume] = f"./data/{named_volume}"
     if volume_descriptors:
@@ -406,6 +421,17 @@ def _create_deployment_file(deployment_dir: Path):
         output_file.write(f"{constants.cluster_id_key}: {cluster}\n")
 
+
+def _check_volume_definitions(spec):
+    if spec.is_kubernetes_deployment():
+        for volume_name, volume_path in spec.get_volumes().items():
+            if volume_path:
+                if not os.path.isabs(volume_path):
+                    raise Exception(
+                        f"Relative path {volume_path} for volume {volume_name} not "
+                        f"supported for deployment type {spec.get_deployment_type()}"
+                    )
+
+
 @click.command()
 @click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
 @click.option("--deployment-dir", help="Create deployment files in this directory")
@@ -421,7 +447,8 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
 # The init command's implementation is in a separate function so that we can
 # call it from other commands, bypassing the click decoration stuff
 def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers):
-    parsed_spec = get_parsed_deployment_spec(spec_file)
+    parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
+    _check_volume_definitions(parsed_spec)
     stack_name = parsed_spec["stack"]
     deployment_type = parsed_spec[constants.deploy_to_key]
     stack_file = get_stack_file_path(stack_name)
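
Taken together, the init changes above default volume descriptors differently per deployer type. A minimal illustrative sketch of the resulting mapping (the helper name make_descriptors is hypothetical and not part of this commit):

# Illustrative only: mirrors the descriptor defaults added in the hunk above.
def make_descriptors(named_volumes: dict, deployer_type: str):
    volume_descriptors = {}
    configmap_descriptors = {}
    for name in named_volumes["rw"]:
        # k8s: leave the path unset (None) so the cluster provisions storage;
        # docker/compose: default to a bind mount under ./data
        volume_descriptors[name] = None if "k8s" in deployer_type else f"./data/{name}"
    for name in named_volumes["ro"]:
        if "k8s" in deployer_type and "config" in name:
            configmap_descriptors[name] = f"./configmaps/{name}"
        elif "k8s" in deployer_type:
            volume_descriptors[name] = None
        else:
            volume_descriptors[name] = f"./data/{name}"
    return volume_descriptors, configmap_descriptors

For the test pod above with a k8s deployer this would yield volumes {"test-data-bind": None, "test-data-auto": None} and configmaps {"test-config": "./configmaps/test-config"}; the k8s test script later rewrites test-data-bind to a full host path to exercise the bind-mount case.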

View File

@@ -46,7 +46,7 @@ def push_images_operation(command_context: DeployCommandContext, deployment_context
     cluster_context = command_context.cluster_context
     images: Set[str] = images_for_deployment(cluster_context.compose_files)
     # Tag the images for the remote repo
-    remote_repo_url = deployment_context.spec.obj[constants.image_resigtry_key]
+    remote_repo_url = deployment_context.spec.obj[constants.image_registry_key]
     docker = DockerClient()
     for image in images:
         if _image_needs_pushed(image):

View File

@@ -21,7 +21,7 @@ from typing import Any, List, Set
 from stack_orchestrator.opts import opts
 from stack_orchestrator.util import env_var_map_from_file
 from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
-from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path
+from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
 from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map, envs_from_compose_file, merge_envs
 from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
 from stack_orchestrator.deploy.deploy_types import DeployEnvVars
@@ -171,21 +171,33 @@ class ClusterInfo:
             print(f"Spec Volumes: {spec_volumes}")
             print(f"Named Volumes: {named_volumes}")
             print(f"Resources: {resources}")
-        for volume_name in spec_volumes:
+        for volume_name, volume_path in spec_volumes.items():
             if volume_name not in named_volumes:
                 if opts.o.debug:
                     print(f"{volume_name} not in pod files")
                 continue
+            labels = {
+                "app": self.app_name,
+                "volume-label": f"{self.app_name}-{volume_name}"
+            }
+            if volume_path:
+                storage_class_name = "manual"
+                k8s_volume_name = f"{self.app_name}-{volume_name}"
+            else:
+                # These will be auto-assigned.
+                storage_class_name = None
+                k8s_volume_name = None
             spec = client.V1PersistentVolumeClaimSpec(
                 access_modes=["ReadWriteOnce"],
-                storage_class_name="manual",
+                storage_class_name=storage_class_name,
                 resources=to_k8s_resource_requirements(resources),
-                volume_name=f"{self.app_name}-{volume_name}"
+                volume_name=k8s_volume_name
             )
             pvc = client.V1PersistentVolumeClaim(
-                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
-                                             labels={"volume-label": f"{self.app_name}-{volume_name}"}),
-                spec=spec,
+                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels),
+                spec=spec
             )
             result.append(pvc)
         return result
@@ -226,16 +238,32 @@ class ClusterInfo:
         resources = self.spec.get_volume_resources()
         if not resources:
             resources = DEFAULT_VOLUME_RESOURCES
-        for volume_name in spec_volumes:
+        for volume_name, volume_path in spec_volumes.items():
+            # We only need to create a volume if it is fully qualified HostPath.
+            # Otherwise, we create the PVC and expect the node to allocate the volume for us.
+            if not volume_path:
+                if opts.o.debug:
+                    print(f"{volume_name} does not require an explicit PersistentVolume, since it is not a bind-mount.")
+                continue
             if volume_name not in named_volumes:
                 if opts.o.debug:
                     print(f"{volume_name} not in pod files")
                 continue
+            if not os.path.isabs(volume_path):
+                print(f"WARNING: {volume_name}:{volume_path} is not absolute, cannot bind volume.")
+                continue
+            if self.spec.is_kind_deployment():
+                host_path = client.V1HostPathVolumeSource(path=get_kind_pv_bind_mount_path(volume_name))
+            else:
+                host_path = client.V1HostPathVolumeSource(path=volume_path)
             spec = client.V1PersistentVolumeSpec(
                 storage_class_name="manual",
                 access_modes=["ReadWriteOnce"],
                 capacity=to_k8s_resource_requirements(resources).requests,
-                host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name))
+                host_path=host_path
             )
             pv = client.V1PersistentVolume(
                 metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
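
The net effect of these two hunks: a volume with a host path in the spec becomes a bind mount backed by an explicit "manual" PersistentVolume that the PVC is pinned to by name, while a volume with no path gets only a PVC and is left to the cluster's default provisioner. A short sketch of that decision, for illustration only (the helper name pv_strategy is hypothetical, not part of the commit):

def pv_strategy(volume_name: str, volume_path: str, is_kind: bool) -> dict:
    """Illustrative only: summarizes how get_pvcs/get_pvs above treat a volume."""
    if not volume_path:
        # No host path in the spec: only a PVC is created (no storage class,
        # no explicit volume name) so the default provisioner allocates storage.
        return {"pv": None, "pvc_storage_class": None}
    # A host path means a bind mount: a "manual" PV is created and the PVC is
    # pinned to it by name. Under kind the hostPath is the in-node mount point
    # (get_kind_pv_bind_mount_path), otherwise the spec path itself.
    host_path = f"/mnt/{volume_name}" if is_kind else volume_path
    return {"pv": {"hostPath": host_path, "storageClass": "manual"},
            "pvc_storage_class": "manual"}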

View File

@@ -88,6 +88,16 @@ class K8sDeployer(Deployer):
             if opts.o.debug:
                 print(f"Sending this pv: {pv}")
             if not opts.o.dry_run:
+                try:
+                    pv_resp = self.core_api.read_persistent_volume(name=pv.metadata.name)
+                    if pv_resp:
+                        if opts.o.debug:
+                            print("PVs already present:")
+                            print(f"{pv_resp}")
+                        continue
+                except:  # noqa: E722
+                    pass
+
                 pv_resp = self.core_api.create_persistent_volume(body=pv)
                 if opts.o.debug:
                     print("PVs created:")
@@ -100,6 +110,17 @@ class K8sDeployer(Deployer):
                 print(f"Sending this pvc: {pvc}")
             if not opts.o.dry_run:
+                try:
+                    pvc_resp = self.core_api.read_namespaced_persistent_volume_claim(
+                        name=pvc.metadata.name, namespace=self.k8s_namespace)
+                    if pvc_resp:
+                        if opts.o.debug:
+                            print("PVCs already present:")
+                            print(f"{pvc_resp}")
+                        continue
+                except:  # noqa: E722
+                    pass
+
                 pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
                 if opts.o.debug:
                     print("PVCs created:")
@@ -181,6 +202,8 @@ class K8sDeployer(Deployer):
     def down(self, timeout, volumes):  # noqa: C901
         self.connect_api()
         # Delete the k8s objects
+
+        if volumes:
             # Create the host-path-mounted PVs for this deployment
             pvs = self.cluster_info.get_pvs()
             for pv in pvs:
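
The up path now reads each PV/PVC before creating it, so re-running up against a cluster that already holds the objects no longer fails. A minimal sketch of the same read-before-create pattern, assuming the standard kubernetes Python client; it narrows the bare except used above to ApiException/404 and is offered as an illustrative variant, not the commit's code:

from kubernetes import client
from kubernetes.client.rest import ApiException

def ensure_pv(core_api: client.CoreV1Api, pv: client.V1PersistentVolume):
    try:
        # If the PV already exists, leave it alone and return it.
        return core_api.read_persistent_volume(name=pv.metadata.name)
    except ApiException as e:
        if e.status != 404:
            raise  # some other API error: surface it
    # Not found: create it.
    return core_api.create_persistent_volume(body=pv)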

View File

@@ -92,7 +92,7 @@ def named_volumes_from_pod_files(parsed_pod_files):
     return named_volumes
 
-def get_node_pv_mount_path(volume_name: str):
+def get_kind_pv_bind_mount_path(volume_name: str):
     return f"/mnt/{volume_name}"
@@ -117,11 +117,14 @@ def volume_mounts_for_service(parsed_pod_files, service):
             mount_path = mount_split[1]
             mount_options = mount_split[2] if len(mount_split) == 3 else None
             if opts.o.debug:
-                print(f"volumne_name: {volume_name}")
+                print(f"volume_name: {volume_name}")
                 print(f"mount path: {mount_path}")
                 print(f"mount options: {mount_options}")
             volume_device = client.V1VolumeMount(
-                mount_path=mount_path, name=volume_name, read_only="ro" == mount_options)
+                mount_path=mount_path,
+                name=volume_name,
+                read_only="ro" == mount_options
+            )
             result.append(volume_device)
     return result
@@ -144,18 +147,8 @@ def volumes_for_pod_files(parsed_pod_files, spec, app_name):
     return result
 
-def _get_host_paths_for_volumes(parsed_pod_files):
-    result = {}
-    for pod in parsed_pod_files:
-        parsed_pod_file = parsed_pod_files[pod]
-        if "volumes" in parsed_pod_file:
-            volumes = parsed_pod_file["volumes"]
-            for volume_name in volumes.keys():
-                volume_definition = volumes[volume_name]
-                if volume_definition and "driver_opts" in volume_definition:
-                    host_path = volume_definition["driver_opts"]["device"]
-                    result[volume_name] = host_path
-    return result
+def _get_host_paths_for_volumes(deployment_context):
+    return deployment_context.spec.get_volumes()
 
 def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
@@ -163,12 +156,12 @@ def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
         return data_mount_path
     else:
         # Python Path voodo that looks pretty odd:
-        return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve()
+        return Path.cwd().joinpath(deployment_dir.joinpath(data_mount_path)).resolve()
 
 def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
     volume_definitions = []
-    volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files)
+    volume_host_path_map = _get_host_paths_for_volumes(deployment_context)
     # Note these paths are relative to the location of the pod files (at present)
     # So we need to fix up to make them correct and absolute because kind assumes
     # relative to the cwd.
@@ -188,13 +181,14 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
                     volume_name = mount_split[0]
                     mount_path = mount_split[1]
                     if opts.o.debug:
-                        print(f"volumne_name: {volume_name}")
+                        print(f"volume_name: {volume_name}")
                         print(f"map: {volume_host_path_map}")
                         print(f"mount path: {mount_path}")
                     if volume_name not in deployment_context.spec.get_configmaps():
+                        if volume_host_path_map[volume_name]:
                             volume_definitions.append(
                                 f"  - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
-                                f"    containerPath: {get_node_pv_mount_path(volume_name)}\n"
+                                f"    containerPath: {get_kind_pv_bind_mount_path(volume_name)}\n"
                             )
     return (
         "" if len(volume_definitions) == 0 else (

View File

@@ -72,8 +72,18 @@ class Spec:
     obj: typing.Any
     file_path: Path
 
-    def __init__(self) -> None:
-        pass
+    def __init__(self, file_path: Path = None, obj={}) -> None:
+        self.file_path = file_path
+        self.obj = obj
+
+    def __getitem__(self, item):
+        return self.obj[item]
+
+    def __contains__(self, item):
+        return item in self.obj
+
+    def get(self, item, default=None):
+        return self.obj.get(item, default)
 
     def init_from_file(self, file_path: Path):
         with file_path:
@@ -81,8 +91,8 @@ class Spec:
         self.file_path = file_path
 
     def get_image_registry(self):
-        return (self.obj[constants.image_resigtry_key]
-                if self.obj and constants.image_resigtry_key in self.obj
+        return (self.obj[constants.image_registry_key]
+                if self.obj and constants.image_registry_key in self.obj
                 else None)
 
     def get_volumes(self):
@@ -118,3 +128,15 @@ class Spec:
     def get_capabilities(self):
         return self.obj.get("security", {}).get("capabilities", [])
+
+    def get_deployment_type(self):
+        return self.obj[constants.deploy_to_key]
+
+    def is_kubernetes_deployment(self):
+        return self.get_deployment_type() in [constants.k8s_kind_deploy_type, constants.k8s_deploy_type]
+
+    def is_kind_deployment(self):
+        return self.get_deployment_type() in [constants.k8s_kind_deploy_type]
+
+    def is_docker_deployment(self):
+        return self.get_deployment_type() in [constants.compose_deploy_type]
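
With the dict-style accessors and deployment-type predicates, call sites such as _fixup_pod_file can treat a Spec like the parsed YAML mapping while still branching on deployer type. A usage sketch, assuming the constructor arguments and constants shown in the diff (the file path and the literal "k8s-kind" deploy-to value are hypothetical example values):

from stack_orchestrator import constants
from stack_orchestrator.deploy.spec import Spec

spec = Spec("/srv/deployments/test/spec.yml",
            {constants.deploy_to_key: "k8s-kind",
             constants.volumes_key: {"test-data-bind": "/srv/data"}})

has_volumes = constants.volumes_key in spec      # __contains__
volumes = spec.get(constants.volumes_key, {})    # .get with a default
deploy_to = spec[constants.deploy_to_key]        # __getitem__

if spec.is_kubernetes_deployment():
    print(f"{deploy_to}: volumes map to PVs/PVCs: {volumes}")
elif spec.is_docker_deployment():
    print(f"{deploy_to}: volumes map to compose bind mounts: {volumes}")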

View File

@@ -63,7 +63,7 @@ $TEST_TARGET_SO --stack test deploy down
 # The next time we bring the container up the volume will be old (from the previous run above)
 $TEST_TARGET_SO --stack test deploy up
 log_output_1=$( $TEST_TARGET_SO --stack test deploy logs )
-if [[ "$log_output_1" == *"Filesystem is old"* ]]; then
+if [[ "$log_output_1" == *"filesystem is old"* ]]; then
   echo "Retain volumes test: passed"
 else
   echo "Retain volumes test: FAILED"
@@ -73,7 +73,7 @@ $TEST_TARGET_SO --stack test deploy down --delete-volumes
 # Now when we bring the container up the volume will be new again
 $TEST_TARGET_SO --stack test deploy up
 log_output_2=$( $TEST_TARGET_SO --stack test deploy logs )
-if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
+if [[ "$log_output_2" == *"filesystem is fresh"* ]]; then
   echo "Delete volumes test: passed"
 else
   echo "Delete volumes test: FAILED"
@@ -121,7 +121,7 @@ echo "deploy create output file test: passed"
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start
 # Check logs command works
 log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
-if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then
+if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
   echo "deployment logs test: passed"
 else
   echo "deployment logs test: FAILED"
@@ -158,7 +158,7 @@ $TEST_TARGET_SO deployment --dir $test_deployment_dir stop
 sleep 20
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start
 log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
-if [[ "$log_output_5" == *"Filesystem is old"* ]]; then
+if [[ "$log_output_5" == *"filesystem is old"* ]]; then
   echo "Retain volumes test: passed"
 else
   echo "Retain volumes test: FAILED"

View File

@@ -76,6 +76,10 @@ if [ ! -f "$test_deployment_spec" ]; then
   exit 1
 fi
 echo "deploy init test: passed"
+
+# Switch to a full path for bind mount.
+sed -i "s|^\(\s*test-data-bind:$\)$|\1 ${test_deployment_dir}/data/test-data-bind|" $test_deployment_spec
+
 $TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
 # Check the deployment dir exists
 if [ ! -d "$test_deployment_dir" ]; then
@@ -99,7 +103,7 @@ if [ ! "$create_file_content" == "create-command-output-data" ]; then
 fi
 # Add a config file to be picked up by the ConfigMap before starting.
-echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config
+echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/configmaps/test-config/test_config
 echo "deploy create output file test: passed"
 
 # Try to start the deployment
@@ -107,11 +111,13 @@ $TEST_TARGET_SO deployment --dir $test_deployment_dir start
 wait_for_pods_started
 # Check logs command works
 wait_for_log_output
+sleep 1
 log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
-if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then
+if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
   echo "deployment logs test: passed"
 else
   echo "deployment logs test: FAILED"
+  echo $log_output_3
   delete_cluster_exit
 fi
@@ -140,6 +146,26 @@ else
   delete_cluster_exit
 fi
 
+# Check that the bind-mount volume is mounted.
+log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_5" == *"/data: MOUNTED"* ]]; then
+  echo "deployment bind volumes test: passed"
+else
+  echo "deployment bind volumes test: FAILED"
+  echo $log_output_5
+  delete_cluster_exit
+fi
+
+# Check that the provisioner managed volume is mounted.
+log_output_6=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_6" == *"/data2: MOUNTED"* ]]; then
+  echo "deployment provisioner volumes test: passed"
+else
+  echo "deployment provisioner volumes test: FAILED"
+  echo $log_output_6
+  delete_cluster_exit
+fi
+
 # Stop then start again and check the volume was preserved
 $TEST_TARGET_SO deployment --dir $test_deployment_dir stop
 # Sleep a bit just in case
@@ -148,13 +174,26 @@ sleep 20
 $TEST_TARGET_SO deployment --dir $test_deployment_dir start
 wait_for_pods_started
 wait_for_log_output
-log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
-if [[ "$log_output_5" == *"Filesystem is old"* ]]; then
-  echo "Retain volumes test: passed"
+sleep 1
+
+log_output_10=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_10" == *"/data filesystem is old"* ]]; then
+  echo "Retain bind volumes test: passed"
 else
-  echo "Retain volumes test: FAILED"
+  echo "Retain bind volumes test: FAILED"
   delete_cluster_exit
 fi
+
+# These volumes will be completely destroyed by the kind delete/create, because they lived inside
+# the kind container. So, unlike the bind-mount case, they will appear fresh after the restart.
+log_output_11=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_11" == *"/data2 filesystem is fresh"* ]]; then
+  echo "Fresh provisioner volumes test: passed"
+else
+  echo "Fresh provisioner volumes test: FAILED"
+  delete_cluster_exit
+fi
+
 # Stop and clean up
 $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
 echo "Test passed"