diff --git a/requirements.txt b/requirements.txt
index bbf97b4a..f6e3d07c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,3 +10,4 @@ pydantic==1.10.9
 tomli==2.0.1
 validators==0.22.0
 kubernetes>=28.1.0
+humanfriendly>=10.0
diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py
index 35b2b9da..30b2ab11 100644
--- a/stack_orchestrator/deploy/k8s/cluster_info.py
+++ b/stack_orchestrator/deploy/k8s/cluster_info.py
@@ -25,9 +25,38 @@ from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path
 from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map
 from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
 from stack_orchestrator.deploy.deploy_types import DeployEnvVars
-from stack_orchestrator.deploy.spec import Spec
+from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
 from stack_orchestrator.deploy.images import remote_tag_for_image
 
+DEFAULT_VOLUME_RESOURCES = Resources({
+    "reservations": {"storage": "2Gi"}
+})
+
+DEFAULT_CONTAINER_RESOURCES = Resources({
+    "reservations": {"cpus": "0.1", "memory": "200M"},
+    "limits": {"cpus": "1.0", "memory": "2000M"},
+})
+
+
+def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
+    def to_dict(limits: ResourceLimits):
+        if not limits:
+            return None
+
+        ret = {}
+        if limits.cpus:
+            ret["cpu"] = str(limits.cpus)
+        if limits.memory:
+            ret["memory"] = f"{int(limits.memory / (1000 * 1000))}M"
+        if limits.storage:
+            ret["storage"] = f"{int(limits.storage / (1000 * 1000))}M"
+        return ret
+
+    return client.V1ResourceRequirements(
+        requests=to_dict(resources.reservations),
+        limits=to_dict(resources.limits)
+    )
+
 
 class ClusterInfo:
     parsed_pod_yaml_map: Any
@@ -135,9 +164,13 @@ class ClusterInfo:
         result = []
         spec_volumes = self.spec.get_volumes()
         named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
+        resources = self.spec.get_volume_resources()
+        if not resources:
+            resources = DEFAULT_VOLUME_RESOURCES
         if opts.o.debug:
             print(f"Spec Volumes: {spec_volumes}")
             print(f"Named Volumes: {named_volumes}")
+            print(f"Resources: {resources}")
         for volume_name in spec_volumes:
             if volume_name not in named_volumes:
                 if opts.o.debug:
@@ -146,9 +179,7 @@ class ClusterInfo:
             spec = client.V1PersistentVolumeClaimSpec(
                 access_modes=["ReadWriteOnce"],
                 storage_class_name="manual",
-                resources=client.V1ResourceRequirements(
-                    requests={"storage": "2Gi"}
-                ),
+                resources=to_k8s_resource_requirements(resources),
                 volume_name=f"{self.app_name}-{volume_name}"
             )
             pvc = client.V1PersistentVolumeClaim(
@@ -192,6 +223,9 @@ class ClusterInfo:
         result = []
         spec_volumes = self.spec.get_volumes()
         named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
+        resources = self.spec.get_volume_resources()
+        if not resources:
+            resources = DEFAULT_VOLUME_RESOURCES
         for volume_name in spec_volumes:
             if volume_name not in named_volumes:
                 if opts.o.debug:
@@ -200,7 +234,7 @@ class ClusterInfo:
             spec = client.V1PersistentVolumeSpec(
                 storage_class_name="manual",
                 access_modes=["ReadWriteOnce"],
-                capacity={"storage": "2Gi"},
+                capacity=to_k8s_resource_requirements(resources).requests,
                 host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name))
             )
             pv = client.V1PersistentVolume(
@@ -214,6 +248,9 @@ class ClusterInfo:
     # TODO: put things like image pull policy into an object-scope struct
     def get_deployment(self, image_pull_policy: str = None):
         containers = []
+        resources = self.spec.get_container_resources()
+        if not resources:
+            resources = DEFAULT_CONTAINER_RESOURCES
         for pod_name in self.parsed_pod_yaml_map:
             pod = self.parsed_pod_yaml_map[pod_name]
             services = pod["services"]
@@ -237,10 +274,7 @@ class ClusterInfo:
                     env=envs_from_environment_variables_map(self.environment_variables.map),
                     ports=[client.V1ContainerPort(container_port=port)],
                     volume_mounts=volume_mounts,
-                    resources=client.V1ResourceRequirements(
-                        requests={"cpu": "100m", "memory": "200Mi"},
-                        limits={"cpu": "1000m", "memory": "2000Mi"},
-                    ),
+                    resources=to_k8s_resource_requirements(resources),
                 )
                 containers.append(container)
         volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name)
diff --git a/stack_orchestrator/deploy/spec.py b/stack_orchestrator/deploy/spec.py
index dd6cd107..fa0489e7 100644
--- a/stack_orchestrator/deploy/spec.py
+++ b/stack_orchestrator/deploy/spec.py
@@ -13,12 +13,60 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from pathlib import Path
 import typing
+import humanfriendly
+
+from pathlib import Path
+
 from stack_orchestrator.util import get_yaml
 from stack_orchestrator import constants
 
 
+class ResourceLimits:
+    cpus: float = None
+    memory: int = None
+    storage: int = None
+
+    def __init__(self, obj={}):
+        if "cpus" in obj:
+            self.cpus = float(obj["cpus"])
+        if "memory" in obj:
+            self.memory = humanfriendly.parse_size(obj["memory"])
+        if "storage" in obj:
+            self.storage = humanfriendly.parse_size(obj["storage"])
+
+    def __len__(self):
+        return len(self.__dict__)
+
+    def __iter__(self):
+        for k in self.__dict__:
+            yield k, self.__dict__[k]
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+class Resources:
+    limits: ResourceLimits = None
+    reservations: ResourceLimits = None
+
+    def __init__(self, obj={}):
+        if "reservations" in obj:
+            self.reservations = ResourceLimits(obj["reservations"])
+        if "limits" in obj:
+            self.limits = ResourceLimits(obj["limits"])
+
+    def __len__(self):
+        return len(self.__dict__)
+
+    def __iter__(self):
+        for k in self.__dict__:
+            yield k, self.__dict__[k]
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+
 class Spec:
 
     obj: typing.Any
@@ -47,6 +95,12 @@ class Spec:
                 if self.obj and "configmaps" in self.obj
                 else {})
 
+    def get_container_resources(self):
+        return Resources(self.obj.get("resources", {}).get("containers", {}))
+
+    def get_volume_resources(self):
+        return Resources(self.obj.get("resources", {}).get("volumes", {}))
+
     def get_http_proxy(self):
         return (self.obj[constants.network_key][constants.http_proxy_key]
                 if self.obj and constants.network_key in self.obj
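
A quick sketch of how the new `Resources`/`ResourceLimits` classes in `spec.py` behave (values are illustrative, not taken from the change itself):

```python
# Sketch only -- illustrative values, not part of the change.
import humanfriendly

from stack_orchestrator.deploy.spec import Resources

# What Spec.get_container_resources() would return for a spec file containing:
#   resources:
#     containers:
#       reservations: {cpus: "0.5", memory: "500M"}
#       limits: {cpus: "2.0", memory: "4000M"}
container_resources = Resources({
    "reservations": {"cpus": "0.5", "memory": "500M"},
    "limits": {"cpus": "2.0", "memory": "4000M"},
})
assert container_resources.reservations.cpus == 0.5  # parsed with float()
assert container_resources.limits.memory == humanfriendly.parse_size("4000M")  # stored as a plain byte count

# A spec with no "resources" section yields an empty Resources object; its
# __len__() is 0, so it is falsy and the "if not resources:" checks in
# cluster_info.py swap in DEFAULT_CONTAINER_RESOURCES / DEFAULT_VOLUME_RESOURCES.
assert not Resources({})
```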
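And the `cluster_info.py` side: `to_k8s_resource_requirements()` renders a parsed `Resources` object into the `client.V1ResourceRequirements` that `get_deployment()` and the PVC/PV hunks now use. A sketch exercising the new module-level defaults:

```python
# Sketch only -- exercises the defaults added in cluster_info.py above.
from stack_orchestrator.deploy.k8s.cluster_info import (
    DEFAULT_CONTAINER_RESOURCES,
    DEFAULT_VOLUME_RESOURCES,
    to_k8s_resource_requirements,
)

# Container defaults become the requests/limits attached to every container by
# get_deployment() when the spec carries no "resources" section.
container_reqs = to_k8s_resource_requirements(DEFAULT_CONTAINER_RESOURCES)
print(container_reqs.requests)  # {'cpu': '0.1', 'memory': '200M'}
print(container_reqs.limits)    # {'cpu': '1.0', 'memory': '2000M'}

# The volume default only reserves storage, so limits comes back as None and the
# requests dict (a single "storage" entry) supplies both the PVC request and the
# PV capacity.
volume_reqs = to_k8s_resource_requirements(DEFAULT_VOLUME_RESOURCES)
assert volume_reqs.limits is None
assert list(volume_reqs.requests) == ["storage"]
```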