Add image push command #656

Merged
telackey merged 15 commits from dboreham/test-stack-k8s into main 2023-11-21 03:23:55 +00:00
12 changed files with 238 additions and 104 deletions

View File

@@ -18,5 +18,7 @@ compose_deploy_type = "compose"
k8s_kind_deploy_type = "k8s-kind"
k8s_deploy_type = "k8s"
kube_config_key = "kube-config"
deploy_to_key = "deploy-to"
image_resigtry_key = "image-registry"
kind_config_filename = "kind-config.yml"
kube_config_filename = "kubeconfig.yml"
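Note: the new constant's name carries a transposition ("resigtry"), but its value is the intended "image-registry" spec key, and the misspelled identifier is what the rest of this PR references. A one-line check, for orientation:

from stack_orchestrator import constants
assert constants.image_resigtry_key == "image-registry"  # the key read from the deployment spec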

View File

@@ -16,13 +16,14 @@
from pathlib import Path
from python_on_whales import DockerClient, DockerException
from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext
class DockerDeployer(Deployer):
name: str = "compose"
type: str
def __init__(self, type, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
compose_env_file=compose_env_file)
self.type = type

View File

@@ -62,11 +62,16 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
def create_deploy_context(
global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deploy_to):
global_context,
deployment_context: DeploymentContext,
stack,
include,
exclude,
cluster,
env_file,
deploy_to) -> DeployCommandContext:
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
deployment_dir = deployment_context.deployment_dir if deployment_context else None
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
deployer = getDeployer(deploy_to, deployment_dir, compose_files=cluster_context.compose_files,
deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
return DeployCommandContext(stack, cluster_context, deployer)

View File

@@ -14,9 +14,10 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from typing import List
from typing import List, Any
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list
from stack_orchestrator.opts import opts
def _container_image_from_service(stack: str, service: str):
@@ -37,6 +38,33 @@ def _container_image_from_service(stack: str, service: str):
return image_name
def parsed_pod_files_map_from_file_names(pod_files):
parsed_pod_yaml_map : Any = {}
for pod_file in pod_files:
with open(pod_file, "r") as pod_file_descriptor:
parsed_pod_file = get_yaml().load(pod_file_descriptor)
parsed_pod_yaml_map[pod_file] = parsed_pod_file
if opts.o.debug:
print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}")
return parsed_pod_yaml_map
def images_for_deployment(pod_files: List[str]):
image_set = set()
parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
# Find the set of images in the pods
for pod_name in parsed_pod_yaml_map:
pod = parsed_pod_yaml_map[pod_name]
services = pod["services"]
for service_name in services:
service_info = services[service_name]
image = service_info["image"]
image_set.add(image)
if opts.o.debug:
print(f"image_set: {image_set}")
return image_set
def _volumes_to_docker(mounts: List[VolumeMapping]):
# Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")]
result = []
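For orientation, a minimal sketch of the services → image structure that images_for_deployment walks; the pod file name and images below are made up, and the dict stands in for what parsed_pod_files_map_from_file_names would load from disk:

# Illustrative only: mirrors the inner loop of images_for_deployment().
parsed_pod_yaml_map = {
    "docker-compose-example.yml": {  # hypothetical pod file
        "services": {
            "fixturenet-eth": {"image": "cerc/fixturenet-eth:local"},
            "foundry": {"image": "cerc/foundry:local"},
        }
    }
}
image_set = set()
for pod in parsed_pod_yaml_map.values():
    for service_info in pod["services"].values():
        image_set.add(service_info["image"])
assert image_set == {"cerc/fixturenet-eth:local", "cerc/foundry:local"}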

View File

@@ -27,10 +27,10 @@ def getDeployerConfigGenerator(type: str):
print(f"ERROR: deploy-to {type} is not valid")
def getDeployer(type: str, deployment_dir, compose_files, compose_project_name, compose_env_file):
def getDeployer(type: str, deployment_context, compose_files, compose_project_name, compose_env_file):
if type == "compose" or type is None:
return DockerDeployer(type, deployment_dir, compose_files, compose_project_name, compose_env_file)
return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
return K8sDeployer(type, deployment_dir, compose_files, compose_project_name, compose_env_file)
return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
else:
print(f"ERROR: deploy-to {type} is not valid")

View File

@@ -17,8 +17,10 @@ import click
from pathlib import Path
import sys
from stack_orchestrator import constants
from stack_orchestrator.deploy.images import push_images_operation
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deployment_context import DeploymentContext
@@ -46,13 +48,13 @@ def command(ctx, dir):
ctx.obj = deployment_context
def make_deploy_context(ctx):
def make_deploy_context(ctx) -> DeployCommandContext:
context: DeploymentContext = ctx.obj
stack_file_path = context.get_stack_file()
env_file = context.get_env_file()
cluster_name = context.get_cluster_name()
if "deploy-to" in context.spec.obj:
deployment_type = context.spec.obj["deploy-to"]
if constants.deploy_to_key in context.spec.obj:
deployment_type = context.spec.obj[constants.deploy_to_key]
else:
deployment_type = constants.compose_deploy_type
return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file,
@@ -109,6 +111,14 @@ def ps(ctx):
ps_operation(ctx)
@command.command()
@click.pass_context
def push_images(ctx):
deploy_command_context: DeployCommandContext = make_deploy_context(ctx)
deployment_context: DeploymentContext = ctx.obj
push_images_operation(deploy_command_context, deployment_context)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
@click.pass_context

View File

@@ -250,20 +250,28 @@ def _parse_config_variables(variable_values: str):
@click.command()
@click.option("--config", help="Provide config variables for the deployment")
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster")
@click.option("--output", required=True, help="Write yaml spec file here")
@click.option("--map-ports-to-host", required=False,
help="Map ports to the host as one of: any-variable-random (default), "
"localhost-same, any-same, localhost-fixed-random, any-fixed-random")
@click.pass_context
def init(ctx, config, kube_config, output, map_ports_to_host):
def init(ctx, config, kube_config, image_registry, output, map_ports_to_host):
yaml = get_yaml()
stack = global_options(ctx).stack
debug = global_options(ctx).debug
deployer_type = ctx.obj.deployer.type
default_spec_file_content = call_stack_deploy_init(ctx.obj)
spec_file_content = {"stack": stack, "deploy-to": deployer_type}
spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
if deployer_type == "k8s":
spec_file_content.update({constants.kube_config_key: kube_config})
spec_file_content.update({constants.image_resigtry_key: image_registry})
else:
# Check for --kube-config supplied for non-relevant deployer types
if kube_config is not None:
error_exit(f"--kube-config is not allowed with a {deployer_type} deployment")
if image_registry is not None:
error_exit(f"--image-registry is not allowed with a {deployer_type} deployment")
if default_spec_file_content:
spec_file_content.update(default_spec_file_content)
config_variables = _parse_config_variables(config)
@@ -323,7 +331,7 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path):
def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
parsed_spec = get_parsed_deployment_spec(spec_file)
stack_name = parsed_spec["stack"]
deployment_type = parsed_spec["deploy-to"]
deployment_type = parsed_spec[constants.deploy_to_key]
stack_file = get_stack_file_path(stack_name)
parsed_stack = get_parsed_stack_config(stack_name)
if global_options(ctx).debug:
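As a usage sketch: for a k8s deployment (the deployer type comes from the enclosing deploy command context), init combines --kube-config and --image-registry into a spec whose relevant keys look roughly like the dict below (the path and URL are placeholders); for other deployer types either option is rejected with error_exit, as shown above.

# Illustrative only: approximate spec content written by init for a k8s deployment.
spec_file_content = {
    "stack": "test",                                 # selected stack
    "deploy-to": "k8s",                              # constants.deploy_to_key
    "kube-config": "/home/user/.kube/config",        # hypothetical --kube-config value
    "image-registry": "registry.example.com/myorg",  # hypothetical --image-registry value
}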

View File

@@ -0,0 +1,62 @@
# Copyright © 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import Set
from python_on_whales import DockerClient
from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deploy_util import images_for_deployment
def _image_needs_pushed(image: str):
# TODO: this needs to be more intelligent
return image.endswith(":local")
def remote_tag_for_image(image: str, remote_repo_url: str):
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
(org, image_name_with_version) = image.split("/")
(image_name, image_version) = image_name_with_version.split(":")
if image_version == "local":
return f"{remote_repo_url}/{image_name}:deploy"
else:
return image
# TODO: needs lots of error handling
def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext):
# Get the list of images for the stack
cluster_context = command_context.cluster_context
images: Set[str] = images_for_deployment(cluster_context.compose_files)
# Tag the images for the remote repo
remote_repo_url = deployment_context.spec.obj[constants.image_resigtry_key]
docker = DockerClient()
for image in images:
if _image_needs_pushed(image):
remote_tag = remote_tag_for_image(image, remote_repo_url)
if opts.o.verbose:
print(f"Tagging {image} to {remote_tag}")
docker.image.tag(image, remote_tag)
# Run docker push commands to upload
for image in images:
if _image_needs_pushed(image):
remote_tag = remote_tag_for_image(image, remote_repo_url)
if opts.o.verbose:
print(f"Pushing image {remote_tag}")
docker.image.push(remote_tag)
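A quick sketch of how remote_tag_for_image rewrites a locally built tag (the registry URL is a made-up placeholder); images not tagged :local pass through unchanged and are skipped by _image_needs_pushed:

from stack_orchestrator.deploy.images import remote_tag_for_image

assert remote_tag_for_image(
    "cerc/test-container:local", "registry.example.com/myorg"
) == "registry.example.com/myorg/test-container:deploy"
assert remote_tag_for_image(
    "cerc/test-container:v1.2", "registry.example.com/myorg"
) == "cerc/test-container:v1.2"  # non-:local tags are returned as-is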

View File

@@ -18,34 +18,30 @@ from typing import Any, List, Set
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names, get_node_pv_mount_path
from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path
from stack_orchestrator.deploy.k8s.helpers import env_var_map_from_file, envs_from_environment_variables_map
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
from stack_orchestrator.deploy.images import remote_tag_for_image
class ClusterInfo:
parsed_pod_yaml_map: Any = {}
parsed_pod_yaml_map: Any
image_set: Set[str] = set()
app_name: str = "test-app"
deployment_name: str = "test-deployment"
environment_variables: DeployEnvVars
remote_image_repo: str
def __init__(self) -> None:
pass
def int(self, pod_files: List[str], compose_env_file):
def int(self, pod_files: List[str], compose_env_file, remote_image_repo):
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
# Find the set of images in the pods
for pod_name in self.parsed_pod_yaml_map:
pod = self.parsed_pod_yaml_map[pod_name]
services = pod["services"]
for service_name in services:
service_info = services[service_name]
image = service_info["image"]
self.image_set.add(image)
if opts.o.debug:
print(f"image_set: {self.image_set}")
self.image_set = images_for_deployment(pod_files)
self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file))
self.remote_image_repo = remote_image_repo
if (opts.o.debug):
print(f"Env vars: {self.environment_variables.map}")
@@ -99,10 +95,12 @@ class ClusterInfo:
container_name = service_name
service_info = services[service_name]
image = service_info["image"]
# Re-write the image tag for remote deployment
image_to_use = remote_tag_for_image(image, self.remote_image_repo) if self.remote_image_repo is not None else image
volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
container = client.V1Container(
name=container_name,
image=image,
image=image_to_use,
env=envs_from_environment_variables_map(self.environment_variables.map),
ports=[client.V1ContainerPort(container_port=80)],
volume_mounts=volume_mounts,

View File

@@ -22,6 +22,7 @@ from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster
from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deployment_context import DeploymentContext
class K8sDeployer(Deployer):
@@ -33,19 +34,21 @@ class K8sDeployer(Deployer):
kind_cluster_name: str
cluster_info : ClusterInfo
deployment_dir: Path
deployment_context: DeploymentContext
def __init__(self, type, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
if (opts.o.debug):
print(f"Deployment dir: {deployment_dir}")
print(f"Deployment dir: {deployment_context.deployment_dir}")
print(f"Compose files: {compose_files}")
print(f"Project name: {compose_project_name}")
print(f"Env file: {compose_env_file}")
print(f"Type: {type}")
self.type = type
self.deployment_dir = deployment_dir
self.deployment_dir = deployment_context.deployment_dir
self.deployment_context = deployment_context
self.kind_cluster_name = compose_project_name
self.cluster_info = ClusterInfo()
self.cluster_info.int(compose_files, compose_env_file)
self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec.obj[constants.image_resigtry_key])
def connect_api(self):
if self.is_kind():
@@ -97,7 +100,35 @@ class K8sDeployer(Deployer):
{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
def down(self, timeout, volumes):
self.connect_api()
# Delete the k8s objects
# Delete the host-path-mounted PVs for this deployment
pvs = self.cluster_info.get_pvs()
for pv in pvs:
if opts.o.debug:
print(f"Deleting this pv: {pv}")
pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name)
if opts.o.debug:
print("PV deleted:")
print(f"{pv_resp}")
# Figure out the PVCs for this deployment
pvcs = self.cluster_info.get_pvcs()
for pvc in pvcs:
if opts.o.debug:
print(f"Deleting this pvc: {pvc}")
pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(name=pvc.metadata.name, namespace=self.k8s_namespace)
if opts.o.debug:
print("PVCs deleted:")
print(f"{pvc_resp}")
# Get the Deployment object constructed from the compose files
deployment = self.cluster_info.get_deployment()
# Delete the k8s Deployment
if opts.o.debug:
print(f"Deleting this deployment: {deployment}")
self.apps_api.delete_namespaced_deployment(
name=deployment.metadata.name, namespace=self.k8s_namespace
)
if self.is_kind():
# Destroy the kind cluster
destroy_cluster(self.kind_cluster_name)

View File

@@ -18,10 +18,10 @@ from dotenv import dotenv_values
import os
from pathlib import Path
import subprocess
from typing import Any, Set, Mapping, List
from typing import Set, Mapping, List
from stack_orchestrator.opts import opts
from stack_orchestrator.util import get_yaml
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names
def _run_command(command: str):
@@ -133,17 +133,6 @@ def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve()
def parsed_pod_files_map_from_file_names(pod_files):
parsed_pod_yaml_map : Any = {}
for pod_file in pod_files:
with open(pod_file, "r") as pod_file_descriptor:
parsed_pod_file = get_yaml().load(pod_file_descriptor)
parsed_pod_yaml_map[pod_file] = parsed_pod_file
if opts.o.debug:
print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}")
return parsed_pod_yaml_map
def _generate_kind_mounts(parsed_pod_files, deployment_dir):
volume_definitions = []
volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files)

View File

@@ -1,59 +1,59 @@
# Copyright © 2022, 2023 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Builds webapp containers
# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc
# TODO: display the available list of containers; allow re-build of either all or specific containers
import hashlib
import click
from dotenv import dotenv_values
from stack_orchestrator.deploy.deployer_factory import getDeployer
@click.command()
@click.option("--image", help="image to deploy", required=True)
@click.option("--deploy-to", default="compose", help="deployment type ([Docker] 'compose' or 'k8s')")
@click.option("--env-file", help="environment file for webapp")
@click.pass_context
def command(ctx, image, deploy_to, env_file):
'''build the specified webapp container'''
env = {}
if env_file:
env = dotenv_values(env_file)
unique_cluster_descriptor = f"{image},{env}"
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
cluster = f"laconic-webapp-{hash}"
deployer = getDeployer(deploy_to,
deployment_dir=None,
deployment_context=None,
compose_files=None,
compose_project_name=cluster,
compose_env_file=None)
container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, detach=True)
# Make configurable?
webappPort = "3000/tcp"
# TODO: This assumes a Docker container object...
if webappPort in container.network_settings.ports:
mapping = container.network_settings.ports[webappPort][0]
print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""")