diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py
index aedc4f3c..1e24794c 100644
--- a/stack_orchestrator/constants.py
+++ b/stack_orchestrator/constants.py
@@ -14,11 +14,3 @@
# along with this program. If not, see .
stack_file_name = "stack.yml"
-compose_deploy_type = "compose"
-k8s_kind_deploy_type = "k8s-kind"
-k8s_deploy_type = "k8s"
-kube_config_key = "kube-config"
-deploy_to_key = "deploy-to"
-image_resigtry_key = "image-registry"
-kind_config_filename = "kind-config.yml"
-kube_config_filename = "kubeconfig.yml"
diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml
index d08c6214..0a83af89 100644
--- a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml
+++ b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml
@@ -56,7 +56,6 @@ services:
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
- SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560}
command: ["bash", "./start-server.sh"]
volumes:
- ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml
index 219688db..f7b75ca5 100644
--- a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml
+++ b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml
@@ -56,7 +56,6 @@ services:
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
- SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560}
command: ["bash", "./start-server.sh"]
volumes:
- ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh
index 1b14f2e3..e2bbdaad 100755
--- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh
+++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh
@@ -16,8 +16,5 @@ WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
-echo "Initializing watcher..."
-yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1))
-
echo "Running server..."
DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js
diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml
index 894a4660..f5355a4b 100644
--- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml
+++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml
@@ -2,6 +2,7 @@
host = "0.0.0.0"
port = 3008
kind = "active"
+ gqlPath = "/"
# Checkpointing state.
checkpointing = true
diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh
index 1b14f2e3..e2bbdaad 100755
--- a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh
+++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh
@@ -16,8 +16,5 @@ WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
-echo "Initializing watcher..."
-yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1))
-
echo "Running server..."
DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js
diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml
index 07880a8d..7cfabedd 100644
--- a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml
+++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml
@@ -2,6 +2,7 @@
host = "0.0.0.0"
port = 3008
kind = "active"
+ gqlPath = "/"
# Checkpointing state.
checkpointing = true
diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml
index 8f5cb7ee..3f9dd43e 100644
--- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml
+++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml
@@ -2,7 +2,7 @@ version: "1.0"
name: merkl-sushiswap-v3
description: "SushiSwap v3 watcher stack"
repos:
- - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.2
+ - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.4
containers:
- cerc/watcher-merkl-sushiswap-v3
pods:
diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml
index 05350996..49c604bf 100644
--- a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml
+++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml
@@ -2,7 +2,7 @@ version: "1.0"
name: sushiswap-v3
description: "SushiSwap v3 watcher stack"
repos:
- - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.2
+ - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.4
containers:
- cerc/watcher-sushiswap-v3
pods:
diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py
index 4b4e7426..b37f3cf0 100644
--- a/stack_orchestrator/deploy/compose/deploy_docker.py
+++ b/stack_orchestrator/deploy/compose/deploy_docker.py
@@ -16,17 +16,14 @@
from pathlib import Path
from python_on_whales import DockerClient, DockerException
from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
-from stack_orchestrator.deploy.deployment_context import DeploymentContext
class DockerDeployer(Deployer):
name: str = "compose"
- type: str
- def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
+ def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
compose_env_file=compose_env_file)
- self.type = type
def up(self, detach, services):
try:
@@ -73,8 +70,9 @@ class DockerDeployer(Deployer):
class DockerDeployerConfigGenerator(DeployerConfigGenerator):
+ config_file_name: str = "kind-config.yml"
- def __init__(self, type: str) -> None:
+ def __init__(self) -> None:
super().__init__()
# Nothing needed at present for the docker deployer
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index 32c13a61..f931a0d0 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -39,7 +39,7 @@ from stack_orchestrator.deploy.deployment_create import setup as deployment_setu
@click.option("--exclude", help="don\'t start these components")
@click.option("--env-file", help="env file to be used")
@click.option("--cluster", help="specify a non-default cluster name")
-@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)")
+@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s)")
@click.pass_context
def command(ctx, include, exclude, env_file, cluster, deploy_to):
'''deploy a stack'''
@@ -62,16 +62,11 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
def create_deploy_context(
- global_context,
- deployment_context: DeploymentContext,
- stack,
- include,
- exclude,
- cluster,
- env_file,
- deploy_to) -> DeployCommandContext:
+ global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deployer):
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
- deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
+ deployment_dir = deployment_context.deployment_dir if deployment_context else None
+ # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
+ deployer = getDeployer(deployer, deployment_dir, compose_files=cluster_context.compose_files,
compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
return DeployCommandContext(stack, cluster_context, deployer)
diff --git a/stack_orchestrator/deploy/deploy_util.py b/stack_orchestrator/deploy/deploy_util.py
index 8b812d3a..9829490d 100644
--- a/stack_orchestrator/deploy/deploy_util.py
+++ b/stack_orchestrator/deploy/deploy_util.py
@@ -14,10 +14,9 @@
# along with this program. If not, see .
import os
-from typing import List, Any
+from typing import List
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list
-from stack_orchestrator.opts import opts
def _container_image_from_service(stack: str, service: str):
@@ -38,33 +37,6 @@ def _container_image_from_service(stack: str, service: str):
return image_name
-def parsed_pod_files_map_from_file_names(pod_files):
- parsed_pod_yaml_map : Any = {}
- for pod_file in pod_files:
- with open(pod_file, "r") as pod_file_descriptor:
- parsed_pod_file = get_yaml().load(pod_file_descriptor)
- parsed_pod_yaml_map[pod_file] = parsed_pod_file
- if opts.o.debug:
- print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}")
- return parsed_pod_yaml_map
-
-
-def images_for_deployment(pod_files: List[str]):
- image_set = set()
- parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
- # Find the set of images in the pods
- for pod_name in parsed_pod_yaml_map:
- pod = parsed_pod_yaml_map[pod_name]
- services = pod["services"]
- for service_name in services:
- service_info = services[service_name]
- image = service_info["image"]
- image_set.add(image)
- if opts.o.debug:
- print(f"image_set: {image_set}")
- return image_set
-
-
def _volumes_to_docker(mounts: List[VolumeMapping]):
# Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")]
result = []
diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py
index 959c1b7a..5d515418 100644
--- a/stack_orchestrator/deploy/deployer_factory.py
+++ b/stack_orchestrator/deploy/deployer_factory.py
@@ -13,24 +13,23 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-from stack_orchestrator import constants
from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator
from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator
def getDeployerConfigGenerator(type: str):
if type == "compose" or type is None:
- return DockerDeployerConfigGenerator(type)
- elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
- return K8sDeployerConfigGenerator(type)
+ return DockerDeployerConfigGenerator()
+ elif type == "k8s":
+ return K8sDeployerConfigGenerator()
else:
print(f"ERROR: deploy-to {type} is not valid")
-def getDeployer(type: str, deployment_context, compose_files, compose_project_name, compose_env_file):
+def getDeployer(type: str, deployment_dir, compose_files, compose_project_name, compose_env_file):
if type == "compose" or type is None:
- return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
- elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
- return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
+ return DockerDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file)
+ elif type == "k8s":
+ return K8sDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file)
else:
print(f"ERROR: deploy-to {type} is not valid")
diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py
index 8d74a62d..e22d7dcc 100644
--- a/stack_orchestrator/deploy/deployment.py
+++ b/stack_orchestrator/deploy/deployment.py
@@ -16,11 +16,8 @@
import click
from pathlib import Path
import sys
-from stack_orchestrator import constants
-from stack_orchestrator.deploy.images import push_images_operation
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context
-from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deployment_context import DeploymentContext
@@ -48,17 +45,13 @@ def command(ctx, dir):
ctx.obj = deployment_context
-def make_deploy_context(ctx) -> DeployCommandContext:
+def make_deploy_context(ctx):
context: DeploymentContext = ctx.obj
stack_file_path = context.get_stack_file()
env_file = context.get_env_file()
cluster_name = context.get_cluster_name()
- if constants.deploy_to_key in context.spec.obj:
- deployment_type = context.spec.obj[constants.deploy_to_key]
- else:
- deployment_type = constants.compose_deploy_type
return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file,
- deployment_type)
+ context.spec.obj["deploy-to"])
@command.command()
@@ -111,14 +104,6 @@ def ps(ctx):
ps_operation(ctx)
-@command.command()
-@click.pass_context
-def push_images(ctx):
- deploy_command_context: DeployCommandContext = make_deploy_context(ctx)
- deployment_context: DeploymentContext = ctx.obj
- push_images_operation(deploy_command_context, deployment_context)
-
-
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: port
@click.pass_context
diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py
index e999c1df..c00c0dc6 100644
--- a/stack_orchestrator/deploy/deployment_create.py
+++ b/stack_orchestrator/deploy/deployment_create.py
@@ -21,17 +21,16 @@ from typing import List
import random
from shutil import copy, copyfile, copytree
import sys
-from stack_orchestrator import constants
from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
- get_pod_script_paths, get_plugin_code_paths, error_exit)
+ get_pod_script_paths, get_plugin_code_paths)
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext
def _make_default_deployment_dir():
- return Path("deployment-001")
+ return "deployment-001"
def _get_ports(stack):
@@ -249,29 +248,17 @@ def _parse_config_variables(variable_values: str):
@click.command()
@click.option("--config", help="Provide config variables for the deployment")
-@click.option("--kube-config", help="Provide a config file for a k8s deployment")
-@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster")
@click.option("--output", required=True, help="Write yaml spec file here")
@click.option("--map-ports-to-host", required=False,
help="Map ports to the host as one of: any-variable-random (default), "
"localhost-same, any-same, localhost-fixed-random, any-fixed-random")
@click.pass_context
-def init(ctx, config, kube_config, image_registry, output, map_ports_to_host):
+def init(ctx, config, output, map_ports_to_host):
yaml = get_yaml()
stack = global_options(ctx).stack
debug = global_options(ctx).debug
- deployer_type = ctx.obj.deployer.type
default_spec_file_content = call_stack_deploy_init(ctx.obj)
- spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
- if deployer_type == "k8s":
- spec_file_content.update({constants.kube_config_key: kube_config})
- spec_file_content.update({constants.image_resigtry_key: image_registry})
- else:
- # Check for --kube-config supplied for non-relevant deployer types
- if kube_config is not None:
- error_exit(f"--kube-config is not allowed with a {deployer_type} deployment")
- if image_registry is not None:
- error_exit(f"--image-registry is not allowed with a {deployer_type} deployment")
+ spec_file_content = {"stack": stack, "deploy-to": ctx.obj.deployer.name}
if default_spec_file_content:
spec_file_content.update(default_spec_file_content)
config_variables = _parse_config_variables(config)
@@ -309,12 +296,6 @@ def _write_config_file(spec_file: Path, config_env_file: Path):
output_file.write(f"{variable_name}={variable_value}\n")
-def _write_kube_config_file(external_path: Path, internal_path: Path):
- if not external_path.exists():
- error_exit(f"Kube config file {external_path} does not exist")
- copyfile(external_path, internal_path)
-
-
def _copy_files_to_directory(file_paths: List[Path], directory: Path):
for path in file_paths:
# Using copy to preserve the execute bit
@@ -329,34 +310,29 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path):
@click.option("--initial-peers", help="Initial set of persistent peers")
@click.pass_context
def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
+ # This function fails with a useful error message if the file doesn't exist
parsed_spec = get_parsed_deployment_spec(spec_file)
stack_name = parsed_spec["stack"]
- deployment_type = parsed_spec[constants.deploy_to_key]
stack_file = get_stack_file_path(stack_name)
parsed_stack = get_parsed_stack_config(stack_name)
if global_options(ctx).debug:
print(f"parsed spec: {parsed_spec}")
if deployment_dir is None:
- deployment_dir_path = _make_default_deployment_dir()
- else:
- deployment_dir_path = Path(deployment_dir)
- if deployment_dir_path.exists():
- error_exit(f"{deployment_dir_path} already exists")
- os.mkdir(deployment_dir_path)
+ deployment_dir = _make_default_deployment_dir()
+ if os.path.exists(deployment_dir):
+ print(f"Error: {deployment_dir} already exists")
+ sys.exit(1)
+ os.mkdir(deployment_dir)
# Copy spec file and the stack file into the deployment dir
- copyfile(spec_file, deployment_dir_path.joinpath("spec.yml"))
- copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file)))
+ copyfile(spec_file, os.path.join(deployment_dir, "spec.yml"))
+ copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
# Copy any config varibles from the spec file into an env file suitable for compose
- _write_config_file(spec_file, deployment_dir_path.joinpath("config.env"))
- # Copy any k8s config file into the deployment dir
- if deployment_type == "k8s":
- _write_kube_config_file(Path(parsed_spec[constants.kube_config_key]),
- deployment_dir_path.joinpath(constants.kube_config_filename))
+ _write_config_file(spec_file, os.path.join(deployment_dir, "config.env"))
# Copy the pod files into the deployment dir, fixing up content
pods = get_pod_list(parsed_stack)
- destination_compose_dir = deployment_dir_path.joinpath("compose")
+ destination_compose_dir = os.path.join(deployment_dir, "compose")
os.mkdir(destination_compose_dir)
- destination_pods_dir = deployment_dir_path.joinpath("pods")
+ destination_pods_dir = os.path.join(deployment_dir, "pods")
os.mkdir(destination_pods_dir)
data_dir = Path(__file__).absolute().parent.parent.joinpath("data")
yaml = get_yaml()
@@ -364,12 +340,12 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
pod_file_path = get_pod_file_path(parsed_stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
- destination_pod_dir = destination_pods_dir.joinpath(pod)
+ destination_pod_dir = os.path.join(destination_pods_dir, pod)
os.mkdir(destination_pod_dir)
if global_options(ctx).debug:
print(f"extra config dirs: {extra_config_dirs}")
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
- with open(destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w") as output_file:
+ with open(os.path.join(destination_compose_dir, "docker-compose-%s.yml" % pod), "w") as output_file:
yaml.dump(parsed_pod_file, output_file)
# Copy the config files for the pod, if any
config_dirs = {pod}
@@ -377,13 +353,13 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
for config_dir in config_dirs:
source_config_dir = data_dir.joinpath("config", config_dir)
if os.path.exists(source_config_dir):
- destination_config_dir = deployment_dir_path.joinpath("config", config_dir)
+ destination_config_dir = os.path.join(deployment_dir, "config", config_dir)
# If the same config dir appears in multiple pods, it may already have been copied
if not os.path.exists(destination_config_dir):
copytree(source_config_dir, destination_config_dir)
# Copy the script files for the pod, if any
if pod_has_scripts(parsed_stack, pod):
- destination_script_dir = destination_pod_dir.joinpath("scripts")
+ destination_script_dir = os.path.join(destination_pod_dir, "scripts")
os.mkdir(destination_script_dir)
script_paths = get_pod_script_paths(parsed_stack, pod)
_copy_files_to_directory(script_paths, destination_script_dir)
@@ -393,11 +369,11 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
deployment_command_context = ctx.obj
deployment_command_context.stack = stack_name
deployment_context = DeploymentContext()
- deployment_context.init(deployment_dir_path)
+ deployment_context.init(Path(deployment_dir))
# Call the deployer to generate any deployer-specific files (e.g. for kind)
- deployer_config_generator = getDeployerConfigGenerator(deployment_type)
- # TODO: make deployment_dir_path a Path above
- deployer_config_generator.generate(deployment_dir_path)
+ deployer_config_generator = getDeployerConfigGenerator(parsed_spec["deploy-to"])
+ # TODO: make deployment_dir a Path above
+ deployer_config_generator.generate(Path(deployment_dir))
call_stack_deploy_create(deployment_context, [network_dir, initial_peers])
diff --git a/stack_orchestrator/deploy/images.py b/stack_orchestrator/deploy/images.py
deleted file mode 100644
index ddbb33f7..00000000
--- a/stack_orchestrator/deploy/images.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright © 2023 Vulcanize
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-from typing import Set
-
-from python_on_whales import DockerClient
-
-from stack_orchestrator import constants
-from stack_orchestrator.opts import opts
-from stack_orchestrator.deploy.deployment_context import DeploymentContext
-from stack_orchestrator.deploy.deploy_types import DeployCommandContext
-from stack_orchestrator.deploy.deploy_util import images_for_deployment
-
-
-def _image_needs_pushed(image: str):
- # TODO: this needs to be more intelligent
- return image.endswith(":local")
-
-
-def remote_tag_for_image(image: str, remote_repo_url: str):
- # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
- (org, image_name_with_version) = image.split("/")
- (image_name, image_version) = image_name_with_version.split(":")
- if image_version == "local":
- return f"{remote_repo_url}/{image_name}:deploy"
- else:
- return image
-
-
-# TODO: needs lots of error handling
-def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext):
- # Get the list of images for the stack
- cluster_context = command_context.cluster_context
- images: Set[str] = images_for_deployment(cluster_context.compose_files)
- # Tag the images for the remote repo
- remote_repo_url = deployment_context.spec.obj[constants.image_resigtry_key]
- docker = DockerClient()
- for image in images:
- if _image_needs_pushed(image):
- remote_tag = remote_tag_for_image(image, remote_repo_url)
- if opts.o.verbose:
- print(f"Tagging {image} to {remote_tag}")
- docker.image.tag(image, remote_tag)
- # Run docker push commands to upload
- for image in images:
- if _image_needs_pushed(image):
- remote_tag = remote_tag_for_image(image, remote_repo_url)
- if opts.o.verbose:
- print(f"Pushing image {remote_tag}")
- docker.image.push(remote_tag)
diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py
index ff052bf9..9275db2b 100644
--- a/stack_orchestrator/deploy/k8s/cluster_info.py
+++ b/stack_orchestrator/deploy/k8s/cluster_info.py
@@ -18,30 +18,34 @@ from typing import Any, List, Set
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
-from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path
+from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names, get_node_pv_mount_path
from stack_orchestrator.deploy.k8s.helpers import env_var_map_from_file, envs_from_environment_variables_map
-from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
-from stack_orchestrator.deploy.images import remote_tag_for_image
class ClusterInfo:
- parsed_pod_yaml_map: Any
+ parsed_pod_yaml_map: Any = {}
image_set: Set[str] = set()
app_name: str = "test-app"
deployment_name: str = "test-deployment"
environment_variables: DeployEnvVars
- remote_image_repo: str
def __init__(self) -> None:
pass
- def int(self, pod_files: List[str], compose_env_file, remote_image_repo):
+ def int(self, pod_files: List[str], compose_env_file):
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
# Find the set of images in the pods
- self.image_set = images_for_deployment(pod_files)
+ for pod_name in self.parsed_pod_yaml_map:
+ pod = self.parsed_pod_yaml_map[pod_name]
+ services = pod["services"]
+ for service_name in services:
+ service_info = services[service_name]
+ image = service_info["image"]
+ self.image_set.add(image)
+ if opts.o.debug:
+ print(f"image_set: {self.image_set}")
self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file))
- self.remote_image_repo = remote_image_repo
if (opts.o.debug):
print(f"Env vars: {self.environment_variables.map}")
@@ -95,12 +99,10 @@ class ClusterInfo:
container_name = service_name
service_info = services[service_name]
image = service_info["image"]
- # Re-write the image tag for remote deployment
- image_to_use = remote_tag_for_image(image, self.remote_image_repo) if self.remote_image_repo is not None else image
volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
container = client.V1Container(
name=container_name,
- image=image_to_use,
+ image=image,
env=envs_from_environment_variables_map(self.environment_variables.map),
ports=[client.V1ContainerPort(container_port=80)],
volume_mounts=volume_mounts,
diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py
index 3d0ef3ff..627d6e0b 100644
--- a/stack_orchestrator/deploy/k8s/deploy_k8s.py
+++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py
@@ -16,56 +16,44 @@
from pathlib import Path
from kubernetes import client, config
-from stack_orchestrator import constants
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind
from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
from stack_orchestrator.opts import opts
-from stack_orchestrator.deploy.deployment_context import DeploymentContext
class K8sDeployer(Deployer):
name: str = "k8s"
- type: str
core_api: client.CoreV1Api
apps_api: client.AppsV1Api
k8s_namespace: str = "default"
kind_cluster_name: str
cluster_info : ClusterInfo
deployment_dir: Path
- deployment_context: DeploymentContext
- def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
+ def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
if (opts.o.debug):
- print(f"Deployment dir: {deployment_context.deployment_dir}")
+ print(f"Deployment dir: {deployment_dir}")
print(f"Compose files: {compose_files}")
print(f"Project name: {compose_project_name}")
print(f"Env file: {compose_env_file}")
- print(f"Type: {type}")
- self.type = type
- self.deployment_dir = deployment_context.deployment_dir
- self.deployment_context = deployment_context
+ self.deployment_dir = deployment_dir
self.kind_cluster_name = compose_project_name
self.cluster_info = ClusterInfo()
- self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec.obj[constants.image_resigtry_key])
+ self.cluster_info.int(compose_files, compose_env_file)
def connect_api(self):
- if self.is_kind():
- config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
- else:
- # Get the config file and pass to load_kube_config()
- config.load_kube_config(config_file=self.deployment_dir.joinpath(constants.kube_config_filename).as_posix())
+ config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
self.core_api = client.CoreV1Api()
self.apps_api = client.AppsV1Api()
def up(self, detach, services):
- if self.is_kind():
- # Create the kind cluster
- create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
- # Ensure the referenced containers are copied into kind
- load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
+ # Create the kind cluster
+ create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath("kind-config.yml"))
self.connect_api()
+ # Ensure the referenced containers are copied into kind
+ load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
# Create the host-path-mounted PVs for this deployment
pvs = self.cluster_info.get_pvs()
@@ -100,38 +88,9 @@ class K8sDeployer(Deployer):
{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
def down(self, timeout, volumes):
- self.connect_api()
# Delete the k8s objects
- # Create the host-path-mounted PVs for this deployment
- pvs = self.cluster_info.get_pvs()
- for pv in pvs:
- if opts.o.debug:
- print(f"Deleting this pv: {pv}")
- pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name)
- if opts.o.debug:
- print("PV deleted:")
- print(f"{pv_resp}")
-
- # Figure out the PVCs for this deployment
- pvcs = self.cluster_info.get_pvcs()
- for pvc in pvcs:
- if opts.o.debug:
- print(f"Deleting this pvc: {pvc}")
- pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(name=pvc.metadata.name, namespace=self.k8s_namespace)
- if opts.o.debug:
- print("PVCs deleted:")
- print(f"{pvc_resp}")
- # Process compose files into a Deployment
- deployment = self.cluster_info.get_deployment()
- # Create the k8s objects
- if opts.o.debug:
- print(f"Deleting this deployment: {deployment}")
- self.apps_api.delete_namespaced_deployment(
- name=deployment.metadata.name, namespace=self.k8s_namespace
- )
- if self.is_kind():
- # Destroy the kind cluster
- destroy_cluster(self.kind_cluster_name)
+ # Destroy the kind cluster
+ destroy_cluster(self.kind_cluster_name)
def ps(self):
self.connect_api()
@@ -165,26 +124,20 @@ class K8sDeployer(Deployer):
# We need to figure out how to do this -- check why we're being called first
pass
- def is_kind(self):
- return self.type == "k8s-kind"
-
class K8sDeployerConfigGenerator(DeployerConfigGenerator):
- type: str
+ config_file_name: str = "kind-config.yml"
- def __init__(self, type: str) -> None:
- self.type = type
+ def __init__(self) -> None:
super().__init__()
def generate(self, deployment_dir: Path):
- # No need to do this for the remote k8s case
- if self.type == "k8s-kind":
- # Check the file isn't already there
- # Get the config file contents
- content = generate_kind_config(deployment_dir)
- if opts.o.debug:
- print(f"kind config is: {content}")
- config_file = deployment_dir.joinpath(constants.kind_config_filename)
- # Write the file
- with open(config_file, "w") as output_file:
- output_file.write(content)
+ # Check the file isn't already there
+ # Get the config file contents
+ content = generate_kind_config(deployment_dir)
+ if opts.o.debug:
+ print(f"kind config is: {content}")
+ config_file = deployment_dir.joinpath(self.config_file_name)
+ # Write the file
+ with open(config_file, "w") as output_file:
+ output_file.write(content)
diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py
index 82a33792..db1ef075 100644
--- a/stack_orchestrator/deploy/k8s/helpers.py
+++ b/stack_orchestrator/deploy/k8s/helpers.py
@@ -18,10 +18,10 @@ from dotenv import dotenv_values
import os
from pathlib import Path
import subprocess
-from typing import Set, Mapping, List
+from typing import Any, Set, Mapping, List
from stack_orchestrator.opts import opts
-from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names
+from stack_orchestrator.util import get_yaml
def _run_command(command: str):
@@ -133,6 +133,17 @@ def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Pat
return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve()
+def parsed_pod_files_map_from_file_names(pod_files):
+ parsed_pod_yaml_map : Any = {}
+ for pod_file in pod_files:
+ with open(pod_file, "r") as pod_file_descriptor:
+ parsed_pod_file = get_yaml().load(pod_file_descriptor)
+ parsed_pod_yaml_map[pod_file] = parsed_pod_file
+ if opts.o.debug:
+ print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}")
+ return parsed_pod_yaml_map
+
+
def _generate_kind_mounts(parsed_pod_files, deployment_dir):
volume_definitions = []
volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files)
diff --git a/stack_orchestrator/deploy/run_webapp.py b/stack_orchestrator/deploy/run_webapp.py
index aa22acdf..8b1073b1 100644
--- a/stack_orchestrator/deploy/run_webapp.py
+++ b/stack_orchestrator/deploy/run_webapp.py
@@ -1,59 +1,59 @@
-# Copyright © 2022, 2023 Vulcanize
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-# Builds webapp containers
-
-# env vars:
-# CERC_REPO_BASE_DIR defaults to ~/cerc
-
-# TODO: display the available list of containers; allow re-build of either all or specific containers
-
-import hashlib
-import click
-
-from dotenv import dotenv_values
-from stack_orchestrator.deploy.deployer_factory import getDeployer
-
-
-@click.command()
-@click.option("--image", help="image to deploy", required=True)
-@click.option("--deploy-to", default="compose", help="deployment type ([Docker] 'compose' or 'k8s')")
-@click.option("--env-file", help="environment file for webapp")
-@click.pass_context
-def command(ctx, image, deploy_to, env_file):
- '''build the specified webapp container'''
-
- env = {}
- if env_file:
- env = dotenv_values(env_file)
-
- unique_cluster_descriptor = f"{image},{env}"
- hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
- cluster = f"laconic-webapp-{hash}"
-
- deployer = getDeployer(deploy_to,
- deployment_context=None,
- compose_files=None,
- compose_project_name=cluster,
- compose_env_file=None)
-
- container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, detach=True)
-
- # Make configurable?
- webappPort = "3000/tcp"
- # TODO: This assumes a Docker container object...
- if webappPort in container.network_settings.ports:
- mapping = container.network_settings.ports[webappPort][0]
- print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""")
+# Copyright © 2022, 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+# Builds webapp containers
+
+# env vars:
+# CERC_REPO_BASE_DIR defaults to ~/cerc
+
+# TODO: display the available list of containers; allow re-build of either all or specific containers
+
+import hashlib
+import click
+
+from dotenv import dotenv_values
+from stack_orchestrator.deploy.deployer_factory import getDeployer
+
+
+@click.command()
+@click.option("--image", help="image to deploy", required=True)
+@click.option("--deploy-to", default="compose", help="deployment type ([Docker] 'compose' or 'k8s')")
+@click.option("--env-file", help="environment file for webapp")
+@click.pass_context
+def command(ctx, image, deploy_to, env_file):
+ '''build the specified webapp container'''
+
+ env = {}
+ if env_file:
+ env = dotenv_values(env_file)
+
+ unique_cluster_descriptor = f"{image},{env}"
+ hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
+ cluster = f"laconic-webapp-{hash}"
+
+ deployer = getDeployer(deploy_to,
+ deployment_dir=None,
+ compose_files=None,
+ compose_project_name=cluster,
+ compose_env_file=None)
+
+ container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, detach=True)
+
+ # Make configurable?
+ webappPort = "3000/tcp"
+ # TODO: This assumes a Docker container object...
+ if webappPort in container.network_settings.ports:
+ mapping = container.network_settings.ports[webappPort][0]
+ print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""")
diff --git a/tests/k8s-deploy/run-deploy-test.sh b/tests/k8s-deploy/run-deploy-test.sh
index b7ee9dd0..91c7890c 100755
--- a/tests/k8s-deploy/run-deploy-test.sh
+++ b/tests/k8s-deploy/run-deploy-test.sh
@@ -21,7 +21,7 @@ mkdir -p $CERC_REPO_BASE_DIR
# Test basic stack-orchestrator deploy
test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
-$TEST_TARGET_SO --stack test deploy --deploy-to k8s-kind init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED
+$TEST_TARGET_SO --stack test deploy --deploy-to k8s init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
echo "deploy init test: spec file not present"