diff --git a/app/base.py b/app/base.py
index e70f7289..940d488f 100644
--- a/app/base.py
+++ b/app/base.py
@@ -13,22 +13,65 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import os
+from abc import ABC, abstractmethod
+from .deploy_system import get_stack_status
+
+
def get_stack(config, stack):
- return base_stack(config, stack)
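+    # Factory method: return a stack-specific subclass where one exists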
+ if stack == "package-registry":
+ return package_registry_stack(config, stack)
+ else:
+ return base_stack(config, stack)
-class base_stack():
+class base_stack(ABC):
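+    """Abstract base class for named stacks; instances are created via get_stack()."""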
def __init__(self, config, stack):
self.config = config
self.stack = stack
+ @abstractmethod
def ensure_available(self):
- if self.config.verbose:
- print(f"Checking that base stack {self.stack} is available")
- return 1
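+        """Check that the stack is available for use; return True on success, False otherwise."""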
+ pass
+
+ @abstractmethod
+ def get_url(self):
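+        """Return the URL at which the stack's service can be reached."""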
+ pass
+
+
+class package_registry_stack(base_stack):
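+    """An npm package registry: either an external one named in CERC_NPM_REGISTRY_URL,
+    or the local package-registry stack."""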
+
+ def ensure_available(self):
+ self.url = ""
+ # Check if we were given an external registry URL
+ url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
+ if url_from_environment:
+ if self.config.verbose:
+ print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
+ self.url = url_from_environment
+ else:
+ # Otherwise we expect to use the local package-registry stack
+ # First check if the stack is up
+ registry_running = get_stack_status(self.config, "package-registry")
+ if registry_running:
+ # If it is available, get its mapped port and construct its URL
+ if self.config.debug:
+                    print("Found the local package registry stack running")
+ # TODO: get url from deploy-stack
+ self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
+ else:
+                # If not, print a message about how to start it and return failure to the caller
+ print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
+ print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
+ return False
+ return True
def get_url(self):
- return "http://gitea.local:3000/api/packages/cerc-io/npm/"
+ return self.url
-# TODO: finish this implementation for the npm package registry
+# Temporary helper functions while we figure out a good interface to the stack deploy code
+
+
+def _get_stack_mapped_port(stack, service, exposed_port):
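+    # Stub pending a real interface to the deploy code: assumes the service's
+    # exposed port is always mapped to host port 3000 (the gitea default)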
+ return 3000
diff --git a/app/build_npms.py b/app/build_npms.py
index d794d027..101fde3e 100644
--- a/app/build_npms.py
+++ b/app/build_npms.py
@@ -44,9 +44,16 @@ def command(ctx, include, exclude):
# build-npms depends on having access to a writable package registry
# so we check here that it is available
- package_registry_stack = get_stack(ctx.obj, 'package-registry')
- package_registry_stack.ensure_available()
- npm_registry_url = package_registry_stack.get_url('package-registry')
+ package_registry_stack = get_stack(ctx.obj, "package-registry")
+ registry_available = package_registry_stack.ensure_available()
+ if not registry_available:
+ print("FATAL: no npm registry available for build-npms command")
+ sys.exit(1)
+ npm_registry_url = package_registry_stack.get_url()
+    npm_registry_auth_token = config("CERC_NPM_AUTH_TOKEN", default=None)
+    if not npm_registry_auth_token:
+ print("FATAL: CERC_NPM_AUTH_TOKEN is not defined")
+ sys.exit(1)
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
@@ -86,7 +93,7 @@ def command(ctx, include, exclude):
if not dry_run:
if verbose:
print(f"Executing: {build_command}")
- envs = {"CERC_NPM_AUTH_TOKEN": os.environ["CERC_NPM_AUTH_TOKEN"]} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
+        envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_auth_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
try:
docker.run("cerc/builder-js",
remove=True,
diff --git a/app/data/stacks/build-support/stack.yml b/app/data/stacks/build-support/stack.yml
new file mode 100644
index 00000000..e4efb457
--- /dev/null
+++ b/app/data/stacks/build-support/stack.yml
@@ -0,0 +1,6 @@
+version: "1.1"
+name: build-support
+description: "Build Support Components"
+containers:
+ - cerc/builder-js
+ - cerc/builder-gerbil
diff --git a/app/deploy_system.py b/app/deploy_system.py
index 798e7b70..bff573bd 100644
--- a/app/deploy_system.py
+++ b/app/deploy_system.py
@@ -16,6 +16,7 @@
# Deploys the system components using docker-compose
import hashlib
+import copy
import os
import sys
from decouple import config
@@ -46,74 +47,10 @@ def command(ctx, include, exclude, cluster, command, extra_args):
dry_run = ctx.obj.dry_run
stack = ctx.obj.stack
- if local_stack:
- dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
- print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
- else:
- dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
-
- # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
- compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
-
- if cluster is None:
- # Create default unique, stable cluster name from confile file path
- # TODO: change this to the config file path
- path = os.path.realpath(sys.argv[0])
- hash = hashlib.md5(path.encode()).hexdigest()
- cluster = f"laconic-{hash}"
- if verbose:
- print(f"Using cluster name: {cluster}")
-
- # See: https://stackoverflow.com/a/20885799/1701505
- from . import data
- with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
- all_pods = pod_list_file.read().splitlines()
-
- pods_in_scope = []
- if stack:
- stack_config = get_parsed_stack_config(stack)
- # TODO: syntax check the input here
- pods_in_scope = stack_config['pods']
- else:
- pods_in_scope = all_pods
-
- # Convert all pod definitions to v1.1 format
- pods_in_scope = _convert_to_new_format(pods_in_scope)
-
- if verbose:
- print(f"Pods: {pods_in_scope}")
-
- # Construct a docker compose command suitable for our purpose
-
- compose_files = []
- pre_start_commands = []
- post_start_commands = []
- for pod in pods_in_scope:
- pod_name = pod["name"]
- pod_repository = pod["repository"]
- pod_path = pod["path"]
- if include_exclude_check(pod_name, include, exclude):
- if pod_repository is None or pod_repository == "internal":
- compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
- else:
- pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
- compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml")
- pod_pre_start_command = pod["pre_start_command"]
- pod_post_start_command = pod["post_start_command"]
- if pod_pre_start_command is not None:
- pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
- if pod_post_start_command is not None:
- post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
- compose_files.append(compose_file_name)
- else:
- if verbose:
- print(f"Excluding: {pod_name}")
-
- if verbose:
- print(f"files: {compose_files}")
+ cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster)
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
- docker = DockerClient(compose_files=compose_files, compose_project_name=cluster)
+ docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
extra_args_list = list(extra_args) or None
@@ -123,11 +60,11 @@ def command(ctx, include, exclude, cluster, command, extra_args):
os.environ["CERC_SCRIPT_DEBUG"] = "true"
if verbose:
print(f"Running compose up for extra_args: {extra_args_list}")
- for pre_start_command in pre_start_commands:
- _run_command(ctx.obj, cluster, pre_start_command)
+ for pre_start_command in cluster_context.pre_start_commands:
+ _run_command(ctx.obj, cluster_context.cluster, pre_start_command)
docker.compose.up(detach=True, services=extra_args_list)
- for post_start_command in post_start_commands:
- _run_command(ctx.obj, cluster, post_start_command)
+ for post_start_command in cluster_context.post_start_commands:
+ _run_command(ctx.obj, cluster_context.cluster, post_start_command)
elif command == "down":
if verbose:
print("Running compose down")
@@ -181,6 +118,106 @@ def command(ctx, include, exclude, cluster, command, extra_args):
docker.compose.logs()
+def get_stack_status(ctx, stack):
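+    """Return True if the named stack has containers running, False otherwise."""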
+
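+    # Shallow-copy the context so we can point it at a different stack without
+    # mutating the caller's ctx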
+ ctx_copy = copy.copy(ctx)
+ ctx_copy.stack = stack
+
+ cluster_context = _make_cluster_context(ctx_copy, None, None, None)
+ docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
+ # TODO: refactor to avoid duplicating this code above
+ if ctx.verbose:
+ print("Running compose ps")
+ container_list = docker.compose.ps()
+ if len(container_list) > 0:
+ if ctx.debug:
+ print(f"Container list from compose ps: {container_list}")
+ return True
+ else:
+ if ctx.debug:
+ print("No containers found from compose ps")
+        return False
+
+
+def _make_cluster_context(ctx, include, exclude, cluster):
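+    """Assemble the cluster name, compose files and pre/post start commands for the pods in scope."""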
+
+ if ctx.local_stack:
+ dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
+ print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+ else:
+ dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+
+ # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+ compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
+
+ if cluster is None:
+        # Create a default unique, stable cluster name derived from the script path
+ # TODO: change this to the config file path
+ path = os.path.realpath(sys.argv[0])
+ hash = hashlib.md5(path.encode()).hexdigest()
+ cluster = f"laconic-{hash}"
+ if ctx.verbose:
+ print(f"Using cluster name: {cluster}")
+
+ # See: https://stackoverflow.com/a/20885799/1701505
+ from . import data
+ with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
+ all_pods = pod_list_file.read().splitlines()
+
+ pods_in_scope = []
+ if ctx.stack:
+ stack_config = get_parsed_stack_config(ctx.stack)
+ # TODO: syntax check the input here
+ pods_in_scope = stack_config['pods']
+ else:
+ pods_in_scope = all_pods
+
+ # Convert all pod definitions to v1.1 format
+ pods_in_scope = _convert_to_new_format(pods_in_scope)
+
+ if ctx.verbose:
+ print(f"Pods: {pods_in_scope}")
+
+ # Construct a docker compose command suitable for our purpose
+
+ compose_files = []
+ pre_start_commands = []
+ post_start_commands = []
+ for pod in pods_in_scope:
+ pod_name = pod["name"]
+ pod_repository = pod["repository"]
+ pod_path = pod["path"]
+ if include_exclude_check(pod_name, include, exclude):
+ if pod_repository is None or pod_repository == "internal":
+ compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+ else:
+ pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
+ compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml")
+ pod_pre_start_command = pod["pre_start_command"]
+ pod_post_start_command = pod["post_start_command"]
+ if pod_pre_start_command is not None:
+ pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
+ if pod_post_start_command is not None:
+ post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
+ compose_files.append(compose_file_name)
+ else:
+ if ctx.verbose:
+ print(f"Excluding: {pod_name}")
+
+ if ctx.verbose:
+ print(f"files: {compose_files}")
+
+ return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands)
+
+
+class cluster_context:
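+    """Bundles a cluster name with its compose files and start-up commands."""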
+ def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands) -> None:
+ self.cluster = cluster
+ self.compose_files = compose_files
+ self.pre_start_commands = pre_start_commands
+ self.post_start_commands = post_start_commands
+
+
def _convert_to_new_format(old_pod_array):
new_pod_array = []
for old_pod in old_pod_array:
diff --git a/requirements.txt b/requirements.txt
index 128ee3a0..8249a55a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
python-decouple>=3.6
GitPython>=3.1.27
tqdm>=4.64.0
-python-on-whales>=0.52.0
+python-on-whales>=0.58.0
click>=8.1.3
pyyaml>=6.0