From 7d51e4b9aa8bb3b876c9e8628e2aa6bd252dbd05 Mon Sep 17 00:00:00 2001
From: David Boreham
Date: Sun, 19 Feb 2023 17:46:47 -0700
Subject: [PATCH 1/5] Update python on whales

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 128ee3a0..8249a55a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 python-decouple>=3.6
 GitPython>=3.1.27
 tqdm>=4.64.0
-python-on-whales>=0.52.0
+python-on-whales>=0.58.0
 click>=8.1.3
 pyyaml>=6.0
-- 
2.45.2

From 68293cbaa3fc8f250e479773859c95f563f04beb Mon Sep 17 00:00:00 2001
From: David Boreham
Date: Mon, 20 Feb 2023 06:09:35 -0700
Subject: [PATCH 2/5] Initial implementation

---
 app/base.py          |  51 ++++++++++++--
 app/deploy_system.py | 155 ++++++++++++++++++++++++-------------------
 2 files changed, 131 insertions(+), 75 deletions(-)

diff --git a/app/base.py b/app/base.py
index e70f7289..52528a6d 100644
--- a/app/base.py
+++ b/app/base.py
@@ -13,22 +13,59 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import os
+from abc import ABC, abstractmethod
+
 
 def get_stack(config, stack):
-    return base_stack(config, stack)
+    if stack == "package-registry":
+        return package_registry_stack(config, stack)
+    else:
+        return base_stack(config, stack)
 
 
-class base_stack():
+class base_stack(ABC):
 
     def __init__(self, config, stack):
         self.config = config
         self.stack = stack
 
+    @abstractmethod
     def ensure_available(self):
-        if self.config.verbose:
-            print(f"Checking that base stack {self.stack} is available")
-        return 1
+        pass
+
+    @abstractmethod
+    def get_url(self):
+        pass
+
+
+class package_registry_stack(base_stack):
+
+    def ensure_available(self):
+        self.url = ""
+        # Check if we were given an external registry URL
+        url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
+        if url_from_environment:
+            if self.config.verbose:
+                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
+            self.url = url_from_environment
+        else:
+            # Otherwise we expect to use the local package-registry stack
+            # First check if the stack is up
+            # If not, print a message about how to start it and return fail to the caller
+            return False
+            # If it is available, get its mapped port and construct its URL
+            self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
+        return True
 
     def get_url(self):
-        return "http://gitea.local:3000/api/packages/cerc-io/npm/"
+        return self.url
 
-# TODO: finish this implementation for the npm package registry
+
+# Temporary helper functions while we figure out a good interface to the stack deploy code
+
+
+def _is_stack_running(stack):
+    return True
+
+
+def _get_stack_mapped_port(stack, service, exposed_port):
+    return 3000
diff --git a/app/deploy_system.py b/app/deploy_system.py
index 798e7b70..3fd42f58 100644
--- a/app/deploy_system.py
+++ b/app/deploy_system.py
@@ -46,74 +46,10 @@ def command(ctx, include, exclude, cluster, command, extra_args):
     dry_run = ctx.obj.dry_run
     stack = ctx.obj.stack
 
-    if local_stack:
-        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
-    else:
-        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
-
-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-    compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
-
-    if cluster is None:
-        # Create default unique, stable cluster name from confile file path
-        # TODO: change this to the config file path
-        path = os.path.realpath(sys.argv[0])
-        hash = hashlib.md5(path.encode()).hexdigest()
-        cluster = f"laconic-{hash}"
-        if verbose:
-            print(f"Using cluster name: {cluster}")
-
-    # See: https://stackoverflow.com/a/20885799/1701505
-    from . import data
-    with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
-        all_pods = pod_list_file.read().splitlines()
-
-    pods_in_scope = []
-    if stack:
-        stack_config = get_parsed_stack_config(stack)
-        # TODO: syntax check the input here
-        pods_in_scope = stack_config['pods']
-    else:
-        pods_in_scope = all_pods
-
-    # Convert all pod definitions to v1.1 format
-    pods_in_scope = _convert_to_new_format(pods_in_scope)
-
-    if verbose:
-        print(f"Pods: {pods_in_scope}")
-
-    # Construct a docker compose command suitable for our purpose
-
-    compose_files = []
-    pre_start_commands = []
-    post_start_commands = []
-    for pod in pods_in_scope:
-        pod_name = pod["name"]
-        pod_repository = pod["repository"]
-        pod_path = pod["path"]
-        if include_exclude_check(pod_name, include, exclude):
-            if pod_repository is None or pod_repository == "internal":
-                compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
-            else:
-                pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
-                compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml")
-                pod_pre_start_command = pod["pre_start_command"]
-                pod_post_start_command = pod["post_start_command"]
-                if pod_pre_start_command is not None:
-                    pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
-                if pod_post_start_command is not None:
-                    post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
-            compose_files.append(compose_file_name)
-        else:
-            if verbose:
-                print(f"Excluding: {pod_name}")
-
-    if verbose:
-        print(f"files: {compose_files}")
+    cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster)
 
     # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
-    docker = DockerClient(compose_files=compose_files, compose_project_name=cluster)
+    docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
 
     extra_args_list = list(extra_args) or None
 
@@ -123,10 +59,10 @@ def command(ctx, include, exclude, cluster, command, extra_args):
         os.environ["CERC_SCRIPT_DEBUG"] = "true"
         if verbose:
             print(f"Running compose up for extra_args: {extra_args_list}")
-        for pre_start_command in pre_start_commands:
+        for pre_start_command in cluster_context.pre_start_commands:
             _run_command(ctx.obj, cluster, pre_start_command)
         docker.compose.up(detach=True, services=extra_args_list)
-        for post_start_command in post_start_commands:
+        for post_start_command in cluster_context.post_start_commands:
             _run_command(ctx.obj, cluster, post_start_command)
     elif command == "down":
         if verbose:
@@ -181,6 +117,89 @@ def command(ctx, include, exclude, cluster, command, extra_args):
             docker.compose.logs()
 
 
+def get_stack_status(stack):
+    pass
+
+
+def _make_cluster_context(ctx, include, exclude, cluster):
+
+    if ctx.local_stack:
+        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
+        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+    else:
+        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+
+    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+    compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
+
+    if cluster is None:
+        # Create default unique, stable cluster name from config file path
+        # TODO: change this to the config file path
+        path = os.path.realpath(sys.argv[0])
+        hash = hashlib.md5(path.encode()).hexdigest()
+        cluster = f"laconic-{hash}"
+        if ctx.verbose:
+            print(f"Using cluster name: {cluster}")
+
+    # See: https://stackoverflow.com/a/20885799/1701505
+    from . import data
+    with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
+        all_pods = pod_list_file.read().splitlines()
+
+    pods_in_scope = []
+    if ctx.stack:
+        stack_config = get_parsed_stack_config(ctx.stack)
+        # TODO: syntax check the input here
+        pods_in_scope = stack_config['pods']
+    else:
+        pods_in_scope = all_pods
+
+    # Convert all pod definitions to v1.1 format
+    pods_in_scope = _convert_to_new_format(pods_in_scope)
+
+    if ctx.verbose:
+        print(f"Pods: {pods_in_scope}")
+
+    # Construct a docker compose command suitable for our purpose
+
+    compose_files = []
+    pre_start_commands = []
+    post_start_commands = []
+    for pod in pods_in_scope:
+        pod_name = pod["name"]
+        pod_repository = pod["repository"]
+        pod_path = pod["path"]
+        if include_exclude_check(pod_name, include, exclude):
+            if pod_repository is None or pod_repository == "internal":
+                compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+            else:
+                pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
+                compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml")
+                pod_pre_start_command = pod["pre_start_command"]
+                pod_post_start_command = pod["post_start_command"]
+                if pod_pre_start_command is not None:
+                    pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
+                if pod_post_start_command is not None:
+                    post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
+            compose_files.append(compose_file_name)
+        else:
+            if ctx.verbose:
+                print(f"Excluding: {pod_name}")
+
+    if ctx.verbose:
+        print(f"files: {compose_files}")
+
+    return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands)
+
+
+class cluster_context:
+    def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands) -> None:
+        self.cluster = cluster
+        self.compose_files = compose_files
+        self.pre_start_commands = pre_start_commands
+        self.post_start_commands = post_start_commands
+
+
 def _convert_to_new_format(old_pod_array):
     new_pod_array = []
     for old_pod in old_pod_array:
-- 
2.45.2

From f1cbce1d00db85009fc4c1bc424458cba76b9bfc Mon Sep 17 00:00:00 2001
From: David Boreham
Date: Mon, 20 Feb 2023 06:23:21 -0700
Subject: [PATCH 3/5] Call from base stack class

---
 app/base.py          | 21 ++++++++++++---------
 app/deploy_system.py | 22 ++++++++++++++++++++--
 2 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/app/base.py b/app/base.py
index 52528a6d..b5ca26dc 100644
--- a/app/base.py
+++ b/app/base.py
@@ -15,6 +15,8 @@
 import os
 from abc import ABC, abstractmethod
 
+from .deploy_system import get_stack_status
+
 
 def get_stack(config, stack):
     if stack == "package-registry":
@@ -40,7 +42,7 @@ class package_registry_stack(base_stack):
 
-    def ensure_available(self):
+    def ensure_available(self, ctx):
         self.url = ""
         # Check if we were given an external registry URL
         url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
         if url_from_environment:
@@ -51,10 +53,15 @@ class package_registry_stack(base_stack):
         else:
             # Otherwise we expect to use the local package-registry stack
             # First check if the stack is up
-            # If not, print a message about how to start it and return fail to the caller
-            return False
-            # If it is available, get its mapped port and construct its URL
-            self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
+            registry_running = get_stack_status("package-registry")
+            if registry_running:
+                # If it is available, get its mapped port and construct its URL
+                if self.config.debug:
+                    print("Found local package registry stack is up")
+                self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
+            else:
+                # If not, print a message about how to start it and return fail to the caller
+                return False
         return True
 
     def get_url(self):
@@ -63,9 +70,5 @@ class package_registry_stack(base_stack):
 # Temporary helper functions while we figure out a good interface to the stack deploy code
 
 
-def _is_stack_running(stack):
-    return True
-
-
 def _get_stack_mapped_port(stack, service, exposed_port):
     return 3000
diff --git a/app/deploy_system.py b/app/deploy_system.py
index 3fd42f58..4012690a 100644
--- a/app/deploy_system.py
+++ b/app/deploy_system.py
@@ -16,6 +16,7 @@
 # Deploys the system components using docker-compose
 
 import hashlib
+import copy
 import os
 import sys
 from decouple import config
@@ -117,8 +118,25 @@ def command(ctx, include, exclude, cluster, command, extra_args):
             docker.compose.logs()
 
 
-def get_stack_status(stack):
-    pass
+def get_stack_status(ctx, stack):
+
+    ctx_copy = copy.copy(ctx)
+    ctx_copy.stack = stack
+
+    cluster_context = _make_cluster_context(ctx_copy, [], [], None)
+    docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
+    # TODO: refactor to avoid duplicating this code above
+    if ctx.verbose:
+        print("Running compose ps")
+    container_list = docker.compose.ps()
+    if len(container_list) > 0:
+        if ctx.debug:
+            print(f"Container list from compose ps: {container_list}")
+        return True
+    else:
+        if ctx.debug:
+            print("No containers found from compose ps")
+        return False
-- 
2.45.2

From 7e6268c39d3d2b0b895500d695f28f946fec426e Mon Sep 17 00:00:00 2001
From: David Boreham
Date: Mon, 20 Feb 2023 06:43:06 -0700
Subject: [PATCH 4/5] Wire up to build-npms

---
 app/base.py          |  6 ++++--
 app/build_npms.py    | 15 +++++++++++----
 app/deploy_system.py |  6 +++---
 3 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/app/base.py b/app/base.py
index b5ca26dc..3c6dbe64 100644
--- a/app/base.py
+++ b/app/base.py
@@ -42,7 +42,7 @@ class base_stack(ABC):
 class package_registry_stack(base_stack):
 
-    def ensure_available(self, ctx):
+    def ensure_available(self):
         self.url = ""
         # Check if we were given an external registry URL
         url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
         if url_from_environment:
@@ -53,7 +53,7 @@ class package_registry_stack(base_stack):
         else:
             # Otherwise we expect to use the local package-registry stack
             # First check if the stack is up
-            registry_running = get_stack_status("package-registry")
+            registry_running = get_stack_status(self.config, "package-registry")
             if registry_running:
                 # If it is available, get its mapped port and construct its URL
                 if self.config.debug:
                     print("Found local package registry stack is up")
                 self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
             else:
                 # If not, print a message about how to start it and return fail to the caller
+                print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
+                print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
                 return False
         return True
diff --git a/app/build_npms.py b/app/build_npms.py
index d794d027..101fde3e 100644
--- a/app/build_npms.py
+++ b/app/build_npms.py
@@ -44,9 +44,16 @@ def command(ctx, include, exclude):
 
     # build-npms depends on having access to a writable package registry
     # so we check here that it is available
-    package_registry_stack = get_stack(ctx.obj, 'package-registry')
-    package_registry_stack.ensure_available()
-    npm_registry_url = package_registry_stack.get_url('package-registry')
+    package_registry_stack = get_stack(ctx.obj, "package-registry")
+    registry_available = package_registry_stack.ensure_available()
+    if not registry_available:
+        print("FATAL: no npm registry available for build-npms command")
+        sys.exit(1)
+    npm_registry_url = package_registry_stack.get_url()
+    npm_registry_url_token = config("CERC_NPM_AUTH_TOKEN", default=None)
+    if not npm_registry_url_token:
+        print("FATAL: CERC_NPM_AUTH_TOKEN is not defined")
+        sys.exit(1)
 
     if local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
@@ -86,7 +93,7 @@ def command(ctx, include, exclude):
         if not dry_run:
             if verbose:
                 print(f"Executing: {build_command}")
-            envs = {"CERC_NPM_AUTH_TOKEN": os.environ["CERC_NPM_AUTH_TOKEN"]} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
+            envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
             try:
                 docker.run("cerc/builder-js",
                            remove=True,
diff --git a/app/deploy_system.py b/app/deploy_system.py
index 4012690a..bff573bd 100644
--- a/app/deploy_system.py
+++ b/app/deploy_system.py
@@ -61,10 +61,10 @@ def command(ctx, include, exclude, cluster, command, extra_args):
         if verbose:
             print(f"Running compose up for extra_args: {extra_args_list}")
         for pre_start_command in cluster_context.pre_start_commands:
-            _run_command(ctx.obj, cluster, pre_start_command)
+            _run_command(ctx.obj, cluster_context.cluster, pre_start_command)
         docker.compose.up(detach=True, services=extra_args_list)
         for post_start_command in cluster_context.post_start_commands:
-            _run_command(ctx.obj, cluster, post_start_command)
+            _run_command(ctx.obj, cluster_context.cluster, post_start_command)
     elif command == "down":
         if verbose:
             print("Running compose down")
@@ -123,7 +123,7 @@ def get_stack_status(ctx, stack):
 
     ctx_copy = copy.copy(ctx)
     ctx_copy.stack = stack
-    cluster_context = _make_cluster_context(ctx_copy, [], [], None)
+    cluster_context = _make_cluster_context(ctx_copy, None, None, None)
     docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
     # TODO: refactor to avoid duplicating this code above
-- 
2.45.2

From f14a4a33d78033c812bd94e17388eabbbbd4b52b Mon Sep 17 00:00:00 2001
From: David Boreham
Date: Mon, 20 Feb 2023 12:46:56 -0700
Subject: [PATCH 5/5] Working feature

---
 app/base.py                             | 1 +
 app/data/stacks/build-support/stack.yml | 6 ++++++
 2 files changed, 7 insertions(+)
 create mode 100644 app/data/stacks/build-support/stack.yml

diff --git a/app/base.py b/app/base.py
index 3c6dbe64..940d488f 100644
--- a/app/base.py
+++ b/app/base.py
@@ -58,6 +58,7 @@ class package_registry_stack(base_stack):
                 # If it is available, get its mapped port and construct its URL
                 if self.config.debug:
                     print("Found local package registry stack is up")
+                    # TODO: get url from deploy-stack
                 self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
             else:
                 # If not, print a message about how to start it and return fail to the caller
diff --git a/app/data/stacks/build-support/stack.yml b/app/data/stacks/build-support/stack.yml
new file mode 100644
index 00000000..e4efb457
--- /dev/null
+++ b/app/data/stacks/build-support/stack.yml
@@ -0,0 +1,6 @@
+version: "1.1"
+name: build-support
+description: "Build Support Components"
+containers:
+  - cerc/builder-js
+  - cerc/builder-gerbil
-- 
2.45.2
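
Patch 2 leaves _get_stack_mapped_port as a stub that always returns 3000, and patch 5 records a TODO to get the registry URL from the deployed stack rather than hard-coding gitea.local:3000. The following is a minimal sketch of one way that stub could later be filled in, reusing the _make_cluster_context / DockerClient pattern that get_stack_status establishes in patch 3; the added ctx parameter and the python-on-whales port-mapping fields used below are assumptions, not something these patches define.

# Sketch only: assumes this lives in app/deploy_system.py next to get_stack_status,
# where copy, DockerClient and _make_cluster_context are already available, and that
# the helper gains a ctx argument the same way get_stack_status did in patch 3.
def _get_stack_mapped_port(ctx, stack, service, exposed_port):
    ctx_copy = copy.copy(ctx)
    ctx_copy.stack = stack
    cluster_context = _make_cluster_context(ctx_copy, None, None, None)
    docker = DockerClient(compose_files=cluster_context.compose_files,
                          compose_project_name=cluster_context.cluster)
    for container in docker.compose.ps():
        # Compose containers are named <project>-<service>-<index>, so match on the service name
        if service in container.name:
            # Assumption: network_settings.ports mirrors docker inspect's NetworkSettings.Ports,
            # e.g. {"3000/tcp": [{"HostIp": "0.0.0.0", "HostPort": "32769"}]}
            mappings = container.network_settings.ports.get(f"{exposed_port}/tcp")
            if mappings:
                return int(mappings[0]["HostPort"])
    return None

With a mapped port available, package_registry_stack.ensure_available could construct self.url from it instead of assuming the registry is always published at gitea.local:3000.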