log diagnostics & errors to stderr

Roy Crihfield 2023-06-29 14:01:57 +08:00
parent afed7bf469
commit 241023086f
5 changed files with 79 additions and 74 deletions
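This commit adds a small _log helper to util.py and routes the diagnostic and error messages in the other four files through it, leaving only primary command output on stdout. A minimal sketch of the pattern follows; the helper definition is taken from the util.py hunk at the end of this diff, and the call site shown is one example drawn from the container build command:

import sys

def _log(*args):
    # Diagnostics and errors go to stderr instead of stdout
    print(*args, file=sys.stderr)

# Before: diagnostic written to stdout
#   print(f"Building: {container}")
# After: routed to stderr via the helper
#   _log(f"Building: {container}")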

View File

@@ -15,8 +15,9 @@
import os
from abc import ABC, abstractmethod
from .deploy import get_stack_status
from decouple import config
from .deploy import get_stack_status
from .util import _log
def get_stack(config, stack):
@@ -49,7 +50,7 @@ class package_registry_stack(base_stack):
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
if url_from_environment:
if self.config.verbose:
print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
_log(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
self.url = url_from_environment
else:
# Otherwise we expect to use the local package-registry stack
@@ -58,13 +59,13 @@ class package_registry_stack(base_stack):
if registry_running:
# If it is available, get its mapped port and construct its URL
if self.config.debug:
print("Found local package registry stack is up")
_log("Found local package registry stack is up")
# TODO: get url from deploy-stack
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
else:
# If not, print a message about how to start it and return fail to the caller
print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
_log("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
_log("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
return False
return True

View File

@@ -27,7 +27,7 @@ import subprocess
import click
import importlib.resources
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config
from .util import _log, include_exclude_check, get_parsed_stack_config
from .base import get_npm_registry_url
# TODO: find a place for this
@@ -56,15 +56,15 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
_log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
if not quiet:
print(f'Dev Root is: {dev_root_path}')
_log(f'Dev Root is: {dev_root_path}')
if not os.path.isdir(dev_root_path):
print('Dev root directory doesn\'t exist, creating')
_log('Dev root directory doesn\'t exist, creating')
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
@@ -79,9 +79,9 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
containers_in_scope = all_containers
if verbose:
print(f'Containers: {containers_in_scope}')
_log(f'Containers: {containers_in_scope}')
if stack:
print(f"Stack: {stack}")
_log(f"Stack: {stack}")
# TODO: make this configurable
container_build_env = {
@@ -102,16 +102,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
def process_container(container):
if not quiet:
print(f"Building: {container}")
_log(f"Building: {container}")
build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
build_script_filename = os.path.join(build_dir, "build.sh")
if verbose:
print(f"Build script filename: {build_script_filename}")
_log(f"Build script filename: {build_script_filename}")
if os.path.exists(build_script_filename):
build_command = build_script_filename
else:
if verbose:
print(f"No script file found: {build_script_filename}, using default build script")
_log(f"No script file found: {build_script_filename}, using default build script")
repo_dir = container.split('/')[1]
# TODO: make this less of a hack -- should be specified in some metadata somewhere
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
@@ -120,23 +120,23 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
if not dry_run:
if verbose:
print(f"Executing: {build_command} with environment: {container_build_env}")
_log(f"Executing: {build_command} with environment: {container_build_env}")
build_result = subprocess.run(build_command, shell=True, env=container_build_env)
if verbose:
print(f"Return code is: {build_result.returncode}")
_log(f"Return code is: {build_result.returncode}")
if build_result.returncode != 0:
print(f"Error running build for {container}")
_log(f"Error running build for {container}")
if not continue_on_error:
print("FATAL Error: container build failed and --continue-on-error not set, exiting")
_log("FATAL Error: container build failed and --continue-on-error not set, exiting")
sys.exit(1)
else:
print("****** Container Build Error, continuing because --continue-on-error is set")
_log("****** Container Build Error, continuing because --continue-on-error is set")
else:
print("Skipped")
_log("Skipped")
for container in containers_in_scope:
if include_exclude_check(container, include, exclude):
process_container(container)
else:
if verbose:
print(f"Excluding: {container}")
_log(f"Excluding: {container}")

View File

@@ -26,7 +26,7 @@ import subprocess
from python_on_whales import DockerClient, DockerException
import click
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config, global_options2
from .util import _log, include_exclude_check, get_parsed_stack_config, global_options2
from .deployment_create import create as deployment_create
from .deployment_create import init as deployment_init
@@ -69,7 +69,7 @@ def up_operation(ctx, services_list):
for attr, value in container_exec_env.items():
os.environ[attr] = value
if global_context.verbose:
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
_log(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
for pre_start_command in cluster_context.pre_start_commands:
_run_command(global_context, cluster_context.cluster, pre_start_command)
deploy_context.docker.compose.up(detach=True, services=services_list)
@@ -82,7 +82,7 @@ def down_operation(ctx, delete_volumes, extra_args_list):
global_context = ctx.parent.parent.obj
if not global_context.dry_run:
if global_context.verbose:
print("Running compose down")
_log("Running compose down")
timeout_arg = None
if extra_args_list:
timeout_arg = extra_args_list[0]
@@ -94,7 +94,7 @@ def ps_operation(ctx):
global_context = ctx.parent.parent.obj
if not global_context.dry_run:
if global_context.verbose:
print("Running compose ps")
_log("Running compose ps")
container_list = ctx.obj.docker.compose.ps()
if len(container_list) > 0:
print("Running containers:")
@@ -120,12 +120,12 @@ def port_operation(ctx, extra_args):
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if extra_args_list is None or len(extra_args_list) < 2:
print("Usage: port <service> <exposed-port>")
_log("Usage: port <service> <exposed-port>")
sys.exit(1)
service_name = extra_args_list[0]
exposed_port = extra_args_list[1]
if global_context.verbose:
print(f"Running compose port {service_name} {exposed_port}")
_log(f"Running compose port {service_name} {exposed_port}")
mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port)
print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
@@ -135,17 +135,17 @@ def exec_operation(ctx, extra_args):
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if extra_args_list is None or len(extra_args_list) < 2:
print("Usage: exec <service> <cmd>")
_log("Usage: exec <service> <cmd>")
sys.exit(1)
service_name = extra_args_list[0]
command_to_exec = ["sh", "-c"] + extra_args_list[1:]
container_exec_env = _make_runtime_env(global_context)
if global_context.verbose:
print(f"Running compose exec {service_name} {command_to_exec}")
_log(f"Running compose exec {service_name} {command_to_exec}")
try:
ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
except DockerException as error:
print(f"container command returned error exit status")
_log(f"container command returned error exit status")
def logs_operation(ctx, extra_args):
@@ -153,7 +153,7 @@ def logs_operation(ctx, extra_args):
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if global_context.verbose:
print("Running compose logs")
_log("Running compose logs")
logs_output = ctx.obj.docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
print(logs_output)
@@ -211,15 +211,15 @@ def get_stack_status(ctx, stack):
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
# TODO: refactor to avoid duplicating this code above
if ctx.verbose:
print("Running compose ps")
_log("Running compose ps")
container_list = docker.compose.ps()
if len(container_list) > 0:
if ctx.debug:
print(f"Container list from compose ps: {container_list}")
_log(f"Container list from compose ps: {container_list}")
return True
else:
if ctx.debug:
print("No containers found from compose ps")
_log("No containers found from compose ps")
False
@@ -237,7 +237,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
if ctx.local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
_log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
@@ -256,11 +256,11 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
path = os.path.realpath(sys.argv[0])
unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
if ctx.debug:
print(f"pre-hash descriptor: {unique_cluster_descriptor}")
_log(f"pre-hash descriptor: {unique_cluster_descriptor}")
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
cluster = f"laconic-{hash}"
if ctx.verbose:
print(f"Using cluster name: {cluster}")
_log(f"Using cluster name: {cluster}")
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
@@ -281,7 +281,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
pods_in_scope = _convert_to_new_format(pods_in_scope)
if ctx.verbose:
print(f"Pods: {pods_in_scope}")
_log(f"Pods: {pods_in_scope}")
# Construct a docker compose command suitable for our purpose
@@ -307,10 +307,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
compose_files.append(compose_file_name)
else:
if ctx.verbose:
print(f"Excluding: {pod_name}")
_log(f"Excluding: {pod_name}")
if ctx.verbose:
print(f"files: {compose_files}")
_log(f"files: {compose_files}")
return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
@@ -342,7 +342,7 @@ def _convert_to_new_format(old_pod_array):
def _run_command(ctx, cluster_name, command):
if ctx.verbose:
print(f"Running command: {command}")
_log(f"Running command: {command}")
command_dir = os.path.dirname(command)
command_file = os.path.join(".", os.path.basename(command))
command_env = os.environ.copy()
@@ -351,7 +351,7 @@ def _run_command(ctx, cluster_name, command):
command_env["CERC_SCRIPT_DEBUG"] = "true"
command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir)
if command_result.returncode != 0:
print(f"FATAL Error running command: {command}")
_log(f"FATAL Error running command: {command}")
sys.exit(1)
@@ -368,7 +368,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
for container in cluster_config:
container_config = cluster_config[container]
if ctx.verbose:
print(f"{container} config: {container_config}")
_log(f"{container} config: {container_config}")
for directive in container_config:
pd = ConfigDirective(
container_config[directive].split(".")[0],
@@ -377,7 +377,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
directive
)
if ctx.verbose:
print(f"Setting {pd.destination_container}.{pd.destination_variable}"
_log(f"Setting {pd.destination_container}.{pd.destination_variable}"
f" = {pd.source_container}.{pd.source_variable}")
# TODO: add a timeout
waiting_for_data = True
@@ -394,19 +394,19 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
envs=container_exec_env)
except DockerException as error:
if ctx.debug:
print(f"Docker exception reading config source: {error}")
_log(f"Docker exception reading config source: {error}")
# If the script executed failed for some reason, we get:
# "It returned with code 1"
if "It returned with code 1" in str(error):
if ctx.verbose:
print("Config export script returned an error, re-trying")
_log("Config export script returned an error, re-trying")
# If the script failed to execute (e.g. the file is not there) then we get:
# "It returned with code 2"
if "It returned with code 2" in str(error):
print(f"Fatal error reading config source: {error}")
_log(f"Fatal error reading config source: {error}")
if source_value:
if ctx.debug:
print(f"fetched source value: {source_value}")
_log(f"fetched source value: {source_value}")
destination_output = docker.compose.execute(pd.destination_container,
["sh", "-c",
f"sh /scripts/import-{pd.destination_variable}.sh"
@@ -415,7 +415,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
envs=container_exec_env)
waiting_for_data = False
if ctx.debug:
print(f"destination output: {destination_output}")
_log(f"destination output: {destination_output}")
command.add_command(deployment_init)

View File

@@ -25,7 +25,7 @@ import click
import importlib.resources
from pathlib import Path
import yaml
from .util import include_exclude_check
from .util import _log, include_exclude_check
class GitProgress(git.RemoteProgress):
@@ -88,7 +88,7 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
# TODO: fix the messy arg list here
def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
if verbose:
print(f"Processing repo: {fully_qualified_repo}")
_log(f"Processing repo: {fully_qualified_repo}")
repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
git_ssh_prefix = f"git@{repo_host}:"
git_http_prefix = f"https://{repo_host}/"
@@ -100,40 +100,40 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
if not quiet:
present_text = f"already exists active {'branch' if is_branch else 'tag'}: {current_repo_branch_or_tag}" if is_present \
else 'Needs to be fetched'
print(f"Checking: {full_filesystem_repo_path}: {present_text}")
_log(f"Checking: {full_filesystem_repo_path}: {present_text}")
# Quick check that it's actually a repo
if is_present:
if not is_git_repo(full_filesystem_repo_path):
print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
_log(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
sys.exit(1)
else:
if pull:
if verbose:
print(f"Running git pull for {full_filesystem_repo_path}")
_log(f"Running git pull for {full_filesystem_repo_path}")
if not check_only:
if is_branch:
git_repo = git.Repo(full_filesystem_repo_path)
origin = git_repo.remotes.origin
origin.pull(progress=None if quiet else GitProgress())
else:
print(f"skipping pull because this repo checked out a tag")
_log(f"skipping pull because this repo checked out a tag")
else:
print("(git pull skipped)")
_log("(git pull skipped)")
if not is_present:
# Clone
if verbose:
print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
_log(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
if not dry_run:
git.Repo.clone_from(full_github_repo_path,
full_filesystem_repo_path,
progress=None if quiet else GitProgress())
else:
print("(git clone skipped)")
_log("(git clone skipped)")
# Checkout the requested branch, if one was specified
branch_to_checkout = None
if branches_array:
# Find the current repo in the branches list
print("Checking")
_log("Checking")
for repo_branch in branches_array:
repo_branch_tuple = repo_branch.split(" ")
if repo_branch_tuple[0] == branch_strip(fully_qualified_repo):
@@ -145,13 +145,13 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
if branch_to_checkout:
if current_repo_branch_or_tag is None or (current_repo_branch_or_tag and (current_repo_branch_or_tag != branch_to_checkout)):
if not quiet:
print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
_log(f"switching to branch {branch_to_checkout} in repo {repo_path}")
git_repo = git.Repo(full_filesystem_repo_path)
# git checkout works for both branches and tags
git_repo.git.checkout(branch_to_checkout)
else:
if verbose:
print(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")
_log(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")
def parse_branches(branches_string):
@@ -161,7 +161,7 @@ def parse_branches(branches_string):
for branch_directive in branches_directives:
split_directive = branch_directive.split("@")
if len(split_directive) != 2:
print(f"Error: branch specified is not valid: {branch_directive}")
_log(f"Error: branch specified is not valid: {branch_directive}")
sys.exit(1)
result_array.append(f"{split_directive[0]} {split_directive[1]}")
return result_array
@@ -191,39 +191,39 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
# TODO: branches file needs to be re-worked in the context of stacks
if branches_file:
if branches:
print("Error: can't specify both --branches and --branches-file")
_log("Error: can't specify both --branches and --branches-file")
sys.exit(1)
else:
if verbose:
print(f"loading branches from: {branches_file}")
_log(f"loading branches from: {branches_file}")
with open(branches_file) as branches_file_open:
branches_array = branches_file_open.read().splitlines()
print(f"branches: {branches}")
_log(f"branches: {branches}")
if branches:
if branches_file:
print("Error: can't specify both --branches and --branches-file")
_log("Error: can't specify both --branches and --branches-file")
sys.exit(1)
else:
branches_array = parse_branches(branches)
if branches_array and verbose:
print(f"Branches are: {branches_array}")
_log(f"Branches are: {branches_array}")
local_stack = ctx.obj.local_stack
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
_log(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
if not quiet:
print(f"Dev Root is: {dev_root_path}")
_log(f"Dev Root is: {dev_root_path}")
if not os.path.isdir(dev_root_path):
if not quiet:
print('Dev root directory doesn\'t exist, creating')
_log('Dev root directory doesn\'t exist, creating')
os.makedirs(dev_root_path)
# See: https://stackoverflow.com/a/20885799/1701505
@@ -244,9 +244,9 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
repos_in_scope = all_repos
if verbose:
print(f"Repos: {repos_in_scope}")
_log(f"Repos: {repos_in_scope}")
if stack:
print(f"Stack: {stack}")
_log(f"Stack: {stack}")
repos = []
for repo in repos_in_scope:
@@ -254,11 +254,11 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
repos.append(repo)
else:
if verbose:
print(f"Excluding: {repo}")
_log(f"Excluding: {repo}")
for repo in repos:
try:
process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, repo)
except git.exc.GitCommandError as error:
print(f"\n******* git command returned error exit status:\n{error}")
_log(f"\n******* git command returned error exit status:\n{error}")
sys.exit(1)

View File

@@ -19,6 +19,10 @@ import yaml
from pathlib import Path
def _log(*args):
    print(*args, file=sys.stderr)
def include_exclude_check(s, include, exclude):
if include is None and exclude is None:
return True
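Note that lines producing primary command output, such as print(logs_output) in logs_operation and the mapped-port line in port_operation, are left on plain print, so stdout stays clean for piping. A sketch of how a caller could rely on that split; the command line below is illustrative only and not taken from this commit:

import subprocess

# Hypothetical invocation: with diagnostics on stderr, stdout can be captured
# and parsed on its own, free of the messages now emitted via _log.
result = subprocess.run(
    ["laconic-so", "deploy-system", "logs"],
    capture_output=True, text=True,
)
print("command output:", result.stdout)   # primary output only
print("diagnostics:", result.stderr)      # _log messages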