forked from cerc-io/stack-orchestrator
Initial implementation
This commit is contained in:
parent
7d51e4b9aa
commit
68293cbaa3
51
app/base.py
51
app/base.py
@ -13,22 +13,59 @@
|
|||||||
# You should have received a copy of the GNU Affero General Public License
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
def get_stack(config, stack):
    """Factory: return the stack helper object for the named stack.

    The "package-registry" stack gets its specialized implementation;
    every other name falls back to the generic base_stack.
    """
    special_stacks = {"package-registry": package_registry_stack}
    stack_class = special_stacks.get(stack, base_stack)
    return stack_class(config, stack)
|
||||||
class base_stack(ABC):
    """Abstract interface for a deployable stack.

    Concrete subclasses must implement ensure_available() and get_url().
    """

    def __init__(self, config, stack):
        # Global CLI configuration object (read for e.g. .verbose).
        self.config = config
        # Name of the stack this object represents.
        self.stack = stack

    @abstractmethod
    def ensure_available(self):
        """Check that this stack can be used; subclasses decide how."""

    @abstractmethod
    def get_url(self):
        """Return the service URL for this stack."""
|
class package_registry_stack(base_stack):
    """Stack object for the npm package registry (external URL or local gitea)."""

    def ensure_available(self):
        """Determine the registry URL to use.

        Returns True when a registry URL is available (stored in self.url),
        False when the local package-registry stack would be needed but its
        availability check is not implemented yet.
        """
        self.url = "<no registry url set>"
        # Check if we were given an external registry URL
        url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
        if url_from_environment:
            if self.config.verbose:
                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
            self.url = url_from_environment
        else:
            # Otherwise we expect to use the local package-registry stack.
            # TODO: check whether that stack is up (_is_stack_running); if not,
            # print a message about how to start it. Until that is implemented
            # we always report unavailable here. The original code placed the
            # URL construction below, unreachable after this return:
            #   self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
            return False
        return True

    def get_url(self):
        """Return the registry URL computed by ensure_available()."""
        return self.url
||||||
# TODO: finish this implementation for the npm package registry
|
# Temporary helper functions while we figure out a good interface to the stack deploy code
|
||||||
|
|
||||||
|
|
||||||
|
def _is_stack_running(stack):
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _get_stack_mapped_port(stack, service, exposed_port):
|
||||||
|
return 3000
|
||||||
|
@ -46,74 +46,10 @@ def command(ctx, include, exclude, cluster, command, extra_args):
|
|||||||
dry_run = ctx.obj.dry_run
|
dry_run = ctx.obj.dry_run
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
if local_stack:
|
cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster)
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
|
||||||
else:
|
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
|
||||||
compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
|
|
||||||
|
|
||||||
if cluster is None:
|
|
||||||
# Create default unique, stable cluster name from confile file path
|
|
||||||
# TODO: change this to the config file path
|
|
||||||
path = os.path.realpath(sys.argv[0])
|
|
||||||
hash = hashlib.md5(path.encode()).hexdigest()
|
|
||||||
cluster = f"laconic-{hash}"
|
|
||||||
if verbose:
|
|
||||||
print(f"Using cluster name: {cluster}")
|
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
|
||||||
from . import data
|
|
||||||
with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
|
|
||||||
all_pods = pod_list_file.read().splitlines()
|
|
||||||
|
|
||||||
pods_in_scope = []
|
|
||||||
if stack:
|
|
||||||
stack_config = get_parsed_stack_config(stack)
|
|
||||||
# TODO: syntax check the input here
|
|
||||||
pods_in_scope = stack_config['pods']
|
|
||||||
else:
|
|
||||||
pods_in_scope = all_pods
|
|
||||||
|
|
||||||
# Convert all pod definitions to v1.1 format
|
|
||||||
pods_in_scope = _convert_to_new_format(pods_in_scope)
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
print(f"Pods: {pods_in_scope}")
|
|
||||||
|
|
||||||
# Construct a docker compose command suitable for our purpose
|
|
||||||
|
|
||||||
compose_files = []
|
|
||||||
pre_start_commands = []
|
|
||||||
post_start_commands = []
|
|
||||||
for pod in pods_in_scope:
|
|
||||||
pod_name = pod["name"]
|
|
||||||
pod_repository = pod["repository"]
|
|
||||||
pod_path = pod["path"]
|
|
||||||
if include_exclude_check(pod_name, include, exclude):
|
|
||||||
if pod_repository is None or pod_repository == "internal":
|
|
||||||
compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
|
|
||||||
else:
|
|
||||||
pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
|
|
||||||
compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml")
|
|
||||||
pod_pre_start_command = pod["pre_start_command"]
|
|
||||||
pod_post_start_command = pod["post_start_command"]
|
|
||||||
if pod_pre_start_command is not None:
|
|
||||||
pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
|
|
||||||
if pod_post_start_command is not None:
|
|
||||||
post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
|
|
||||||
compose_files.append(compose_file_name)
|
|
||||||
else:
|
|
||||||
if verbose:
|
|
||||||
print(f"Excluding: {pod_name}")
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
print(f"files: {compose_files}")
|
|
||||||
|
|
||||||
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
|
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
|
||||||
docker = DockerClient(compose_files=compose_files, compose_project_name=cluster)
|
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
|
||||||
|
|
||||||
extra_args_list = list(extra_args) or None
|
extra_args_list = list(extra_args) or None
|
||||||
|
|
||||||
@ -123,10 +59,10 @@ def command(ctx, include, exclude, cluster, command, extra_args):
|
|||||||
os.environ["CERC_SCRIPT_DEBUG"] = "true"
|
os.environ["CERC_SCRIPT_DEBUG"] = "true"
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Running compose up for extra_args: {extra_args_list}")
|
print(f"Running compose up for extra_args: {extra_args_list}")
|
||||||
for pre_start_command in pre_start_commands:
|
for pre_start_command in cluster_context.pre_start_commands:
|
||||||
_run_command(ctx.obj, cluster, pre_start_command)
|
_run_command(ctx.obj, cluster, pre_start_command)
|
||||||
docker.compose.up(detach=True, services=extra_args_list)
|
docker.compose.up(detach=True, services=extra_args_list)
|
||||||
for post_start_command in post_start_commands:
|
for post_start_command in cluster_context.post_start_commands:
|
||||||
_run_command(ctx.obj, cluster, post_start_command)
|
_run_command(ctx.obj, cluster, post_start_command)
|
||||||
elif command == "down":
|
elif command == "down":
|
||||||
if verbose:
|
if verbose:
|
||||||
@ -181,6 +117,89 @@ def command(ctx, include, exclude, cluster, command, extra_args):
|
|||||||
docker.compose.logs()
|
docker.compose.logs()
|
||||||
|
|
||||||
|
|
||||||
|
def get_stack_status(stack):
    """Placeholder: querying the status of *stack* is not implemented yet.

    Always returns None for now.
    """
||||||
|
|
||||||
|
|
||||||
|
def _make_cluster_context(ctx, include, exclude, cluster):
    """Build a cluster_context (cluster name, compose file list, pre/post
    start commands) for the pods selected by *include*/*exclude* and
    ctx.stack. *cluster* may be None, in which case a stable default name
    is derived.
    """
    if ctx.local_stack:
        # Local dev mode: derive the dev root from the current directory
        # (requires "stack-orchestrator" to appear in the cwd path,
        # otherwise rindex() raises ValueError).
        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
    else:
        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
    compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")

    if cluster is None:
        # Create default unique, stable cluster name from confile file path
        # TODO: change this to the config file path
        path = os.path.realpath(sys.argv[0])
        hash = hashlib.md5(path.encode()).hexdigest()  # NOTE(review): shadows builtin hash()
        cluster = f"laconic-{hash}"
        if ctx.verbose:
            print(f"Using cluster name: {cluster}")

    # See: https://stackoverflow.com/a/20885799/1701505
    from . import data
    with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file:
        all_pods = pod_list_file.read().splitlines()

    pods_in_scope = []
    if ctx.stack:
        stack_config = get_parsed_stack_config(ctx.stack)
        # TODO: syntax check the input here
        pods_in_scope = stack_config['pods']
    else:
        # No stack selected: every known pod is in scope.
        pods_in_scope = all_pods

    # Convert all pod definitions to v1.1 format
    pods_in_scope = _convert_to_new_format(pods_in_scope)

    if ctx.verbose:
        print(f"Pods: {pods_in_scope}")

    # Construct a docker compose command suitable for our purpose
    compose_files = []
    pre_start_commands = []
    post_start_commands = []
    for pod in pods_in_scope:
        pod_name = pod["name"]
        pod_repository = pod["repository"]
        pod_path = pod["path"]
        if include_exclude_check(pod_name, include, exclude):
            if pod_repository is None or pod_repository == "internal":
                # Internal pod: compose file ships with this package.
                compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
            else:
                # External pod: compose file lives in the cloned repository.
                pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
                compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml")
                # NOTE(review): pre/post start commands appear to apply only
                # to external pods; paths are resolved relative to the pod's
                # repo dir — confirm nesting against upstream.
                pod_pre_start_command = pod["pre_start_command"]
                pod_post_start_command = pod["post_start_command"]
                if pod_pre_start_command is not None:
                    pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
                if pod_post_start_command is not None:
                    post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
            compose_files.append(compose_file_name)
        else:
            if ctx.verbose:
                print(f"Excluding: {pod_name}")

    if ctx.verbose:
        print(f"files: {compose_files}")

    return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands)
|
|
||||||
|
|
||||||
|
class cluster_context:
    """Value object bundling everything needed to run docker compose for a
    cluster: project name, compose files, and pre/post start commands.
    """

    def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands) -> None:
        # Cluster (compose project) name.
        self.cluster = cluster
        # Paths of the docker-compose files in scope.
        self.compose_files = compose_files
        # Commands to run before / after `compose up`.
        self.pre_start_commands = pre_start_commands
        self.post_start_commands = post_start_commands
|
|
||||||
|
|
||||||
def _convert_to_new_format(old_pod_array):
|
def _convert_to_new_format(old_pod_array):
|
||||||
new_pod_array = []
|
new_pod_array = []
|
||||||
for old_pod in old_pod_array:
|
for old_pod in old_pod_array:
|
||||||
|
Loading…
Reference in New Issue
Block a user