Merge branch 'main' into dboreham/mobymask-v3-demo-test
Some checks failed
Mobymask Demo Build / Run mobymask-v3-demo test suite (push) Has been cancelled
commit 1345dbc4cf
@@ -25,13 +25,13 @@ import sys
 from decouple import config
 import subprocess
 import click
-import importlib.resources
 from pathlib import Path
 from stack_orchestrator.opts import opts
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external, error_exit, warn_exit
+from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit
 from stack_orchestrator.base import get_npm_registry_url
 from stack_orchestrator.build.build_types import BuildContext
 from stack_orchestrator.build.publish import publish_image
+from stack_orchestrator.build.build_util import get_containers_in_scope
 
 # TODO: find a place for this
 # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
@@ -149,24 +149,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
         if not image_registry:
             error_exit("--image-registry must be supplied with --publish-images")
 
-    # See: https://stackoverflow.com/a/20885799/1701505
-    from stack_orchestrator import data
-    with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
-        all_containers = container_list_file.read().splitlines()
-
-    containers_in_scope = []
-    if stack:
-        stack_config = get_parsed_stack_config(stack)
-        if "containers" not in stack_config or stack_config["containers"] is None:
-            warn_exit(f"stack {stack} does not define any containers")
-        containers_in_scope = stack_config['containers']
-    else:
-        containers_in_scope = all_containers
-
-    if opts.o.verbose:
-        print(f'Containers: {containers_in_scope}')
-        if stack:
-            print(f"Stack: {stack}")
+    containers_in_scope = get_containers_in_scope(stack)
 
     container_build_env = make_container_build_env(dev_root_path,
                                                    container_build_dir,
stack_orchestrator/build/build_util.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+# Copyright © 2024 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http:#www.gnu.org/licenses/>.
+
+import importlib.resources
+
+from stack_orchestrator.opts import opts
+from stack_orchestrator.util import get_parsed_stack_config, warn_exit
+
+
+def get_containers_in_scope(stack: str):
+
+    # See: https://stackoverflow.com/a/20885799/1701505
+    from stack_orchestrator import data
+    with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
+        all_containers = container_list_file.read().splitlines()
+
+    containers_in_scope = []
+    if stack:
+        stack_config = get_parsed_stack_config(stack)
+        if "containers" not in stack_config or stack_config["containers"] is None:
+            warn_exit(f"stack {stack} does not define any containers")
+        containers_in_scope = stack_config['containers']
+    else:
+        containers_in_scope = all_containers
+
+    if opts.o.verbose:
+        print(f'Containers: {containers_in_scope}')
+        if stack:
+            print(f"Stack: {stack}")
+
+    return containers_in_scope
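The helper above centralizes container-scope resolution for the build and fetch commands that call it later in this diff. A minimal usage sketch from the CLI side (hedged: `<stack-name>` is a placeholder; `--stack` and `--verbose` are existing global options):

```bash
# With a stack, only the stack's "containers" list is in scope;
# without one, every entry in container-image-list.txt is in scope.
laconic-so --verbose --stack <stack-name> build-containers
laconic-so --verbose build-containers
```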
@@ -21,6 +21,8 @@
 # TODO: display the available list of containers; allow re-build of either all or specific containers
 
 import os
+import sys
+
 from decouple import config
 import click
 from pathlib import Path
@@ -40,12 +42,9 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
     '''build the specified webapp container'''
 
     quiet = ctx.obj.quiet
-    verbose = ctx.obj.verbose
-    dry_run = ctx.obj.dry_run
     debug = ctx.obj.debug
     local_stack = ctx.obj.local_stack
     stack = ctx.obj.stack
-    continue_on_error = ctx.obj.continue_on_error
 
     # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
     container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
@@ -73,7 +72,10 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
         container_build_env,
         dev_root_path,
     )
-    build_containers.process_container(build_context_1)
+    ok = build_containers.process_container(build_context_1)
+    if not ok:
+        print("ERROR: Build failed.", file=sys.stderr)
+        sys.exit(1)
 
     # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
     container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
@@ -94,4 +96,7 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
         container_build_env,
         dev_root_path,
     )
-    build_containers.process_container(build_context_2)
+    ok = build_containers.process_container(build_context_2)
+    if not ok:
+        print("ERROR: Build failed.", file=sys.stderr)
+        sys.exit(1)
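With the checks above, a failed base-container or webapp build now terminates the command with a non-zero exit status instead of continuing silently. A hedged sketch of relying on that in a shell or CI step (the `--source-repo` option name is inferred from the command signature shown above):

```bash
laconic-so build-webapp --source-repo ~/projects/my-web-app || {
  echo "webapp build failed" >&2
  exit 1
}
```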
stack_orchestrator/build/fetch_containers.py (new file, 180 lines)
@@ -0,0 +1,180 @@
+# Copyright © 2024 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http:#www.gnu.org/licenses/>.
+
+import click
+from dataclasses import dataclass
+import json
+import platform
+from python_on_whales import DockerClient
+from python_on_whales.components.manifest.cli_wrapper import ManifestCLI, ManifestList
+from python_on_whales.utils import run
+import requests
+from typing import List
+
+from stack_orchestrator.opts import opts
+from stack_orchestrator.util import include_exclude_check, error_exit
+from stack_orchestrator.build.build_util import get_containers_in_scope
+
+# Experimental fetch-container command
+
+
+@dataclass
+class RegistryInfo:
+    registry: str
+    registry_username: str
+    registry_token: str
+
+
+# Extending this code to support the --verbose option, consider contributing upstream
+# https://github.com/gabrieldemarmiesse/python-on-whales/blob/master/python_on_whales/components/manifest/cli_wrapper.py#L129
+class ExtendedManifestCLI(ManifestCLI):
+    def inspect_verbose(self, x: str) -> ManifestList:
+        """Returns a Docker manifest list object."""
+        json_str = run(self.docker_cmd + ["manifest", "inspect", "--verbose", x])
+        return json.loads(json_str)
+
+
+def _local_tag_for(container: str):
+    return f"{container}:local"
+
+
+# See: https://docker-docs.uclv.cu/registry/spec/api/
+# Emulate this:
+# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
+# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
+def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
+    # registry looks like: git.vdb.to/cerc-io
+    registry_parts = registry_info.registry.split("/")
+    url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
+    if opts.o.debug:
+        print(f"Fetching tags from: {url}")
+    response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
+    if response.status_code == 200:
+        tag_info = response.json()
+        if opts.o.debug:
+            print(f"container tags list: {tag_info}")
+        tags_array = tag_info["tags"]
+        return tags_array
+    else:
+        error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
+
+
+def _find_latest(candidate_tags: List[str]):
+    # Lex sort should give us the latest first
+    sorted_candidates = sorted(candidate_tags)
+    if opts.o.debug:
+        print(f"sorted candidates: {sorted_candidates}")
+    return sorted_candidates[0]
+
+
+def _filter_for_platform(container: str,
+                         registry_info: RegistryInfo,
+                         tag_list: List[str]) -> List[str]:
+    filtered_tags = []
+    this_machine = platform.machine()
+    # Translate between Python platform names and docker
+    if this_machine == "x86_64":
+        this_machine = "amd64"
+    if opts.o.debug:
+        print(f"Python says the architecture is: {this_machine}")
+    docker = DockerClient()
+    docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
+    for tag in tag_list:
+        remote_tag = f"{registry_info.registry}/{container}:{tag}"
+        manifest_cmd = ExtendedManifestCLI(docker.client_config)
+        manifest = manifest_cmd.inspect_verbose(remote_tag)
+        if opts.o.debug:
+            print(f"manifest: {manifest}")
+        image_architecture = manifest["Descriptor"]["platform"]["architecture"]
+        if opts.o.debug:
+            print(f"image_architecture: {image_architecture}")
+        if this_machine == image_architecture:
+            filtered_tags.append(tag)
+    if opts.o.debug:
+        print(f"Tags filtered for platform: {filtered_tags}")
+    return filtered_tags
+
+
+def _get_latest_image(container: str, registry_info: RegistryInfo):
+    all_tags = _get_tags_for_container(container, registry_info)
+    tags_for_platform = _filter_for_platform(container, registry_info, all_tags)
+    latest_tag = _find_latest(tags_for_platform)
+    return f"{container}:{latest_tag}"
+
+
+def _fetch_image(tag: str, registry_info: RegistryInfo):
+    docker = DockerClient()
+    docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
+    remote_tag = f"{registry_info.registry}/{tag}"
+    if opts.o.debug:
+        print(f"Attempting to pull this image: {remote_tag}")
+    docker.image.pull(remote_tag)
+
+
+def _exists_locally(container: str):
+    docker = DockerClient()
+    return docker.image.exists(_local_tag_for(container))
+
+
+def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
+    docker = DockerClient()
+    docker.image.tag(f"{registry}/{remote_tag}", local_tag)
+
+
+@click.command()
+@click.option('--include', help="only fetch these containers")
+@click.option('--exclude', help="don't fetch these containers")
+@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
+@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
+@click.option("--registry-username", required=True, help="Specify the image registry username")
+@click.option("--registry-token", required=True, help="Specify the image registry access token")
+@click.pass_context
+def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
+    '''EXPERIMENTAL: fetch the images for a stack from remote registry'''
+
+    registry_info = RegistryInfo(image_registry, registry_username, registry_token)
+    # Generate list of target containers
+    stack = ctx.obj.stack
+    containers_in_scope = get_containers_in_scope(stack)
+    for container in containers_in_scope:
+        local_tag = _local_tag_for(container)
+        if include_exclude_check(container, include, exclude):
+            if opts.o.debug:
+                print(f"Processing: {container}")
+            # For each container, attempt to find the latest of a set of
+            # images with the correct name and platform in the specified registry
+            image_to_fetch = _get_latest_image(container, registry_info)
+            if opts.o.debug:
+                print(f"Fetching: {image_to_fetch}")
+            _fetch_image(image_to_fetch, registry_info)
+            # Now check if the target container image already exists locally
+            if (_exists_locally(container)):
+                if not opts.o.quiet:
+                    print(f"Container image {container} already exists locally")
+                # if so, fail unless the user specified force-local-overwrite
+                if (force_local_overwrite):
+                    # In that case remove the existing :local tag
+                    if not opts.o.quiet:
+                        print(f"Warning: overwriting local tag from this image: {container} because "
+                              "--force-local-overwrite was specified")
+                else:
+                    if not opts.o.quiet:
+                        print(f"Skipping local tagging for this image: {container} because that would "
+                              "overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
+            # Tag the fetched image with the :local tag
+            _add_local_tag(image_to_fetch, image_registry, local_tag)
+        else:
+            if opts.o.verbose:
+                print(f"Excluding: {container}")
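A usage sketch for the new command (it is registered as `fetch-containers` in the CLI changes at the end of this diff). The registry value follows the `git.vdb.to/cerc-io` form mentioned in the code comment above; the stack name and credentials are placeholders:

```bash
laconic-so --stack <stack-name> fetch-containers \
  --image-registry git.vdb.to/cerc-io \
  --registry-username <username> \
  --registry-token <token> \
  --force-local-overwrite
```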
@@ -26,6 +26,8 @@ RUN \
     && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \
     # Install semver
     && su ${USERNAME} -c "umask 0002 && npm install -g semver" \
+    # Install pnpm
+    && su ${USERNAME} -c "umask 0002 && npm install -g pnpm" \
     && npm cache clean --force > /dev/null 2>&1
 
 # [Optional] Uncomment this section to install additional OS packages.
@@ -35,6 +37,9 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
 # [Optional] Uncomment if you want to install more global node modules
 # RUN su node -c "npm install -g <your-package-list-here>"
 
+# We do this to get a yq binary from the published container, for the correct architecture we're building here
+COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq
+
 # Expose port for http
 EXPOSE 80
 
@@ -10,10 +10,12 @@ TRG_DIR="${3:-.next-r}"
 
 CERC_BUILD_TOOL="${CERC_BUILD_TOOL}"
 if [ -z "$CERC_BUILD_TOOL" ]; then
-  if [ -f "yarn.lock" ]; then
-    CERC_BUILD_TOOL=npm
-  else
+  if [ -f "pnpm-lock.yaml" ]; then
+    CERC_BUILD_TOOL=pnpm
+  elif [ -f "yarn.lock" ]; then
     CERC_BUILD_TOOL=yarn
+  else
+    CERC_BUILD_TOOL=npm
   fi
 fi
 
@@ -9,7 +9,9 @@ CERC_MIN_NEXTVER=13.4.2
 CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-keep}"
 CERC_BUILD_TOOL="${CERC_BUILD_TOOL}"
 if [ -z "$CERC_BUILD_TOOL" ]; then
-  if [ -f "yarn.lock" ]; then
+  if [ -f "pnpm-lock.yaml" ]; then
+    CERC_BUILD_TOOL=pnpm
+  elif [ -f "yarn.lock" ]; then
     CERC_BUILD_TOOL=yarn
   else
     CERC_BUILD_TOOL=npm
@@ -16,7 +16,9 @@ trap ctrl_c INT
 
 CERC_BUILD_TOOL="${CERC_BUILD_TOOL}"
 if [ -z "$CERC_BUILD_TOOL" ]; then
-  if [ -f "yarn.lock" ] && [ ! -f "package-lock.json" ]; then
+  if [ -f "pnpm-lock.yaml" ]; then
+    CERC_BUILD_TOOL=pnpm
+  elif [ -f "yarn.lock" ]; then
     CERC_BUILD_TOOL=yarn
   else
     CERC_BUILD_TOOL=npm
@@ -24,6 +24,10 @@ RUN \
     && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \
     # Install eslint
     && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \
+    # Install semver
+    && su ${USERNAME} -c "umask 0002 && npm install -g semver" \
+    # Install pnpm
+    && su ${USERNAME} -c "umask 0002 && npm install -g pnpm" \
     && npm cache clean --force > /dev/null 2>&1
 
 # [Optional] Uncomment this section to install additional OS packages.
@@ -1,11 +1,12 @@
 FROM cerc/webapp-base:local as builder
 
 ARG CERC_BUILD_TOOL
+ARG CERC_BUILD_OUTPUT_DIR
 
 WORKDIR /app
 COPY . .
-RUN rm -rf node_modules build .next*
-RUN /scripts/build-app.sh /app build /data
+RUN rm -rf node_modules build dist .next*
+RUN /scripts/build-app.sh /app /data
 
 FROM cerc/webapp-base:local
 COPY --from=builder /data /data
@@ -7,9 +7,10 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi
 
 CERC_BUILD_TOOL="${CERC_BUILD_TOOL}"
+CERC_BUILD_OUTPUT_DIR="${CERC_BUILD_OUTPUT_DIR}"
+
 WORK_DIR="${1:-/app}"
-OUTPUT_DIR="${2:-build}"
-DEST_DIR="${3:-/data}"
+DEST_DIR="${2:-/data}"
 
 if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
   echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
@@ -22,7 +23,9 @@ elif [ -f "${WORK_DIR}/package.json" ]; then
   cd "${WORK_DIR}" || exit 1
 
   if [ -z "$CERC_BUILD_TOOL" ]; then
-    if [ -f "yarn.lock" ]; then
+    if [ -f "pnpm-lock.yaml" ]; then
+      CERC_BUILD_TOOL=pnpm
+    elif [ -f "yarn.lock" ]; then
       CERC_BUILD_TOOL=yarn
     else
       CERC_BUILD_TOOL=npm
@@ -33,7 +36,17 @@ elif [ -f "${WORK_DIR}/package.json" ]; then
   $CERC_BUILD_TOOL build || exit 1
 
   rm -rf "${DEST_DIR}"
-  mv "${WORK_DIR}/${OUTPUT_DIR}" "${DEST_DIR}"
+  if [ -z "${CERC_BUILD_OUTPUT_DIR}" ]; then
+    if [ -d "${WORK_DIR}/dist" ]; then
+      CERC_BUILD_OUTPUT_DIR="${WORK_DIR}/dist"
+    elif [ -d "${WORK_DIR}/build" ]; then
+      CERC_BUILD_OUTPUT_DIR="${WORK_DIR}/build"
+    else
+      echo "ERROR: Unable to locate build output. Set with --extra-build-args \"--build-arg CERC_BUILD_OUTPUT_DIR=path\"" 1>&2
+      exit 1
+    fi
+  fi
+  mv "${CERC_BUILD_OUTPUT_DIR}" "${DEST_DIR}"
 else
   echo "Copying static app ..."
   mv "${WORK_DIR}" "${DEST_DIR}"
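If neither a `dist` nor a `build` directory is found, the script above asks the caller to supply the output location explicitly. A hedged example of doing that when building a webapp (option names follow the error message above and the build-webapp signature earlier in this diff; the path is a placeholder):

```bash
laconic-so build-webapp --source-repo ~/projects/my-web-app \
  --extra-build-args "--build-arg CERC_BUILD_OUTPUT_DIR=/app/out"
```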
@@ -3,13 +3,28 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
   set -x
 fi
 
+CERC_LISTEN_PORT=${CERC_LISTEN_PORT:-80}
 CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"
 CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
+CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"
+
+if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
+  if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ] && [ -d "${CERC_WEBAPP_FILES_DIR}/static" ]; then
+    CERC_SINGLE_PAGE_APP=true
+  else
+    CERC_SINGLE_PAGE_APP=false
+  fi
+fi
 
 if [ "true" == "$CERC_ENABLE_CORS" ]; then
   CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --cors"
 fi
 
+if [ "true" == "$CERC_SINGLE_PAGE_APP" ]; then
+  # Create a catchall redirect back to /
+  CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --proxy http://localhost:${CERC_LISTEN_PORT}?"
+fi
+
 LACONIC_HOSTED_CONFIG_FILE=${LACONIC_HOSTED_CONFIG_FILE}
 if [ -z "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
   if [ -f "/config/laconic-hosted-config.yml" ]; then
@@ -20,8 +35,8 @@ if [ -z "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
 fi
 
 if [ -f "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
-  /scripts/apply-webapp-config.sh $LACONIC_HOSTED_CONFIG_FILE ${CERC_WEBAPP_FILES_DIR}
+  /scripts/apply-webapp-config.sh $LACONIC_HOSTED_CONFIG_FILE "${CERC_WEBAPP_FILES_DIR}"
 fi
 
 /scripts/apply-runtime-env.sh ${CERC_WEBAPP_FILES_DIR}
-http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT:-80} ${CERC_WEBAPP_FILES_DIR}
+http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT} "${CERC_WEBAPP_FILES_DIR}"
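For reference, a sketch of what the launcher above effectively executes for a single-page app with CORS enabled on a non-default port (the environment values below are hypothetical):

```bash
export CERC_LISTEN_PORT=8080
export CERC_ENABLE_CORS=true
export CERC_SINGLE_PAGE_APP=true

# Resulting invocation, per the argument assembly above:
http-server --cors --proxy "http://localhost:8080?" -p 8080 /data
```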
@@ -33,23 +33,29 @@ laconic-so --stack uniswap-urbit-app deploy init --output uniswap-urbit-app-spec
 
 ### Ports
 
-Edit `network` in spec file to map container ports to same ports in host:
+Edit `uniswap-urbit-app-spec.yml` such that it looks like:
 
 ```yml
-...
+stack: uniswap-urbit-app
+deploy-to: compose
 network:
   ports:
-    urbit-fake-ship:
-      - '8080:80'
     proxy-server:
       - '4000:4000'
+    urbit-fake-ship:
+      - '8080:80'
     ipfs:
-      - '8081:8080'
-      - '5001:5001'
-...
+      - '4001'
+      - '8081:8080'
+      - 0.0.0.0:5001:5001
+volumes:
+  urbit_app_builds: ./data/urbit_app_builds
+  urbit_data: ./data/urbit_data
+  ipfs-import: ./data/ipfs-import
+  ipfs-data: ./data/ipfs-data
 ```
 
-Note: Skip the `ipfs` ports if need to use an externally running IPFS node
+Note: Skip the `ipfs` ports if using an externally running IPFS node, set via `config.env`, below.
 
 ### Data volumes
 
@@ -29,6 +29,29 @@ def _image_needs_pushed(image: str):
     return image.endswith(":local")
 
 
+def remote_image_exists(remote_repo_url: str, local_tag: str):
+    docker = DockerClient()
+    try:
+        remote_tag = remote_tag_for_image(local_tag, remote_repo_url)
+        result = docker.manifest.inspect(remote_tag)
+        return True if result else False
+    except Exception:  # noqa: E722
+        return False
+
+
+def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
+    if not additional_tags:
+        return
+
+    if not remote_image_exists(remote_repo_url, local_tag):
+        raise Exception(f"{local_tag} does not exist in {remote_repo_url}")
+
+    docker = DockerClient()
+    remote_tag = remote_tag_for_image(local_tag, remote_repo_url)
+    new_remote_tags = [remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
+    docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
+
+
 def remote_tag_for_image(image: str, remote_repo_url: str):
     # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
     major_parts = image.split("/", 2)
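For context, the two new helpers correspond to plain docker CLI operations; a hedged equivalent (registry host and image names are placeholders):

```bash
# remote_image_exists(): the tag exists remotely if its manifest resolves
docker manifest inspect registry.example.com/org/my-app:local

# add_tags_to_image(): attach an additional tag to an already-pushed image
docker buildx imagetools create \
  --tag registry.example.com/org/my-app:deploy-1234 \
  registry.example.com/org/my-app:local
```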
@@ -24,6 +24,7 @@ import uuid
 
 import click
 
+from stack_orchestrator.deploy.images import remote_image_exists, add_tags_to_image
 from stack_orchestrator.deploy.webapp import deploy_webapp
 from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient,
                                                     build_container_image, push_container_image,
@@ -43,6 +44,7 @@ def process_app_deployment_request(
         deployment_parent_dir,
         kube_config,
         image_registry,
+        force_rebuild=False,
         log_file=None
 ):
     # 1. look up application
@@ -91,7 +93,9 @@ def process_app_deployment_request(
     deployment_record = laconic.get_record(app_deployment_crn)
     deployment_dir = os.path.join(deployment_parent_dir, fqdn)
     deployment_config_file = os.path.join(deployment_dir, "config.env")
+    # TODO: Is there any reason not to simplify the hash input to the app_deployment_crn?
     deployment_container_tag = "laconic-webapp/%s:local" % hashlib.md5(deployment_dir.encode()).hexdigest()
+    app_image_shared_tag = f"laconic-webapp/{app.id}:local"
     # b. check for deployment directory (create if necessary)
     if not os.path.exists(deployment_dir):
         if deployment_record:
@@ -106,11 +110,20 @@ def process_app_deployment_request(
     needs_k8s_deploy = False
     # 6. build container (if needed)
     if not deployment_record or deployment_record.attributes.application != app.id:
-        # TODO: pull from request
-        extra_build_args = []
-        build_container_image(app, deployment_container_tag, extra_build_args, log_file)
-        push_container_image(deployment_dir, log_file)
         needs_k8s_deploy = True
+        # check if the image already exists
+        shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
+        if shared_tag_exists and not force_rebuild:
+            # simply add our unique tag to the existing image and we are done
+            print(f"Using existing app image {app_image_shared_tag} for {deployment_container_tag}", file=log_file)
+            add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
+        else:
+            extra_build_args = []  # TODO: pull from request
+            build_container_image(app, deployment_container_tag, extra_build_args, log_file)
+            push_container_image(deployment_dir, log_file)
+            # The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
+            print(f"Updating app image tag {app_image_shared_tag} from build of {deployment_container_tag}", file=log_file)
+            add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
 
     # 7. update config (if needed)
     if not deployment_record or file_hash(deployment_config_file) != deployment_record.attributes.meta.config:
@@ -171,12 +184,13 @@ def dump_known_requests(filename, requests, status="SEEN"):
 @click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
 @click.option("--include-tags", help="Only include requests with matching tags (comma-separated).", default="")
 @click.option("--exclude-tags", help="Exclude requests with matching tags (comma-separated).", default="")
+@click.option("--force-rebuild", help="Rebuild even if the image already exists.", is_flag=True)
 @click.option("--log-dir", help="Output build/deployment logs to directory.", default=None)
 @click.pass_context
 def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,  # noqa: C901
             request_id, discover, state_file, only_update_state,
             dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run,
-            include_tags, exclude_tags, log_dir):
+            include_tags, exclude_tags, force_rebuild, log_dir):
     if request_id and discover:
         print("Cannot specify both --request-id and --discover", file=sys.stderr)
         sys.exit(2)
@@ -306,6 +320,7 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
                     os.path.abspath(deployment_parent_dir),
                     kube_config,
                     image_registry,
+                    force_rebuild,
                     run_log_file
                 )
                 status = "DEPLOYED"
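The flag travels from the CLI option down to process_app_deployment_request(). A hedged invocation sketch; the deploy-webapp-from-registry subcommand name and the remaining options are assumed from the existing deployer command (they are not shown in this diff excerpt), and all values are placeholders:

```bash
laconic-so deploy-webapp-from-registry \
  --laconic-config ~/.laconic/config.yml \
  --kube-config ~/.kube/config \
  --image-registry registry.example.com/org \
  --deployment-parent-dir /srv/deployments \
  --discover \
  --force-rebuild
```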
@@ -17,7 +17,7 @@ import click
 
 from stack_orchestrator.command_types import CommandOptions
 from stack_orchestrator.repos import setup_repositories
-from stack_orchestrator.build import build_containers
+from stack_orchestrator.build import build_containers, fetch_containers
 from stack_orchestrator.build import build_npms
 from stack_orchestrator.build import build_webapp
 from stack_orchestrator.deploy.webapp import (run_webapp,
@@ -52,6 +52,7 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
 
 cli.add_command(setup_repositories.command, "setup-repositories")
 cli.add_command(build_containers.command, "build-containers")
+cli.add_command(fetch_containers.command, "fetch-containers")
 cli.add_command(build_npms.command, "build-npms")
 cli.add_command(build_webapp.command, "build-webapp")
 cli.add_command(run_webapp.command, "run-webapp")