diff --git a/.gitea/workflows/test-webapp.yml b/.gitea/workflows/test-webapp.yml
deleted file mode 100644
index 9fbf84b2..00000000
--- a/.gitea/workflows/test-webapp.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: Webapp Test
-
-on:
- pull_request:
- branches: '*'
- push:
- branches:
- - main
- - ci-test
- paths-ignore:
- - '.gitea/workflows/triggers/*'
-
-# Needed until we can incorporate docker startup into the executor container
-env:
- DOCKER_HOST: unix:///var/run/dind.sock
-
-jobs:
- test:
- name: "Run webapp test suite"
- runs-on: ubuntu-latest
- steps:
- - name: "Clone project repository"
- uses: actions/checkout@v3
- # At present the stock setup-python action fails on Linux/aarch64
- # Conditional steps below workaroud this by using deadsnakes for that case only
- - name: "Install Python for ARM on Linux"
- if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
- uses: deadsnakes/action@v3.0.1
- with:
- python-version: '3.8'
- - name: "Install Python cases other than ARM on Linux"
- if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- - name: "Print Python version"
- run: python3 --version
- - name: "Install shiv"
- run: pip install shiv
- - name: "Generate build version file"
- run: ./scripts/create_build_tag_file.sh
- - name: "Build local shiv package"
- run: ./scripts/build_shiv_package.sh
- - name: Start dockerd # Also needed until we can incorporate into the executor
- run: |
- dockerd -H $DOCKER_HOST --userland-proxy=false &
- sleep 5
- - name: "Run webapp tests"
- run: ./tests/webapp-test/run-webapp-test.sh
diff --git a/.github/workflows/test-webapp.yml b/.github/workflows/test-webapp.yml
deleted file mode 100644
index 3b920828..00000000
--- a/.github/workflows/test-webapp.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: Webapp Test
-
-on:
- pull_request:
- branches: '*'
- push:
- branches: '*'
-
-jobs:
- test:
- name: "Run webapp test suite"
- runs-on: ubuntu-latest
- steps:
- - name: "Clone project repository"
- uses: actions/checkout@v3
- - name: "Install Python"
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- - name: "Print Python version"
- run: python3 --version
- - name: "Install shiv"
- run: pip install shiv
- - name: "Generate build version file"
- run: ./scripts/create_build_tag_file.sh
- - name: "Build local shiv package"
- run: ./scripts/build_shiv_package.sh
- - name: "Run webapp tests"
- run: ./tests/webapp-test/run-webapp-test.sh
diff --git a/stack_orchestrator/build/build_containers.py b/stack_orchestrator/build/build_containers.py
index 5b2748cc..c97a974f 100644
--- a/stack_orchestrator/build/build_containers.py
+++ b/stack_orchestrator/build/build_containers.py
@@ -33,73 +33,6 @@ from stack_orchestrator.base import get_npm_registry_url
# TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
-def make_container_build_env(dev_root_path: str,
- container_build_dir: str,
- debug: bool,
- force_rebuild: bool,
- extra_build_args: str):
- container_build_env = {
- "CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
- "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
- "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
- "CERC_REPO_BASE_DIR": dev_root_path,
- "CERC_CONTAINER_BASE_DIR": container_build_dir,
- "CERC_HOST_UID": f"{os.getuid()}",
- "CERC_HOST_GID": f"{os.getgid()}",
- "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
- }
- container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
- container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
- container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
- docker_host_env = os.getenv("DOCKER_HOST")
- if docker_host_env:
- container_build_env.update({"DOCKER_HOST": docker_host_env})
-
- return container_build_env
-
-
-def process_container(container,
- container_build_dir: str,
- container_build_env: dict,
- dev_root_path: str,
- quiet: bool,
- verbose: bool,
- dry_run: bool,
- continue_on_error: bool,
- ):
- if not quiet:
- print(f"Building: {container}")
- build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
- build_script_filename = os.path.join(build_dir, "build.sh")
- if verbose:
- print(f"Build script filename: {build_script_filename}")
- if os.path.exists(build_script_filename):
- build_command = build_script_filename
- else:
- if verbose:
- print(f"No script file found: {build_script_filename}, using default build script")
- repo_dir = container.split('/')[1]
- # TODO: make this less of a hack -- should be specified in some metadata somewhere
- # Check if we have a repo for this container. If not, set the context dir to the container-build subdir
- repo_full_path = os.path.join(dev_root_path, repo_dir)
- repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
- build_command = os.path.join(container_build_dir,
- "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
- if not dry_run:
- if verbose:
- print(f"Executing: {build_command} with environment: {container_build_env}")
- build_result = subprocess.run(build_command, shell=True, env=container_build_env)
- if verbose:
- print(f"Return code is: {build_result.returncode}")
- if build_result.returncode != 0:
- print(f"Error running build for {container}")
- if not continue_on_error:
- print("FATAL Error: container build failed and --continue-on-error not set, exiting")
- sys.exit(1)
- else:
- print("****** Container Build Error, continuing because --continue-on-error is set")
- else:
- print("Skipped")
@click.command()
@click.option('--include', help="only build these containers")
@@ -150,16 +83,61 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
if stack:
print(f"Stack: {stack}")
- container_build_env = make_container_build_env(dev_root_path,
- container_build_dir,
- debug,
- force_rebuild,
- extra_build_args)
+ # TODO: make this configurable
+ container_build_env = {
+ "CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
+ "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
+ "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
+ "CERC_REPO_BASE_DIR": dev_root_path,
+ "CERC_CONTAINER_BASE_DIR": container_build_dir,
+ "CERC_HOST_UID": f"{os.getuid()}",
+ "CERC_HOST_GID": f"{os.getgid()}",
+ "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
+ }
+ container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
+ container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
+ container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
+ docker_host_env = os.getenv("DOCKER_HOST")
+ if docker_host_env:
+ container_build_env.update({"DOCKER_HOST": docker_host_env})
+
+ def process_container(container):
+ if not quiet:
+ print(f"Building: {container}")
+ build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
+ build_script_filename = os.path.join(build_dir, "build.sh")
+ if verbose:
+ print(f"Build script filename: {build_script_filename}")
+ if os.path.exists(build_script_filename):
+ build_command = build_script_filename
+ else:
+ if verbose:
+ print(f"No script file found: {build_script_filename}, using default build script")
+ repo_dir = container.split('/')[1]
+ # TODO: make this less of a hack -- should be specified in some metadata somewhere
+ # Check if we have a repo for this container. If not, set the context dir to the container-build subdir
+ repo_full_path = os.path.join(dev_root_path, repo_dir)
+ repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
+ build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
+ if not dry_run:
+ if verbose:
+ print(f"Executing: {build_command} with environment: {container_build_env}")
+ build_result = subprocess.run(build_command, shell=True, env=container_build_env)
+ if verbose:
+ print(f"Return code is: {build_result.returncode}")
+ if build_result.returncode != 0:
+ print(f"Error running build for {container}")
+ if not continue_on_error:
+ print("FATAL Error: container build failed and --continue-on-error not set, exiting")
+ sys.exit(1)
+ else:
+ print("****** Container Build Error, continuing because --continue-on-error is set")
+ else:
+ print("Skipped")
for container in containers_in_scope:
if include_exclude_check(container, include, exclude):
- process_container(container, container_build_dir, container_build_env,
- dev_root_path, quiet, verbose, dry_run, continue_on_error)
+ process_container(container)
else:
if verbose:
print(f"Excluding: {container}")
diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py
deleted file mode 100644
index f4668c5d..00000000
--- a/stack_orchestrator/build/build_webapp.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright © 2022, 2023 Vulcanize
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-# Builds webapp containers
-
-# env vars:
-# CERC_REPO_BASE_DIR defaults to ~/cerc
-
-# TODO: display the available list of containers; allow re-build of either all or specific containers
-
-import os
-from decouple import config
-import click
-from pathlib import Path
-from stack_orchestrator.build import build_containers
-
-
-@click.command()
-@click.option('--base-container', default="cerc/nextjs-base")
-@click.option('--source-repo', help="directory containing the webapp to build", required=True)
-@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
-@click.option("--extra-build-args", help="Supply extra arguments to build")
-@click.pass_context
-def command(ctx, base_container, source_repo, force_rebuild, extra_build_args):
- '''build the specified webapp container'''
-
- quiet = ctx.obj.quiet
- verbose = ctx.obj.verbose
- dry_run = ctx.obj.dry_run
- debug = ctx.obj.debug
- local_stack = ctx.obj.local_stack
- stack = ctx.obj.stack
- continue_on_error = ctx.obj.continue_on_error
-
- # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
- container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
-
- if local_stack:
- dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
- print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
- else:
- dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
-
- if not quiet:
- print(f'Dev Root is: {dev_root_path}')
-
- # First build the base container.
- container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
- force_rebuild, extra_build_args)
-
- build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet,
- verbose, dry_run, continue_on_error)
-
-
- # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
- container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
- container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
- base_container.replace("/", "-"),
- "Dockerfile.webapp")
- webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
- container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local"
-
- build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet,
- verbose, dry_run, continue_on_error)
diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile
deleted file mode 100644
index 147cec29..00000000
--- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile
+++ /dev/null
@@ -1,55 +0,0 @@
-# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile
-# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster
-ARG VARIANT=18-bullseye
-FROM node:${VARIANT}
-
-ARG USERNAME=node
-ARG NPM_GLOBAL=/usr/local/share/npm-global
-
-# Add NPM global to PATH.
-ENV PATH=${NPM_GLOBAL}/bin:${PATH}
-# Prevents npm from printing version warnings
-ENV NPM_CONFIG_UPDATE_NOTIFIER=false
-
-RUN \
- # Configure global npm install location, use group to adapt to UID/GID changes
- if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \
- && usermod -a -G npm ${USERNAME} \
- && umask 0002 \
- && mkdir -p ${NPM_GLOBAL} \
- && touch /usr/local/etc/npmrc \
- && chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \
- && chmod g+s ${NPM_GLOBAL} \
- && npm config -g set prefix ${NPM_GLOBAL} \
- && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \
- # Install eslint
- && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \
- && npm cache clean --force > /dev/null 2>&1
-
-# [Optional] Uncomment this section to install additional OS packages.
-RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
- && apt-get -y install --no-install-recommends jq gettext-base
-
-# [Optional] Uncomment if you want to install an additional version of node using nvm
-# ARG EXTRA_NODE_VERSION=10
-# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}"
-
-# We do this to get a yq binary from the published container, for the correct architecture we're building here
-# COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq
-
-COPY /scripts /scripts
-
-# [Optional] Uncomment if you want to install more global node modules
-# RUN su node -c "npm install -g "
-
-# RUN mkdir -p /config
-# COPY ./config.yml /config
-
-# Install simple web server for now (use nginx perhaps later)
-# RUN yarn global add http-server
-
-# Expose port for http
-EXPOSE 3000
-
-# Default command sleeps forever so docker doesn't kill it
-CMD ["/scripts/start-serving-app.sh"]
diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp
deleted file mode 100644
index f4b5d4d8..00000000
--- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM cerc/nextjs-base:local
-WORKDIR /app
-COPY . .
-RUN rm -rf node_modules build .next*
-RUN /scripts/build-app.sh /app
diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh
deleted file mode 100755
index 3cf5f7f4..00000000
--- a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-# Build cerc/laconic-registry-cli
-
-source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
-
-# See: https://stackoverflow.com/a/246128/1701505
-SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-
-CERC_CONTAINER_BUILD_WORK_DIR=${CERC_CONTAINER_BUILD_WORK_DIR:-$SCRIPT_DIR}
-CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/Dockerfile}
-CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/nextjs-base:local}
-
-docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR
diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh
deleted file mode 100755
index ba1cd17d..00000000
--- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
- set -x
-fi
-
-WORK_DIR="${1:-./}"
-SRC_DIR="${2:-.next}"
-TRG_DIR="${3:-.next-r}"
-
-cd "${WORK_DIR}" || exit 1
-
-rm -rf "$TRG_DIR"
-mkdir -p "$TRG_DIR"
-cp -rp "$SRC_DIR" "$TRG_DIR/"
-
-if [ -f ".env" ]; then
- TMP_ENV=`mktemp`
- declare -px > $TMP_ENV
- set -a
- source .env
- source $TMP_ENV
- set +a
- rm -f $TMP_ENV
-fi
-
-for f in $(find "$TRG_DIR" -regex ".*.[tj]sx?$" -type f | grep -v 'node_modules'); do
- for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o '^"CERC_RUNTIME_ENV[^\"]+"$'); do
- orig_name=$(echo -n "${e}" | sed 's/"//g')
- cur_name=$(echo -n "${orig_name}" | sed 's/CERC_RUNTIME_ENV_//g')
- cur_val=$(echo -n "\$${cur_name}" | envsubst)
- esc_val=$(sed 's/[&/\]/\\&/g' <<< "$cur_val")
- echo "$cur_name=$cur_val"
- sed -i "s/$orig_name/$esc_val/g" $f
- done
-done
diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh
deleted file mode 100755
index 9277abc6..00000000
--- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
- set -x
-fi
-
-SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-
-WORK_DIR="${1:-/app}"
-
-cd "${WORK_DIR}" || exit 1
-
-cp next.config.js next.config.dist
-
-npm i -g js-beautify
-js-beautify next.config.dist > next.config.js
-
-npm install
-
-CONFIG_LINES=$(wc -l next.config.js | awk '{ print $1 }')
-MOD_EXPORTS_LINE=$(grep -n 'module.exports' next.config.js | cut -d':' -f1)
-
-head -$(( ${MOD_EXPORTS_LINE} - 1 )) next.config.js > next.config.js.1
-
-cat > next.config.js.2 < {
- a[v] = \`"CERC_RUNTIME_ENV_\${v.split(/\./).pop()}"\`;
- return a;
- }, {});
-} catch {
- // If .env-list.json cannot be loaded, we are probably running in dev mode, so use process.env instead.
- envMap = Object.keys(process.env).reduce((a, v) => {
- if (v.startsWith('CERC_')) {
- a[\`process.env.\${v}\`] = JSON.stringify(process.env[v]);
- }
- return a;
- }, {});
-}
-EOF
-
-grep 'module.exports' next.config.js > next.config.js.3
-
-cat > next.config.js.4 < {
- config.plugins.push(new webpack.DefinePlugin(envMap));
- return config;
- },
-EOF
-
-tail -$(( ${CONFIG_LINES} - ${MOD_EXPORTS_LINE} + 1 )) next.config.js | grep -v 'process\.env\.' > next.config.js.5
-
-cat next.config.js.* | js-beautify > next.config.js
-rm next.config.js.*
-
-"${SCRIPT_DIR}/find-env.sh" "$(pwd)" > .env-list.json
-
-npm run build
-rm .env-list.json
\ No newline at end of file
diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh
deleted file mode 100755
index 0c0e87c9..00000000
--- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
- set -x
-fi
-
-WORK_DIR="${1:-./}"
-TMPF=$(mktemp)
-
-cd "$WORK_DIR" || exit 1
-
-for d in $(find . -maxdepth 1 -type d | grep -v '\./\.' | grep '/' | cut -d'/' -f2); do
- egrep "/$d[/$]?" .gitignore >/dev/null 2>/dev/null
- if [ $? -eq 0 ]; then
- continue
- fi
-
- for f in $(find "$d" -regex ".*.[tj]sx?$" -type f); do
- cat "$f" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o 'process.env.[A-Za-z0-9_]+' >> $TMPF
- done
-done
-
-cat $TMPF | sort -u | jq --raw-input . | jq --slurp .
-rm -f $TMPF
diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh
deleted file mode 100755
index abe72935..00000000
--- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
- set -x
-fi
-
-SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-
-CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
-cd "$CERC_WEBAPP_FILES_DIR"
-
-rm -rf .next-r
-"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
-npm start .next-r -p ${CERC_LISTEN_PORT:-3000}
diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py
index 1e5f5f81..79ab1482 100644
--- a/stack_orchestrator/deploy/compose/deploy_docker.py
+++ b/stack_orchestrator/deploy/compose/deploy_docker.py
@@ -21,7 +21,7 @@ from stack_orchestrator.deploy.deployer import Deployer, DeployerException, Depl
class DockerDeployer(Deployer):
name: str = "compose"
- def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
+ def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
compose_env_file=compose_env_file)
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index 57fedebf..1c467067 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -28,6 +28,7 @@ from stack_orchestrator.util import include_exclude_check, get_parsed_stack_conf
from stack_orchestrator.deploy.deployer import Deployer, DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer
from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
+from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.deployment_create import create as deployment_create
from stack_orchestrator.deploy.deployment_create import init as deployment_init
from stack_orchestrator.deploy.deployment_create import setup as deployment_setup
@@ -56,14 +57,17 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
if deploy_to is None:
deploy_to = "compose"
- ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file, deploy_to)
+ ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to)
# Subcommand is executed now, by the magic of click
-def create_deploy_context(global_context, stack, include, exclude, cluster, env_file, deployer):
+def create_deploy_context(
+ global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deployer):
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
+ deployment_dir = deployment_context.deployment_dir if deployment_context else None
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
- deployer = getDeployer(deployer, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
+ deployer = getDeployer(deployer, deployment_dir, compose_files=cluster_context.compose_files,
+ compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
return DeployCommandContext(stack, cluster_context, deployer)
diff --git a/stack_orchestrator/deploy/deploy_types.py b/stack_orchestrator/deploy/deploy_types.py
index b0c59380..fd14e90e 100644
--- a/stack_orchestrator/deploy/deploy_types.py
+++ b/stack_orchestrator/deploy/deploy_types.py
@@ -15,7 +15,6 @@
from typing import List
from dataclasses import dataclass
-from pathlib import Path
from stack_orchestrator.command_types import CommandOptions
from stack_orchestrator.deploy.deployer import Deployer
@@ -38,12 +37,6 @@ class DeployCommandContext:
deployer: Deployer
-@dataclass
-class DeploymentContext:
- deployment_dir: Path
- command_context: DeployCommandContext
-
-
@dataclass
class VolumeMapping:
host_path: str
diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py
index 262fa2dd..5d515418 100644
--- a/stack_orchestrator/deploy/deployer_factory.py
+++ b/stack_orchestrator/deploy/deployer_factory.py
@@ -26,10 +26,10 @@ def getDeployerConfigGenerator(type: str):
print(f"ERROR: deploy-to {type} is not valid")
-def getDeployer(type: str, compose_files, compose_project_name, compose_env_file):
+def getDeployer(type: str, deployment_dir, compose_files, compose_project_name, compose_env_file):
if type == "compose" or type is None:
- return DockerDeployer(compose_files, compose_project_name, compose_env_file)
+ return DockerDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file)
elif type == "k8s":
- return K8sDeployer(compose_files, compose_project_name, compose_env_file)
+ return K8sDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file)
else:
print(f"ERROR: deploy-to {type} is not valid")
diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py
index c6656b01..e22d7dcc 100644
--- a/stack_orchestrator/deploy/deployment.py
+++ b/stack_orchestrator/deploy/deployment.py
@@ -18,34 +18,7 @@ from pathlib import Path
import sys
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context
-from stack_orchestrator.deploy.stack import Stack
-from stack_orchestrator.deploy.spec import Spec
-
-
-class DeploymentContext:
- dir: Path
- spec: Spec
- stack: Stack
-
- def get_stack_file(self):
- return self.dir.joinpath("stack.yml")
-
- def get_spec_file(self):
- return self.dir.joinpath("spec.yml")
-
- def get_env_file(self):
- return self.dir.joinpath("config.env")
-
- # TODO: implement me
- def get_cluster_name(self):
- return None
-
- def init(self, dir):
- self.dir = dir
- self.stack = Stack()
- self.stack.init_from_file(self.get_stack_file())
- self.spec = Spec()
- self.spec.init_from_file(self.get_spec_file())
+from stack_orchestrator.deploy.deployment_context import DeploymentContext
@click.group()
@@ -77,7 +50,7 @@ def make_deploy_context(ctx):
stack_file_path = context.get_stack_file()
env_file = context.get_env_file()
cluster_name = context.get_cluster_name()
- return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file,
+ return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file,
context.spec.obj["deploy-to"])
diff --git a/stack_orchestrator/deploy/deployment_context.py b/stack_orchestrator/deploy/deployment_context.py
new file mode 100644
index 00000000..cd731394
--- /dev/null
+++ b/stack_orchestrator/deploy/deployment_context.py
@@ -0,0 +1,46 @@
+
+# Copyright © 2022, 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from pathlib import Path
+
+from stack_orchestrator.deploy.stack import Stack
+from stack_orchestrator.deploy.spec import Spec
+
+
+class DeploymentContext:
+ deployment_dir: Path
+ spec: Spec
+ stack: Stack
+
+ def get_stack_file(self):
+ return self.deployment_dir.joinpath("stack.yml")
+
+ def get_spec_file(self):
+ return self.deployment_dir.joinpath("spec.yml")
+
+ def get_env_file(self):
+ return self.deployment_dir.joinpath("config.env")
+
+ # TODO: implement me
+ def get_cluster_name(self):
+ return None
+
+ def init(self, dir):
+ self.deployment_dir = dir
+ self.spec = Spec()
+ self.spec.init_from_file(self.get_spec_file())
+ self.stack = Stack(self.spec.obj["stack"])
+ self.stack.init_from_file(self.get_stack_file())
diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py
index 8a2237a8..c00c0dc6 100644
--- a/stack_orchestrator/deploy/deployment_create.py
+++ b/stack_orchestrator/deploy/deployment_create.py
@@ -24,8 +24,9 @@ import sys
from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
get_pod_script_paths, get_plugin_code_paths)
-from stack_orchestrator.deploy.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand
+from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
+from stack_orchestrator.deploy.deployment_context import DeploymentContext
def _make_default_deployment_dir():
@@ -108,8 +109,8 @@ def _fixup_pod_file(pod, spec, compose_dir):
pod["services"][container_name]["ports"] = container_ports
-def _commands_plugin_paths(ctx: DeployCommandContext):
- plugin_paths = get_plugin_code_paths(ctx.stack)
+def _commands_plugin_paths(stack_name: str):
+ plugin_paths = get_plugin_code_paths(stack_name)
ret = [p.joinpath("deploy", "commands.py") for p in plugin_paths]
return ret
@@ -123,7 +124,7 @@ def call_stack_deploy_init(deploy_command_context):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
- python_file_paths = _commands_plugin_paths(deploy_command_context)
+ python_file_paths = _commands_plugin_paths(deploy_command_context.stack)
ret = None
init_done = False
@@ -147,7 +148,7 @@ def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetu
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
- python_file_paths = _commands_plugin_paths(deploy_command_context)
+ python_file_paths = _commands_plugin_paths(deploy_command_context.stack)
for python_file_path in python_file_paths:
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
@@ -162,7 +163,7 @@ def call_stack_deploy_create(deployment_context, extra_args):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
- python_file_paths = _commands_plugin_paths(deployment_context.command_context)
+ python_file_paths = _commands_plugin_paths(deployment_context.stack.name)
for python_file_path in python_file_paths:
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
@@ -311,7 +312,7 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path):
def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
# This function fails with a useful error message if the file doens't exist
parsed_spec = get_parsed_deployment_spec(spec_file)
- stack_name = parsed_spec['stack']
+ stack_name = parsed_spec["stack"]
stack_file = get_stack_file_path(stack_name)
parsed_stack = get_parsed_stack_config(stack_name)
if global_options(ctx).debug:
@@ -367,7 +368,8 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
# stack member here.
deployment_command_context = ctx.obj
deployment_command_context.stack = stack_name
- deployment_context = DeploymentContext(Path(deployment_dir), deployment_command_context)
+ deployment_context = DeploymentContext()
+ deployment_context.init(Path(deployment_dir))
# Call the deployer to generate any deployer-specific files (e.g. for kind)
deployer_config_generator = getDeployerConfigGenerator(parsed_spec["deploy-to"])
# TODO: make deployment_dir a Path above
diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py
index 5d785a01..deb0859d 100644
--- a/stack_orchestrator/deploy/k8s/cluster_info.py
+++ b/stack_orchestrator/deploy/k8s/cluster_info.py
@@ -18,7 +18,7 @@ from typing import Any, List, Set
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
-from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names
+from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names, get_node_pv_mount_path
class ClusterInfo:
@@ -50,11 +50,12 @@ class ClusterInfo:
print(f"Volumes: {volumes}")
for volume_name in volumes:
spec = client.V1PersistentVolumeClaimSpec(
- storage_class_name="standard",
access_modes=["ReadWriteOnce"],
+ storage_class_name="manual",
resources=client.V1ResourceRequirements(
requests={"storage": "2Gi"}
- )
+ ),
+ volume_name=volume_name
)
pvc = client.V1PersistentVolumeClaim(
metadata=client.V1ObjectMeta(name=volume_name,
@@ -64,6 +65,24 @@ class ClusterInfo:
result.append(pvc)
return result
+ def get_pvs(self):
+ result = []
+ volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
+ for volume_name in volumes:
+ spec = client.V1PersistentVolumeSpec(
+ storage_class_name="manual",
+ access_modes=["ReadWriteOnce"],
+ capacity={"storage": "2Gi"},
+ host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name))
+ )
+ pv = client.V1PersistentVolume(
+ metadata=client.V1ObjectMeta(name=volume_name,
+ labels={"volume-label": volume_name}),
+ spec=spec,
+ )
+ result.append(pv)
+ return result
+
# to suit the deployment, and also annotate the container specs to point at said volumes
def get_deployment(self):
containers = []
diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py
index a5167185..5181e163 100644
--- a/stack_orchestrator/deploy/k8s/deploy_k8s.py
+++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py
@@ -30,12 +30,15 @@ class K8sDeployer(Deployer):
k8s_namespace: str = "default"
kind_cluster_name: str
cluster_info : ClusterInfo
+ deployment_dir: Path
- def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
+ def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None:
if (opts.o.debug):
+ print(f"Deployment dir: {deployment_dir}")
print(f"Compose files: {compose_files}")
print(f"Project name: {compose_project_name}")
print(f"Env file: {compose_env_file}")
+ self.deployment_dir = deployment_dir
self.kind_cluster_name = compose_project_name
self.cluster_info = ClusterInfo()
self.cluster_info.int_from_pod_files(compose_files)
@@ -47,16 +50,26 @@ class K8sDeployer(Deployer):
def up(self, detach, services):
# Create the kind cluster
- # HACK: pass in the config file path here
- create_cluster(self.kind_cluster_name, "./test-deployment-dir/kind-config.yml")
+ create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath("kind-config.yml"))
self.connect_api()
# Ensure the referenced containers are copied into kind
load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
+
+ # Create the host-path-mounted PVs for this deployment
+ pvs = self.cluster_info.get_pvs()
+ for pv in pvs:
+ if opts.o.debug:
+ print(f"Sending this pv: {pv}")
+ pv_resp = self.core_api.create_persistent_volume(body=pv)
+ if opts.o.debug:
+ print("PVs created:")
+ print(f"{pv_resp}")
+
# Figure out the PVCs for this deployment
pvcs = self.cluster_info.get_pvcs()
for pvc in pvcs:
if opts.o.debug:
- print(f"Sending this: {pvc}")
+ print(f"Sending this pvc: {pvc}")
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
if opts.o.debug:
print("PVCs created:")
@@ -65,7 +78,7 @@ class K8sDeployer(Deployer):
deployment = self.cluster_info.get_deployment()
# Create the k8s objects
if opts.o.debug:
- print(f"Sending this: {deployment}")
+ print(f"Sending this deployment: {deployment}")
deployment_resp = self.apps_api.create_namespaced_deployment(
body=deployment, namespace=self.k8s_namespace
)
@@ -122,6 +135,8 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator):
# Check the file isn't already there
# Get the config file contents
content = generate_kind_config(deployment_dir)
+ if opts.o.debug:
+ print(f"kind config is: {content}")
config_file = deployment_dir.joinpath(self.config_file_name)
# Write the file
with open(config_file, "w") as output_file:
diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py
index 8536a521..ad48957b 100644
--- a/stack_orchestrator/deploy/k8s/helpers.py
+++ b/stack_orchestrator/deploy/k8s/helpers.py
@@ -14,6 +14,7 @@
# along with this program. If not, see .
from kubernetes import client
+import os
from pathlib import Path
import subprocess
from typing import Any, Set
@@ -73,6 +74,10 @@ def named_volumes_from_pod_files(parsed_pod_files):
return named_volumes
+def get_node_pv_mount_path(volume_name: str):
+ return f"/mnt/{volume_name}"
+
+
def volume_mounts_for_service(parsed_pod_files, service):
result = []
# Find the service
@@ -119,6 +124,14 @@ def _get_host_paths_for_volumes(parsed_pod_files):
return result
+def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
+ if os.path.isabs(data_mount_path):
+ return data_mount_path
+ else:
+ # Python Path voodoo that looks pretty odd:
+ return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve()
+
+
def parsed_pod_files_map_from_file_names(pod_files):
parsed_pod_yaml_map : Any = {}
for pod_file in pod_files:
@@ -130,9 +143,12 @@ def parsed_pod_files_map_from_file_names(pod_files):
return parsed_pod_yaml_map
-def _generate_kind_mounts(parsed_pod_files):
+def _generate_kind_mounts(parsed_pod_files, deployment_dir):
volume_definitions = []
volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files)
+ # Note these paths are relative to the location of the pod files (at present)
+ # So we need to fix up to make them correct and absolute because kind assumes
+ # relative to the cwd.
for pod in parsed_pod_files:
parsed_pod_file = parsed_pod_files[pod]
if "services" in parsed_pod_file:
@@ -145,7 +161,8 @@ def _generate_kind_mounts(parsed_pod_files):
# Looks like: test-data:/data
(volume_name, mount_path) = mount_string.split(":")
volume_definitions.append(
- f" - hostPath: {volume_host_path_map[volume_name]}\n containerPath: /var/local-path-provisioner"
+ f" - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
+ f" containerPath: {get_node_pv_mount_path(volume_name)}"
)
return (
"" if len(volume_definitions) == 0 else (
@@ -201,7 +218,7 @@ def generate_kind_config(deployment_dir: Path):
pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
- mounts_yml = _generate_kind_mounts(parsed_pod_files_map)
+ mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir)
return (
"kind: Cluster\n"
"apiVersion: kind.x-k8s.io/v1alpha4\n"
diff --git a/stack_orchestrator/deploy/stack.py b/stack_orchestrator/deploy/stack.py
index e0d33851..1a493534 100644
--- a/stack_orchestrator/deploy/stack.py
+++ b/stack_orchestrator/deploy/stack.py
@@ -20,10 +20,11 @@ from stack_orchestrator.util import get_yaml
class Stack:
+ name: str
obj: typing.Any
- def __init__(self) -> None:
- pass
+ def __init__(self, name: str) -> None:
+ self.name = name
def init_from_file(self, file_path: Path):
with file_path:
diff --git a/stack_orchestrator/main.py b/stack_orchestrator/main.py
index 0b0585e0..ca1914e6 100644
--- a/stack_orchestrator/main.py
+++ b/stack_orchestrator/main.py
@@ -19,7 +19,6 @@ from stack_orchestrator.command_types import CommandOptions
from stack_orchestrator.repos import setup_repositories
from stack_orchestrator.build import build_containers
from stack_orchestrator.build import build_npms
-from stack_orchestrator.build import build_webapp
from stack_orchestrator.deploy import deploy
from stack_orchestrator import version
from stack_orchestrator.deploy import deployment
@@ -49,7 +48,6 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
cli.add_command(setup_repositories.command, "setup-repositories")
cli.add_command(build_containers.command, "build-containers")
cli.add_command(build_npms.command, "build-npms")
-cli.add_command(build_webapp.command, "build-webapp")
cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system
cli.add_command(deploy.command, "deploy-system")
cli.add_command(deployment.command, "deployment")
diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh
deleted file mode 100755
index 71b4da16..00000000
--- a/tests/webapp-test/run-webapp-test.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-set -e
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
- set -x
-fi
-# Dump environment variables for debugging
-echo "Environment variables:"
-env
-# Test basic stack-orchestrator webapp
-echo "Running stack-orchestrator webapp test"
-# Bit of a hack, test the most recent package
-TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
-# Set a non-default repo dir
-export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
-echo "Testing this package: $TEST_TARGET_SO"
-echo "Test version command"
-reported_version_string=$( $TEST_TARGET_SO version )
-echo "Version reported is: ${reported_version_string}"
-echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
-rm -rf $CERC_REPO_BASE_DIR
-mkdir -p $CERC_REPO_BASE_DIR
-git clone https://git.vdb.to/cerc-io/test-progressive-web-app.git $CERC_REPO_BASE_DIR/test-progressive-web-app
-
-# Test webapp command execution
-$TEST_TARGET_SO build-webapp --source-repo $CERC_REPO_BASE_DIR/test-progressive-web-app
-
-UUID=`uuidgen`
-
-set +e
-
-CONTAINER_ID=$(docker run -p 3000:3000 -d cerc/test-progressive-web-app:local)
-sleep 3
-wget -O test.before -m http://localhost:3000
-
-docker remove -f $CONTAINER_ID
-
-CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$UUID -d cerc/test-progressive-web-app:local)
-sleep 3
-wget -O test.after -m http://localhost:3000
-
-docker remove -f $CONTAINER_ID
-
-echo "###########################################################################"
-echo ""
-
-grep "$UUID" test.before > /dev/null
-if [ $? -ne 1 ]; then
- echo "BEFORE: FAILED"
- exit 1
-else
- echo "BEFORE: PASSED"
-fi
-
-grep "$UUID" test.after > /dev/null
-if [ $? -ne 0 ]; then
- echo "AFTER: FAILED"
- exit 1
-else
- echo "AFTER: PASSED"
-fi
-
-exit 0