diff --git a/.gitea/workflows/lint.yml b/.gitea/workflows/lint.yml new file mode 100644 index 00000000..f9be1e6b --- /dev/null +++ b/.gitea/workflows/lint.yml @@ -0,0 +1,21 @@ +name: Lint Checks + +on: + pull_request: + branches: '*' + push: + branches: '*' + +jobs: + test: + name: "Run linter" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + - name: "Install Python" + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name : "Run flake8" + uses: py-actions/flake8@v2 diff --git a/.gitea/workflows/test-database.yml b/.gitea/workflows/test-database.yml new file mode 100644 index 00000000..b925271b --- /dev/null +++ b/.gitea/workflows/test-database.yml @@ -0,0 +1,52 @@ +name: Database Test + +on: + push: + branches: '*' + paths: + - '!**' + - '.gitea/workflows/triggers/test-database' + - '.gitea/workflows/test-database.yml' + - 'tests/database/run-test.sh' + schedule: # Note: coordinate with other tests to not overload runners at the same time of day + - cron: '5 18 * * *' + +jobs: + test: + name: "Run database hosting test on kind/k8s" + runs-on: ubuntu-22.04 + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + # At present the stock setup-python action fails on Linux/aarch64 + # Conditional steps below workaroud this by using deadsnakes for that case only + - name: "Install Python for ARM on Linux" + if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }} + uses: deadsnakes/action@v3.0.1 + with: + python-version: '3.8' + - name: "Install Python cases other than ARM on Linux" + if: ${{ ! 
(runner.arch == 'arm64' && runner.os == 'Linux') }} + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: "Check cgroups version" + run: mount | grep cgroup + - name: "Install kind" + run: ./tests/scripts/install-kind.sh + - name: "Install Kubectl" + run: ./tests/scripts/install-kubectl.sh + - name: "Run database deployment test" + run: | + source /opt/bash-utils/cgroup-helper.sh + join_cgroup + ./tests/database/run-test.sh + diff --git a/.gitea/workflows/test-k8s-deploy.yml b/.gitea/workflows/test-k8s-deploy.yml index 92c76a7d..7df39132 100644 --- a/.gitea/workflows/test-k8s-deploy.yml +++ b/.gitea/workflows/test-k8s-deploy.yml @@ -1,6 +1,8 @@ name: K8s Deploy Test on: + pull_request: + branches: '*' push: branches: '*' paths: @@ -8,6 +10,8 @@ on: - '.gitea/workflows/triggers/test-k8s-deploy' - '.gitea/workflows/test-k8s-deploy.yml' - 'tests/k8s-deploy/run-deploy-test.sh' + schedule: # Note: coordinate with other tests to not overload runners at the same time of day + - cron: '3 15 * * *' jobs: test: diff --git a/.gitea/workflows/triggers/test-database b/.gitea/workflows/triggers/test-database new file mode 100644 index 00000000..2d1d61d2 --- /dev/null +++ b/.gitea/workflows/triggers/test-database @@ -0,0 +1,2 @@ +Change this file to trigger running the test-database CI job +Trigger test run \ No newline at end of file diff --git a/.gitea/workflows/triggers/test-k8s-deploy b/.gitea/workflows/triggers/test-k8s-deploy index 519ff4ac..f3cf0624 100644 --- a/.gitea/workflows/triggers/test-k8s-deploy +++ b/.gitea/workflows/triggers/test-k8s-deploy @@ -1 +1,2 @@ Change this file to trigger running the test-k8s-deploy CI job +Trigger test on PR branch diff --git a/README.md b/README.md index 
aa979e3a..ef504295 100644 --- a/README.md +++ b/README.md @@ -29,10 +29,10 @@ chmod +x ~/.docker/cli-plugins/docker-compose Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory. -Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable: +Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable: ```bash -curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so +curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so ``` Give it execute permissions: @@ -52,7 +52,7 @@ Version: 1.1.0-7a607c2-202304260513 Save the distribution url to `~/.laconic-so/config.yml`: ```bash mkdir ~/.laconic-so -echo "distribution-url: https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so" > ~/.laconic-so/config.yml +echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml ``` ### Update diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 39b294fb..375a2d05 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -26,7 +26,7 @@ In addition to the pre-requisites listed in the [README](/README.md), the follow 1. 
Clone this repository: ``` - $ git clone https://github.com/cerc-io/stack-orchestrator.git + $ git clone https://git.vdb.to/cerc-io/stack-orchestrator.git ``` 2. Enter the project directory: diff --git a/docs/adding-a-new-stack.md b/docs/adding-a-new-stack.md index 2b2d1a65..1e42420e 100644 --- a/docs/adding-a-new-stack.md +++ b/docs/adding-a-new-stack.md @@ -1,10 +1,10 @@ # Adding a new stack -See [this PR](https://github.com/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://github.com/cerc-io/stack-orchestrator/pull/435) is another good example. +See [this PR](https://git.vdb.to/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://git.vdb.to/cerc-io/stack-orchestrator/pull/435) is another good example. For external developers, we recommend forking this repo and adding your stack directly to your fork. This initially requires running in "developer mode" as described [here](/docs/CONTRIBUTING.md). Check out the [Namada stack](https://github.com/vknowable/stack-orchestrator/blob/main/app/data/stacks/public-namada/digitalocean_quickstart.md) from Knowable to see how that is done. -Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://github.com/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack. +Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack. 
## Example diff --git a/docs/spec.md b/docs/spec.md index 1dc9ac62..aa09274c 100644 --- a/docs/spec.md +++ b/docs/spec.md @@ -1,6 +1,6 @@ # Specification -Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://github.com/cerc-io/stack-orchestrator/issues/315). +Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315). ## Implementation diff --git a/requirements.txt b/requirements.txt index bbf97b4a..f6e3d07c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,3 +10,4 @@ pydantic==1.10.9 tomli==2.0.1 validators==0.22.0 kubernetes>=28.1.0 +humanfriendly>=10.0 diff --git a/scripts/cloud-init-dev-mode-install.yaml b/scripts/cloud-init-dev-mode-install.yaml index 965afe3a..7f9d9bd8 100644 --- a/scripts/cloud-init-dev-mode-install.yaml +++ b/scripts/cloud-init-dev-mode-install.yaml @@ -41,4 +41,4 @@ runcmd: - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin - systemctl enable docker - systemctl start docker - - git clone https://github.com/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator + - git clone https://git.vdb.to/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator diff --git a/scripts/cloud-init-user-mode-install.yaml b/scripts/cloud-init-user-mode-install.yaml index bd02416c..f98f21bc 100644 --- a/scripts/cloud-init-user-mode-install.yaml +++ b/scripts/cloud-init-user-mode-install.yaml @@ -31,5 +31,5 @@ runcmd: - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin - systemctl enable docker - systemctl start docker - - curl -L -o /usr/local/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so + - curl -L -o /usr/local/bin/laconic-so 
https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so - chmod +x /usr/local/bin/laconic-so diff --git a/scripts/quick-deploy-test.sh b/scripts/quick-deploy-test.sh new file mode 100755 index 00000000..597eb6ac --- /dev/null +++ b/scripts/quick-deploy-test.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Beginnings of a script to quickly spin up and test a deployment +if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi +if [[ -n "$1" ]]; then + stack_name=$1 +else + stack_name="test" +fi +spec_file_name="${stack_name}-spec.yml" +deployment_dir_name="${stack_name}-deployment" +rm -f ${spec_file_name} +rm -rf ${deployment_dir_name} +laconic-so --stack ${stack_name} deploy --deploy-to k8s-kind init --output ${spec_file_name} +laconic-so --stack ${stack_name} deploy --deploy-to k8s-kind create --deployment-dir ${deployment_dir_name} --spec-file ${spec_file_name} +#laconic-so deployment --dir ${deployment_dir_name} start +#laconic-so deployment --dir ${deployment_dir_name} ps +#laconic-so deployment --dir ${deployment_dir_name} stop diff --git a/scripts/quick-install-linux.sh b/scripts/quick-install-linux.sh index 5670a403..b8642416 100755 --- a/scripts/quick-install-linux.sh +++ b/scripts/quick-install-linux.sh @@ -137,7 +137,7 @@ fi echo "**************************************************************************************" echo "Installing laconic-so" # install latest `laconic-so` -distribution_url=https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so +distribution_url=https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so install_filename=${install_dir}/laconic-so mkdir -p ${install_dir} curl -L -o ${install_filename} ${distribution_url} diff --git a/setup.py b/setup.py index d89dfc4d..773451f5 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ setup( description='Orchestrates deployment of the Laconic stack', long_description=long_description, 
long_description_content_type="text/markdown", - url='https://github.com/cerc-io/stack-orchestrator', + url='https://git.vdb.to/cerc-io/stack-orchestrator', py_modules=['stack_orchestrator'], packages=find_packages(), install_requires=[requirements], diff --git a/stack_orchestrator/build/build_containers.py b/stack_orchestrator/build/build_containers.py index e987c504..7b0957b0 100644 --- a/stack_orchestrator/build/build_containers.py +++ b/stack_orchestrator/build/build_containers.py @@ -33,6 +33,7 @@ from stack_orchestrator.base import get_npm_registry_url # TODO: find a place for this # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)" + def make_container_build_env(dev_root_path: str, container_build_dir: str, debug: bool, @@ -104,6 +105,9 @@ def process_container(stack: str, build_command = os.path.join(container_build_dir, "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}" if not dry_run: + # No PATH at all causes failures with podman. 
+ if "PATH" not in container_build_env: + container_build_env["PATH"] = os.environ["PATH"] if verbose: print(f"Executing: {build_command} with environment: {container_build_env}") build_result = subprocess.run(build_command, shell=True, env=container_build_env) @@ -119,6 +123,7 @@ def process_container(stack: str, else: print("Skipped") + @click.command() @click.option('--include', help="only build these containers") @click.option('--exclude', help="don\'t build these containers") diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py index 287347eb..a9124590 100644 --- a/stack_orchestrator/build/build_webapp.py +++ b/stack_orchestrator/build/build_webapp.py @@ -25,10 +25,11 @@ from decouple import config import click from pathlib import Path from stack_orchestrator.build import build_containers +from stack_orchestrator.deploy.webapp.util import determine_base_container @click.command() -@click.option('--base-container', default="cerc/nextjs-base") +@click.option('--base-container') @click.option('--source-repo', help="directory containing the webapp to build", required=True) @click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild") @click.option("--extra-build-args", help="Supply extra arguments to build") @@ -57,6 +58,9 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t if not quiet: print(f'Dev Root is: {dev_root_path}') + if not base_container: + base_container = determine_base_container(source_repo) + # First build the base container. 
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args) @@ -64,13 +68,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet, verbose, dry_run, continue_on_error) - # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir. container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true" container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo) container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir, - base_container.replace("/", "-"), - "Dockerfile.webapp") + base_container.replace("/", "-"), + "Dockerfile.webapp") if not tag: webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1] container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local" diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py index 596b0c1b..bb809404 100644 --- a/stack_orchestrator/constants.py +++ b/stack_orchestrator/constants.py @@ -27,6 +27,12 @@ kube_config_key = "kube-config" deploy_to_key = "deploy-to" network_key = "network" http_proxy_key = "http-proxy" -image_resigtry_key = "image-registry" +image_registry_key = "image-registry" +configmaps_key = "configmaps" +resources_key = "resources" +volumes_key = "volumes" +security_key = "security" +annotations_key = "annotations" +labels_key = "labels" kind_config_filename = "kind-config.yml" kube_config_filename = "kubeconfig.yml" diff --git a/stack_orchestrator/data/compose/docker-compose-mars.yml b/stack_orchestrator/data/compose/docker-compose-mars.yml new file mode 100644 index 00000000..193a90af --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-mars.yml @@ -0,0 +1,20 @@ +version: "3.2" + +services: + mars: + image: cerc/mars:local + 
restart: always + ports: + - "3000:3000" + environment: + - URL_OSMOSIS_GQL=https://osmosis-node.marsprotocol.io/GGSFGSFGFG34/osmosis-hive-front/graphql + - URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com + - URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com + - URL_NEUTRON_GQL=https://neutron.rpc.p2p.world/qgrnU6PsQZA8F9S5Fb8Fn3tV3kXmMBl2M9bcc9jWLjQy8p/hive/graphql + - URL_NEUTRON_REST=https://rest-kralum.neutron-1.neutron.org + - URL_NEUTRON_RPC=https://rpc-kralum.neutron-1.neutron.org + - URL_NEUTRON_TEST_GQL=https://testnet-neutron-gql.marsprotocol.io/graphql + - URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech + - URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech + - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x + diff --git a/stack_orchestrator/data/compose/docker-compose-ping-pub.yml b/stack_orchestrator/data/compose/docker-compose-ping-pub.yml new file mode 100644 index 00000000..e2120bc2 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-ping-pub.yml @@ -0,0 +1,8 @@ +version: "3.2" + +services: + ping-pub: + image: cerc/ping-pub:local + restart: always + ports: + - "5173:5173" diff --git a/stack_orchestrator/data/compose/docker-compose-test-database.yml b/stack_orchestrator/data/compose/docker-compose-test-database.yml new file mode 100644 index 00000000..6b99cdab --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-test-database.yml @@ -0,0 +1,20 @@ +services: + + database: + image: cerc/test-database-container:local + restart: always + volumes: + - db-data:/var/lib/postgresql/data + environment: + POSTGRES_USER: "test-user" + POSTGRES_DB: "test-db" + POSTGRES_PASSWORD: "password" + POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C" + ports: + - "5432" + + test-client: + image: cerc/test-database-client:local + +volumes: + db-data: diff --git a/stack_orchestrator/data/compose/docker-compose-test.yml b/stack_orchestrator/data/compose/docker-compose-test.yml index 5fbf46d0..6ebbd714 100644 --- 
a/stack_orchestrator/data/compose/docker-compose-test.yml +++ b/stack_orchestrator/data/compose/docker-compose-test.yml @@ -5,10 +5,15 @@ services: environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED} + CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE" volumes: - - test-data:/data + - test-data-bind:/data + - test-data-auto:/data2 + - test-config:/config:ro ports: - "80" volumes: - test-data: + test-data-bind: + test-data-auto: + test-config: diff --git a/stack_orchestrator/data/compose/docker-compose-webapp-template.yml b/stack_orchestrator/data/compose/docker-compose-webapp-template.yml index b8697afa..255ebf40 100644 --- a/stack_orchestrator/data/compose/docker-compose-webapp-template.yml +++ b/stack_orchestrator/data/compose/docker-compose-webapp-template.yml @@ -5,4 +5,4 @@ services: environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} ports: - - "3000" + - "80" diff --git a/stack_orchestrator/data/container-build/cerc-mars/build.sh b/stack_orchestrator/data/container-build/cerc-mars/build.sh new file mode 100755 index 00000000..28ebf417 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-mars/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build the mars image +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/mars:local -f ${CERC_REPO_BASE_DIR}/mars-interface/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/mars-interface diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile index 8949c4e9..5f9548ee 100644 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile @@ -30,13 +30,13 @@ RUN \ # [Optional] Uncomment this section to install additional OS packages. 
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && apt-get -y install --no-install-recommends jq gettext-base + && apt-get -y install --no-install-recommends jq gettext-base procps # [Optional] Uncomment if you want to install more global node modules # RUN su node -c "npm install -g " # Expose port for http -EXPOSE 3000 +EXPOSE 80 COPY /scripts /scripts diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh index 3692f2f6..bd254572 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh @@ -58,4 +58,4 @@ if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then fi fi -$CERC_BUILD_TOOL start . -p ${CERC_LISTEN_PORT:-3000} +$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80} diff --git a/stack_orchestrator/data/container-build/cerc-ping-pub/build.sh b/stack_orchestrator/data/container-build/cerc-ping-pub/build.sh new file mode 100755 index 00000000..391525dd --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-ping-pub/build.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# Build the ping pub image +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +docker build -t cerc/ping-pub:local ${build_command_args} -f $CERC_REPO_BASE_DIR/explorer/Dockerfile $CERC_REPO_BASE_DIR/explorer diff --git a/stack_orchestrator/data/container-build/cerc-test-container/run.sh b/stack_orchestrator/data/container-build/cerc-test-container/run.sh index da0af7d5..8a5a53a9 100755 --- a/stack_orchestrator/data/container-build/cerc-test-container/run.sh +++ b/stack_orchestrator/data/container-build/cerc-test-container/run.sh @@ -1,21 +1,58 @@ #!/usr/bin/env bash set -e + if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -# Test if the container's filesystem is old (run previously) or new -EXISTSFILENAME=/data/exists 
+ echo "Test container starting" -if [[ -f "$EXISTSFILENAME" ]]; -then - TIMESTAMP=`cat $EXISTSFILENAME` - echo "Filesystem is old, created: $TIMESTAMP" + +DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }') +if [[ -n "$DATA_DEVICE" ]]; then + echo "/data: MOUNTED dev=${DATA_DEVICE}" else - echo "Filesystem is fresh" - echo `date` > $EXISTSFILENAME + echo "/data: not mounted" fi + +DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }') +if [[ -n "$DATA_DEVICE" ]]; then + echo "/data2: MOUNTED dev=${DATA2_DEVICE}" +else + echo "/data2: not mounted" +fi + +# Test if the container's filesystem is old (run previously) or new +for d in /data /data2; do + if [[ -f "$d/exists" ]]; + then + TIMESTAMP=`cat $d/exists` + echo "$d filesystem is old, created: $TIMESTAMP" + else + echo "$d filesystem is fresh" + echo `date` > $d/exists + fi +done + if [ -n "$CERC_TEST_PARAM_1" ]; then echo "Test-param-1: ${CERC_TEST_PARAM_1}" fi +if [ -n "$CERC_TEST_PARAM_2" ]; then + echo "Test-param-2: ${CERC_TEST_PARAM_2}" +fi + +if [ -d "/config" ]; then + echo "/config: EXISTS" + for f in /config/*; do + if [[ -f "$f" ]] || [[ -L "$f" ]]; then + echo "$f:" + cat "$f" + echo "" + echo "" + fi + done +else + echo "/config: does NOT EXIST" +fi + # Run nginx which will block here forever /usr/sbin/nginx -g "daemon off;" diff --git a/stack_orchestrator/data/container-build/cerc-test-database-client/Dockerfile b/stack_orchestrator/data/container-build/cerc-test-database-client/Dockerfile new file mode 100644 index 00000000..e2b3f7ad --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-test-database-client/Dockerfile @@ -0,0 +1,12 @@ +FROM ubuntu:latest + +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \ + apt-get install -y software-properties-common && \ + apt-get install -y postgresql-client && \ + apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +EXPOSE 80 + +COPY run.sh /app/run.sh + +ENTRYPOINT 
["/app/run.sh"] diff --git a/stack_orchestrator/data/container-build/cerc-test-database-client/build.sh b/stack_orchestrator/data/container-build/cerc-test-database-client/build.sh new file mode 100755 index 00000000..f9a9051f --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-test-database-client/build.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# Build cerc/test-container +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR \ No newline at end of file diff --git a/stack_orchestrator/data/container-build/cerc-test-database-client/run.sh b/stack_orchestrator/data/container-build/cerc-test-database-client/run.sh new file mode 100755 index 00000000..b889d677 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-test-database-client/run.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# TODO derive this from config +database_url="postgresql://test-user:password@localhost:5432/test-db" +psql_command="psql ${database_url}" +program_name="Database test client:" + +wait_for_database_up () { + for i in {1..50} + do + ${psql_command} -c "select 1;" + psql_succeeded=$? 
+ if [[ ${psql_succeeded} == 0 ]]; then + # if ready, return + echo "${program_name} database up" + return + else + # if not ready, wait + echo "${program_name} waiting for database: ${i}" + sleep 5 + fi + done + # Timed out, error exit + echo "${program_name} waiting for database: FAILED" + exit 1 +} + +# Used to synchronize with the test runner +notify_test_complete () { + echo "${program_name} test complete" +} + +does_test_data_exist () { + query_result=$(${psql_command} -t -c "select count(*) from test_table_1 where key_column = 'test_key_1';" | head -1 | tr -d ' ') + if [[ "${query_result}" == "1" ]]; then + return 0 + else + return 1 + fi +} + +create_test_data () { + ${psql_command} -c "create table test_table_1 (key_column text, value_column text, primary key(key_column));" + ${psql_command} -c "insert into test_table_1 values ('test_key_1', 'test_value_1');" +} + +wait_forever() { + # Loop to keep docker/k8s happy since this is the container entrypoint + while :; do sleep 600; done +} + +wait_for_database_up + +# Check if the test database content exists already +if does_test_data_exist; then + # If so, log saying so. 
Test harness will look for this log output + echo "${program_name} test data already exists" +else + # Otherwise log saying the content was not present + echo "${program_name} test data does not exist" + echo "${program_name} creating test data" + # then create it + create_test_data +fi + +notify_test_complete +wait_forever diff --git a/stack_orchestrator/data/container-build/cerc-test-database-container/Dockerfile b/stack_orchestrator/data/container-build/cerc-test-database-container/Dockerfile new file mode 100644 index 00000000..aae60175 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-test-database-container/Dockerfile @@ -0,0 +1,3 @@ +FROM postgres:16-bullseye + +EXPOSE 5432 diff --git a/stack_orchestrator/data/container-build/cerc-test-database-container/build.sh b/stack_orchestrator/data/container-build/cerc-test-database-container/build.sh new file mode 100755 index 00000000..a4515229 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-test-database-container/build.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# Build cerc/test-container +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +docker build -t cerc/test-database-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile index 275a5c3c..331e04a1 100644 --- a/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile @@ -1,6 +1,6 @@ # Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile # [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster -ARG VARIANT=18-bullseye +ARG 
VARIANT=20-bullseye FROM node:${VARIANT} ARG USERNAME=node @@ -28,7 +28,7 @@ RUN \ # [Optional] Uncomment this section to install additional OS packages. RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && apt-get -y install --no-install-recommends jq + && apt-get -y install --no-install-recommends jq gettext-base # [Optional] Uncomment if you want to install an additional version of node using nvm # ARG EXTRA_NODE_VERSION=10 @@ -37,9 +37,7 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ # We do this to get a yq binary from the published container, for the correct architecture we're building here COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq -RUN mkdir -p /scripts -COPY ./apply-webapp-config.sh /scripts -COPY ./start-serving-app.sh /scripts +COPY scripts /scripts # [Optional] Uncomment if you want to install more global node modules # RUN su node -c "npm install -g " diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile.webapp b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile.webapp new file mode 100644 index 00000000..711eff25 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile.webapp @@ -0,0 +1,11 @@ +FROM cerc/webapp-base:local as builder + +ARG CERC_BUILD_TOOL + +WORKDIR /app +COPY . . 
+RUN rm -rf node_modules build .next* +RUN /scripts/build-app.sh /app build /data + +FROM cerc/webapp-base:local +COPY --from=builder /data /data diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/build.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/build.sh index 51712dc4..12b25e5e 100755 --- a/stack_orchestrator/data/container-build/cerc-webapp-base/build.sh +++ b/stack_orchestrator/data/container-build/cerc-webapp-base/build.sh @@ -1,9 +1,29 @@ #!/usr/bin/env bash -# Build cerc/laconic-registry-cli +# Build cerc/webapp-base source ${CERC_CONTAINER_BASE_DIR}/build-base.sh # See: https://stackoverflow.com/a/246128/1701505 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -docker build -t cerc/webapp-base:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile ${SCRIPT_DIR} +CERC_CONTAINER_BUILD_WORK_DIR=${CERC_CONTAINER_BUILD_WORK_DIR:-$SCRIPT_DIR} +CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/Dockerfile} +CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/webapp-base:local} + +docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR + +if [ $? -eq 0 ] && [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/webapp-base:local" ]; then + cat < $TMP_ENV + set -a + source .env + source $TMP_ENV + set +a + rm -f $TMP_ENV +fi + +for f in $(find . 
-regex ".*.[tj]sx?$" -type f | grep -v 'node_modules'); do + for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '[{},();]' '\n' | egrep -o -e '^"CERC_RUNTIME_ENV_[^\"]+"' -e '^"LACONIC_HOSTED_CONFIG_[^\"]+"'); do + orig_name=$(echo -n "${e}" | sed 's/"//g') + cur_name=$(echo -n "${orig_name}" | sed 's/CERC_RUNTIME_ENV_//g') + cur_val=$(echo -n "\$${cur_name}" | envsubst) + if [ "$CERC_RETAIN_ENV_QUOTES" != "true" ]; then + cur_val=$(sed "s/^[\"']//" <<< "$cur_val" | sed "s/[\"']//") + fi + esc_val=$(sed 's/[&/\]/\\&/g' <<< "$cur_val") + echo "$f: $cur_name=$cur_val" + sed -i "s/$orig_name/$esc_val/g" $f + done +done diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/apply-webapp-config.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/apply-webapp-config.sh similarity index 100% rename from stack_orchestrator/data/container-build/cerc-webapp-base/apply-webapp-config.sh rename to stack_orchestrator/data/container-build/cerc-webapp-base/scripts/apply-webapp-config.sh diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/build-app.sh new file mode 100755 index 00000000..c102a054 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/build-app.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" +WORK_DIR="${1:-/app}" +OUTPUT_DIR="${2:-build}" +DEST_DIR="${3:-/data}" + +if [ -f "${WORK_DIR}/package.json" ]; then + echo "Building node-based webapp ..." 
+ cd "${WORK_DIR}" || exit 1 + + if [ -z "$CERC_BUILD_TOOL" ]; then + if [ -f "yarn.lock" ]; then + CERC_BUILD_TOOL=yarn + else + CERC_BUILD_TOOL=npm + fi + fi + + $CERC_BUILD_TOOL install || exit 1 + $CERC_BUILD_TOOL build || exit 1 + + rm -rf "${DEST_DIR}" + mv "${WORK_DIR}/${OUTPUT_DIR}" "${DEST_DIR}" +else + echo "Copying static app ..." + mv "${WORK_DIR}" "${DEST_DIR}" +fi + +exit 0 diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/start-serving-app.sh new file mode 100755 index 00000000..365d05fb --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-webapp-base/scripts/start-serving-app.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}" +CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}" + +if [ "true" == "$CERC_ENABLE_CORS" ]; then + CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --cors" +fi + +/scripts/apply-webapp-config.sh /config/config.yml ${CERC_WEBAPP_FILES_DIR} +/scripts/apply-runtime-env.sh ${CERC_WEBAPP_FILES_DIR} +http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT:-80} ${CERC_WEBAPP_FILES_DIR} diff --git a/stack_orchestrator/data/container-build/cerc-webapp-base/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/start-serving-app.sh deleted file mode 100755 index 69fa6c22..00000000 --- a/stack_orchestrator/data/container-build/cerc-webapp-base/start-serving-app.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}" - -/scripts/apply-webapp-config.sh /config/config.yml ${CERC_WEBAPP_FILES_DIR} -http-server -p 80 ${CERC_WEBAPP_FILES_DIR} diff --git a/stack_orchestrator/data/container-build/cerc-webapp-deployer-backend/build.sh 
b/stack_orchestrator/data/container-build/cerc-webapp-deployer-backend/build.sh new file mode 100755 index 00000000..948701d6 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-webapp-deployer-backend/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/webapp-deployer-backend + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/webapp-deployer-backend:local ${build_command_args} ${CERC_REPO_BASE_DIR}/webapp-deployment-status-api diff --git a/stack_orchestrator/data/stacks/fixturenet-eth/README.md b/stack_orchestrator/data/stacks/fixturenet-eth/README.md index 853ab90c..6f1ec8e8 100644 --- a/stack_orchestrator/data/stacks/fixturenet-eth/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-eth/README.md @@ -1,6 +1,6 @@ # fixturenet-eth -Instructions for deploying a local a geth + lighthouse blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator)): +Instructions for deploying a local geth + lighthouse blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator (the installation of which is covered [here](https://git.vdb.to/cerc-io/stack-orchestrator)): ## Clone required repositories diff --git a/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md index c82e48ad..94f9eb36 100644 --- a/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md @@ -7,11 +7,11 @@ Instructions for deploying a local Laconic blockchain "fixturenet" for developme **Note:** For building some NPMs, access to the @lirewine repositories is required.
If you don't have access, see [this tutorial](/docs/laconicd-fixturenet.md) to run this stack ## 1. Install Laconic Stack Orchestrator -Installation is covered in detail [here](https://github.com/cerc-io/stack-orchestrator#user-mode) but if you're on Linux and already have docker installed it should be as simple as: +Installation is covered in detail [here](https://git.vdb.to/cerc-io/stack-orchestrator#user-mode) but if you're on Linux and already have docker installed it should be as simple as: ``` $ mkdir my-working-dir $ cd my-working-dir -$ curl -L -o ./laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so +$ curl -L -o ./laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so $ chmod +x ./laconic-so $ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory ``` diff --git a/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md index 77a017a2..1b04b875 100644 --- a/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md @@ -3,11 +3,11 @@ Instructions for deploying a local Laconic blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator. ## 1. 
Install Laconic Stack Orchestrator -Installation is covered in detail [here](https://github.com/cerc-io/stack-orchestrator#user-mode) but if you're on Linux and already have docker installed it should be as simple as: +Installation is covered in detail [here](https://git.vdb.to/cerc-io/stack-orchestrator#user-mode) but if you're on Linux and already have docker installed it should be as simple as: ``` $ mkdir my-working-dir $ cd my-working-dir -$ curl -L -o ./laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so +$ curl -L -o ./laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so $ chmod +x ./laconic-so $ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory ``` diff --git a/stack_orchestrator/data/stacks/mainnet-eth/README.md b/stack_orchestrator/data/stacks/mainnet-eth/README.md index 8f0dc1c4..2656b75b 100644 --- a/stack_orchestrator/data/stacks/mainnet-eth/README.md +++ b/stack_orchestrator/data/stacks/mainnet-eth/README.md @@ -18,7 +18,7 @@ $ laconic-so --stack mainnet-eth build-containers ``` $ laconic-so --stack mainnet-eth deploy init --map-ports-to-host any-same --output mainnet-eth-spec.yml -$ laconic-so deploy create --spec-file mainnet-eth-spec.yml --deployment-dir mainnet-eth-deployment +$ laconic-so deploy --stack mainnet-eth create --spec-file mainnet-eth-spec.yml --deployment-dir mainnet-eth-deployment ``` ## Start the stack ``` diff --git a/stack_orchestrator/data/stacks/mars/README.md b/stack_orchestrator/data/stacks/mars/README.md new file mode 100644 index 00000000..fb1eff41 --- /dev/null +++ b/stack_orchestrator/data/stacks/mars/README.md @@ -0,0 +1,16 @@ +# mars + +On a fresh Digital Ocean droplet with Ubuntu: + +``` +git clone https://github.com/cerc-io/stack-orchestrator +cd stack-orchestrator +./scripts/quick-install-linux.sh +``` +Read and follow the instructions output from the above output to complete installation, then: + 
+``` +laconic-so --stack mars setup-repositories +laconic-so --stack mars build-containers +laconic-so --stack mars deploy up +``` diff --git a/stack_orchestrator/data/stacks/mars/stack.yml b/stack_orchestrator/data/stacks/mars/stack.yml new file mode 100644 index 00000000..314afa36 --- /dev/null +++ b/stack_orchestrator/data/stacks/mars/stack.yml @@ -0,0 +1,8 @@ +version: "0.1" +name: mars +repos: + - github.com/cerc-io/mars-interface +containers: + - cerc/mars +pods: + - mars diff --git a/stack_orchestrator/data/stacks/mobymask/README.md b/stack_orchestrator/data/stacks/mobymask/README.md index ef222cce..92a21c9d 100644 --- a/stack_orchestrator/data/stacks/mobymask/README.md +++ b/stack_orchestrator/data/stacks/mobymask/README.md @@ -4,7 +4,7 @@ The MobyMask watcher is a Laconic Network component that provides efficient acce ## Deploy the MobyMask Watcher -The instructions below show how to deploy a MobyMask watcher using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator#install)). +The instructions below show how to deploy a MobyMask watcher using laconic-stack-orchestrator (the installation of which is covered [here](https://git.vdb.to/cerc-io/stack-orchestrator#install)). This deployment expects that ipld-eth-server's endpoints are available on the local machine at http://ipld-eth-server.example.com:8083/graphql and http://ipld-eth-server.example.com:8082. More advanced configurations are supported by modifying the watcher's [config file](../../config/watcher-mobymask/mobymask-watcher.toml). 
diff --git a/stack_orchestrator/data/stacks/ping-pub/README.md b/stack_orchestrator/data/stacks/ping-pub/README.md new file mode 100644 index 00000000..32378b6f --- /dev/null +++ b/stack_orchestrator/data/stacks/ping-pub/README.md @@ -0,0 +1,10 @@ +# ping-pub +Experimental block explorer for laconic + +``` +laconic-so --stack ping-pub setup-repositories +laconic-so --stack ping-pub build-containers +laconic-so --stack ping-pub deploy init --output ping-pub-spec.yml --map-ports-to-host localhost-same +laconic-so --stack ping-pub deploy create --spec-file ping-pub-spec.yml --deployment-dir pp-deployment +laconic-so deployment --dir pp-deployment start +``` diff --git a/stack_orchestrator/data/stacks/ping-pub/stack.yml b/stack_orchestrator/data/stacks/ping-pub/stack.yml new file mode 100644 index 00000000..f2d86d59 --- /dev/null +++ b/stack_orchestrator/data/stacks/ping-pub/stack.yml @@ -0,0 +1,9 @@ +version: "0.1" +name: ping-pub +repos: + # fork, but only for config & Dockerfile reasons + - github.com/LaconicNetwork/explorer@laconic +containers: + - cerc/ping-pub +pods: + - ping-pub diff --git a/stack_orchestrator/data/stacks/test-database/README.md b/stack_orchestrator/data/stacks/test-database/README.md new file mode 100644 index 00000000..1dcdcc7b --- /dev/null +++ b/stack_orchestrator/data/stacks/test-database/README.md @@ -0,0 +1,3 @@ +# Test Database Stack + +A stack with a database for test/demo purposes.
\ No newline at end of file diff --git a/stack_orchestrator/data/stacks/test-database/stack.yml b/stack_orchestrator/data/stacks/test-database/stack.yml new file mode 100644 index 00000000..46fef720 --- /dev/null +++ b/stack_orchestrator/data/stacks/test-database/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: test +description: "A test database stack" +repos: +containers: + - cerc/test-database-container + - cerc/test-database-client +pods: + - test-database diff --git a/stack_orchestrator/data/stacks/webapp-deployer-backend/stack.yml b/stack_orchestrator/data/stacks/webapp-deployer-backend/stack.yml new file mode 100644 index 00000000..04000a1b --- /dev/null +++ b/stack_orchestrator/data/stacks/webapp-deployer-backend/stack.yml @@ -0,0 +1,11 @@ +version: "1.0" +name: webapp-deployer-backend +description: "Deployer for webapps" +repos: + - git.vdb.to/telackey/webapp-deployment-status-api +containers: + - cerc/webapp-deployer-backend +pods: + - name: webapp-deployer-backend + repository: git.vdb.to/telackey/webapp-deployment-status-api + path: ./ diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index b2622820..ffde91c2 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -17,6 +17,7 @@ from pathlib import Path from python_on_whales import DockerClient, DockerException from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator from stack_orchestrator.deploy.deployment_context import DeploymentContext +from stack_orchestrator.opts import opts class DockerDeployer(Deployer): @@ -29,60 +30,69 @@ class DockerDeployer(Deployer): self.type = type def up(self, detach, services): - try: - return self.docker.compose.up(detach=detach, services=services) - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.compose.up(detach=detach, 
services=services) + except DockerException as e: + raise DeployerException(e) def down(self, timeout, volumes): - try: - return self.docker.compose.down(timeout=timeout, volumes=volumes) - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.compose.down(timeout=timeout, volumes=volumes) + except DockerException as e: + raise DeployerException(e) def update(self): - try: - return self.docker.compose.restart() - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.compose.restart() + except DockerException as e: + raise DeployerException(e) def status(self): - try: - for p in self.docker.compose.ps(): - print(f"{p.name}\t{p.state.status}") - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + for p in self.docker.compose.ps(): + print(f"{p.name}\t{p.state.status}") + except DockerException as e: + raise DeployerException(e) def ps(self): - try: - return self.docker.compose.ps() - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.compose.ps() + except DockerException as e: + raise DeployerException(e) def port(self, service, private_port): - try: - return self.docker.compose.port(service=service, private_port=private_port) - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.compose.port(service=service, private_port=private_port) + except DockerException as e: + raise DeployerException(e) def execute(self, service, command, tty, envs): - try: - return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs) - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs) + except DockerException as e: + raise DeployerException(e) def logs(self, 
services, tail, follow, stream): - try: - return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream) - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream) + except DockerException as e: + raise DeployerException(e) def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): - try: - return self.docker.run(image=image, command=command, user=user, volumes=volumes, - entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0) - except DockerException as e: - raise DeployerException(e) + if not opts.o.dry_run: + try: + return self.docker.run(image=image, command=command, user=user, volumes=volumes, + entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0) + except DockerException as e: + raise DeployerException(e) class DockerDeployerConfigGenerator(DeployerConfigGenerator): diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index 18d27a21..29afcf13 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -85,54 +85,39 @@ def create_deploy_context( def up_operation(ctx, services_list, stay_attached=False): global_context = ctx.parent.parent.obj deploy_context = ctx.obj - if not global_context.dry_run: - cluster_context = deploy_context.cluster_context - container_exec_env = _make_runtime_env(global_context) - for attr, value in container_exec_env.items(): - os.environ[attr] = value - if global_context.verbose: - print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}") - for pre_start_command in cluster_context.pre_start_commands: - _run_command(global_context, cluster_context.cluster, pre_start_command) - deploy_context.deployer.up(detach=not stay_attached, 
services=services_list) - for post_start_command in cluster_context.post_start_commands: - _run_command(global_context, cluster_context.cluster, post_start_command) - _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env) + cluster_context = deploy_context.cluster_context + container_exec_env = _make_runtime_env(global_context) + for attr, value in container_exec_env.items(): + os.environ[attr] = value + if global_context.verbose: + print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}") + for pre_start_command in cluster_context.pre_start_commands: + _run_command(global_context, cluster_context.cluster, pre_start_command) + deploy_context.deployer.up(detach=not stay_attached, services=services_list) + for post_start_command in cluster_context.post_start_commands: + _run_command(global_context, cluster_context.cluster, post_start_command) + _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env) def down_operation(ctx, delete_volumes, extra_args_list): - global_context = ctx.parent.parent.obj - if not global_context.dry_run: - if global_context.verbose: - print("Running compose down") - timeout_arg = None - if extra_args_list: - timeout_arg = extra_args_list[0] - # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully - ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes) + timeout_arg = None + if extra_args_list: + timeout_arg = extra_args_list[0] + # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully + ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes) def status_operation(ctx): - global_context = ctx.parent.parent.obj - if not global_context.dry_run: - if global_context.verbose: - print("Running compose status") - ctx.obj.deployer.status() + ctx.obj.deployer.status() def update_operation(ctx): - 
global_context = ctx.parent.parent.obj - if not global_context.dry_run: - if global_context.verbose: - print("Running compose update") - ctx.obj.deployer.update() + ctx.obj.deployer.update() def ps_operation(ctx): global_context = ctx.parent.parent.obj if not global_context.dry_run: - if global_context.verbose: - print("Running compose ps") container_list = ctx.obj.deployer.ps() if len(container_list) > 0: print("Running containers:") @@ -187,15 +172,11 @@ def exec_operation(ctx, extra_args): def logs_operation(ctx, tail: int, follow: bool, extra_args: str): - global_context = ctx.parent.parent.obj extra_args_list = list(extra_args) or None - if not global_context.dry_run: - if global_context.verbose: - print("Running compose logs") - services_list = extra_args_list if extra_args_list is not None else [] - logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True) - for stream_type, stream_content in logs_stream: - print(stream_content.decode("utf-8"), end="") + services_list = extra_args_list if extra_args_list is not None else [] + logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True) + for stream_type, stream_content in logs_stream: + print(stream_content.decode("utf-8"), end="") @command.command() @@ -347,8 +328,8 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): else: if deployment: compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml") - pod_pre_start_command = pod["pre_start_command"] - pod_post_start_command = pod["post_start_command"] + pod_pre_start_command = pod.get("pre_start_command") + pod_post_start_command = pod.get("post_start_command") script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts") if pod_pre_start_command is not None: pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command)) @@ -357,8 +338,8 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): 
else: pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"]) compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml") - pod_pre_start_command = pod["pre_start_command"] - pod_post_start_command = pod["post_start_command"] + pod_pre_start_command = pod.get("pre_start_command") + pod_post_start_command = pod.get("post_start_command") if pod_pre_start_command is not None: pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command)) if pod_post_start_command is not None: @@ -463,7 +444,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en tty=False, envs=container_exec_env) waiting_for_data = False - if ctx.debug: + if ctx.debug and not waiting_for_data: print(f"destination output: {destination_output}") diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py index 959c1b7a..2d01729e 100644 --- a/stack_orchestrator/deploy/deployer_factory.py +++ b/stack_orchestrator/deploy/deployer_factory.py @@ -18,11 +18,11 @@ from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerCon from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator -def getDeployerConfigGenerator(type: str): +def getDeployerConfigGenerator(type: str, deployment_context): if type == "compose" or type is None: return DockerDeployerConfigGenerator(type) elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type: - return K8sDeployerConfigGenerator(type) + return K8sDeployerConfigGenerator(type, deployment_context) else: print(f"ERROR: deploy-to {type} is not valid") diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index 138fc1bb..87b6800d 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -27,6 +27,7 @@ from stack_orchestrator.opts import opts 
from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file) +from stack_orchestrator.deploy.spec import Spec from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator from stack_orchestrator.deploy.deployment_context import DeploymentContext @@ -54,19 +55,44 @@ def _get_ports(stack): def _get_named_volumes(stack): # Parse the compose files looking for named volumes - named_volumes = [] + named_volumes = { + "rw": [], + "ro": [] + } parsed_stack = get_parsed_stack_config(stack) pods = get_pod_list(parsed_stack) yaml = get_yaml() + + def find_vol_usage(parsed_pod_file, vol): + ret = {} + if "services" in parsed_pod_file: + for svc_name, svc in parsed_pod_file["services"].items(): + if "volumes" in svc: + for svc_volume in svc["volumes"]: + parts = svc_volume.split(":") + if parts[0] == vol: + ret[svc_name] = { + "volume": parts[0], + "mount": parts[1], + "options": parts[2] if len(parts) == 3 else None + } + return ret + for pod in pods: pod_file_path = get_pod_file_path(parsed_stack, pod) parsed_pod_file = yaml.load(open(pod_file_path, "r")) if "volumes" in parsed_pod_file: volumes = parsed_pod_file["volumes"] for volume in volumes.keys(): - # Volume definition looks like: - # 'laconicd-data': None - named_volumes.append(volume) + for vu in find_vol_usage(parsed_pod_file, volume).values(): + read_only = vu["options"] == "ro" + if read_only: + if vu["volume"] not in named_volumes["rw"] and vu["volume"] not in named_volumes["ro"]: + named_volumes["ro"].append(vu["volume"]) + else: + if vu["volume"] not in named_volumes["rw"]: + named_volumes["rw"].append(vu["volume"]) + return named_volumes @@ -86,6 +112,7 @@ def _create_bind_dir_if_relative(volume, path_string, 
compose_dir): # See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml def _fixup_pod_file(pod, spec, compose_dir): + deployment_type = spec[constants.deploy_to_key] # Fix up volumes if "volumes" in spec: spec_volumes = spec["volumes"] @@ -94,16 +121,35 @@ def _fixup_pod_file(pod, spec, compose_dir): for volume in pod_volumes.keys(): if volume in spec_volumes: volume_spec = spec_volumes[volume] - volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}" - _create_bind_dir_if_relative(volume, volume_spec, compose_dir) - new_volume_spec = {"driver": "local", - "driver_opts": { - "type": "none", - "device": volume_spec_fixedup, - "o": "bind" - } - } - pod["volumes"][volume] = new_volume_spec + if volume_spec: + volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}" + _create_bind_dir_if_relative(volume, volume_spec, compose_dir) + # this is Docker specific + if spec.is_docker_deployment(): + new_volume_spec = { + "driver": "local", + "driver_opts": { + "type": "none", + "device": volume_spec_fixedup, + "o": "bind" + } + } + pod["volumes"][volume] = new_volume_spec + + # Fix up configmaps + if constants.configmaps_key in spec: + if spec.is_kubernetes_deployment(): + spec_cfgmaps = spec[constants.configmaps_key] + if "volumes" in pod: + pod_volumes = pod[constants.volumes_key] + for volume in pod_volumes.keys(): + if volume in spec_cfgmaps: + volume_cfg = spec_cfgmaps[volume] + # Just make the dir (if necessary) + _create_bind_dir_if_relative(volume, volume_cfg, compose_dir) + else: + print(f"Warning: ConfigMaps not supported for {deployment_type}") + # Fix up ports if "network" in spec and "ports" in spec["network"]: spec_ports = spec["network"]["ports"] @@ -286,7 +332,7 @@ def init_operation(deploy_command_context, stack, deployer_type, config, if image_registry is None: error_exit("--image-registry must be supplied with --deploy-to k8s") 
spec_file_content.update({constants.kube_config_key: kube_config}) - spec_file_content.update({constants.image_resigtry_key: image_registry}) + spec_file_content.update({constants.image_registry_key: image_registry}) else: # Check for --kube-config supplied for non-relevant deployer types if kube_config is not None: @@ -319,9 +365,24 @@ def init_operation(deploy_command_context, stack, deployer_type, config, named_volumes = _get_named_volumes(stack) if named_volumes: volume_descriptors = {} - for named_volume in named_volumes: - volume_descriptors[named_volume] = f"./data/{named_volume}" - spec_file_content["volumes"] = volume_descriptors + configmap_descriptors = {} + for named_volume in named_volumes["rw"]: + if "k8s" in deployer_type: + volume_descriptors[named_volume] = None + else: + volume_descriptors[named_volume] = f"./data/{named_volume}" + for named_volume in named_volumes["ro"]: + if "k8s" in deployer_type: + if "config" in named_volume: + configmap_descriptors[named_volume] = f"./configmaps/{named_volume}" + else: + volume_descriptors[named_volume] = None + else: + volume_descriptors[named_volume] = f"./data/{named_volume}" + if volume_descriptors: + spec_file_content["volumes"] = volume_descriptors + if configmap_descriptors: + spec_file_content["configmaps"] = configmap_descriptors if opts.o.debug: print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") @@ -360,6 +421,17 @@ def _create_deployment_file(deployment_dir: Path): output_file.write(f"{constants.cluster_id_key}: {cluster}\n") +def _check_volume_definitions(spec): + if spec.is_kubernetes_deployment(): + for volume_name, volume_path in spec.get_volumes().items(): + if volume_path: + if not os.path.isabs(volume_path): + raise Exception( + f"Relative path {volume_path} for volume {volume_name} not " + f"supported for deployment type {spec.get_deployment_type()}" + ) + + @click.command() @click.option("--spec-file", required=True, help="Spec file to use to create this 
deployment") @click.option("--deployment-dir", help="Create deployment files in this directory") @@ -375,7 +447,8 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): # The init command's implementation is in a separate function so that we can # call it from other commands, bypassing the click decoration stuff def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers): - parsed_spec = get_parsed_deployment_spec(spec_file) + parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file)) + _check_volume_definitions(parsed_spec) stack_name = parsed_spec["stack"] deployment_type = parsed_spec[constants.deploy_to_key] stack_file = get_stack_file_path(stack_name) @@ -441,7 +514,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw deployment_context = DeploymentContext() deployment_context.init(deployment_dir_path) # Call the deployer to generate any deployer-specific files (e.g. 
for kind) - deployer_config_generator = getDeployerConfigGenerator(deployment_type) + deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context) # TODO: make deployment_dir_path a Path above deployer_config_generator.generate(deployment_dir_path) call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context]) diff --git a/stack_orchestrator/deploy/images.py b/stack_orchestrator/deploy/images.py index ddbb33f7..7ddcca33 100644 --- a/stack_orchestrator/deploy/images.py +++ b/stack_orchestrator/deploy/images.py @@ -31,7 +31,8 @@ def _image_needs_pushed(image: str): def remote_tag_for_image(image: str, remote_repo_url: str): # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy - (org, image_name_with_version) = image.split("/") + major_parts = image.split("/", 2) + image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0] (image_name, image_version) = image_name_with_version.split(":") if image_version == "local": return f"{remote_repo_url}/{image_name}:deploy" @@ -45,7 +46,7 @@ def push_images_operation(command_context: DeployCommandContext, deployment_cont cluster_context = command_context.cluster_context images: Set[str] = images_for_deployment(cluster_context.compose_files) # Tag the images for the remote repo - remote_repo_url = deployment_context.spec.obj[constants.image_resigtry_key] + remote_repo_url = deployment_context.spec.obj[constants.image_registry_key] docker = DockerClient() for image in images: if _image_needs_pushed(image): diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index 85fd63a8..402ab42b 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -13,19 +13,50 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+import os + from kubernetes import client from typing import Any, List, Set from stack_orchestrator.opts import opts from stack_orchestrator.util import env_var_map_from_file from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files -from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path -from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map +from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path +from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map, envs_from_compose_file, merge_envs from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment from stack_orchestrator.deploy.deploy_types import DeployEnvVars -from stack_orchestrator.deploy.spec import Spec +from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits from stack_orchestrator.deploy.images import remote_tag_for_image +DEFAULT_VOLUME_RESOURCES = Resources({ + "reservations": {"storage": "2Gi"} +}) + +DEFAULT_CONTAINER_RESOURCES = Resources({ + "reservations": {"cpus": "0.1", "memory": "200M"}, + "limits": {"cpus": "1.0", "memory": "2000M"}, +}) + + +def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements: + def to_dict(limits: ResourceLimits): + if not limits: + return None + + ret = {} + if limits.cpus: + ret["cpu"] = str(limits.cpus) + if limits.memory: + ret["memory"] = f"{int(limits.memory / (1000 * 1000))}M" + if limits.storage: + ret["storage"] = f"{int(limits.storage / (1000 * 1000))}M" + return ret + + return client.V1ResourceRequirements( + requests=to_dict(resources.reservations), + limits=to_dict(resources.limits) + ) + class ClusterInfo: parsed_pod_yaml_map: Any @@ -112,9 +143,10 @@ class ClusterInfo: services = pod["services"] for service_name in services: service_info = services[service_name] - port = 
int(service_info["ports"][0]) - if opts.o.debug: - print(f"service port: {port}") + if "ports" in service_info: + port = int(service_info["ports"][0]) + if opts.o.debug: + print(f"service port: {port}") service = client.V1Service( metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"), spec=client.V1ServiceSpec( @@ -130,39 +162,112 @@ class ClusterInfo: def get_pvcs(self): result = [] - volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + spec_volumes = self.spec.get_volumes() + named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + resources = self.spec.get_volume_resources() + if not resources: + resources = DEFAULT_VOLUME_RESOURCES if opts.o.debug: - print(f"Volumes: {volumes}") - for volume_name in volumes: + print(f"Spec Volumes: {spec_volumes}") + print(f"Named Volumes: {named_volumes}") + print(f"Resources: {resources}") + for volume_name, volume_path in spec_volumes.items(): + if volume_name not in named_volumes: + if opts.o.debug: + print(f"{volume_name} not in pod files") + continue + + labels = { + "app": self.app_name, + "volume-label": f"{self.app_name}-{volume_name}" + } + if volume_path: + storage_class_name = "manual" + k8s_volume_name = f"{self.app_name}-{volume_name}" + else: + # These will be auto-assigned. 
+ storage_class_name = None + k8s_volume_name = None + spec = client.V1PersistentVolumeClaimSpec( access_modes=["ReadWriteOnce"], - storage_class_name="manual", - resources=client.V1ResourceRequirements( - requests={"storage": "2Gi"} - ), - volume_name=volume_name + storage_class_name=storage_class_name, + resources=to_k8s_resource_requirements(resources), + volume_name=k8s_volume_name ) pvc = client.V1PersistentVolumeClaim( - metadata=client.V1ObjectMeta(name=volume_name, - labels={"volume-label": volume_name}), - spec=spec, + metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels), + spec=spec ) result.append(pvc) return result + def get_configmaps(self): + result = [] + spec_configmaps = self.spec.get_configmaps() + named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + for cfg_map_name, cfg_map_path in spec_configmaps.items(): + if cfg_map_name not in named_volumes: + if opts.o.debug: + print(f"{cfg_map_name} not in pod files") + continue + + if not cfg_map_path.startswith("/"): + cfg_map_path = os.path.join(os.path.dirname(self.spec.file_path), cfg_map_path) + + # Read in all the files at a single-level of the directory. 
This mimics the behavior + # of `kubectl create configmap foo --from-file=/path/to/dir` + data = {} + for f in os.listdir(cfg_map_path): + full_path = os.path.join(cfg_map_path, f) + if os.path.isfile(full_path): + data[f] = open(full_path, 'rt').read() + + spec = client.V1ConfigMap( + metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}", + labels={"configmap-label": cfg_map_name}), + data=data + ) + result.append(spec) + return result + def get_pvs(self): result = [] - volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) - for volume_name in volumes: + spec_volumes = self.spec.get_volumes() + named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + resources = self.spec.get_volume_resources() + if not resources: + resources = DEFAULT_VOLUME_RESOURCES + for volume_name, volume_path in spec_volumes.items(): + # We only need to create a volume if it is fully qualified HostPath. + # Otherwise, we create the PVC and expect the node to allocate the volume for us. 
+ if not volume_path: + if opts.o.debug: + print(f"{volume_name} does not require an explicit PersistentVolume, since it is not a bind-mount.") + continue + + if volume_name not in named_volumes: + if opts.o.debug: + print(f"{volume_name} not in pod files") + continue + + if not os.path.isabs(volume_path): + print(f"WARNING: {volume_name}:{volume_path} is not absolute, cannot bind volume.") + continue + + if self.spec.is_kind_deployment(): + host_path = client.V1HostPathVolumeSource(path=get_kind_pv_bind_mount_path(volume_name)) + else: + host_path = client.V1HostPathVolumeSource(path=volume_path) spec = client.V1PersistentVolumeSpec( storage_class_name="manual", access_modes=["ReadWriteOnce"], - capacity={"storage": "2Gi"}, - host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name)) + capacity=to_k8s_resource_requirements(resources).requests, + host_path=host_path ) pv = client.V1PersistentVolume( - metadata=client.V1ObjectMeta(name=volume_name, - labels={"volume-label": volume_name}), + metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", + labels={"volume-label": f"{self.app_name}-{volume_name}"}), spec=spec, ) result.append(pv) @@ -171,6 +276,9 @@ class ClusterInfo: # TODO: put things like image pull policy into an object-scope struct def get_deployment(self, image_pull_policy: str = None): containers = [] + resources = self.spec.get_container_resources() + if not resources: + resources = DEFAULT_CONTAINER_RESOURCES for pod_name in self.parsed_pod_yaml_map: pod = self.parsed_pod_yaml_map[pod_name] services = pod["services"] @@ -178,10 +286,18 @@ class ClusterInfo: container_name = service_name service_info = services[service_name] image = service_info["image"] - port = int(service_info["ports"][0]) + if "ports" in service_info: + port = int(service_info["ports"][0]) + if opts.o.debug: + print(f"image: {image}") + print(f"service port: {port}") + merged_envs = merge_envs( + envs_from_compose_file( + 
service_info["environment"]), self.environment_variables.map + ) if "environment" in service_info else self.environment_variables.map + envs = envs_from_environment_variables_map(merged_envs) if opts.o.debug: - print(f"image: {image}") - print(f"service port: {port}") + print(f"Merged envs: {envs}") # Re-write the image tag for remote deployment image_to_use = remote_tag_for_image( image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image @@ -190,19 +306,40 @@ class ClusterInfo: name=container_name, image=image_to_use, image_pull_policy=image_pull_policy, - env=envs_from_environment_variables_map(self.environment_variables.map), + env=envs, ports=[client.V1ContainerPort(container_port=port)], volume_mounts=volume_mounts, - resources=client.V1ResourceRequirements( - requests={"cpu": "100m", "memory": "200Mi"}, - limits={"cpu": "500m", "memory": "500Mi"}, + security_context=client.V1SecurityContext( + privileged=self.spec.get_privileged(), + capabilities=client.V1Capabilities( + add=self.spec.get_capabilities() + ) if self.spec.get_capabilities() else None ), + resources=to_k8s_resource_requirements(resources), ) containers.append(container) - volumes = volumes_for_pod_files(self.parsed_pod_yaml_map) + volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name) image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")] + + annotations = None + labels = {"app": self.app_name} + + if self.spec.get_annotations(): + annotations = {} + for key, value in self.spec.get_annotations().items(): + for service_name in services: + annotations[key.replace("{name}", service_name)] = value + + if self.spec.get_labels(): + for key, value in self.spec.get_labels().items(): + for service_name in services: + labels[key.replace("{name}", service_name)] = value + template = client.V1PodTemplateSpec( - metadata=client.V1ObjectMeta(labels={"app": self.app_name}), + metadata=client.V1ObjectMeta( + 
annotations=annotations, + labels=labels + ), spec=client.V1PodSpec(containers=containers, image_pull_secrets=image_pull_secrets, volumes=volumes), ) spec = client.V1DeploymentSpec( diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 0a339fe9..db806050 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -1,5 +1,4 @@ # Copyright © 2023 Vulcanize - # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or @@ -21,7 +20,8 @@ from kubernetes import client, config from stack_orchestrator import constants from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind -from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config +from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, containers_in_pod, log_stream_from_string +from stack_orchestrator.deploy.k8s.helpers import generate_kind_config from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo from stack_orchestrator.opts import opts from stack_orchestrator.deploy.deployment_context import DeploymentContext @@ -82,99 +82,172 @@ class K8sDeployer(Deployer): self.apps_api = client.AppsV1Api() self.custom_obj_api = client.CustomObjectsApi() - def up(self, detach, services): - - if self.is_kind(): - # Create the kind cluster - create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename)) - # Ensure the referenced containers are copied into kind - load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) - self.connect_api() - + def _create_volume_data(self): # Create the host-path-mounted PVs for 
this deployment pvs = self.cluster_info.get_pvs() for pv in pvs: if opts.o.debug: print(f"Sending this pv: {pv}") - pv_resp = self.core_api.create_persistent_volume(body=pv) - if opts.o.debug: - print("PVs created:") - print(f"{pv_resp}") + if not opts.o.dry_run: + try: + pv_resp = self.core_api.read_persistent_volume(name=pv.metadata.name) + if pv_resp: + if opts.o.debug: + print("PVs already present:") + print(f"{pv_resp}") + continue + except: # noqa: E722 + pass + + pv_resp = self.core_api.create_persistent_volume(body=pv) + if opts.o.debug: + print("PVs created:") + print(f"{pv_resp}") # Figure out the PVCs for this deployment pvcs = self.cluster_info.get_pvcs() for pvc in pvcs: if opts.o.debug: print(f"Sending this pvc: {pvc}") - pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace) + + if not opts.o.dry_run: + try: + pvc_resp = self.core_api.read_namespaced_persistent_volume_claim( + name=pvc.metadata.name, namespace=self.k8s_namespace) + if pvc_resp: + if opts.o.debug: + print("PVCs already present:") + print(f"{pvc_resp}") + continue + except: # noqa: E722 + pass + + pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace) + if opts.o.debug: + print("PVCs created:") + print(f"{pvc_resp}") + + # Figure out the ConfigMaps for this deployment + config_maps = self.cluster_info.get_configmaps() + for cfg_map in config_maps: if opts.o.debug: - print("PVCs created:") - print(f"{pvc_resp}") + print(f"Sending this ConfigMap: {cfg_map}") + if not opts.o.dry_run: + cfg_rsp = self.core_api.create_namespaced_config_map( + body=cfg_map, + namespace=self.k8s_namespace + ) + if opts.o.debug: + print("ConfigMap created:") + print(f"{cfg_rsp}") + + def _create_deployment(self): # Process compose files into a Deployment deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always") # Create the k8s objects if opts.o.debug: 
print(f"Sending this deployment: {deployment}") - deployment_resp = self.apps_api.create_namespaced_deployment( - body=deployment, namespace=self.k8s_namespace - ) - if opts.o.debug: - print("Deployment created:") - print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \ - {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}") + if not opts.o.dry_run: + deployment_resp = self.apps_api.create_namespaced_deployment( + body=deployment, namespace=self.k8s_namespace + ) + if opts.o.debug: + print("Deployment created:") + print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \ + {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}") service: client.V1Service = self.cluster_info.get_service() - service_resp = self.core_api.create_namespaced_service( - namespace=self.k8s_namespace, - body=service - ) if opts.o.debug: - print("Service created:") - print(f"{service_resp}") + print(f"Sending this service: {service}") + if not opts.o.dry_run: + service_resp = self.core_api.create_namespaced_service( + namespace=self.k8s_namespace, + body=service + ) + if opts.o.debug: + print("Service created:") + print(f"{service_resp}") + + def up(self, detach, services): + if not opts.o.dry_run: + if self.is_kind(): + # Create the kind cluster + create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename)) + # Ensure the referenced containers are copied into kind + load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) + self.connect_api() + else: + print("Dry run mode enabled, skipping k8s API connect") + + self._create_volume_data() + self._create_deployment() if not self.is_kind(): ingress: client.V1Ingress = self.cluster_info.get_ingress() - if opts.o.debug: - print(f"Sending this ingress: {ingress}") - ingress_resp = self.networking_api.create_namespaced_ingress( - namespace=self.k8s_namespace, - 
body=ingress - ) - if opts.o.debug: - print("Ingress created:") - print(f"{ingress_resp}") + if ingress: + if opts.o.debug: + print(f"Sending this ingress: {ingress}") + if not opts.o.dry_run: + ingress_resp = self.networking_api.create_namespaced_ingress( + namespace=self.k8s_namespace, + body=ingress + ) + if opts.o.debug: + print("Ingress created:") + print(f"{ingress_resp}") + else: + if opts.o.debug: + print("No ingress configured") - def down(self, timeout, volumes): + def down(self, timeout, volumes): # noqa: C901 self.connect_api() # Delete the k8s objects - # Create the host-path-mounted PVs for this deployment - pvs = self.cluster_info.get_pvs() - for pv in pvs: - if opts.o.debug: - print(f"Deleting this pv: {pv}") - try: - pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name) + + if volumes: + # Create the host-path-mounted PVs for this deployment + pvs = self.cluster_info.get_pvs() + for pv in pvs: if opts.o.debug: - print("PV deleted:") - print(f"{pv_resp}") + print(f"Deleting this pv: {pv}") + try: + pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name) + if opts.o.debug: + print("PV deleted:") + print(f"{pv_resp}") + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + # Figure out the PVCs for this deployment + pvcs = self.cluster_info.get_pvcs() + for pvc in pvcs: + if opts.o.debug: + print(f"Deleting this pvc: {pvc}") + try: + pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim( + name=pvc.metadata.name, namespace=self.k8s_namespace + ) + if opts.o.debug: + print("PVCs deleted:") + print(f"{pvc_resp}") + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + # Figure out the ConfigMaps for this deployment + cfg_maps = self.cluster_info.get_configmaps() + for cfg_map in cfg_maps: + if opts.o.debug: + print(f"Deleting this ConfigMap: {cfg_map}") + try: + cfg_map_resp = self.core_api.delete_namespaced_config_map( + name=cfg_map.metadata.name, 
namespace=self.k8s_namespace + ) + if opts.o.debug: + print("ConfigMap deleted:") + print(f"{cfg_map_resp}") except client.exceptions.ApiException as e: _check_delete_exception(e) - # Figure out the PVCs for this deployment - pvcs = self.cluster_info.get_pvcs() - for pvc in pvcs: - if opts.o.debug: - print(f"Deleting this pvc: {pvc}") - try: - pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim( - name=pvc.metadata.name, namespace=self.k8s_namespace - ) - if opts.o.debug: - print("PVCs deleted:") - print(f"{pvc_resp}") - except client.exceptions.ApiException as e: - _check_delete_exception(e) deployment = self.cluster_info.get_deployment() if opts.o.debug: print(f"Deleting this deployment: {deployment}") @@ -198,14 +271,18 @@ class K8sDeployer(Deployer): if not self.is_kind(): ingress: client.V1Ingress = self.cluster_info.get_ingress() - if opts.o.debug: - print(f"Deleting this ingress: {ingress}") - try: - self.networking_api.delete_namespaced_ingress( - name=ingress.metadata.name, namespace=self.k8s_namespace - ) - except client.exceptions.ApiException as e: - _check_delete_exception(e) + if ingress: + if opts.o.debug: + print(f"Deleting this ingress: {ingress}") + try: + self.networking_api.delete_namespaced_ingress( + name=ingress.metadata.name, namespace=self.k8s_namespace + ) + except client.exceptions.ApiException as e: + _check_delete_exception(e) + else: + if opts.o.debug: + print("No ingress to delete") if self.is_kind(): # Destroy the kind cluster @@ -306,9 +383,15 @@ class K8sDeployer(Deployer): log_data = "******* Pods not running ********\n" else: k8s_pod_name = pods[0] + containers = containers_in_pod(self.core_api, k8s_pod_name) # If the pod is not yet started, the logs request below will throw an exception try: - log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test") + log_data = "" + for container in containers: + container_log = self.core_api.read_namespaced_pod_log(k8s_pod_name, 
namespace="default", container=container) + container_log_lines = container_log.splitlines() + for line in container_log_lines: + log_data += f"{container}: {line}\n" except client.exceptions.ApiException as e: if opts.o.debug: print(f"Error from read_namespaced_pod_log: {e}") @@ -353,8 +436,9 @@ class K8sDeployer(Deployer): class K8sDeployerConfigGenerator(DeployerConfigGenerator): type: str - def __init__(self, type: str) -> None: + def __init__(self, type: str, deployment_context) -> None: self.type = type + self.deployment_context = deployment_context super().__init__() def generate(self, deployment_dir: Path): @@ -362,7 +446,7 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator): if self.type == "k8s-kind": # Check the file isn't already there # Get the config file contents - content = generate_kind_config(deployment_dir) + content = generate_kind_config(deployment_dir, self.deployment_context) if opts.o.debug: print(f"kind config is: {content}") config_file = deployment_dir.joinpath(constants.kind_config_filename) diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py index 62545dfd..d49a0d21 100644 --- a/stack_orchestrator/deploy/k8s/helpers.py +++ b/stack_orchestrator/deploy/k8s/helpers.py @@ -17,6 +17,7 @@ from kubernetes import client import os from pathlib import Path import subprocess +import re from typing import Set, Mapping, List from stack_orchestrator.opts import opts @@ -61,6 +62,17 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str): return pods +def containers_in_pod(core_api: client.CoreV1Api, pod_name: str): + containers = [] + pod_response = core_api.read_namespaced_pod(pod_name, namespace="default") + if opts.o.debug: + print(f"pod_response: {pod_response}") + pod_containers = pod_response.spec.containers + for pod_container in pod_containers: + containers.append(pod_container.name) + return containers + + def log_stream_from_string(s: str): # Note response has to be 
UTF-8 encoded because the caller expects to decode it yield ("ignore", s.encode()) @@ -73,14 +85,14 @@ def named_volumes_from_pod_files(parsed_pod_files): parsed_pod_file = parsed_pod_files[pod] if "volumes" in parsed_pod_file: volumes = parsed_pod_file["volumes"] - for volume in volumes.keys(): + for volume, value in volumes.items(): # Volume definition looks like: # 'laconicd-data': None named_volumes.append(volume) return named_volumes -def get_node_pv_mount_path(volume_name: str): +def get_kind_pv_bind_mount_path(volume_name: str): return f"/mnt/{volume_name}" @@ -97,37 +109,46 @@ def volume_mounts_for_service(parsed_pod_files, service): if "volumes" in service_obj: volumes = service_obj["volumes"] for mount_string in volumes: - # Looks like: test-data:/data - (volume_name, mount_path) = mount_string.split(":") - volume_device = client.V1VolumeMount(mount_path=mount_path, name=volume_name) + # Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw + if opts.o.debug: + print(f"mount_string: {mount_string}") + mount_split = mount_string.split(":") + volume_name = mount_split[0] + mount_path = mount_split[1] + mount_options = mount_split[2] if len(mount_split) == 3 else None + if opts.o.debug: + print(f"volume_name: {volume_name}") + print(f"mount path: {mount_path}") + print(f"mount options: {mount_options}") + volume_device = client.V1VolumeMount( + mount_path=mount_path, + name=volume_name, + read_only="ro" == mount_options + ) result.append(volume_device) return result -def volumes_for_pod_files(parsed_pod_files): +def volumes_for_pod_files(parsed_pod_files, spec, app_name): result = [] for pod in parsed_pod_files: parsed_pod_file = parsed_pod_files[pod] if "volumes" in parsed_pod_file: volumes = parsed_pod_file["volumes"] for volume_name in volumes.keys(): - claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=volume_name) - volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim) - result.append(volume) + if 
volume_name in spec.get_configmaps(): + config_map = client.V1ConfigMapVolumeSource(name=f"{app_name}-{volume_name}") + volume = client.V1Volume(name=volume_name, config_map=config_map) + result.append(volume) + else: + claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=f"{app_name}-{volume_name}") + volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim) + result.append(volume) return result -def _get_host_paths_for_volumes(parsed_pod_files): - result = {} - for pod in parsed_pod_files: - parsed_pod_file = parsed_pod_files[pod] - if "volumes" in parsed_pod_file: - volumes = parsed_pod_file["volumes"] - for volume_name in volumes.keys(): - volume_definition = volumes[volume_name] - host_path = volume_definition["driver_opts"]["device"] - result[volume_name] = host_path - return result +def _get_host_paths_for_volumes(deployment_context): + return deployment_context.spec.get_volumes() def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path: @@ -135,12 +156,12 @@ def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Pat return data_mount_path else: # Python Path voodo that looks pretty odd: - return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve() + return Path.cwd().joinpath(deployment_dir.joinpath(data_mount_path)).resolve() -def _generate_kind_mounts(parsed_pod_files, deployment_dir): +def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context): volume_definitions = [] - volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files) + volume_host_path_map = _get_host_paths_for_volumes(deployment_context) # Note these paths are relative to the location of the pod files (at present) # So we need to fix up to make them correct and absolute because kind assumes # relative to the cwd. 
@@ -153,12 +174,22 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir): if "volumes" in service_obj: volumes = service_obj["volumes"] for mount_string in volumes: - # Looks like: test-data:/data - (volume_name, mount_path) = mount_string.split(":") - volume_definitions.append( - f" - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n" - f" containerPath: {get_node_pv_mount_path(volume_name)}" - ) + # Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw + if opts.o.debug: + print(f"mount_string: {mount_string}") + mount_split = mount_string.split(":") + volume_name = mount_split[0] + mount_path = mount_split[1] + if opts.o.debug: + print(f"volume_name: {volume_name}") + print(f"map: {volume_host_path_map}") + print(f"mount path: {mount_path}") + if volume_name not in deployment_context.spec.get_configmaps(): + if volume_host_path_map[volume_name]: + volume_definitions.append( + f" - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n" + f" containerPath: {get_kind_pv_bind_mount_path(volume_name)}\n" + ) return ( "" if len(volume_definitions) == 0 else ( " extraMounts:\n" @@ -180,7 +211,7 @@ def _generate_kind_port_mappings(parsed_pod_files): for port_string in ports: # TODO handle the complex cases # Looks like: 80 or something more complicated - port_definitions.append(f" - containerPort: {port_string}\n hostPort: {port_string}") + port_definitions.append(f" - containerPort: {port_string}\n hostPort: {port_string}\n") return ( "" if len(port_definitions) == 0 else ( " extraPortMappings:\n" @@ -189,6 +220,33 @@ def _generate_kind_port_mappings(parsed_pod_files): ) +# Note: this makes any duplicate definition in b overwrite a +def merge_envs(a: Mapping[str, str], b: Mapping[str, str]) -> Mapping[str, str]: + result = {**a, **b} + return result + + +def _expand_shell_vars(raw_val: str) -> str: + # could be: or ${} or ${:-} + # TODO: implement support for variable 
substitution and default values + # if raw_val is like ${} print a warning and substitute an empty string + # otherwise return raw_val + match = re.search(r"^\$\{(.*)\}$", raw_val) + if match: + print(f"WARNING: found unimplemented environment variable substitution: {raw_val}") + else: + return raw_val + + +# TODO: handle the case where the same env var is defined in multiple places +def envs_from_compose_file(compose_file_envs: Mapping[str, str]) -> Mapping[str, str]: + result = {} + for env_var, env_val in compose_file_envs.items(): + expanded_env_val = _expand_shell_vars(env_val) + result.update({env_var: expanded_env_val}) + return result + + def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]: result = [] for env_var, env_val in map.items(): @@ -214,13 +272,13 @@ def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V # extraMounts: # - hostPath: /path/to/my/files # containerPath: /files -def generate_kind_config(deployment_dir: Path): +def generate_kind_config(deployment_dir: Path, deployment_context): compose_file_dir = deployment_dir.joinpath("compose") # TODO: this should come from the stack file, not this way pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()] parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files) port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map) - mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir) + mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir, deployment_context) return ( "kind: Cluster\n" "apiVersion: kind.x-k8s.io/v1alpha4\n" diff --git a/stack_orchestrator/deploy/spec.py b/stack_orchestrator/deploy/spec.py index c4f791bf..ab452fe3 100644 --- a/stack_orchestrator/deploy/spec.py +++ b/stack_orchestrator/deploy/spec.py @@ -13,30 +13,130 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
-from pathlib import Path import typing +import humanfriendly + +from pathlib import Path + from stack_orchestrator.util import get_yaml from stack_orchestrator import constants +class ResourceLimits: + cpus: float = None + memory: int = None + storage: int = None + + def __init__(self, obj={}): + if "cpus" in obj: + self.cpus = float(obj["cpus"]) + if "memory" in obj: + self.memory = humanfriendly.parse_size(obj["memory"]) + if "storage" in obj: + self.storage = humanfriendly.parse_size(obj["storage"]) + + def __len__(self): + return len(self.__dict__) + + def __iter__(self): + for k in self.__dict__: + yield k, self.__dict__[k] + + def __repr__(self): + return str(self.__dict__) + + +class Resources: + limits: ResourceLimits = None + reservations: ResourceLimits = None + + def __init__(self, obj={}): + if "reservations" in obj: + self.reservations = ResourceLimits(obj["reservations"]) + if "limits" in obj: + self.limits = ResourceLimits(obj["limits"]) + + def __len__(self): + return len(self.__dict__) + + def __iter__(self): + for k in self.__dict__: + yield k, self.__dict__[k] + + def __repr__(self): + return str(self.__dict__) + + class Spec: obj: typing.Any + file_path: Path - def __init__(self) -> None: - pass + def __init__(self, file_path: Path = None, obj={}) -> None: + self.file_path = file_path + self.obj = obj + + def __getitem__(self, item): + return self.obj[item] + + def __contains__(self, item): + return item in self.obj + + def get(self, item, default=None): + return self.obj.get(item, default) def init_from_file(self, file_path: Path): with file_path: self.obj = get_yaml().load(open(file_path, "r")) + self.file_path = file_path def get_image_registry(self): - return (self.obj[constants.image_resigtry_key] - if self.obj and constants.image_resigtry_key in self.obj + return (self.obj[constants.image_registry_key] + if self.obj and constants.image_registry_key in self.obj else None) + def get_volumes(self): + return (self.obj["volumes"] + if self.obj 
and "volumes" in self.obj + else {}) + + def get_configmaps(self): + return (self.obj["configmaps"] + if self.obj and "configmaps" in self.obj + else {}) + + def get_container_resources(self): + return Resources(self.obj.get("resources", {}).get("containers", {})) + + def get_volume_resources(self): + return Resources(self.obj.get("resources", {}).get("volumes", {})) + def get_http_proxy(self): return (self.obj[constants.network_key][constants.http_proxy_key] if self.obj and constants.network_key in self.obj and constants.http_proxy_key in self.obj[constants.network_key] else None) + + def get_annotations(self): + return self.obj.get("annotations", {}) + + def get_labels(self): + return self.obj.get("labels", {}) + + def get_privileged(self): + return "true" == str(self.obj.get("security", {}).get("privileged", "false")).lower() + + def get_capabilities(self): + return self.obj.get("security", {}).get("capabilities", []) + + def get_deployment_type(self): + return self.obj[constants.deploy_to_key] + + def is_kubernetes_deployment(self): + return self.get_deployment_type() in [constants.k8s_kind_deploy_type, constants.k8s_deploy_type] + + def is_kind_deployment(self): + return self.get_deployment_type() in [constants.k8s_kind_deploy_type] + + def is_docker_deployment(self): + return self.get_deployment_type() in [constants.compose_deploy_type] diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp.py b/stack_orchestrator/deploy/webapp/deploy_webapp.py index aca2df35..4c91dec3 100644 --- a/stack_orchestrator/deploy/webapp/deploy_webapp.py +++ b/stack_orchestrator/deploy/webapp/deploy_webapp.py @@ -44,7 +44,7 @@ def _fixup_url_spec(spec_file_name: str, url: str): - host-name: {parsed_url.hostname} routes: - path: '{parsed_url.path if parsed_url.path else "/"}' - proxy-to: webapp:3000 + proxy-to: webapp:80 ''' spec_file_path = Path(spec_file_name) with open(spec_file_path) as rfile: diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py 
b/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py index 2e4544eb..299eeba9 100644 --- a/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py +++ b/stack_orchestrator/deploy/webapp/deploy_webapp_from_registry.py @@ -19,6 +19,8 @@ import shlex import shutil import sys import tempfile +import time +import uuid import click @@ -27,7 +29,7 @@ from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient, build_container_image, push_container_image, file_hash, deploy_to_k8s, publish_deployment, hostname_for_deployment_request, generate_hostname_for_app, - match_owner) + match_owner, skip_by_tag) def process_app_deployment_request( @@ -39,8 +41,19 @@ def process_app_deployment_request( dns_suffix, deployment_parent_dir, kube_config, - image_registry + image_registry, + log_parent_dir ): + run_id = f"{app_deployment_request.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}" + log_file = None + if log_parent_dir: + log_dir = os.path.join(log_parent_dir, app_deployment_request.id) + if not os.path.exists(log_dir): + os.mkdir(log_dir) + log_file_path = os.path.join(log_dir, f"{run_id}.log") + print(f"Directing build logs to: {log_file_path}") + log_file = open(log_file_path, "wt") + # 1. 
look up application app = laconic.get_record(app_deployment_request.attributes.application, require=True) @@ -59,8 +72,8 @@ def process_app_deployment_request( dns_record = laconic.get_record(dns_crn) if dns_record: matched_owner = match_owner(app_deployment_request, dns_record) - if not matched_owner and dns_record.request: - matched_owner = match_owner(app_deployment_request, laconic.get_record(dns_record.request, require=True)) + if not matched_owner and dns_record.attributes.request: + matched_owner = match_owner(app_deployment_request, laconic.get_record(dns_record.attributes.request, require=True)) if matched_owner: print("Matched DnsRecord ownership:", matched_owner) @@ -102,8 +115,10 @@ def process_app_deployment_request( needs_k8s_deploy = False # 6. build container (if needed) if not deployment_record or deployment_record.attributes.application != app.id: - build_container_image(app, deployment_container_tag) - push_container_image(deployment_dir) + # TODO: pull from request + extra_build_args = [] + build_container_image(app, deployment_container_tag, extra_build_args, log_file) + push_container_image(deployment_dir, log_file) needs_k8s_deploy = True # 7. 
update config (if needed) @@ -116,6 +131,7 @@ def process_app_deployment_request( deploy_to_k8s( deployment_record, deployment_dir, + log_file ) publish_deployment( @@ -136,13 +152,17 @@ def load_known_requests(filename): return {} -def dump_known_requests(filename, requests): +def dump_known_requests(filename, requests, status="SEEN"): if not filename: return known_requests = load_known_requests(filename) for r in requests: - known_requests[r.id] = r.createTime - json.dump(known_requests, open(filename, "w")) + known_requests[r.id] = { + "createTime": r.createTime, + "status": status + } + with open(filename, "w") as f: + json.dump(known_requests, f) @click.command() @@ -158,10 +178,14 @@ def dump_known_requests(filename, requests): @click.option("--record-namespace-dns", help="eg, crn://laconic/dns") @click.option("--record-namespace-deployments", help="eg, crn://laconic/deployments") @click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True) +@click.option("--include-tags", help="Only include requests with matching tags (comma-separated).", default="") +@click.option("--exclude-tags", help="Exclude requests with matching tags (comma-separated).", default="") +@click.option("--log-dir", help="Output build/deployment logs to directory.", default=None) @click.pass_context -def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir, +def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir, # noqa: C901 request_id, discover, state_file, only_update_state, - dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run): + dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run, + include_tags, exclude_tags, log_dir): if request_id and discover: print("Cannot specify both --request-id and --discover", file=sys.stderr) sys.exit(2) @@ -179,6 +203,10 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_ print("--dns-suffix, 
--record-namespace-dns, and --record-namespace-deployments are all required", file=sys.stderr) sys.exit(2) + # Split CSV and clean up values. + include_tags = [tag.strip() for tag in include_tags.split(",") if tag] + exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag] + laconic = LaconicRegistryClient(laconic_config) # Find deployment requests. @@ -200,7 +228,9 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_ requests.sort(key=lambda r: r.createTime) requests.reverse() requests_by_name = {} + skipped_by_name = {} for r in requests: + # TODO: Do this _after_ filtering deployments and cancellations to minimize round trips. app = laconic.get_record(r.attributes.application) if not app: print("Skipping request %s, cannot locate app." % r.id) @@ -211,17 +241,20 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_ requested_name = generate_hostname_for_app(app) print("Generating name %s for request %s." % (requested_name, r.id)) - if requested_name not in requests_by_name: - print( - "Found request %s to run application %s on %s." - % (r.id, r.attributes.application, requested_name) - ) - requests_by_name[requested_name] = r - else: - print( - "Ignoring request %s, it is superseded by %s." - % (r.id, requests_by_name[requested_name].id) - ) + if requested_name in skipped_by_name or requested_name in requests_by_name: + print("Ignoring request %s, it has been superseded." % r.id) + continue + + if skip_by_tag(r, include_tags, exclude_tags): + print("Skipping request %s, filtered by tag (include %s, exclude %s, present %s)" % (r.id, + include_tags, + exclude_tags, + r.attributes.tags)) + skipped_by_name[requested_name] = r + continue + + print("Found request %s to run application %s on %s." % (r.id, r.attributes.application, requested_name)) + requests_by_name[requested_name] = r # Find deployments. 
deployments = laconic.app_deployments() @@ -256,6 +289,8 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_ if not dry_run: for r in requests_to_execute: + dump_known_requests(state_file, [r], "DEPLOYING") + status = "ERROR" try: process_app_deployment_request( ctx, @@ -266,7 +301,9 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_ dns_suffix, os.path.abspath(deployment_parent_dir), kube_config, - image_registry + image_registry, + log_dir ) + status = "DEPLOYED" finally: - dump_known_requests(state_file, [r]) + dump_known_requests(state_file, [r], status) diff --git a/stack_orchestrator/deploy/webapp/run_webapp.py b/stack_orchestrator/deploy/webapp/run_webapp.py index 4dbf234a..f780c6f8 100644 --- a/stack_orchestrator/deploy/webapp/run_webapp.py +++ b/stack_orchestrator/deploy/webapp/run_webapp.py @@ -27,7 +27,7 @@ from dotenv import dotenv_values from stack_orchestrator import constants from stack_orchestrator.deploy.deployer_factory import getDeployer -WEBAPP_PORT = 3000 +WEBAPP_PORT = 80 @click.command() diff --git a/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py b/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py index 6c5cac85..8585283e 100644 --- a/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py +++ b/stack_orchestrator/deploy/webapp/undeploy_webapp_from_registry.py @@ -20,7 +20,7 @@ import sys import click -from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient, match_owner +from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient, match_owner, skip_by_tag def process_app_removal_request(ctx, @@ -40,8 +40,8 @@ def process_app_removal_request(ctx, matched_owner = match_owner(app_removal_request, deployment_record, dns_record) # Or of the original deployment request. 
- if not matched_owner and deployment_record.request: - matched_owner = match_owner(app_removal_request, laconic.get_record(deployment_record.request, require=True)) + if not matched_owner and deployment_record.attributes.request: + matched_owner = match_owner(app_removal_request, laconic.get_record(deployment_record.attributes.request, require=True)) if matched_owner: print("Matched deployment ownership:", matched_owner) @@ -107,10 +107,12 @@ def dump_known_requests(filename, requests): @click.option("--delete-names/--preserve-names", help="Delete all names associated with removed deployments.", default=True) @click.option("--delete-volumes/--preserve-volumes", default=True, help="delete data volumes") @click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True) +@click.option("--include-tags", help="Only include requests with matching tags (comma-separated).", default="") +@click.option("--exclude-tags", help="Exclude requests with matching tags (comma-separated).", default="") @click.pass_context def command(ctx, laconic_config, deployment_parent_dir, request_id, discover, state_file, only_update_state, - delete_names, delete_volumes, dry_run): + delete_names, delete_volumes, dry_run, include_tags, exclude_tags): if request_id and discover: print("Cannot specify both --request-id and --discover", file=sys.stderr) sys.exit(2) @@ -123,6 +125,10 @@ def command(ctx, laconic_config, deployment_parent_dir, print("--only-update-state requires --state-file", file=sys.stderr) sys.exit(2) + # Split CSV and clean up values. + include_tags = [tag.strip() for tag in include_tags.split(",") if tag] + exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag] + laconic = LaconicRegistryClient(laconic_config) # Find deployment removal requests. 
@@ -141,6 +147,7 @@ def command(ctx, laconic_config, deployment_parent_dir, previous_requests = load_known_requests(state_file) requests.sort(key=lambda r: r.createTime) + requests.reverse() # Find deployments. deployments = {} @@ -155,10 +162,22 @@ def command(ctx, laconic_config, deployment_parent_dir, # TODO: should we handle CRNs? removals_by_deployment[r.attributes.deployment] = r - requests_to_execute = [] + one_per_deployment = {} for r in requests: if not r.attributes.deployment: print(f"Skipping removal request {r.id} since it was a cancellation.") + elif r.attributes.deployment in one_per_deployment: + print(f"Skipping removal request {r.id} since it was superseded.") + else: + one_per_deployment[r.attributes.deployment] = r + + requests_to_execute = [] + for r in one_per_deployment.values(): + if skip_by_tag(r, include_tags, exclude_tags): + print("Skipping removal request %s, filtered by tag (include %s, exclude %s, present %s)" % (r.id, + include_tags, + exclude_tags, + r.attributes.tags)) elif r.id in removals_by_request: print(f"Found satisfied request for {r.id} at {removals_by_request[r.id].id}") elif r.attributes.deployment in removals_by_deployment: diff --git a/stack_orchestrator/deploy/webapp/util.py b/stack_orchestrator/deploy/webapp/util.py index ef98117b..80b477f9 100644 --- a/stack_orchestrator/deploy/webapp/util.py +++ b/stack_orchestrator/deploy/webapp/util.py @@ -195,7 +195,24 @@ def file_hash(filename): return hashlib.sha1(open(filename).read().encode()).hexdigest() -def build_container_image(app_record, tag, extra_build_args=[]): +def determine_base_container(clone_dir, app_type="webapp"): + if not app_type or not app_type.startswith("webapp"): + raise Exception(f"Unsupported app_type {app_type}") + + base_container = "cerc/webapp-base" + if app_type == "webapp/next": + base_container = "cerc/nextjs-base" + elif app_type == "webapp": + pkg_json_path = os.path.join(clone_dir, "package.json") + if os.path.exists(pkg_json_path): + 
pkg_json = json.load(open(pkg_json_path)) + if "next" in pkg_json.get("dependencies", {}): + base_container = "cerc/nextjs-base" + + return base_container + + +def build_container_image(app_record, tag, extra_build_args=[], log_file=None): tmpdir = tempfile.mkdtemp() try: @@ -210,37 +227,46 @@ def build_container_image(app_record, tag, extra_build_args=[]): git_env = dict(os.environ.copy()) # Never prompt git_env["GIT_TERMINAL_PROMPT"] = "0" - subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env) - subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env) + subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env, stdout=log_file, stderr=log_file) + subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env, stdout=log_file, stderr=log_file) else: - result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir]) + result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=log_file, stderr=log_file) result.check_returncode() + base_container = determine_base_container(clone_dir, app_record.attributes.app_type) + print("Building webapp ...") - build_command = [sys.argv[0], "build-webapp", "--source-repo", clone_dir, "--tag", tag] + build_command = [ + sys.argv[0], "build-webapp", + "--source-repo", clone_dir, + "--tag", tag, + "--base-container", base_container + ] if extra_build_args: build_command.append("--extra-build-args") build_command.append(" ".join(extra_build_args)) - result = subprocess.run(build_command) + result = subprocess.run(build_command, stdout=log_file, stderr=log_file) result.check_returncode() finally: cmd("rm", "-rf", tmpdir) -def push_container_image(deployment_dir): +def push_container_image(deployment_dir, log_file=None): print("Pushing image ...") - result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, "push-images"]) + result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, "push-images"], + 
stdout=log_file, stderr=log_file) result.check_returncode() -def deploy_to_k8s(deploy_record, deployment_dir): +def deploy_to_k8s(deploy_record, deployment_dir, log_file=None): if not deploy_record: command = "up" else: command = "update" - result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command]) + result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command], + stdout=log_file, stderr=log_file) result.check_returncode() @@ -325,3 +351,17 @@ def generate_hostname_for_app(app): else: m.update(app.attributes.repository.encode()) return "%s-%s" % (last_part, m.hexdigest()[0:10]) + + +def skip_by_tag(r, include_tags, exclude_tags): + for tag in exclude_tags: + if r.attributes.tags and tag in r.attributes.tags: + return True + + if include_tags: + for tag in include_tags: + if r.attributes.tags and tag in r.attributes.tags: + return False + return True + + return False diff --git a/stack_orchestrator/repos/setup_repositories.py b/stack_orchestrator/repos/setup_repositories.py index 3612aed0..a137d645 100644 --- a/stack_orchestrator/repos/setup_repositories.py +++ b/stack_orchestrator/repos/setup_repositories.py @@ -26,7 +26,7 @@ import importlib.resources from pathlib import Path import yaml from stack_orchestrator.constants import stack_file_name -from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit +from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit, warn_exit class GitProgress(git.RemoteProgress): @@ -249,8 +249,8 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches error_exit(f"stack {stack} does not exist") with stack_file_path: stack_config = yaml.safe_load(open(stack_file_path, "r")) - if "repos" not in stack_config: - error_exit(f"stack {stack} does not define any repositories") + if "repos" not in stack_config or stack_config["repos"] is None: + warn_exit(f"stack {stack} does not define any repositories") 
else: repos_in_scope = stack_config["repos"] else: diff --git a/stack_orchestrator/util.py b/stack_orchestrator/util.py index 0bd1a609..257e1deb 100644 --- a/stack_orchestrator/util.py +++ b/stack_orchestrator/util.py @@ -19,7 +19,7 @@ import sys import ruamel.yaml from pathlib import Path from dotenv import dotenv_values -from typing import Mapping +from typing import Mapping, Set, List def include_exclude_check(s, include, exclude): @@ -81,17 +81,17 @@ def get_pod_list(parsed_stack): return result -def get_plugin_code_paths(stack): +def get_plugin_code_paths(stack) -> List[Path]: parsed_stack = get_parsed_stack_config(stack) pods = parsed_stack["pods"] - result = [] + result: Set[Path] = set() for pod in pods: if type(pod) is str: - result.append(get_stack_file_path(stack).parent) + result.add(get_stack_file_path(stack).parent) else: pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]) - result.append(Path(os.path.join(pod_root_dir, "stack"))) - return result + result.add(Path(os.path.join(pod_root_dir, "stack"))) + return list(result) def get_pod_file_path(parsed_stack, pod_name: str): @@ -139,6 +139,13 @@ def get_compose_file_dir(): return source_compose_dir +def get_config_file_dir(): + # TODO: refactor to use common code with deploy command + data_dir = Path(__file__).absolute().parent.joinpath("data") + source_config_dir = data_dir.joinpath("config") + return source_config_dir + + def get_parsed_deployment_spec(spec_file): spec_file_path = Path(spec_file) try: @@ -182,5 +189,10 @@ def error_exit(s): sys.exit(1) +def warn_exit(s): + print(f"WARN: {s}") + sys.exit(0) + + def env_var_map_from_file(file: Path) -> Mapping[str, str]: return dotenv_values(file) diff --git a/tests/database/run-test.sh b/tests/database/run-test.sh new file mode 100755 index 00000000..e6aca59c --- /dev/null +++ b/tests/database/run-test.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x + # 
Dump environment variables for debugging + echo "Environment variables:" + env +fi + +if [ "$1" == "from-path" ]; then + TEST_TARGET_SO="laconic-so" +else + TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) +fi + +stack="test-database" +spec_file=${stack}-spec.yml +deployment_dir=${stack}-deployment + +# Helper functions: TODO move into a separate file +wait_for_pods_started () { + for i in {1..50} + do + local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps ) + + if [[ "$ps_output" == *"Running containers:"* ]]; then + # if ready, return + return + else + # if not ready, wait + sleep 5 + fi + done + # Timed out, error exit + echo "waiting for pods to start: FAILED" + delete_cluster_exit +} + +wait_for_test_complete () { + for i in {1..50} + do + + local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) + + if [[ "${log_output}" == *"Database test client: test complete"* ]]; then + # if ready, return + return + else + # if not ready, wait + sleep 5 + fi + done + # Timed out, error exit + echo "waiting for test complete: FAILED" + delete_cluster_exit +} + + +delete_cluster_exit () { + $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes + exit 1 +} + +# Set a non-default repo dir +export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir +echo "Testing this package: $TEST_TARGET_SO" +echo "Test version command" +reported_version_string=$( $TEST_TARGET_SO version ) +echo "Version reported is: ${reported_version_string}" +echo "Cloning repositories into: $CERC_REPO_BASE_DIR" +rm -rf $CERC_REPO_BASE_DIR +mkdir -p $CERC_REPO_BASE_DIR +$TEST_TARGET_SO --stack ${stack} setup-repositories +$TEST_TARGET_SO --stack ${stack} build-containers +# Test basic stack-orchestrator deploy to k8s +test_deployment_dir=$CERC_REPO_BASE_DIR/test-${deployment_dir} +test_deployment_spec=$CERC_REPO_BASE_DIR/test-${spec_file} + +$TEST_TARGET_SO --stack ${stack} deploy --deploy-to k8s-kind init --output 
$test_deployment_spec +# Check the file now exists +if [ ! -f "$test_deployment_spec" ]; then + echo "deploy init test: spec file not present" + echo "deploy init test: FAILED" + exit 1 +fi +echo "deploy init test: passed" + +$TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir +# Check the deployment dir exists +if [ ! -d "$test_deployment_dir" ]; then + echo "deploy create test: deployment directory not present" + echo "deploy create test: FAILED" + exit 1 +fi +echo "deploy create test: passed" + +# Try to start the deployment +$TEST_TARGET_SO deployment --dir $test_deployment_dir start +wait_for_pods_started +# Check logs command works +wait_for_test_complete +log_output_1=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_1" == *"Database test client: test data does not exist"* ]]; then + echo "Create database content test: passed" +else + echo "Create database content test: FAILED" + delete_cluster_exit +fi + +# Stop then start again and check the volume was preserved +$TEST_TARGET_SO deployment --dir $test_deployment_dir stop +# Sleep a bit just in case +sleep 20 +$TEST_TARGET_SO deployment --dir $test_deployment_dir start +wait_for_pods_started +wait_for_test_complete + +log_output_2=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_2" == *"Database test client: test data already exists"* ]]; then + echo "Retain database content test: passed" +else + echo "Retain database content test: FAILED" + delete_cluster_exit +fi + +# Stop and clean up +$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes +echo "Test passed" diff --git a/tests/deploy/run-deploy-test.sh b/tests/deploy/run-deploy-test.sh index 4456c28d..9dec2d46 100755 --- a/tests/deploy/run-deploy-test.sh +++ b/tests/deploy/run-deploy-test.sh @@ -6,6 +6,12 @@ fi # Dump environment variables for debugging echo "Environment variables:" env + 
+delete_cluster_exit () { + $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes + exit 1 +} + # Test basic stack-orchestrator deploy echo "Running stack-orchestrator deploy test" # Bit of a hack, test the most recent package @@ -57,7 +63,7 @@ $TEST_TARGET_SO --stack test deploy down # The next time we bring the container up the volume will be old (from the previous run above) $TEST_TARGET_SO --stack test deploy up log_output_1=$( $TEST_TARGET_SO --stack test deploy logs ) -if [[ "$log_output_1" == *"Filesystem is old"* ]]; then +if [[ "$log_output_1" == *"filesystem is old"* ]]; then echo "Retain volumes test: passed" else echo "Retain volumes test: FAILED" @@ -67,7 +73,7 @@ $TEST_TARGET_SO --stack test deploy down --delete-volumes # Now when we bring the container up the volume will be new again $TEST_TARGET_SO --stack test deploy up log_output_2=$( $TEST_TARGET_SO --stack test deploy logs ) -if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then +if [[ "$log_output_2" == *"filesystem is fresh"* ]]; then echo "Delete volumes test: passed" else echo "Delete volumes test: FAILED" @@ -106,12 +112,16 @@ if [ ! "$create_file_content" == "create-command-output-data" ]; then echo "deploy create test: FAILED" exit 1 fi + +# Add a config file to be picked up by the ConfigMap before starting. 
+echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config + echo "deploy create output file test: passed" # Try to start the deployment $TEST_TARGET_SO deployment --dir $test_deployment_dir start # Check logs command works log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) -if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then +if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then echo "deployment logs test: passed" else echo "deployment logs test: FAILED" @@ -124,6 +134,37 @@ else echo "deployment config test: FAILED" exit 1 fi +# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file +if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then + echo "deployment compose config test: passed" +else + echo "deployment compose config test: FAILED" + exit 1 +fi + +# Check that the ConfigMap is mounted and contains the expected content. +log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then + echo "deployment ConfigMap test: passed" +else + echo "deployment ConfigMap test: FAILED" + delete_cluster_exit +fi + +# Stop then start again and check the volume was preserved +$TEST_TARGET_SO deployment --dir $test_deployment_dir stop +# Sleep a bit just in case +# sleep for longer to check if that's why the subsequent create cluster fails +sleep 20 +$TEST_TARGET_SO deployment --dir $test_deployment_dir start +log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_5" == *"filesystem is old"* ]]; then + echo "Retain volumes test: passed" +else + echo "Retain volumes test: FAILED" + delete_cluster_exit +fi + # Stop and clean up $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes echo "Test passed" diff --git a/tests/k8s-deploy/run-deploy-test.sh 
b/tests/k8s-deploy/run-deploy-test.sh index de1a5f10..e482a5b7 100755 --- a/tests/k8s-deploy/run-deploy-test.sh +++ b/tests/k8s-deploy/run-deploy-test.sh @@ -76,6 +76,10 @@ if [ ! -f "$test_deployment_spec" ]; then exit 1 fi echo "deploy init test: passed" + +# Switch to a full path for bind mount. +sed -i "s|^\(\s*test-data-bind:$\)$|\1 ${test_deployment_dir}/data/test-data-bind|" $test_deployment_spec + $TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir # Check the deployment dir exists if [ ! -d "$test_deployment_dir" ]; then @@ -97,19 +101,26 @@ if [ ! "$create_file_content" == "create-command-output-data" ]; then echo "deploy create test: FAILED" exit 1 fi + +# Add a config file to be picked up by the ConfigMap before starting. +echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/configmaps/test-config/test_config + echo "deploy create output file test: passed" # Try to start the deployment $TEST_TARGET_SO deployment --dir $test_deployment_dir start wait_for_pods_started # Check logs command works wait_for_log_output +sleep 1 log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) -if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then +if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then echo "deployment logs test: passed" else echo "deployment logs test: FAILED" + echo $log_output_3 delete_cluster_exit fi + # Check the config variable CERC_TEST_PARAM_1 was passed correctly if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then echo "deployment config test: passed" @@ -117,6 +128,44 @@ else echo "deployment config test: FAILED" delete_cluster_exit fi + +# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file +if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then + echo "deployment compose config test: passed" +else + echo "deployment compose config test: FAILED" + exit 1 +fi + +# Check that the 
ConfigMap is mounted and contains the expected content. +log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then + echo "deployment ConfigMap test: passed" +else + echo "deployment ConfigMap test: FAILED" + delete_cluster_exit +fi + +# Check that the bind-mount volume is mounted. +log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_5" == *"/data: MOUNTED"* ]]; then + echo "deployment bind volumes test: passed" +else + echo "deployment bind volumes test: FAILED" + echo $log_output_5 + delete_cluster_exit +fi + +# Check that the provisioner managed volume is mounted. +log_output_6=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_6" == *"/data2: MOUNTED"* ]]; then + echo "deployment provisioner volumes test: passed" +else + echo "deployment provisioner volumes test: FAILED" + echo $log_output_6 + delete_cluster_exit +fi + # Stop then start again and check the volume was preserved $TEST_TARGET_SO deployment --dir $test_deployment_dir stop # Sleep a bit just in case @@ -125,13 +174,26 @@ sleep 20 $TEST_TARGET_SO deployment --dir $test_deployment_dir start wait_for_pods_started wait_for_log_output -log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) -if [[ "$log_output_4" == *"Filesystem is old"* ]]; then - echo "Retain volumes test: passed" +sleep 1 + +log_output_10=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_10" == *"/data filesystem is old"* ]]; then + echo "Retain bind volumes test: passed" else - echo "Retain volumes test: FAILED" + echo "Retain bind volumes test: FAILED" delete_cluster_exit fi + +# These volumes will be completely destroyed by the kind delete/create, because they lived inside +# the kind container. 
So, unlike the bind-mount case, they will appear fresh after the restart. +log_output_11=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_11" == *"/data2 filesystem is fresh"* ]]; then + echo "Fresh provisioner volumes test: passed" +else + echo "Fresh provisioner volumes test: FAILED" + delete_cluster_exit +fi + # Stop and clean up $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes echo "Test passed" diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh index 5db382f8..8cae4828 100755 --- a/tests/webapp-test/run-webapp-test.sh +++ b/tests/webapp-test/run-webapp-test.sh @@ -30,14 +30,14 @@ CHECK="SPECIAL_01234567890_TEST_STRING" set +e -CONTAINER_ID=$(docker run -p 3000:3000 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local) +CONTAINER_ID=$(docker run -p 3000:80 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local) sleep 3 wget -t 7 -O test.before -m http://localhost:3000 docker logs $CONTAINER_ID docker remove -f $CONTAINER_ID -CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local) +CONTAINER_ID=$(docker run -p 3000:80 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local) sleep 3 wget -t 7 -O test.after -m http://localhost:3000