From f27da1980890dee660dc6d3b1b9c82a7d7770230 Mon Sep 17 00:00:00 2001
From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com>
Date: Tue, 5 Dec 2023 15:00:03 +0530
Subject: [PATCH 1/4] Use IPFS for hosting glob files for Urbit (#677)

* Use IPFS for hosting glob files for Urbit

* Add env configuration for IPFS endpoints to instructions

* Make ship pier dir configurable in remote deployment script

* Update remote deployment script to accept glob hash arg
---
 .../docker-compose-uniswap-interface.yml      | 19 ---------------
 .../compose/docker-compose-uniswap-urbit.yml  | 20 ++++++++++++++++
 .../uniswap-interface/deploy-uniswap-app.sh   | 21 +++++++++++------
 .../uniswap-interface/host-uniswap-glob.sh    | 23 -------------------
 .../uniswap-interface/install-uniswap-app.sh  | 19 +++++++--------
 .../remote-deploy-uniswap.sh                  | 15 +++++++-----
 .../data/stacks/uniswap-urbit-app/README.md   | 15 ++++++++++--
 7 files changed, 66 insertions(+), 66 deletions(-)
 delete mode 100755 stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh

diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml
index f6a5c53f..6d021961 100644
--- a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml
+++ b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml
@@ -12,25 +12,6 @@ services:
       - app_builds:/app-builds
       - ../config/uniswap-interface/build-app.sh:/app/build-app.sh
 
-  uniswap-glob-host:
-    image: cerc/urbit-globs-host:local
-    restart: unless-stopped
-    depends_on:
-      uniswap-interface:
-        condition: service_completed_successfully
-    command: ["./host-uniswap-glob.sh"]
-    volumes:
-      - app_globs:/app-globs
-      - ../config/uniswap-interface/host-uniswap-glob.sh:/app/host-uniswap-glob.sh
-    ports:
-      - "3000"
-    healthcheck:
-      test: ["CMD", "nc", "-v", "localhost", "3000"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 10s
-
   uniswap-gql-proxy:
     image: cerc/uniswap-interface:local
     restart: on-failure
diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml
index ae0b3709..31fa99bf 100644
--- a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml
+++ b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml
@@ -4,6 +4,9 @@ services:
   urbit-fake-ship:
     restart: unless-stopped
     image: tloncorp/vere
+    environment:
+      CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs-glob-host:5001}
+      CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs-glob-host:8080}
     entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-uniswap-app.sh && tail -f /dev/null"]
     volumes:
       - urbit_data:/urbit
@@ -20,7 +23,24 @@
       retries: 15
       start_period: 10s
 
+  ipfs-glob-host:
+    image: ipfs/kubo:master-2023-02-20-714a968
+    volumes:
+      - ipfs-import:/import
+      - ipfs-data:/data/ipfs
+    ports:
+      - "8080"
+      - "5001"
+    healthcheck:
+      test: ["CMD", "nc", "-v", "localhost", "5001"]
+      interval: 20s
+      timeout: 5s
+      retries: 15
+      start_period: 10s
+
 volumes:
   urbit_data:
   app_builds:
   app_globs:
+  ipfs-import:
+  ipfs-data:
diff --git a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh
index 6c147083..f07a205b 100755
--- a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh
+++ b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh
@@ -4,6 +4,11 @@
 if [ -n "$CERC_SCRIPT_DEBUG" ]; then
"$CERC_SCRIPT_DEBUG" ]; then set -x fi +echo "Using IPFS endpoint ${CERC_IPFS_GLOB_HOST_ENDPOINT} for hosting globs" +echo "Using IPFS server endpoint ${CERC_IPFS_SERVER_ENDPOINT} for reading glob files" +ipfs_host_endpoint=${CERC_IPFS_GLOB_HOST_ENDPOINT} +ipfs_server_endpoint=${CERC_IPFS_SERVER_ENDPOINT} + uniswap_app_build='/app-builds/uniswap/build' uniswap_desk_dir='/urbit/zod/uniswap' @@ -96,15 +101,17 @@ rm "${uniswap_desk_dir}/desk.ship" hood "commit %uniswap" dojo "-landscape!make-glob %uniswap /build" -echo "Copying over glob file to mounted volume" -mkdir -p /app-globs/uniswap -cp /urbit/zod/.urb/put/* /app-globs/uniswap/ - glob_file=$(ls -1 -c zod/.urb/put | head -1) -echo "Glob filename: ${glob_file}" +echo "Created glob file: ${glob_file}" + +upload_response=$(curl -X POST -F file=@./zod/.urb/put/${glob_file} ${ipfs_host_endpoint}/api/v0/add) +glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//') + +echo "Glob file uploaded to IFPS:" +echo "{ cid: ${glob_cid}, filename: ${glob_file} }" # Curl and wait for the glob to be hosted -glob_url="http://uniswap-glob-host:3000/${glob_file}" +glob_url="${ipfs_server_endpoint}/ipfs/${glob_cid}?filename=${glob_file}" echo "Checking if glob file hosted at ${glob_url}" while true; do @@ -128,7 +135,7 @@ cat << EOF > "${uniswap_desk_dir}/desk.docket-0" color+0xcd.75df image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg' base+'uniswap' - glob-http+['http://uniswap-glob-host:3000/${glob_file}' ${glob_hash}] + glob-http+['${glob_url}' ${glob_hash}] version+[0 0 1] website+'https://uniswap.org/' license+'MIT' diff --git a/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh b/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh deleted file mode 100755 index 37605794..00000000 --- a/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -# Use config from mounted volume (when running web-app along with watcher stack) -echo "Waiting for uniswap app glob" -while [ ! -d /app-globs/uniswap ]; do - echo "Glob directory not found, retrying in 5 seconds..." - sleep 5 -done - - -# Copy to a new globs directory -mkdir -p globs -cp -r /app-globs/uniswap/* ./globs - -# Serve the glob file -cd globs -echo "Hosting glob file at port 3000" -python3 -m http.server 3000 --bind 0.0.0.0 diff --git a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh index 679bc27e..2463e1c7 100755 --- a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh +++ b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh @@ -1,21 +1,22 @@ #!/bin/bash -# $1: Glob file URL (eg. https://xyz.com/glob-abcd.glob) -# $2: Uniswap desk dir (default: ./zod/uniswap) +# $1: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob) +# $2: Glob file hash (eg. 
+# $3: Urbit ship's pier dir (default: ./zod)
 
-if [ -z "$1" ]; then
-  echo "Glob file URL arg not provided"
+if [ "$#" -lt 2 ]; then
+  echo "Insufficient arguments"
   exit 0
 fi
 
 glob_url=$1
-glob_file=$(basename "$glob_url")
-glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/")
-echo "Using glob file ${glob_file}"
+glob_hash=$2
+echo "Using glob file from ${glob_url} with hash ${glob_hash}"
 
+# Default pier dir: ./zod
 # Default desk dir: ./zod/uniswap
-uniswap_desk_dir="${2:-./zod/uniswap}"
-
+pier_dir="${3:-./zod}"
+uniswap_desk_dir="${pier_dir}/uniswap"
 echo "Using ${uniswap_desk_dir} as the Uniswap desk dir path"
 
 # Fire curl requests to perform operations on the ship
diff --git a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh
index 528151e9..31f03d72 100755
--- a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh
+++ b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh
@@ -1,18 +1,21 @@
 #!/bin/bash
 
 # $1: Remote user host
-# $2: Path to run the app installation in (where urbit ship dir is located)
-# $3: Glob file URL (eg. https://xyz.com/glob-abcd.glob)
+# $2: Remote Urbit ship's pier dir path (eg. /home/user/zod)
+# $3: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
+# $4: Glob file hash (eg. 0vabcd)
 
-if [ "$#" -ne 3 ]; then
-  echo "Usage: $0 <remote_user_host> <remote_folder> <glob_url>"
+if [ "$#" -ne 4 ]; then
+  echo "Incorrect number of arguments"
+  echo "Usage: $0 <remote_user_host> <remote_pier_folder> <glob_url> <glob_hash>"
   exit 1
 fi
 
 remote_user_host="$1"
-remote_folder="$2"
+remote_pier_folder="$2"
 glob_url="$3"
+glob_hash="$4"
 
 installation_script="./install-uniswap-app.sh"
 
-ssh "$remote_user_host" "cd $remote_folder && bash -s $glob_url" < "$installation_script"
+ssh "$remote_user_host" "bash -s $glob_url $glob_hash $remote_pier_folder" < "$installation_script"
diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
index cd6a9f3e..55a37338 100644
--- a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
+++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
@@ -41,10 +41,11 @@ network:
   ports:
     urbit-fake-ship:
       - '8080:80'
-    uniswap-glob-host:
-      - '3000:3000'
     uniswap-gql-proxy:
      - '4000:4000'
+    ipfs-glob-host:
+      - '8081:8080'
+      - '5001:5001'
 ...
 ```
 
@@ -74,6 +75,16 @@ Inside the deployment directory, open the file `config.env` and add variable for
 # Set this to GQL proxy server endpoint for uniswap app
 # (Eg. http://localhost:4000/graphql)
 CERC_UNISWAP_GQL=
+
+# Optional IPFS endpoints:
+
+# IPFS endpoint to host the glob file on
+# (Default: http://ipfs-glob-host:5001 pointing to in-stack IPFS node)
+CERC_IPFS_GLOB_HOST_ENDPOINT=
+
+# IPFS endpoint to fetch the glob file from
+# (Default: http://ipfs-glob-host:8080 pointing to in-stack IPFS node)
+CERC_IPFS_SERVER_ENDPOINT=
 ```
 
 ## Start the stack
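The IPFS hand-off in this patch is plain Kubo HTTP API usage: `deploy-uniswap-app.sh` POSTs the glob to the node's `/api/v0/add` RPC endpoint, pulls the CID out of the JSON response, and then polls the gateway URL until the file is served. A minimal sketch of the same round trip against a local Kubo node, assuming the in-stack port mappings shown above (the glob filename here is hypothetical):

```bash
# Upload the glob via the Kubo RPC API (mapped from ipfs-glob-host:5001)
glob_file=glob-0vabcd.glob
upload_response=$(curl -s -X POST -F file=@./${glob_file} http://localhost:5001/api/v0/add)

# The response JSON carries the content CID in its "Hash" field
glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//')

# Fetch it back through the gateway (mapped from ipfs-glob-host:8080), as the deploy script polls
curl -sf "http://localhost:8080/ipfs/${glob_cid}?filename=${glob_file}" --output /dev/null \
  && echo "glob hosted: ${glob_cid}"
```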
enabled" + yarn proxy +else + echo "Proxy server disabled, exiting" + exit 0 +fi diff --git a/stack_orchestrator/data/stacks/proxy-server/README.md b/stack_orchestrator/data/stacks/proxy-server/README.md new file mode 100644 index 00000000..f0ccdb0f --- /dev/null +++ b/stack_orchestrator/data/stacks/proxy-server/README.md @@ -0,0 +1,79 @@ +# Proxy Server + +Instructions to setup and deploy a HTTP proxy server + +## Setup + +Clone required repository: + +```bash +laconic-so --stack proxy-server setup-repositories --pull + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Build the container image: + +```bash +laconic-so --stack proxy-server build-containers +``` + +## Create a deployment + +* First, create a spec file for the deployment, which will allow mapping the stack's ports and volumes to the host: + + ```bash + laconic-so --stack proxy-server deploy init --output proxy-server-spec.yml + ``` + +* Edit `network` in spec file to map container ports to same ports in host: + + ```yml + ... + network: + ports: + proxy-server: + - '4000:4000' + ... + ``` + +* Once you've made any needed changes to the spec file, create a deployment from it: + + ```bash + laconic-so --stack proxy-server deploy create --spec-file proxy-server-spec.yml --deployment-dir proxy-server-deployment + ``` + +* Inside the deployment directory, open the file `config.env` and set the following env variables: + + ```bash + # Whether to run the proxy server (Optional) (Default: true) + ENABLE_PROXY= + + # Upstream endpoint + # (Eg. https://api.example.org) + CERC_PROXY_UPSTREAM= + + # Origin header to be used (Optional) + # (Eg. https://app.example.org) + CERC_PROXY_ORIGIN_HEADER= + ``` + +## Start the stack + +Start the deployment: + +```bash +laconic-so deployment --dir proxy-server-deployment start +``` + +* List and check the health status of the container using `docker ps` + +* The proxy server will now be listening at http://localhost:4000 + +## Clean up + +To stop the service running in background: + +```bash +laconic-so deployment --dir proxy-server-deployment stop +``` diff --git a/stack_orchestrator/data/stacks/proxy-server/stack.yml b/stack_orchestrator/data/stacks/proxy-server/stack.yml new file mode 100644 index 00000000..313a7f91 --- /dev/null +++ b/stack_orchestrator/data/stacks/proxy-server/stack.yml @@ -0,0 +1,8 @@ +version: "0.1" +name: proxy-server +repos: + - github.com/cerc-io/watcher-ts@v0.2.78 +containers: + - cerc/watcher-ts +pods: + - proxy-server diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md index 55a37338..7499f5fc 100644 --- a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md @@ -41,7 +41,7 @@ network: ports: urbit-fake-ship: - '8080:80' - uniswap-gql-proxy: + proxy-server: - '4000:4000' ipfs-glob-host: - '8081:8080' @@ -64,7 +64,7 @@ laconic-so --stack uniswap-urbit-app deploy create --spec-file uniswap-urbit-app ## Set env variables -Inside the deployment directory, open the file `config.env` and add variable for infura key : +Inside the deployment directory, open the file `config.env` and set the following env variables: ```bash # External RPC endpoints @@ -73,10 +73,28 @@ Inside the deployment directory, open the file `config.env` and add variable for # Uniswap API GQL Endpoint # Set this to GQL proxy server endpoint 
diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
index 55a37338..7499f5fc 100644
--- a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
+++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
@@ -41,7 +41,7 @@ network:
   ports:
     urbit-fake-ship:
       - '8080:80'
-    uniswap-gql-proxy:
+    proxy-server:
       - '4000:4000'
     ipfs-glob-host:
       - '8081:8080'
@@ -64,7 +64,7 @@ laconic-so --stack uniswap-urbit-app deploy create --spec-file uniswap-urbit-app
 
 ## Set env variables
 
-Inside the deployment directory, open the file `config.env` and add variable for infura key :
+Inside the deployment directory, open the file `config.env` and set the following env variables:
 
 ```bash
 # External RPC endpoints
@@ -73,10 +73,28 @@ Inside the deployment directory, open the file `config.env` and set the following
 
 # Uniswap API GQL Endpoint
 # Set this to GQL proxy server endpoint for uniswap app
-# (Eg. http://localhost:4000/graphql)
+# (Eg. http://localhost:4000/v1/graphql)
+# (Eg. https://abc.xyz.com/v1/graphql)
 CERC_UNISWAP_GQL=
 
-# Optional IPFS endpoints:
+# Optional
+
+# Whether to run the proxy GQL server
+# (Disable only if the proxy is not required to be run) (Default: true)
+ENABLE_PROXY=
+
+# Proxy server configuration
+# Used only if proxy is enabled
+
+# Upstream API URL
+# (Eg. https://api.example.org)
+CERC_PROXY_UPSTREAM=https://api.uniswap.org
+
+# Origin header to be used in the proxy
+# (Eg. https://app.example.org)
+CERC_PROXY_ORIGIN_HEADER=https://app.uniswap.org
+
+# IPFS configuration
 
 # IPFS endpoint to host the glob file on
 # (Default: http://ipfs-glob-host:5001 pointing to in-stack IPFS node)
@@ -120,7 +138,7 @@ laconic-so deployment --dir uniswap-urbit-app-deployment start
 
 ## Clean up
 
-To stop all uniswap-urbit-app services running in the background, while preserving chain data:
+To stop all uniswap-urbit-app services running in the background, while preserving data:
 
 ```bash
 laconic-so deployment --dir uniswap-urbit-app-deployment stop
diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml
index 1077b557..3f77098f 100644
--- a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml
+++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml
@@ -2,9 +2,12 @@ version: "0.1"
 name: uniswap-urbit-app
 repos:
   - github.com/cerc-io/uniswap-interface@laconic # TODO: Use release
+  - github.com/cerc-io/watcher-ts@v0.2.78
 containers:
   - cerc/uniswap-interface
+  - cerc/watcher-ts
   - cerc/urbit-globs-host
 pods:
   - uniswap-interface
+  - proxy-server
   - uniswap-urbit
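With the proxy now a separate pod, `run.sh` is the single switch: unless `ENABLE_PROXY` is `false`, the watcher-ts CLI's `yarn proxy` serves the configured upstream on port 4000. A smoke-test sketch for a running deployment, assuming the `4000:4000` port mapping above; the `/v1/graphql` path is taken from the example endpoints in the README, and the probe query is an illustrative GraphQL liveness check rather than a specific Uniswap API call:

```bash
# Expect "Proxy server enabled" in the proxy-server container logs, then probe:
curl -s -X POST http://localhost:4000/v1/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query":"query { __typename }"}'
```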
+cluster_name_prefix = "laconic-" stack_file_name = "stack.yml" spec_file_name = "spec.yml" config_file_name = "config.env" +deployment_file_name = "deployment.yml" +compose_dir_name = "compose" compose_deploy_type = "compose" k8s_kind_deploy_type = "k8s-kind" k8s_deploy_type = "k8s" +cluster_id_key = "cluster-id" kube_config_key = "kube-config" deploy_to_key = "deploy-to" network_key = "network" diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index 424d112f..d1b64743 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -24,6 +24,8 @@ from importlib import resources import subprocess import click from pathlib import Path +from stack_orchestrator import constants +from stack_orchestrator.opts import opts from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path from stack_orchestrator.deploy.deployer import Deployer, DeployerException from stack_orchestrator.deploy.deployer_factory import getDeployer @@ -70,6 +72,9 @@ def create_deploy_context( cluster, env_file, deploy_to) -> DeployCommandContext: + # Extract the cluster name from the deployment, if we have one + if deployment_context and cluster is None: + cluster = deployment_context.get_cluster_id() cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file) deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, @@ -253,6 +258,22 @@ def _make_runtime_env(ctx): return container_exec_env +def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude): + # Create default unique, stable cluster name from confile file path and stack name if provided + if deployment: + path = os.path.realpath(os.path.abspath(compose_dir)) + else: + path = "internal" + unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" + if opts.o.debug: + print(f"pre-hash descriptor: {unique_cluster_descriptor}") + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] + cluster = f"{constants.cluster_name_prefix}{hash}" + if opts.o.debug: + print(f"Using cluster name: {cluster}") + return cluster + + # stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): @@ -270,18 +291,9 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose") if cluster is None: - # Create default unique, stable cluster name from confile file path and stack name if provided - if deployment: - path = os.path.realpath(os.path.abspath(compose_dir)) - else: - path = "internal" - unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" - if ctx.debug: - print(f"pre-hash descriptor: {unique_cluster_descriptor}") - hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] - cluster = f"laconic-{hash}" - if ctx.verbose: - print(f"Using cluster name: {cluster}") + cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) + else: + _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) # See: https://stackoverflow.com/a/20885799/1701505 from stack_orchestrator import data diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py index 8d74a62d..cc70519e 100644 --- 
diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py
index 8d74a62d..cc70519e 100644
--- a/stack_orchestrator/deploy/deployment.py
+++ b/stack_orchestrator/deploy/deployment.py
@@ -52,7 +52,7 @@ def make_deploy_context(ctx) -> DeployCommandContext:
     context: DeploymentContext = ctx.obj
     stack_file_path = context.get_stack_file()
     env_file = context.get_env_file()
-    cluster_name = context.get_cluster_name()
+    cluster_name = context.get_cluster_id()
     if constants.deploy_to_key in context.spec.obj:
         deployment_type = context.spec.obj[constants.deploy_to_key]
     else:
diff --git a/stack_orchestrator/deploy/deployment_context.py b/stack_orchestrator/deploy/deployment_context.py
index cbee4151..27e32812 100644
--- a/stack_orchestrator/deploy/deployment_context.py
+++ b/stack_orchestrator/deploy/deployment_context.py
@@ -14,15 +14,19 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import hashlib
+import os
 from pathlib import Path
 
 from stack_orchestrator import constants
+from stack_orchestrator.util import get_yaml
 from stack_orchestrator.deploy.stack import Stack
 from stack_orchestrator.deploy.spec import Spec
 
 
 class DeploymentContext:
     deployment_dir: Path
+    id: str
     spec: Spec
     stack: Stack
 
@@ -35,9 +39,14 @@ class DeploymentContext:
     def get_env_file(self):
         return self.deployment_dir.joinpath(constants.config_file_name)
 
-    # TODO: implement me
-    def get_cluster_name(self):
-        return None
+    def get_deployment_file(self):
+        return self.deployment_dir.joinpath(constants.deployment_file_name)
+
+    def get_compose_dir(self):
+        return self.deployment_dir.joinpath(constants.compose_dir_name)
+
+    def get_cluster_id(self):
+        return self.id
 
     def init(self, dir):
         self.deployment_dir = dir
@@ -45,3 +54,16 @@ class DeploymentContext:
         self.spec.init_from_file(self.get_spec_file())
         self.stack = Stack(self.spec.obj["stack"])
         self.stack.init_from_file(self.get_stack_file())
+        deployment_file_path = self.get_deployment_file()
+        if deployment_file_path.exists():
+            with open(deployment_file_path, "r") as deployment_file:
+                obj = get_yaml().load(deployment_file)
+            self.id = obj[constants.cluster_id_key]
+        # Handle the case of a legacy deployment with no file
+        # Code below is intended to match the output from _make_default_cluster_name()
+        # TODO: remove when we no longer need to support legacy deployments
+        else:
+            path = os.path.realpath(os.path.abspath(self.get_compose_dir()))
+            unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
+            hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
+            self.id = f"{constants.cluster_name_prefix}{hash}"
"k8s": @@ -311,8 +312,6 @@ def init_operation(deploy_command_context, stack, deployer_type, config, new_config = config_file_variables merged_config = {**new_config, **orig_config} spec_file_content.update({"config": merged_config}) - if opts.o.debug: - print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") ports = _get_mapped_ports(stack, map_ports_to_host) spec_file_content.update({"network": {"ports": ports}}) @@ -324,8 +323,11 @@ def init_operation(deploy_command_context, stack, deployer_type, config, volume_descriptors[named_volume] = f"./data/{named_volume}" spec_file_content["volumes"] = volume_descriptors + if opts.o.debug: + print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") + with open(output, "w") as output_file: - yaml.dump(spec_file_content, output_file) + get_yaml().dump(spec_file_content, output_file) def _write_config_file(spec_file: Path, config_env_file: Path): @@ -351,6 +353,13 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path): copy(path, os.path.join(directory, os.path.basename(path))) +def _create_deployment_file(deployment_dir: Path): + deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name) + cluster = f"{constants.cluster_name_prefix}{token_hex(8)}" + with open(deployment_file_path, "w") as output_file: + output_file.write(f"{constants.cluster_id_key}: {cluster}\n") + + @click.command() @click.option("--spec-file", required=True, help="Spec file to use to create this deployment") @click.option("--deployment-dir", help="Create deployment files in this directory") @@ -383,6 +392,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw # Copy spec file and the stack file into the deployment dir copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name)) copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file))) + _create_deployment_file(deployment_dir_path) # Copy any config varibles from the spec file into an env file suitable for compose _write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name)) # Copy any k8s config file into the deployment dir From 077ea80c7030b000bbe0775ff944ab977c4c18bc Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 6 Dec 2023 10:27:47 -0600 Subject: [PATCH 4/4] Add `deployment status` command and fix k8s output for `deployment ps` (#679) --- .../deploy/compose/deploy_docker.py | 7 ++ stack_orchestrator/deploy/deploy.py | 8 ++ stack_orchestrator/deploy/deployer.py | 4 + stack_orchestrator/deploy/deployment.py | 5 +- stack_orchestrator/deploy/k8s/deploy_k8s.py | 90 +++++++++++++++++-- 5 files changed, 104 insertions(+), 10 deletions(-) diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index 04f24df5..d34d1e6f 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -40,6 +40,13 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) + def status(self): + try: + for p in self.docker.compose.ps(): + print(f"{p.name}\t{p.state.status}") + except DockerException as e: + raise DeployerException(e) + def ps(self): try: return self.docker.compose.ps() diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index d1b64743..da96a500 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -112,6 +112,14 @@ def 
From 077ea80c7030b000bbe0775ff944ab977c4c18bc Mon Sep 17 00:00:00 2001
From: Thomas E Lackey
Date: Wed, 6 Dec 2023 10:27:47 -0600
Subject: [PATCH 4/4] Add `deployment status` command and fix k8s output for
 `deployment ps` (#679)
---
 .../deploy/compose/deploy_docker.py           |  7 ++
 stack_orchestrator/deploy/deploy.py           |  8 ++
 stack_orchestrator/deploy/deployer.py         |  4 +
 stack_orchestrator/deploy/deployment.py       |  5 +-
 stack_orchestrator/deploy/k8s/deploy_k8s.py   | 90 +++++++++++++++++--
 5 files changed, 104 insertions(+), 10 deletions(-)

diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py
index 04f24df5..d34d1e6f 100644
--- a/stack_orchestrator/deploy/compose/deploy_docker.py
+++ b/stack_orchestrator/deploy/compose/deploy_docker.py
@@ -40,6 +40,13 @@ class DockerDeployer(Deployer):
         except DockerException as e:
             raise DeployerException(e)
 
+    def status(self):
+        try:
+            for p in self.docker.compose.ps():
+                print(f"{p.name}\t{p.state.status}")
+        except DockerException as e:
+            raise DeployerException(e)
+
     def ps(self):
         try:
             return self.docker.compose.ps()
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index d1b64743..da96a500 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -112,6 +112,14 @@ def down_operation(ctx, delete_volumes, extra_args_list):
         ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
 
 
+def status_operation(ctx):
+    global_context = ctx.parent.parent.obj
+    if not global_context.dry_run:
+        if global_context.verbose:
+            print("Running compose status")
+        ctx.obj.deployer.status()
+
+
 def ps_operation(ctx):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
diff --git a/stack_orchestrator/deploy/deployer.py b/stack_orchestrator/deploy/deployer.py
index 984945ed..2806044b 100644
--- a/stack_orchestrator/deploy/deployer.py
+++ b/stack_orchestrator/deploy/deployer.py
@@ -31,6 +31,10 @@ class Deployer(ABC):
     def ps(self):
         pass
 
+    @abstractmethod
+    def status(self):
+        pass
+
     @abstractmethod
     def port(self, service, private_port):
         pass
diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py
index cc70519e..366a83f6 100644
--- a/stack_orchestrator/deploy/deployment.py
+++ b/stack_orchestrator/deploy/deployment.py
@@ -18,7 +18,7 @@ from pathlib import Path
 import sys
 from stack_orchestrator import constants
 from stack_orchestrator.deploy.images import push_images_operation
-from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation
+from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation
 from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
@@ -147,4 +147,5 @@ def logs(ctx, tail, follow, extra_args):
 @command.command()
 @click.pass_context
 def status(ctx):
-    print(f"Context: {ctx.parent.obj}")
+    ctx.obj = make_deploy_context(ctx)
+    status_operation(ctx)
diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py
index c84aa34a..95131966 100644
--- a/stack_orchestrator/deploy/k8s/deploy_k8s.py
+++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py
@@ -26,6 +26,12 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.util import error_exit
 
 
+class AttrDict(dict):
+    def __init__(self, *args, **kwargs):
+        super(AttrDict, self).__init__(*args, **kwargs)
+        self.__dict__ = self
+
+
 def _check_delete_exception(e: client.exceptions.ApiException):
     if e.status == 404:
         if opts.o.debug:
@@ -42,7 +48,7 @@ class K8sDeployer(Deployer):
     networking_api: client.NetworkingV1Api
     k8s_namespace: str = "default"
     kind_cluster_name: str
-    cluster_info : ClusterInfo
+    cluster_info: ClusterInfo
     deployment_dir: Path
     deployment_context: DeploymentContext
 
@@ -72,6 +78,7 @@ class K8sDeployer(Deployer):
         self.core_api = client.CoreV1Api()
         self.networking_api = client.NetworkingV1Api()
         self.apps_api = client.AppsV1Api()
+        self.custom_obj_api = client.CustomObjectsApi()
 
     def up(self, detach, services):
 
@@ -202,15 +209,82 @@ class K8sDeployer(Deployer):
             # Destroy the kind cluster
             destroy_cluster(self.kind_cluster_name)
 
-    def ps(self):
+    def status(self):
         self.connect_api()
         # Call whatever API we need to get the running container list
-        ret = self.core_api.list_pod_for_all_namespaces(watch=False)
-        if ret.items:
-            for i in ret.items:
-                print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
-        ret = self.core_api.list_node(pretty=True, watch=False)
-        return []
+        all_pods = self.core_api.list_pod_for_all_namespaces(watch=False)
+        pods = []
+
+        if all_pods.items:
+            for p in all_pods.items:
+                if self.cluster_info.app_name in p.metadata.name:
+                    pods.append(p)
+
+        if not pods:
+            return
+
+        hostname = "?"
+        ip = "?"
+        tls = "?"
+        try:
+            ingress = self.networking_api.read_namespaced_ingress(namespace=self.k8s_namespace,
+                                                                  name=self.cluster_info.get_ingress().metadata.name)
+
+            cert = self.custom_obj_api.get_namespaced_custom_object(
+                group="cert-manager.io",
+                version="v1",
+                namespace=self.k8s_namespace,
+                plural="certificates",
+                name=ingress.spec.tls[0].secret_name
+            )
+
+            hostname = ingress.spec.tls[0].hosts[0]
+            ip = ingress.status.load_balancer.ingress[0].ip
+            tls = "notBefore: %s, notAfter: %s" % (cert["status"]["notBefore"], cert["status"]["notAfter"])
+        except:  # noqa: E722
+            pass
+
+        print("Ingress:")
+        print("\tHostname:", hostname)
+        print("\tIP:", ip)
+        print("\tTLS:", tls)
+        print("")
+        print("Pods:")
+
+        for p in pods:
+            if p.metadata.deletion_timestamp:
+                print(f"\t{p.metadata.namespace}/{p.metadata.name}: Terminating ({p.metadata.deletion_timestamp})")
+            else:
+                print(f"\t{p.metadata.namespace}/{p.metadata.name}: Running ({p.metadata.creation_timestamp})")
+
+    def ps(self):
+        self.connect_api()
+        pods = self.core_api.list_pod_for_all_namespaces(watch=False)
+
+        ret = []
+
+        for p in pods.items:
+            if self.cluster_info.app_name in p.metadata.name:
+                pod_ip = p.status.pod_ip
+                ports = AttrDict()
+                for c in p.spec.containers:
+                    if c.ports:
+                        for prt in c.ports:
+                            ports[str(prt.container_port)] = [AttrDict({
+                                "HostIp": pod_ip,
+                                "HostPort": prt.container_port
+                            })]
+
+                ret.append(AttrDict({
+                    "id": f"{p.metadata.namespace}/{p.metadata.name}",
+                    "name": p.metadata.name,
+                    "namespace": p.metadata.namespace,
+                    "network_settings": AttrDict({
+                        "ports": ports
+                    })
+                }))
+
+        return ret
 
     def port(self, service, private_port):
         # Since we handle the port mapping, need to figure out where this comes from
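Taken together, the series leaves the deployment workflow looking like this; a usage sketch with a placeholder deployment directory:

```bash
laconic-so deployment --dir my-deployment start

# New in PATCH 4: a human-readable summary; on k8s this prints the ingress
# hostname/IP/TLS window and per-pod states, on compose a name/status line per container
laconic-so deployment --dir my-deployment status

# `ps` now returns docker-style records (id, name, port mappings) on k8s too
laconic-so deployment --dir my-deployment ps

laconic-so deployment --dir my-deployment stop
```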