diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py
index 596b0c1b..54cfe355 100644
--- a/stack_orchestrator/constants.py
+++ b/stack_orchestrator/constants.py
@@ -13,16 +13,12 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-cluster_name_prefix = "laconic-"
stack_file_name = "stack.yml"
spec_file_name = "spec.yml"
config_file_name = "config.env"
-deployment_file_name = "deployment.yml"
-compose_dir_name = "compose"
compose_deploy_type = "compose"
k8s_kind_deploy_type = "k8s-kind"
k8s_deploy_type = "k8s"
-cluster_id_key = "cluster-id"
kube_config_key = "kube-config"
deploy_to_key = "deploy-to"
network_key = "network"
diff --git a/stack_orchestrator/data/compose/docker-compose-fixturenet-urbit.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-urbit.yml
new file mode 100644
index 00000000..bc64cd90
--- /dev/null
+++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-urbit.yml
@@ -0,0 +1,36 @@
+version: '3.7'
+
+services:
+ # Runs an Urbit fake ship and attempts an app installation using given data
+ # Uploads the app glob to given IPFS endpoint
+ # From urbit_app_builds volume:
+ # - takes app build from ${CERC_URBIT_APP}/build (waits for it to appear)
+ # - takes additional mark files from ${CERC_URBIT_APP}/mar
+ # - takes the docket file from ${CERC_URBIT_APP}/desk.docket-0
+ urbit-fake-ship:
+ restart: unless-stopped
+ image: tloncorp/vere
+ environment:
+ CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
+ CERC_URBIT_APP: ${CERC_URBIT_APP}
+ CERC_ENABLE_APP_INSTALL: ${CERC_ENABLE_APP_INSTALL:-true}
+ CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs:5001}
+ CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs:8080}
+ entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-app.sh && tail -f /dev/null"]
+ volumes:
+ - urbit_data:/urbit
+ - urbit_app_builds:/app-builds
+ - ../config/urbit/run-urbit-ship.sh:/urbit/run-urbit-ship.sh
+ - ../config/urbit/deploy-app.sh:/urbit/deploy-app.sh
+ ports:
+ - "80"
+ healthcheck:
+ test: ["CMD", "nc", "-v", "localhost", "80"]
+ interval: 20s
+ timeout: 5s
+ retries: 15
+ start_period: 10s
+
+volumes:
+ urbit_data:
+ urbit_app_builds:
diff --git a/stack_orchestrator/data/compose/docker-compose-kubo.yml b/stack_orchestrator/data/compose/docker-compose-kubo.yml
index f5f8b06e..2e4ae419 100644
--- a/stack_orchestrator/data/compose/docker-compose-kubo.yml
+++ b/stack_orchestrator/data/compose/docker-compose-kubo.yml
@@ -1,13 +1,24 @@
version: "3.2"
+
# See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up
services:
ipfs:
image: ipfs/kubo:master-2023-02-20-714a968
restart: always
volumes:
- - ./ipfs/import:/import
- - ./ipfs/data:/data/ipfs
+ - ipfs-import:/import
+ - ipfs-data:/data/ipfs
ports:
- - "0.0.0.0:8080:8080"
- - "0.0.0.0:4001:4001"
+ - "4001"
+ - "8080"
- "0.0.0.0:5001:5001"
+ healthcheck:
+ test: ["CMD", "nc", "-v", "localhost", "5001"]
+ interval: 20s
+ timeout: 5s
+ retries: 15
+ start_period: 10s
+
+volumes:
+ ipfs-import:
+ ipfs-data:
diff --git a/stack_orchestrator/data/compose/docker-compose-proxy-server.yml b/stack_orchestrator/data/compose/docker-compose-proxy-server.yml
index 607e8d23..dc672e0a 100644
--- a/stack_orchestrator/data/compose/docker-compose-proxy-server.yml
+++ b/stack_orchestrator/data/compose/docker-compose-proxy-server.yml
@@ -6,7 +6,7 @@ services:
restart: on-failure
working_dir: /app/packages/cli
environment:
- ENABLE_PROXY: ${ENABLE_PROXY:-true}
+ ENABLE_PROXY: ${CERC_ENABLE_PROXY:-true}
PROXY_UPSTREAM: ${CERC_PROXY_UPSTREAM}
PROXY_ORIGIN_HEADER: ${CERC_PROXY_ORIGIN_HEADER}
command: ["sh", "-c", "./run.sh"]
diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml
index 85b71af2..334ab0cd 100644
--- a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml
+++ b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml
@@ -9,9 +9,10 @@ services:
- REACT_APP_AWS_API_ENDPOINT=${CERC_UNISWAP_GQL}
command: ["./build-app.sh"]
volumes:
- - app_builds:/app-builds
- ../config/uniswap-interface/build-app.sh:/app/build-app.sh
+ - urbit_app_builds:/app-builds
+ - ../config/uniswap-interface/urbit-files/mar:/app/mar
+ - ../config/uniswap-interface/urbit-files/desk.docket-0:/app/desk.docket-0
volumes:
- app_builds:
- app_globs:
+ urbit_app_builds:
diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml
deleted file mode 100644
index 31fa99bf..00000000
--- a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-version: '3.7'
-
-services:
- urbit-fake-ship:
- restart: unless-stopped
- image: tloncorp/vere
- environment:
- CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs-glob-host:5001}
- CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs-glob-host:8080}
- entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-uniswap-app.sh && tail -f /dev/null"]
- volumes:
- - urbit_data:/urbit
- - app_builds:/app-builds
- - app_globs:/app-globs
- - ../config/urbit/run-urbit-ship.sh:/urbit/run-urbit-ship.sh
- - ../config/uniswap-interface/deploy-uniswap-app.sh:/urbit/deploy-uniswap-app.sh
- ports:
- - "80"
- healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "80"]
- interval: 20s
- timeout: 5s
- retries: 15
- start_period: 10s
-
- ipfs-glob-host:
- image: ipfs/kubo:master-2023-02-20-714a968
- volumes:
- - ipfs-import:/import
- - ipfs-data:/data/ipfs
- ports:
- - "8080"
- - "5001"
- healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "5001"]
- interval: 20s
- timeout: 5s
- retries: 15
- start_period: 10s
-
-volumes:
- urbit_data:
- app_builds:
- app_globs:
- ipfs-import:
- ipfs-data:
diff --git a/stack_orchestrator/data/config/uniswap-interface/build-app.sh b/stack_orchestrator/data/config/uniswap-interface/build-app.sh
index d3b012e6..81e306b6 100755
--- a/stack_orchestrator/data/config/uniswap-interface/build-app.sh
+++ b/stack_orchestrator/data/config/uniswap-interface/build-app.sh
@@ -13,6 +13,9 @@ fi
yarn build
-# Move build to app-builds so urbit can deploy it
-mkdir /app-builds/uniswap
+# Copy over build and other files to app-builds for urbit deployment
+mkdir -p /app-builds/uniswap
cp -r ./build /app-builds/uniswap/
+
+cp -r mar /app-builds/uniswap/
+cp desk.docket-0 /app-builds/uniswap/
diff --git a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh
deleted file mode 100755
index f07a205b..00000000
--- a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
- set -x
-fi
-
-echo "Using IPFS endpoint ${CERC_IPFS_GLOB_HOST_ENDPOINT} for hosting globs"
-echo "Using IPFS server endpoint ${CERC_IPFS_SERVER_ENDPOINT} for reading glob files"
-ipfs_host_endpoint=${CERC_IPFS_GLOB_HOST_ENDPOINT}
-ipfs_server_endpoint=${CERC_IPFS_SERVER_ENDPOINT}
-
-uniswap_app_build='/app-builds/uniswap/build'
-uniswap_desk_dir='/urbit/zod/uniswap'
-
-if [ -d ${uniswap_desk_dir} ]; then
- echo "Uniswap desk dir already exists, skipping deployment..."
- exit 0
-fi
-
-# Fire curl requests to perform operations on the ship
-dojo () {
- curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321
-}
-
-hood () {
- curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321
-}
-
-# Create/mount a uniswap desk
-hood "merge %uniswap our %landscape"
-hood "mount %uniswap"
-
-# Loop until the uniswap build appears
-while [ ! -d ${uniswap_app_build} ]; do
- echo "Uniswap app build not found, retrying in 5s..."
- sleep 5
-done
-echo "Build found..."
-
-# Copy over build to desk data dir
-cp -r ${uniswap_app_build} ${uniswap_desk_dir}
-
-# Create a mark file for .map file type
-cat << EOF > "${uniswap_desk_dir}/mar/map.hoon"
-::
-:::: /hoon/map/mar
- :: Mark for js source maps
-/? 310
-::
-=, eyre
-|_ mud=@
-++ grow
- |%
- ++ mime [/application/octet-stream (as-octs:mimes:html (@t mud))]
- --
-++ grab
- |% :: convert from
- ++ mime |=([p=mite q=octs] (@t q.q))
- ++ noun cord :: clam from %noun
- --
-++ grad %mime
---
-EOF
-
-# Create a mark file for .woff file type
-cat << EOF > "${uniswap_desk_dir}/mar/woff.hoon"
-|_ dat=octs
-++ grow
- |%
- ++ mime [/font/woff dat]
- --
-++ grab
- |%
- ++ mime |=([=mite =octs] octs)
- ++ noun octs
- --
-++ grad %mime
---
-EOF
-
-# Create a mark file for .ttf file type
-cat << EOF > "${uniswap_desk_dir}/mar/ttf.hoon"
-|_ dat=octs
-++ grow
- |%
- ++ mime [/font/ttf dat]
- --
-++ grab
- |%
- ++ mime |=([=mite =octs] octs)
- ++ noun octs
- --
-++ grad %mime
---
-EOF
-
-rm "${uniswap_desk_dir}/desk.bill"
-rm "${uniswap_desk_dir}/desk.ship"
-
-# Commit changes and create a glob
-hood "commit %uniswap"
-dojo "-landscape!make-glob %uniswap /build"
-
-glob_file=$(ls -1 -c zod/.urb/put | head -1)
-echo "Created glob file: ${glob_file}"
-
-upload_response=$(curl -X POST -F file=@./zod/.urb/put/${glob_file} ${ipfs_host_endpoint}/api/v0/add)
-glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//')
-
-echo "Glob file uploaded to IFPS:"
-echo "{ cid: ${glob_cid}, filename: ${glob_file} }"
-
-# Curl and wait for the glob to be hosted
-glob_url="${ipfs_server_endpoint}/ipfs/${glob_cid}?filename=${glob_file}"
-
-echo "Checking if glob file hosted at ${glob_url}"
-while true; do
- response=$(curl -sL -w "%{http_code}" -o /dev/null "$glob_url")
-
- if [ $response -eq 200 ]; then
- echo "File found at $glob_url"
- break # Exit the loop if the file is found
- else
- echo "File not found. Retrying in a few seconds..."
- sleep 5
- fi
-done
-
-glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/")
-
-# Update the docket file
-cat << EOF > "${uniswap_desk_dir}/desk.docket-0"
-:~ title+'Uniswap'
- info+'Self-hosted uniswap frontend.'
- color+0xcd.75df
- image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg'
- base+'uniswap'
- glob-http+['${glob_url}' ${glob_hash}]
- version+[0 0 1]
- website+'https://uniswap.org/'
- license+'MIT'
-==
-EOF
-
-# Commit changes and install the app
-hood "commit %uniswap"
-hood "install our %uniswap"
-
-echo "Uniswap app installed"
diff --git a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh
deleted file mode 100755
index 2463e1c7..00000000
--- a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-
-# $1: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
-# $2: Glob file hash (eg. 0vabcd)
-# $3: Urbit ship's pier dir (default: ./zod)
-
-if [ "$#" -lt 2 ]; then
- echo "Insufficient arguments"
- exit 0
-fi
-
-glob_url=$1
-glob_hash=$2
-echo "Using glob file from ${glob_url} with hash ${glob_hash}"
-
-# Default pier dir: ./zod
-# Default desk dir: ./zod/uniswap
-pier_dir="${3:-./zod}"
-uniswap_desk_dir="${pier_dir}/uniswap"
-echo "Using ${uniswap_desk_dir} as the Uniswap desk dir path"
-
-# Fire curl requests to perform operations on the ship
-dojo () {
- curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321
-}
-
-hood () {
- curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321
-}
-
-# Create/mount a uniswap desk
-hood "merge %uniswap our %landscape"
-hood "mount %uniswap"
-
-# Create a mark file for .map file type
-cat << EOF > "${uniswap_desk_dir}/mar/map.hoon"
-::
-:::: /hoon/map/mar
- :: Mark for js source maps
-/? 310
-::
-=, eyre
-|_ mud=@
-++ grow
- |%
- ++ mime [/application/octet-stream (as-octs:mimes:html (@t mud))]
- --
-++ grab
- |% :: convert from
- ++ mime |=([p=mite q=octs] (@t q.q))
- ++ noun cord :: clam from %noun
- --
-++ grad %mime
---
-EOF
-
-# Create a mark file for .woff file type
-cat << EOF > "${uniswap_desk_dir}/mar/woff.hoon"
-|_ dat=octs
-++ grow
- |%
- ++ mime [/font/woff dat]
- --
-++ grab
- |%
- ++ mime |=([=mite =octs] octs)
- ++ noun octs
- --
-++ grad %mime
---
-EOF
-
-# Create a mark file for .ttf file type
-cat << EOF > "${uniswap_desk_dir}/mar/ttf.hoon"
-|_ dat=octs
-++ grow
- |%
- ++ mime [/font/ttf dat]
- --
-++ grab
- |%
- ++ mime |=([=mite =octs] octs)
- ++ noun octs
- --
-++ grad %mime
---
-EOF
-
-rm "${uniswap_desk_dir}/desk.bill"
-rm "${uniswap_desk_dir}/desk.ship"
-
-# Update the docket file
-cat << EOF > "${uniswap_desk_dir}/desk.docket-0"
-:~ title+'Uniswap'
- info+'Self-hosted uniswap frontend.'
- color+0xcd.75df
- image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg'
- base+'uniswap'
- glob-http+['${glob_url}' ${glob_hash}]
- version+[0 0 1]
- website+'https://uniswap.org/'
- license+'MIT'
-==
-EOF
-
-# Commit changes and install the app
-hood "commit %uniswap"
-hood "install our %uniswap"
-
-echo "Uniswap app installed"
diff --git a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh
deleted file mode 100755
index 31f03d72..00000000
--- a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# $1: Remote user host
-# $2: Remote Urbit ship's pier dir path (eg. /home/user/zod)
-# $3: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
-# $4: Glob file hash (eg. 0vabcd)
-
-if [ "$#" -ne 4 ]; then
- echo "Incorrect number of arguments"
- echo "Usage: $0 "
- exit 1
-fi
-
-remote_user_host="$1"
-remote_pier_folder="$2"
-glob_url="$3"
-glob_hash="$4"
-
-installation_script="./install-uniswap-app.sh"
-
-ssh "$remote_user_host" "bash -s $glob_url $glob_hash $remote_pier_folder" < "$installation_script"
diff --git a/stack_orchestrator/data/config/uniswap-interface/urbit-files/desk.docket-0 b/stack_orchestrator/data/config/uniswap-interface/urbit-files/desk.docket-0
new file mode 100644
index 00000000..6256c41b
--- /dev/null
+++ b/stack_orchestrator/data/config/uniswap-interface/urbit-files/desk.docket-0
@@ -0,0 +1,10 @@
+:~ title+'Uniswap'
+ info+'Self-hosted uniswap frontend.'
+ color+0xcd.75df
+ image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg'
+ base+'uniswap'
+ glob-http+['REPLACE_WITH_GLOB_URL' REPLACE_WITH_GLOB_HASH]
+ version+[0 0 1]
+ website+'https://uniswap.org/'
+ license+'MIT'
+==
diff --git a/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/map.hoon b/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/map.hoon
new file mode 100644
index 00000000..e5d61eee
--- /dev/null
+++ b/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/map.hoon
@@ -0,0 +1,18 @@
+::
+:::: /hoon/map/mar
+ :: Mark for js source maps
+/? 310
+::
+=, eyre
+|_ mud=@
+++ grow
+ |%
+ ++ mime [/application/octet-stream (as-octs:mimes:html (@t mud))]
+ --
+++ grab
+ |% :: convert from
+ ++ mime |=([p=mite q=octs] (@t q.q))
+ ++ noun cord :: clam from %noun
+ --
+++ grad %mime
+--
diff --git a/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/ttf.hoon b/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/ttf.hoon
new file mode 100644
index 00000000..2d29193e
--- /dev/null
+++ b/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/ttf.hoon
@@ -0,0 +1,12 @@
+|_ dat=octs
+++ grow
+ |%
+ ++ mime [/font/ttf dat]
+ --
+++ grab
+ |%
+ ++ mime |=([=mite =octs] octs)
+ ++ noun octs
+ --
+++ grad %mime
+--
diff --git a/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/woff.hoon b/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/woff.hoon
new file mode 100644
index 00000000..2933aea7
--- /dev/null
+++ b/stack_orchestrator/data/config/uniswap-interface/urbit-files/mar/woff.hoon
@@ -0,0 +1,12 @@
+|_ dat=octs
+++ grow
+ |%
+ ++ mime [/font/woff dat]
+ --
+++ grab
+ |%
+ ++ mime |=([=mite =octs] octs)
+ ++ noun octs
+ --
+++ grad %mime
+--
diff --git a/stack_orchestrator/data/config/urbit/deploy-app-to-remote-urbit.sh b/stack_orchestrator/data/config/urbit/deploy-app-to-remote-urbit.sh
new file mode 100755
index 00000000..18019949
--- /dev/null
+++ b/stack_orchestrator/data/config/urbit/deploy-app-to-remote-urbit.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# $1: Remote user host
+# $2: App name (eg. uniswap)
+# $3: Assets dir path (local) for app (eg. /home/user/myapp/urbit-files)
+# $4: Remote Urbit ship's pier dir path (eg. /home/user/zod)
+# $5: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
+# $6: Glob file hash (eg. 0vabcd)
+
+if [ "$#" -ne 6 ]; then
+ echo "Incorrect number of arguments"
+ echo "Usage: $0 <remote-user-host> <app-name> <app-assets-dir> <remote-pier-dir> <glob-url> <glob-hash>"
+ exit 1
+fi
+
+remote_user_host="$1"
+app_name=$2
+app_assets_folder=$3
+remote_pier_folder="$4"
+glob_url="$5"
+glob_hash="$6"
+
+installation_script="./install-urbit-app.sh"
+
+# Copy over the assets to remote machine in a tmp dir
+remote_app_assets_folder=/tmp/urbit-app-assets/$app_name
+ssh "$remote_user_host" "mkdir -p $remote_app_assets_folder"
+scp -r $app_assets_folder/* $remote_user_host:$remote_app_assets_folder
+
+# Run the installation script
+ssh "$remote_user_host" "bash -s $app_name $remote_app_assets_folder '${glob_url}' $glob_hash $remote_pier_folder" < "$installation_script"
+
+# Remove the tmp assets dir
+ssh "$remote_user_host" "rm -rf $remote_app_assets_folder"
diff --git a/stack_orchestrator/data/config/urbit/deploy-app.sh b/stack_orchestrator/data/config/urbit/deploy-app.sh
new file mode 100755
index 00000000..bde6cd6f
--- /dev/null
+++ b/stack_orchestrator/data/config/urbit/deploy-app.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+ set -x
+fi
+
+if [ -z "$CERC_URBIT_APP" ]; then
+ echo "CERC_URBIT_APP not set, exiting"
+ exit 0
+fi
+
+echo "Creating Urbit application for ${CERC_URBIT_APP}"
+
+app_desk_dir=/urbit/zod/${CERC_URBIT_APP}
+if [ -d ${app_desk_dir} ]; then
+ echo "Desk dir already exists for ${CERC_URBIT_APP}, skipping deployment..."
+ exit 0
+fi
+
+app_build=/app-builds/${CERC_URBIT_APP}/build
+app_mark_files=/app-builds/${CERC_URBIT_APP}/mar
+app_docket_file=/app-builds/${CERC_URBIT_APP}/desk.docket-0
+
+echo "Reading app build from ${app_build}"
+echo "Reading additional mark files from ${app_mark_files}"
+echo "Reading docket file ${app_docket_file}"
+
+# Loop until the app's build appears
+while [ ! -d ${app_build} ]; do
+ echo "${CERC_URBIT_APP} app build not found, retrying in 5s..."
+ sleep 5
+done
+echo "Build found..."
+
+echo "Using IPFS endpoint ${CERC_IPFS_GLOB_HOST_ENDPOINT} for hosting the ${CERC_URBIT_APP} glob"
+echo "Using IPFS server endpoint ${CERC_IPFS_SERVER_ENDPOINT} for reading ${CERC_URBIT_APP} glob"
+ipfs_host_endpoint=${CERC_IPFS_GLOB_HOST_ENDPOINT}
+ipfs_server_endpoint=${CERC_IPFS_SERVER_ENDPOINT}
+
+# Fire curl requests to perform operations on the ship
+dojo () {
+ curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321
+}
+
+hood () {
+ curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321
+}
+
+# Create / mount the app's desk
+hood "merge %${CERC_URBIT_APP} our %landscape"
+hood "mount %${CERC_URBIT_APP}"
+
+# Copy over build to desk data dir
+cp -r ${app_build} ${app_desk_dir}
+
+# Copy over the additional mark files
+cp ${app_mark_files}/* ${app_desk_dir}/mar/
+
+rm "${app_desk_dir}/desk.bill"
+rm "${app_desk_dir}/desk.ship"
+
+# Commit changes and create a glob
+hood "commit %${CERC_URBIT_APP}"
+dojo "-landscape!make-glob %${CERC_URBIT_APP} /build"
+
+glob_file=$(ls -1 -c zod/.urb/put | head -1)
+echo "Created glob file: ${glob_file}"
+
+# Upload the glob file to IPFS
+echo "Uploading glob file to ${ipfs_host_endpoint}"
+upload_response=$(curl -X POST -F file=@./zod/.urb/put/${glob_file} ${ipfs_host_endpoint}/api/v0/add)
+glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//')
+
+glob_url="${ipfs_server_endpoint}/ipfs/${glob_cid}?filename=${glob_file}"
+glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/")
+
+echo "Glob file uploaded to IPFS:"
+echo "{ cid: ${glob_cid}, filename: ${glob_file} }"
+echo "{ url: ${glob_url}, hash: ${glob_hash} }"
+
+# Exit if the installation not required
+if [ "$CERC_ENABLE_APP_INSTALL" = "false" ]; then
+ echo "CERC_ENABLE_APP_INSTALL set to false, skipping app installation"
+ exit 0
+fi
+
+# Curl and wait for the glob to be hosted
+echo "Checking if glob file hosted at ${glob_url}"
+while true; do
+ response=$(curl -sL -w "%{http_code}" -o /dev/null "$glob_url")
+
+ if [ $response -eq 200 ]; then
+ echo "File found at $glob_url"
+ break # Exit the loop if the file is found
+ else
+ echo "File not found, retrying in 5s..."
+ sleep 5
+ fi
+done
+
+# Replace the docket file for app
+# Substitute the glob URL and hash
+cp ${app_docket_file} ${app_desk_dir}/
+sed -i "s|REPLACE_WITH_GLOB_URL|${glob_url}|g; s|REPLACE_WITH_GLOB_HASH|${glob_hash}|g" ${app_desk_dir}/desk.docket-0
+
+# Commit changes and install the app
+hood "commit %${CERC_URBIT_APP}"
+hood "install our %${CERC_URBIT_APP}"
+
+echo "${CERC_URBIT_APP} app installed"
diff --git a/stack_orchestrator/data/config/urbit/install-urbit-app.sh b/stack_orchestrator/data/config/urbit/install-urbit-app.sh
new file mode 100755
index 00000000..444ff45e
--- /dev/null
+++ b/stack_orchestrator/data/config/urbit/install-urbit-app.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# $1: App name (eg. uniswap)
+# $2: Assets dir path (local) for app (eg. /home/user/myapp/urbit-files)
+# $3: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
+# $4: Glob file hash (eg. 0vabcd)
+# $5: Urbit ship's pier dir (default: ./zod)
+
+if [ "$#" -lt 4 ]; then
+ echo "Insufficient arguments"
+ echo "Usage: $0 <app-name> <app-assets-dir> <glob-url> <glob-hash> [/path/to/remote/pier/folder]"
+ exit 1
+fi
+
+app_name=$1
+app_mark_files=$2/mar
+app_docket_file=$2/desk.docket-0
+echo "Creating Urbit application for ${app_name}"
+echo "Reading additional mark files from ${app_mark_files}"
+echo "Reading docket file ${app_docket_file}"
+
+glob_url=$3
+glob_hash=$4
+echo "Using glob file from ${glob_url} with hash ${glob_hash}"
+
+# Default pier dir: ./zod
+# Default desk dir: ./zod/
+pier_dir="${5:-./zod}"
+app_desk_dir="${pier_dir}/${app_name}"
+echo "Using ${app_desk_dir} as the ${app_name} desk dir path"
+
+# Fire curl requests to perform operations on the ship
+dojo () {
+ curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321
+}
+
+hood () {
+ curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321
+}
+
+# Create / mount the app's desk
+hood "merge %${app_name} our %landscape"
+hood "mount %${app_name}"
+
+# Copy over the additional mark files
+cp ${app_mark_files}/* ${app_desk_dir}/mar/
+
+rm "${app_desk_dir}/desk.bill"
+rm "${app_desk_dir}/desk.ship"
+
+# Replace the docket file for app
+# Substitute the glob URL and hash
+cp ${app_docket_file} ${app_desk_dir}/
+sed -i "s|REPLACE_WITH_GLOB_URL|${glob_url}|g; s|REPLACE_WITH_GLOB_HASH|${glob_hash}|g" ${app_desk_dir}/desk.docket-0
+
+# Commit changes and install the app
+hood "commit %${app_name}"
+hood "install our %${app_name}"
+
+echo "${app_name} app installed"
diff --git a/stack_orchestrator/data/config/urbit/run-urbit-ship.sh b/stack_orchestrator/data/config/urbit/run-urbit-ship.sh
index bb301c81..0b219803 100755
--- a/stack_orchestrator/data/config/urbit/run-urbit-ship.sh
+++ b/stack_orchestrator/data/config/urbit/run-urbit-ship.sh
@@ -7,11 +7,13 @@ fi
pier_dir="/urbit/zod"
+# TODO: Bootstrap fake ship on the first run
+
# Run urbit ship in daemon mode
# Check if the directory exists
if [ -d "$pier_dir" ]; then
echo "Pier directory already exists, rebooting..."
- urbit -d zod
+ /urbit/zod/.run -d
else
echo "Creating a new fake ship..."
urbit -d -F zod
diff --git a/stack_orchestrator/data/stacks/proxy-server/README.md b/stack_orchestrator/data/stacks/proxy-server/README.md
index f0ccdb0f..294b01a9 100644
--- a/stack_orchestrator/data/stacks/proxy-server/README.md
+++ b/stack_orchestrator/data/stacks/proxy-server/README.md
@@ -47,7 +47,7 @@ laconic-so --stack proxy-server build-containers
```bash
# Whether to run the proxy server (Optional) (Default: true)
- ENABLE_PROXY=
+ CERC_ENABLE_PROXY=
# Upstream endpoint
# (Eg. https://api.example.org)
diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
index 7499f5fc..6818dd94 100644
--- a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
+++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md
@@ -33,7 +33,7 @@ laconic-so --stack uniswap-urbit-app deploy init --output uniswap-urbit-app-spec
### Ports
-Edit `network` in spec file to map container ports to same ports in host
+Edit `network` in spec file to map container ports to same ports in host:
```
...
@@ -43,12 +43,14 @@ network:
- '8080:80'
proxy-server:
- '4000:4000'
- ipfs-glob-host:
+ ipfs:
- '8081:8080'
- '5001:5001'
...
```
+Note: Skip the `ipfs` ports if you need to use an externally running IPFS node
+
### Data volumes
Container data volumes are bind-mounted to specified paths in the host filesystem.
@@ -67,6 +69,9 @@ laconic-so --stack uniswap-urbit-app deploy create --spec-file uniswap-urbit-app
Inside the deployment directory, open the file `config.env` and set the following env variables:
```bash
+ # App to be installed (Do not change)
+ CERC_URBIT_APP=uniswap
+
# External RPC endpoints
# https://docs.infura.io/getting-started#2-create-an-api-key
CERC_INFURA_KEY=
@@ -79,9 +84,13 @@ Inside the deployment directory, open the file `config.env` and set the followin
# Optional
+ # Whether to enable app installation on Urbit
+ # (just builds and uploads the glob file if disabled) (Default: true)
+ CERC_ENABLE_APP_INSTALL=
+
# Whether to run the proxy GQL server
- # (Disable only if proxy not required to be run) (Default: true)
- ENABLE_PROXY=
+ # (disable only if proxy not required to be run) (Default: true)
+ CERC_ENABLE_PROXY=
# Proxy server configuration
# Used only if proxy is enabled
@@ -97,11 +106,11 @@ Inside the deployment directory, open the file `config.env` and set the followin
# IPFS configuration
# IFPS endpoint to host the glob file on
- # (Default: http://ipfs-glob-host:5001 pointing to in-stack IPFS node)
+ # (Default: http://ipfs:5001 pointing to in-stack IPFS node)
CERC_IPFS_GLOB_HOST_ENDPOINT=
# IFPS endpoint to fetch the glob file from
- # (Default: http://ipfs-glob-host:8080 pointing to in-stack IPFS node)
+ # (Default: http://ipfs:8080 pointing to in-stack IPFS node)
CERC_IPFS_SERVER_ENDPOINT=
```
diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml
index 3f77098f..31499406 100644
--- a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml
+++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml
@@ -10,4 +10,5 @@ containers:
pods:
- uniswap-interface
- proxy-server
- - uniswap-urbit
+ - fixturenet-urbit
+ - kubo
diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py
index d1b64743..424d112f 100644
--- a/stack_orchestrator/deploy/deploy.py
+++ b/stack_orchestrator/deploy/deploy.py
@@ -24,8 +24,6 @@ from importlib import resources
import subprocess
import click
from pathlib import Path
-from stack_orchestrator import constants
-from stack_orchestrator.opts import opts
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
from stack_orchestrator.deploy.deployer import Deployer, DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer
@@ -72,9 +70,6 @@ def create_deploy_context(
cluster,
env_file,
deploy_to) -> DeployCommandContext:
- # Extract the cluster name from the deployment, if we have one
- if deployment_context and cluster is None:
- cluster = deployment_context.get_cluster_id()
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
compose_project_name=cluster_context.cluster,
@@ -258,22 +253,6 @@ def _make_runtime_env(ctx):
return container_exec_env
-def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
- # Create default unique, stable cluster name from confile file path and stack name if provided
- if deployment:
- path = os.path.realpath(os.path.abspath(compose_dir))
- else:
- path = "internal"
- unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
- if opts.o.debug:
- print(f"pre-hash descriptor: {unique_cluster_descriptor}")
- hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
- cluster = f"{constants.cluster_name_prefix}{hash}"
- if opts.o.debug:
- print(f"Using cluster name: {cluster}")
- return cluster
-
-
# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
@@ -291,9 +270,18 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
if cluster is None:
- cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
- else:
- _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
+ # Create default unique, stable cluster name from config file path and stack name if provided
+ if deployment:
+ path = os.path.realpath(os.path.abspath(compose_dir))
+ else:
+ path = "internal"
+ unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
+ if ctx.debug:
+ print(f"pre-hash descriptor: {unique_cluster_descriptor}")
+ hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
+ cluster = f"laconic-{hash}"
+ if ctx.verbose:
+ print(f"Using cluster name: {cluster}")
# See: https://stackoverflow.com/a/20885799/1701505
from stack_orchestrator import data
diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py
index cc70519e..8d74a62d 100644
--- a/stack_orchestrator/deploy/deployment.py
+++ b/stack_orchestrator/deploy/deployment.py
@@ -52,7 +52,7 @@ def make_deploy_context(ctx) -> DeployCommandContext:
context: DeploymentContext = ctx.obj
stack_file_path = context.get_stack_file()
env_file = context.get_env_file()
- cluster_name = context.get_cluster_id()
+ cluster_name = context.get_cluster_name()
if constants.deploy_to_key in context.spec.obj:
deployment_type = context.spec.obj[constants.deploy_to_key]
else:
diff --git a/stack_orchestrator/deploy/deployment_context.py b/stack_orchestrator/deploy/deployment_context.py
index 27e32812..cbee4151 100644
--- a/stack_orchestrator/deploy/deployment_context.py
+++ b/stack_orchestrator/deploy/deployment_context.py
@@ -14,19 +14,15 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hashlib
-import os
from pathlib import Path
from stack_orchestrator import constants
-from stack_orchestrator.util import get_yaml
from stack_orchestrator.deploy.stack import Stack
from stack_orchestrator.deploy.spec import Spec
class DeploymentContext:
deployment_dir: Path
- id: str
spec: Spec
stack: Stack
@@ -39,14 +35,9 @@ class DeploymentContext:
def get_env_file(self):
return self.deployment_dir.joinpath(constants.config_file_name)
- def get_deployment_file(self):
- return self.deployment_dir.joinpath(constants.deployment_file_name)
-
- def get_compose_dir(self):
- return self.deployment_dir.joinpath(constants.compose_dir_name)
-
- def get_cluster_id(self):
- return self.id
+ # TODO: implement me
+ def get_cluster_name(self):
+ return None
def init(self, dir):
self.deployment_dir = dir
@@ -54,16 +45,3 @@ class DeploymentContext:
self.spec.init_from_file(self.get_spec_file())
self.stack = Stack(self.spec.obj["stack"])
self.stack.init_from_file(self.get_stack_file())
- deployment_file_path = self.get_deployment_file()
- if deployment_file_path.exists():
- with deployment_file_path:
- obj = get_yaml().load(open(deployment_file_path, "r"))
- self.id = obj[constants.cluster_id_key]
- # Handle the case of a legacy deployment with no file
- # Code below is intended to match the output from _make_default_cluster_name()
- # TODO: remove when we no longer need to support legacy deployments
- else:
- path = os.path.realpath(os.path.abspath(self.get_compose_dir()))
- unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
- hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
- self.id = f"{constants.cluster_name_prefix}{hash}"
diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py
index 9eaea30c..88ce0b2a 100644
--- a/stack_orchestrator/deploy/deployment_create.py
+++ b/stack_orchestrator/deploy/deployment_create.py
@@ -20,7 +20,6 @@ from pathlib import Path
from typing import List
import random
from shutil import copy, copyfile, copytree
-from secrets import token_hex
import sys
from stack_orchestrator import constants
from stack_orchestrator.opts import opts
@@ -277,7 +276,7 @@ def init(ctx, config, config_file, kube_config, image_registry, output, map_port
# call it from other commands, bypassing the click decoration stuff
def init_operation(deploy_command_context, stack, deployer_type, config,
config_file, kube_config, image_registry, output, map_ports_to_host):
-
+ yaml = get_yaml()
default_spec_file_content = call_stack_deploy_init(deploy_command_context)
spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
if deployer_type == "k8s":
@@ -312,6 +311,8 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
new_config = config_file_variables
merged_config = {**new_config, **orig_config}
spec_file_content.update({"config": merged_config})
+ if opts.o.debug:
+ print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")
ports = _get_mapped_ports(stack, map_ports_to_host)
spec_file_content.update({"network": {"ports": ports}})
@@ -323,11 +324,8 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
volume_descriptors[named_volume] = f"./data/{named_volume}"
spec_file_content["volumes"] = volume_descriptors
- if opts.o.debug:
- print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")
-
with open(output, "w") as output_file:
- get_yaml().dump(spec_file_content, output_file)
+ yaml.dump(spec_file_content, output_file)
def _write_config_file(spec_file: Path, config_env_file: Path):
@@ -353,13 +351,6 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path):
copy(path, os.path.join(directory, os.path.basename(path)))
-def _create_deployment_file(deployment_dir: Path):
- deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name)
- cluster = f"{constants.cluster_name_prefix}{token_hex(8)}"
- with open(deployment_file_path, "w") as output_file:
- output_file.write(f"{constants.cluster_id_key}: {cluster}\n")
-
-
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@@ -392,7 +383,6 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file)))
- _create_deployment_file(deployment_dir_path)
# Copy any config varibles from the spec file into an env file suitable for compose
_write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name))
# Copy any k8s config file into the deployment dir