Merge branch 'main' into telackey/deployer
# Conflicts:
#	stack_orchestrator/deploy/compose/deploy_docker.py
#	stack_orchestrator/deploy/deploy.py
#	stack_orchestrator/deploy/deployment.py
Commit 2c5159ff93
@@ -13,12 +13,16 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http:#www.gnu.org/licenses/>.

cluster_name_prefix = "laconic-"
stack_file_name = "stack.yml"
spec_file_name = "spec.yml"
config_file_name = "config.env"
deployment_file_name = "deployment.yml"
compose_dir_name = "compose"
compose_deploy_type = "compose"
k8s_kind_deploy_type = "k8s-kind"
k8s_deploy_type = "k8s"
cluster_id_key = "cluster-id"
kube_config_key = "kube-config"
deploy_to_key = "deploy-to"
network_key = "network"
@@ -0,0 +1,22 @@
version: "3.2"

services:
  proxy-server:
    image: cerc/watcher-ts:local
    restart: on-failure
    working_dir: /app/packages/cli
    environment:
      ENABLE_PROXY: ${ENABLE_PROXY:-true}
      PROXY_UPSTREAM: ${CERC_PROXY_UPSTREAM}
      PROXY_ORIGIN_HEADER: ${CERC_PROXY_ORIGIN_HEADER}
    command: ["sh", "-c", "./run.sh"]
    volumes:
      - ../config/proxy-server/run.sh:/app/packages/cli/run.sh
    ports:
      - "4000"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "4000"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s
@@ -12,38 +12,6 @@ services:
      - app_builds:/app-builds
      - ../config/uniswap-interface/build-app.sh:/app/build-app.sh

  uniswap-glob-host:
    image: cerc/urbit-globs-host:local
    restart: unless-stopped
    depends_on:
      uniswap-interface:
        condition: service_completed_successfully
    command: ["./host-uniswap-glob.sh"]
    volumes:
      - app_globs:/app-globs
      - ../config/uniswap-interface/host-uniswap-glob.sh:/app/host-uniswap-glob.sh
    ports:
      - "3000"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "3000"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s

  uniswap-gql-proxy:
    image: cerc/uniswap-interface:local
    restart: on-failure
    command: ["bash", "-c", "yarn proxy-gql"]
    ports:
      - "4000"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "4000"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s

volumes:
  app_builds:
  app_globs:
@@ -4,6 +4,9 @@ services:
  urbit-fake-ship:
    restart: unless-stopped
    image: tloncorp/vere
    environment:
      CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs-glob-host:5001}
      CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs-glob-host:8080}
    entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-uniswap-app.sh && tail -f /dev/null"]
    volumes:
      - urbit_data:/urbit

@@ -20,7 +23,24 @@ services:
      retries: 15
      start_period: 10s

  ipfs-glob-host:
    image: ipfs/kubo:master-2023-02-20-714a968
    volumes:
      - ipfs-import:/import
      - ipfs-data:/data/ipfs
    ports:
      - "8080"
      - "5001"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "5001"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s

volumes:
  urbit_data:
  app_builds:
  app_globs:
  ipfs-import:
  ipfs-data:
stack_orchestrator/data/config/proxy-server/run.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/bin/sh

if [ "$ENABLE_PROXY" = "true" ]; then
  echo "Proxy server enabled"
  yarn proxy
else
  echo "Proxy server disabled, exiting"
  exit 0
fi
@@ -4,6 +4,11 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

echo "Using IPFS endpoint ${CERC_IPFS_GLOB_HOST_ENDPOINT} for hosting globs"
echo "Using IPFS server endpoint ${CERC_IPFS_SERVER_ENDPOINT} for reading glob files"
ipfs_host_endpoint=${CERC_IPFS_GLOB_HOST_ENDPOINT}
ipfs_server_endpoint=${CERC_IPFS_SERVER_ENDPOINT}

uniswap_app_build='/app-builds/uniswap/build'
uniswap_desk_dir='/urbit/zod/uniswap'

@@ -96,15 +101,17 @@ rm "${uniswap_desk_dir}/desk.ship"
hood "commit %uniswap"
dojo "-landscape!make-glob %uniswap /build"

echo "Copying over glob file to mounted volume"
mkdir -p /app-globs/uniswap
cp /urbit/zod/.urb/put/* /app-globs/uniswap/

glob_file=$(ls -1 -c zod/.urb/put | head -1)
echo "Glob filename: ${glob_file}"
echo "Created glob file: ${glob_file}"

upload_response=$(curl -X POST -F file=@./zod/.urb/put/${glob_file} ${ipfs_host_endpoint}/api/v0/add)
glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//')

echo "Glob file uploaded to IPFS:"
echo "{ cid: ${glob_cid}, filename: ${glob_file} }"

# Curl and wait for the glob to be hosted
glob_url="http://uniswap-glob-host:3000/${glob_file}"
glob_url="${ipfs_server_endpoint}/ipfs/${glob_cid}?filename=${glob_file}"

echo "Checking if glob file hosted at ${glob_url}"
while true; do

@@ -128,7 +135,7 @@ cat << EOF > "${uniswap_desk_dir}/desk.docket-0"
    color+0xcd.75df
    image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg'
    base+'uniswap'
    glob-http+['http://uniswap-glob-host:3000/${glob_file}' ${glob_hash}]
    glob-http+['${glob_url}' ${glob_hash}]
    version+[0 0 1]
    website+'https://uniswap.org/'
    license+'MIT'
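For reference, the upload step above reduces to a single call against Kubo's HTTP API. An equivalent manual invocation against the in-stack IPFS node might look like this (the host port comes from the README's default port mapping; the glob filename is illustrative):

```bash
# Upload a glob file to the IPFS node; the response JSON carries the CID in the "Hash" field
curl -X POST -F file=@./zod/.urb/put/glob-0vabcd.glob \
  http://localhost:5001/api/v0/add
# Typical response: {"Name":"glob-0vabcd.glob","Hash":"<cid>","Size":"..."}
```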
@@ -1,23 +0,0 @@
#!/bin/bash

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

# Use config from mounted volume (when running web-app along with watcher stack)
echo "Waiting for uniswap app glob"
while [ ! -d /app-globs/uniswap ]; do
  echo "Glob directory not found, retrying in 5 seconds..."
  sleep 5
done


# Copy to a new globs directory
mkdir -p globs
cp -r /app-globs/uniswap/* ./globs

# Serve the glob file
cd globs
echo "Hosting glob file at port 3000"
python3 -m http.server 3000 --bind 0.0.0.0
@@ -1,21 +1,22 @@
#!/bin/bash

# $1: Glob file URL (eg. https://xyz.com/glob-abcd.glob)
# $2: Uniswap desk dir (default: ./zod/uniswap)
# $1: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
# $2: Glob file hash (eg. 0vabcd)
# $3: Urbit ship's pier dir (default: ./zod)

if [ -z "$1" ]; then
  echo "Glob file URL arg not provided"
if [ "$#" -lt 2 ]; then
  echo "Insufficient arguments"
  exit 0
fi

glob_url=$1
glob_file=$(basename "$glob_url")
glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/")
echo "Using glob file ${glob_file}"
glob_hash=$2
echo "Using glob file from ${glob_url} with hash ${glob_hash}"

# Default pier dir: ./zod
# Default desk dir: ./zod/uniswap
uniswap_desk_dir="${2:-./zod/uniswap}"

pier_dir="${3:-./zod}"
uniswap_desk_dir="${pier_dir}/uniswap"
echo "Using ${uniswap_desk_dir} as the Uniswap desk dir path"

# Fire curl requests to perform operations on the ship
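For example, a direct invocation of this script might look like the following (the glob URL and hash values are illustrative; the pier dir argument falls back to `./zod` when omitted):

```bash
# Install the app on a locally running fake ship
./install-uniswap-app.sh \
  "http://localhost:8080/ipfs/<glob-cid>?filename=glob-0vabcd.glob" \
  "0vabcd" \
  ./zod
```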
@@ -1,18 +1,21 @@
#!/bin/bash

# $1: Remote user host
# $2: Path to run the app installation in (where urbit ship dir is located)
# $3: Glob file URL (eg. https://xyz.com/glob-abcd.glob)
# $2: Remote Urbit ship's pier dir path (eg. /home/user/zod)
# $3: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
# $4: Glob file hash (eg. 0vabcd)

if [ "$#" -ne 3 ]; then
  echo "Usage: $0 <username@remote_host> </path/to/remote/folder> <glob_url>"
if [ "$#" -ne 4 ]; then
  echo "Incorrect number of arguments"
  echo "Usage: $0 <username@remote_host> </path/to/remote/pier/folder> <glob_url> <glob_hash>"
  exit 1
fi

remote_user_host="$1"
remote_folder="$2"
remote_pier_folder="$2"
glob_url="$3"
glob_hash="$4"

installation_script="./install-uniswap-app.sh"

ssh "$remote_user_host" "cd $remote_folder && bash -s $glob_url" < "$installation_script"
ssh "$remote_user_host" "bash -s $glob_url $glob_hash $remote_pier_folder" < "$installation_script"
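A sketch of invoking this remote-deployment wrapper (the script filename shown here is hypothetical, since it isn't visible in this diff; the arguments follow its usage string, and all values are illustrative):

```bash
# Pushes install-uniswap-app.sh over ssh and runs it against the remote pier
./remote-deploy-uniswap.sh user@example.org /home/user/zod \
  "http://ipfs.example.org:8080/ipfs/<glob-cid>?filename=glob-0vabcd.glob" "0vabcd"
```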
stack_orchestrator/data/stacks/proxy-server/README.md (new file, 79 lines)

@@ -0,0 +1,79 @@
# Proxy Server

Instructions to set up and deploy an HTTP proxy server

## Setup

Clone the required repository:

```bash
laconic-so --stack proxy-server setup-repositories --pull

# If this throws an error as a result of a repository already being checked out to a branch/tag, remove the repositories mentioned and re-run the command
```

Build the container image:

```bash
laconic-so --stack proxy-server build-containers
```

## Create a deployment

* First, create a spec file for the deployment, which will allow mapping the stack's ports and volumes to the host:

  ```bash
  laconic-so --stack proxy-server deploy init --output proxy-server-spec.yml
  ```

* Edit `network` in the spec file to map container ports to the same ports on the host:

  ```yml
  ...
  network:
    ports:
      proxy-server:
        - '4000:4000'
  ...
  ```

* Once you've made any needed changes to the spec file, create a deployment from it:

  ```bash
  laconic-so --stack proxy-server deploy create --spec-file proxy-server-spec.yml --deployment-dir proxy-server-deployment
  ```

* Inside the deployment directory, open the file `config.env` and set the following env variables:

  ```bash
  # Whether to run the proxy server (Optional) (Default: true)
  ENABLE_PROXY=

  # Upstream endpoint
  # (Eg. https://api.example.org)
  CERC_PROXY_UPSTREAM=

  # Origin header to be used (Optional)
  # (Eg. https://app.example.org)
  CERC_PROXY_ORIGIN_HEADER=
  ```

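* For reference, a filled-in `config.env` might look like this (values are illustrative only, not defaults shipped with the stack):

  ```bash
  # Run the proxy, forwarding requests to the example upstream
  ENABLE_PROXY=true
  CERC_PROXY_UPSTREAM=https://api.example.org
  CERC_PROXY_ORIGIN_HEADER=https://app.example.org
  ```
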
## Start the stack

Start the deployment:

```bash
laconic-so deployment --dir proxy-server-deployment start
```

* List and check the health status of the container using `docker ps`

* The proxy server will now be listening at http://localhost:4000

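* To smoke-test it, send a request through the proxy. Assuming the upstream serves a GraphQL API under `/v1/graphql` (the path is illustrative and depends on your upstream):

  ```bash
  # A minimal GraphQL query routed through the proxy to the configured upstream
  curl -s -X POST http://localhost:4000/v1/graphql \
    -H 'Content-Type: application/json' \
    -d '{"query": "query { __typename }"}'
  ```
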
## Clean up

To stop the service running in the background:

```bash
laconic-so deployment --dir proxy-server-deployment stop
```
stack_orchestrator/data/stacks/proxy-server/stack.yml (new file, 8 lines)

@@ -0,0 +1,8 @@
version: "0.1"
name: proxy-server
repos:
  - github.com/cerc-io/watcher-ts@v0.2.78
containers:
  - cerc/watcher-ts
pods:
  - proxy-server
@@ -41,10 +41,11 @@ network:
  ports:
    urbit-fake-ship:
      - '8080:80'
    uniswap-glob-host:
      - '3000:3000'
    uniswap-gql-proxy:
    proxy-server:
      - '4000:4000'
    ipfs-glob-host:
      - '8081:8080'
      - '5001:5001'
...
```

@@ -63,7 +64,7 @@ laconic-so --stack uniswap-urbit-app deploy create --spec-file uniswap-urbit-app

## Set env variables

Inside the deployment directory, open the file `config.env` and add variable for infura key :
Inside the deployment directory, open the file `config.env` and set the following env variables:

  ```bash
  # External RPC endpoints

@@ -72,8 +73,36 @@ Inside the deployment directory, open the file `config.env` and add variable for

  # Uniswap API GQL Endpoint
  # Set this to GQL proxy server endpoint for uniswap app
  # (Eg. http://localhost:4000/graphql)
  # (Eg. http://localhost:4000/v1/graphql)
  # (Eg. https://abc.xyz.com/v1/graphql)
  CERC_UNISWAP_GQL=

  # Optional

  # Whether to run the proxy GQL server
  # (Disable only if proxy not required to be run) (Default: true)
  ENABLE_PROXY=

  # Proxy server configuration
  # Used only if proxy is enabled

  # Upstream API URL
  # (Eg. https://api.example.org)
  CERC_PROXY_UPSTREAM=https://api.uniswap.org

  # Origin header to be used in the proxy
  # (Eg. https://app.example.org)
  CERC_PROXY_ORIGIN_HEADER=https://app.uniswap.org

  # IPFS configuration

  # IPFS endpoint to host the glob file on
  # (Default: http://ipfs-glob-host:5001 pointing to in-stack IPFS node)
  CERC_IPFS_GLOB_HOST_ENDPOINT=

  # IPFS endpoint to fetch the glob file from
  # (Default: http://ipfs-glob-host:8080 pointing to in-stack IPFS node)
  CERC_IPFS_SERVER_ENDPOINT=
  ```

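For example, to point the stack at an external IPFS node rather than the in-stack one, the two endpoints above could be set like this (hostnames are illustrative):

  ```bash
  # Glob files are uploaded to the API port and fetched from the gateway port
  CERC_IPFS_GLOB_HOST_ENDPOINT=http://ipfs.example.org:5001
  CERC_IPFS_SERVER_ENDPOINT=http://ipfs.example.org:8080
  ```
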
## Start the stack

@@ -109,7 +138,7 @@ laconic-so deployment --dir uniswap-urbit-app-deployment start

## Clean up

To stop all uniswap-urbit-app services running in the background, while preserving chain data:
To stop all uniswap-urbit-app services running in the background, while preserving data:

```bash
laconic-so deployment --dir uniswap-urbit-app-deployment stop
@@ -2,9 +2,12 @@ version: "0.1"
name: uniswap-urbit-app
repos:
  - github.com/cerc-io/uniswap-interface@laconic # TODO: Use release
  - github.com/cerc-io/watcher-ts@v0.2.78
containers:
  - cerc/uniswap-interface
  - cerc/watcher-ts
  - cerc/urbit-globs-host
pods:
  - uniswap-interface
  - proxy-server
  - uniswap-urbit
@@ -46,6 +46,13 @@ class DockerDeployer(Deployer):
        except DockerException as e:
            raise DeployerException(e)

    def status(self):
        try:
            for p in self.docker.compose.ps():
                print(f"{p.name}\t{p.state.status}")
        except DockerException as e:
            raise DeployerException(e)

    def ps(self):
        try:
            return self.docker.compose.ps()
@@ -24,6 +24,8 @@ from importlib import resources
import subprocess
import click
from pathlib import Path
from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
from stack_orchestrator.deploy.deployer import Deployer, DeployerException
from stack_orchestrator.deploy.deployer_factory import getDeployer

@@ -70,6 +72,9 @@ def create_deploy_context(
        cluster,
        env_file,
        deploy_to) -> DeployCommandContext:
    # Extract the cluster name from the deployment, if we have one
    if deployment_context and cluster is None:
        cluster = deployment_context.get_cluster_id()
    cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
    deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
                           compose_project_name=cluster_context.cluster,

@@ -107,6 +112,14 @@ def down_operation(ctx, delete_volumes, extra_args_list):
        ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)


def status_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose status")
        ctx.obj.deployer.status()


def update_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:

@@ -261,6 +274,22 @@ def _make_runtime_env(ctx):
    return container_exec_env


def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
    # Create default unique, stable cluster name from config file path and stack name if provided
    if deployment:
        path = os.path.realpath(os.path.abspath(compose_dir))
    else:
        path = "internal"
    unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
    if opts.o.debug:
        print(f"pre-hash descriptor: {unique_cluster_descriptor}")
    hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
    cluster = f"{constants.cluster_name_prefix}{hash}"
    if opts.o.debug:
        print(f"Using cluster name: {cluster}")
    return cluster


# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):

@@ -278,18 +307,9 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
        compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")

    if cluster is None:
        # Create default unique, stable cluster name from config file path and stack name if provided
        if deployment:
            path = os.path.realpath(os.path.abspath(compose_dir))
        else:
            path = "internal"
        unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
        if ctx.debug:
            print(f"pre-hash descriptor: {unique_cluster_descriptor}")
        hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
        cluster = f"laconic-{hash}"
        if ctx.verbose:
            print(f"Using cluster name: {cluster}")
        cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
    else:
        _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)

    # See: https://stackoverflow.com/a/20885799/1701505
    from stack_orchestrator import data
@@ -35,6 +35,10 @@ class Deployer(ABC):
    def ps(self):
        pass

    @abstractmethod
    def status(self):
        pass

    @abstractmethod
    def port(self, service, private_port):
        pass
@@ -20,6 +20,8 @@ from stack_orchestrator import constants
from stack_orchestrator.deploy.images import push_images_operation
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context, update_operation
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.deploy.webapp import update_from_registry as webapp_update

@@ -53,7 +55,7 @@ def make_deploy_context(ctx) -> DeployCommandContext:
    context: DeploymentContext = ctx.obj
    stack_file_path = context.get_stack_file()
    env_file = context.get_env_file()
    cluster_name = context.get_cluster_name()
    cluster_name = context.get_cluster_id()
    if constants.deploy_to_key in context.spec.obj:
        deployment_type = context.spec.obj[constants.deploy_to_key]
    else:

@@ -148,7 +150,8 @@ def logs(ctx, tail, follow, extra_args):
@command.command()
@click.pass_context
def status(ctx):
    print(f"Context: {ctx.parent.obj}")
    ctx.obj = make_deploy_context(ctx)
    status_operation(ctx)


@command.command()
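With this change, `status` becomes a working subcommand of the deployment command group rather than a placeholder. Following the same CLI pattern the READMEs above use for `start` and `stop`, it would be invoked as:

```bash
# For compose deployments, prints one "<name>\t<status>" line per container
laconic-so deployment --dir proxy-server-deployment status
```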
@@ -14,15 +14,19 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http:#www.gnu.org/licenses/>.

import hashlib
import os
from pathlib import Path

from stack_orchestrator import constants
from stack_orchestrator.util import get_yaml
from stack_orchestrator.deploy.stack import Stack
from stack_orchestrator.deploy.spec import Spec


class DeploymentContext:
    deployment_dir: Path
    id: str
    spec: Spec
    stack: Stack

@@ -35,9 +39,14 @@ class DeploymentContext:
    def get_env_file(self):
        return self.deployment_dir.joinpath(constants.config_file_name)

    # TODO: implement me
    def get_cluster_name(self):
        return None
    def get_deployment_file(self):
        return self.deployment_dir.joinpath(constants.deployment_file_name)

    def get_compose_dir(self):
        return self.deployment_dir.joinpath(constants.compose_dir_name)

    def get_cluster_id(self):
        return self.id

    def init(self, dir):
        self.deployment_dir = dir

@@ -45,3 +54,16 @@ class DeploymentContext:
        self.spec.init_from_file(self.get_spec_file())
        self.stack = Stack(self.spec.obj["stack"])
        self.stack.init_from_file(self.get_stack_file())
        deployment_file_path = self.get_deployment_file()
        if deployment_file_path.exists():
            with deployment_file_path:
                obj = get_yaml().load(open(deployment_file_path, "r"))
                self.id = obj[constants.cluster_id_key]
        # Handle the case of a legacy deployment with no file
        # Code below is intended to match the output from _make_default_cluster_name()
        # TODO: remove when we no longer need to support legacy deployments
        else:
            path = os.path.realpath(os.path.abspath(self.get_compose_dir()))
            unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
            hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
            self.id = f"{constants.cluster_name_prefix}{hash}"
@@ -20,6 +20,7 @@ from pathlib import Path
from typing import List
import random
from shutil import copy, copyfile, copytree
from secrets import token_hex
import sys
from stack_orchestrator import constants
from stack_orchestrator.opts import opts

@@ -276,7 +277,7 @@ def init(ctx, config, config_file, kube_config, image_registry, output, map_port
# call it from other commands, bypassing the click decoration stuff
def init_operation(deploy_command_context, stack, deployer_type, config,
                   config_file, kube_config, image_registry, output, map_ports_to_host):
    yaml = get_yaml()

    default_spec_file_content = call_stack_deploy_init(deploy_command_context)
    spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
    if deployer_type == "k8s":

@@ -311,8 +312,6 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
            new_config = config_file_variables
            merged_config = {**new_config, **orig_config}
            spec_file_content.update({"config": merged_config})
    if opts.o.debug:
        print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")

    ports = _get_mapped_ports(stack, map_ports_to_host)
    spec_file_content.update({"network": {"ports": ports}})

@@ -324,8 +323,11 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
            volume_descriptors[named_volume] = f"./data/{named_volume}"
        spec_file_content["volumes"] = volume_descriptors

    if opts.o.debug:
        print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")

    with open(output, "w") as output_file:
        yaml.dump(spec_file_content, output_file)
        get_yaml().dump(spec_file_content, output_file)


def _write_config_file(spec_file: Path, config_env_file: Path):

@@ -351,6 +353,13 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path):
        copy(path, os.path.join(directory, os.path.basename(path)))


def _create_deployment_file(deployment_dir: Path):
    deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name)
    cluster = f"{constants.cluster_name_prefix}{token_hex(8)}"
    with open(deployment_file_path, "w") as output_file:
        output_file.write(f"{constants.cluster_id_key}: {cluster}\n")


@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")

@@ -383,6 +392,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
    # Copy spec file and the stack file into the deployment dir
    copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
    copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file)))
    _create_deployment_file(deployment_dir_path)
    # Copy any config variables from the spec file into an env file suitable for compose
    _write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name))
    # Copy any k8s config file into the deployment dir
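`_create_deployment_file()` above writes a single-key YAML file recording the randomly generated cluster id, which `DeploymentContext.init()` reads back on every subsequent command. A sketch of what the resulting file holds (the deployment dir name and the id are illustrative; `token_hex(8)` yields 16 hex characters):

```bash
$ cat proxy-server-deployment/deployment.yml
cluster-id: laconic-8f0c2a9d3b41e6a7
```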
@@ -28,6 +28,12 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.util import error_exit


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


def _check_delete_exception(e: client.exceptions.ApiException):
    if e.status == 404:
        if opts.o.debug:

@@ -74,6 +80,7 @@ class K8sDeployer(Deployer):
        self.core_api = client.CoreV1Api()
        self.networking_api = client.NetworkingV1Api()
        self.apps_api = client.AppsV1Api()
        self.custom_obj_api = client.CustomObjectsApi()

    def up(self, detach, services):

@@ -204,15 +211,82 @@ class K8sDeployer(Deployer):
            # Destroy the kind cluster
            destroy_cluster(self.kind_cluster_name)

    def ps(self):
    def status(self):
        self.connect_api()
        # Call whatever API we need to get the running container list
        ret = self.core_api.list_pod_for_all_namespaces(watch=False)
        if ret.items:
            for i in ret.items:
                print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
        ret = self.core_api.list_node(pretty=True, watch=False)
        return []
        all_pods = self.core_api.list_pod_for_all_namespaces(watch=False)
        pods = []

        if all_pods.items:
            for p in all_pods.items:
                if self.cluster_info.app_name in p.metadata.name:
                    pods.append(p)

        if not pods:
            return

        hostname = "?"
        ip = "?"
        tls = "?"
        try:
            ingress = self.networking_api.read_namespaced_ingress(namespace=self.k8s_namespace,
                                                                  name=self.cluster_info.get_ingress().metadata.name)

            cert = self.custom_obj_api.get_namespaced_custom_object(
                group="cert-manager.io",
                version="v1",
                namespace=self.k8s_namespace,
                plural="certificates",
                name=ingress.spec.tls[0].secret_name
            )

            hostname = ingress.spec.tls[0].hosts[0]
            ip = ingress.status.load_balancer.ingress[0].ip
            tls = "notBefore: %s, notAfter: %s" % (cert["status"]["notBefore"], cert["status"]["notAfter"])
        except:  # noqa: E722
            pass

        print("Ingress:")
        print("\tHostname:", hostname)
        print("\tIP:", ip)
        print("\tTLS:", tls)
        print("")
        print("Pods:")

        for p in pods:
            if p.metadata.deletion_timestamp:
                print(f"\t{p.metadata.namespace}/{p.metadata.name}: Terminating ({p.metadata.deletion_timestamp})")
            else:
                print(f"\t{p.metadata.namespace}/{p.metadata.name}: Running ({p.metadata.creation_timestamp})")

    def ps(self):
        self.connect_api()
        pods = self.core_api.list_pod_for_all_namespaces(watch=False)

        ret = []

        for p in pods.items:
            if self.cluster_info.app_name in p.metadata.name:
                pod_ip = p.status.pod_ip
                ports = AttrDict()
                for c in p.spec.containers:
                    if c.ports:
                        for prt in c.ports:
                            ports[str(prt.container_port)] = [AttrDict({
                                "HostIp": pod_ip,
                                "HostPort": prt.container_port
                            })]

                ret.append(AttrDict({
                    "id": f"{p.metadata.namespace}/{p.metadata.name}",
                    "name": p.metadata.name,
                    "namespace": p.metadata.namespace,
                    "network_settings": AttrDict({
                        "ports": ports
                    })
                }))

        return ret

    def port(self, service, private_port):
        # Since we handle the port mapping, need to figure out where this comes from