forked from cerc-io/stack-orchestrator
Merge branch 'main' into ci-test
commit fbd340bd34
54
.gitea/workflows/test-container-registry.yml
Normal file
@@ -0,0 +1,54 @@
name: Container Registry Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-container-registry'
      - '.gitea/workflows/test-container-registry.yml'
      - 'tests/container-registry/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '6 19 * * *'

jobs:
  test:
    name: "Run container registry hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Install ed" # Only needed until we remove the need to edit the spec file
        run: apt update && apt install -y ed
      - name: "Run container registry deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/container-registry/run-test.sh
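The steps above can also be reproduced outside CI; a minimal sketch, assuming a checkout of this repository with Python 3.8+ and Docker available (the `cgroup-helper.sh` sourcing is specific to the CI runner image and is omitted here):

```
# Mirror the workflow: build the local shiv package, install kind/kubectl, then run the test
pip install shiv
./scripts/create_build_tag_file.sh
./scripts/build_shiv_package.sh
./tests/scripts/install-kind.sh
./tests/scripts/install-kubectl.sh
./tests/container-registry/run-test.sh
```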
1
.gitea/workflows/triggers/test-container-registry
Normal file
@@ -0,0 +1 @@
Change this file to trigger running the test-container-registry CI job
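Because the workflow's `paths` filter excludes everything except the trigger and test files, committing any change to this trigger file kicks off the job; a minimal sketch, assuming a clone with push access:

```
# Touch the trigger file and push to start the container registry CI job
date >> .gitea/workflows/triggers/test-container-registry
git add .gitea/workflows/triggers/test-container-registry
git commit -m "Trigger test-container-registry CI job"
git push
```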
@@ -15,101 +15,106 @@ To avoid hiccups on Mac M1/M2 and any local machine nuances that may affect the
16 GB Memory / 8 Intel vCPUs / 160 GB Disk.

1. Login to the droplet as root (either by SSH key or password set in the DO console)

```
ssh root@IP
```
1. Get the install script, give it executable permissions, and run it:

2. Get the install script, give it executable permissions, and run it:

```
curl -o install.sh https://raw.githubusercontent.com/cerc-io/stack-orchestrator/main/scripts/quick-install-linux.sh
```
```
chmod +x install.sh
```
```
bash install.sh
```
1. Confirm docker was installed and activate the changes in `~/.profile`:

3. Confirm docker was installed and activate the changes in `~/.profile`:

```
docker run hello-world
```
```
source ~/.profile
```
1. Verify installation:

4. Verify installation:

```
laconic-so version
```
## Setup the laconic fixturenet stack

1. Get the repositories

```
laconic-so --stack fixturenet-laconic-loaded setup-repositories --include git.vdb.to/cerc-io/laconicd,git.vdb.to/cerc-io/laconic-sdk,git.vdb.to/cerc-io/laconic-registry-cli,git.vdb.to/cerc-io/laconic-console
```
2. Set this environment variable to the Laconic self-hosted Gitea instance:

1. Build the containers:

```
export CERC_NPM_REGISTRY_URL=https://git.vdb.to/api/packages/cerc-io/npm/
```
```
laconic-so --stack fixturenet-laconic-loaded build-containers
```

3. Build the containers:

It's possible to run into an `ESOCKETTIMEDOUT` error, e.g., `error An unexpected error occurred: "https://registry.yarnpkg.com/@material-ui/icons/-/icons-4.11.3.tgz: ESOCKETTIMEDOUT"`. This may happen even if you have a great internet connection. In that case, re-run the `build-containers` command.
1. Set this environment variable to your droplet's IP address or fully qualified DNS host name if it has one:

4. Set this environment variable to your droplet's IP address:

```
export BACKEND_ENDPOINT=http://<your-IP-or-hostname>:9473
```
e.g.
```
export BACKEND_ENDPOINT=http://my-test-server.example.com:9473
```
```
export LACONIC_HOSTED_ENDPOINT=http://<your-IP>
```
1. Create a deployment directory for the stack:
```
laconic-so --stack fixturenet-laconic-loaded deploy init --output laconic-loaded.spec --map-ports-to-host any-same --config LACONIC_HOSTED_ENDPOINT=$BACKEND_ENDPOINT
```
```
laconic-so --stack fixturenet-laconic-loaded deploy create --deployment-dir laconic-loaded-deployment --spec-file laconic-loaded.spec
```
2. Start the stack:

5. Deploy the stack:
```
laconic-so deployment --dir laconic-loaded-deployment start
```
```
laconic-so --stack fixturenet-laconic-loaded deploy up
```
3. Check the logs:

6. Check the logs:
```
laconic-so deployment --dir laconic-loaded-deployment logs
```
```
laconic-so --stack fixturenet-laconic-loaded deploy logs
```
You'll see output from `laconicd` and the block height should be >1 to confirm it is running:
```
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:29PM INF indexed block exents height=12 module=txindex server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF Timed out dur=4976.960115 height=13 module=consensus round=0 server=node step=1
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received proposal module=consensus proposal={"Type":32,"block_id":{"hash":"D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4","parts":{"hash":"22411A20B7F14CDA33244420FBDDAF24450C0628C7A06034FF22DAC3699DDCC8","total":1}},"height":13,"pol_round":-1,"round":0,"signature":"DEuqnaQmvyYbUwckttJmgKdpRu6eVm9i+9rQ1pIrV2PidkMNdWRZBLdmNghkIrUzGbW8Xd7UVJxtLRmwRASgBg==","timestamp":"2023-04-18T21:30:01.49450663Z"} server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received complete proposal block hash=D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4 height=13 module=consensus server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF finalizing commit of block hash={} height=13 module=consensus num_txs=0 root=1A8CA1AF139CCC80EC007C6321D8A63A46A793386EE2EDF9A5CA0AB2C90728B7 server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF minted coins from module account amount=2059730459416582643aphoton from=mint module=x/bank
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF executed block height=13 module=state num_invalid_txs=0 num_valid_txs=0 server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF commit synced commit=436F6D6D697449447B5B363520313037203630203232372039352038352032303820313334203231392032303520313433203130372031343920313431203139203139322038362031323720362031383520323533203137362031333820313735203135392031383620323334203135382031323120313431203230342037335D3A447D
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF committed state app_hash=416B3CE35F55D086DBCD8F6B958D13C0567F06B9FDB08AAF9FBAEA9E798DCC49 height=13 module=state num_txs=0 server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF indexed block exents height=13 module=txindex server=node
```
4. Confirm operation of the registry CLI:

7. Confirm operation of the registry CLI:
```
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic cns status"
```
```
laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns status"
```
```
{
  "version": "0.3.0",
  "node": {
    "id": "4216af2ac9f68bda33a38803fc1b5c9559312c1d",
@@ -136,11 +141,13 @@ laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns status
  "num_peers": "0",
  "peers": [],
  "disk_usage": "292.0K"
}
```

## Configure Digital Ocean firewall

(Note this step may not be necessary depending on the droplet image used)

Let's open some ports.

1. In the Digital Ocean web console, navigate to your droplet's main page. Select the "Networking" tab and scroll down to "Firewall".
@@ -12,8 +12,8 @@ spec_file_name="${stack_name}-spec.yml"
deployment_dir_name="${stack_name}-deployment"
rm -f ${spec_file_name}
rm -rf ${deployment_dir_name}
laconic-so --stack ${stack_name} deploy --deploy-to k8s-kind init --output ${spec_file_name}
laconic-so --stack ${stack_name} deploy --deploy-to k8s-kind create --deployment-dir ${deployment_dir_name} --spec-file ${spec_file_name}
laconic-so --stack ${stack_name} deploy --deploy-to compose init --output ${spec_file_name}
laconic-so --stack ${stack_name} deploy --deploy-to compose create --deployment-dir ${deployment_dir_name} --spec-file ${spec_file_name}
#laconic-so deployment --dir ${deployment_dir_name} start
#laconic-so deployment --dir ${deployment_dir_name} ps
#laconic-so deployment --dir ${deployment_dir_name} stop
@@ -27,7 +27,7 @@ import subprocess

import click
import importlib.resources
from pathlib import Path
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external, warn_exit
from stack_orchestrator.base import get_npm_registry_url

# TODO: find a place for this
@@ -164,6 +164,8 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
    containers_in_scope = []
    if stack:
        stack_config = get_parsed_stack_config(stack)
        if "containers" not in stack_config or stack_config["containers"] is None:
            warn_exit(f"stack {stack} does not define any containers")
        containers_in_scope = stack_config['containers']
    else:
        containers_in_scope = all_containers
@@ -0,0 +1,13 @@
services:
  registry:
    image: registry:2.8
    restart: always
    environment:
      REGISTRY_LOG_LEVEL: ${REGISTRY_LOG_LEVEL}
    volumes:
      - registry-data:/var/lib/registry
    ports:
      - "5000"

volumes:
  registry-data:
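Once the registry pod is up, it can be exercised with the standard Docker push/pull flow; a minimal sketch, assuming the registry ends up reachable at `localhost:80` through the http-proxy ingress configured in the test script later in this diff:

```
# Push an image into the hosted registry and confirm it is listed in the catalog
docker pull hello-world
docker tag hello-world localhost:80/hello-world
docker push localhost:80/hello-world
curl -s http://localhost:80/v2/_catalog
```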
12
stack_orchestrator/data/compose/docker-compose-mars-v2.yml
Normal file
@@ -0,0 +1,12 @@
version: "3.2"

services:
  mars:
    image: cerc/mars-v2:local
    restart: always
    ports:
      - "3000:3000"
    environment:
      - URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com
      - URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com
      - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
@@ -0,0 +1,13 @@
services:
  snowballtools-base-backend:
    image: cerc/snowballtools-base-backend:local
    restart: always
    volumes:
      - data:/data
      - config:/config:ro
    ports:
      - 8000

volumes:
  data:
  config:
@@ -6,6 +6,7 @@ services:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
      CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
      CERC_TEST_PARAM_3: ${CERC_TEST_PARAM_3:-FAILED}
    volumes:
      - test-data-bind:/data
      - test-data-auto:/data2
4
stack_orchestrator/data/container-build/cerc-mars-v2/build.sh
Executable file
@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Build the mars-v2 image
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/mars-v2:local -f ${CERC_REPO_BASE_DIR}/mars-v2-frontend/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/mars-v2-frontend
@@ -0,0 +1,6 @@
FROM cerc/snowballtools-base-backend-base:local

WORKDIR /app/packages/backend
COPY run.sh .

ENTRYPOINT ["./run.sh"]
@@ -0,0 +1,26 @@
FROM ubuntu:22.04 as builder

RUN apt update && \
    apt install -y --no-install-recommends --no-install-suggests \
    ca-certificates curl gnupg

# Node
ARG NODE_MAJOR=20
RUN curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \
    echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list && \
    apt update && apt install -y nodejs

# npm setup
RUN npm config set @cerc-io:registry https://git.vdb.to/api/packages/cerc-io/npm/ && npm install -g yarn

COPY . /app/
WORKDIR /app/

RUN find . -name 'node_modules' | xargs -n1 rm -rf
RUN yarn && yarn build --ignore frontend

FROM cerc/webapp-base:local

COPY --from=builder /app /app

WORKDIR /app/packages/backend
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Build cerc/webapp-deployer-backend

source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

docker build -t cerc/snowballtools-base-backend-base:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile-base ${CERC_REPO_BASE_DIR}/snowballtools-base
docker build -t cerc/snowballtools-base-backend:local ${build_command_args} ${SCRIPT_DIR}
@@ -0,0 +1,19 @@
#!/bin/bash


LACONIC_HOSTED_CONFIG_FILE=${LACONIC_HOSTED_CONFIG_FILE}
if [ -z "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
    if [ -f "/config/laconic-hosted-config.yml" ]; then
        LACONIC_HOSTED_CONFIG_FILE="/config/laconic-hosted-config.yml"
    elif [ -f "/config/config.yml" ]; then
        LACONIC_HOSTED_CONFIG_FILE="/config/config.yml"
    fi
fi

if [ -f "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
    /scripts/apply-webapp-config.sh $LACONIC_HOSTED_CONFIG_FILE "`pwd`/dist"
fi

/scripts/apply-runtime-env.sh "`pwd`/dist"

yarn start
@@ -39,6 +39,15 @@ fi
if [ -n "$CERC_TEST_PARAM_2" ]; then
    echo "Test-param-2: ${CERC_TEST_PARAM_2}"
fi
if [ -n "$CERC_TEST_PARAM_3" ]; then
    echo "Test-param-3: ${CERC_TEST_PARAM_3}"
fi
if [ -n "$CERC_TEST_PARAM_4" ]; then
    echo "Test-param-4: ${CERC_TEST_PARAM_4}"
fi
if [ -n "$CERC_TEST_PARAM_5" ]; then
    echo "Test-param-5: ${CERC_TEST_PARAM_5}"
fi

if [ -d "/config" ]; then
    echo "/config: EXISTS"
@@ -43,7 +43,6 @@ COPY scripts /scripts
# RUN su node -c "npm install -g <your-package-list-here>"

RUN mkdir -p /config
COPY ./config.yml /config

# Install simple web server for now (use nginx perhaps later)
RUN yarn global add http-server
@@ -1 +0,0 @@
# Put config here.
@@ -11,7 +11,13 @@ WORK_DIR="${1:-/app}"
OUTPUT_DIR="${2:-build}"
DEST_DIR="${3:-/data}"

if [ -f "${WORK_DIR}/package.json" ]; then
if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
    echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
    cd "${WORK_DIR}" || exit 1

    rm -rf "${DEST_DIR}"
    ./build-webapp.sh "${DEST_DIR}" || exit 1
elif [ -f "${WORK_DIR}/package.json" ]; then
    echo "Building node-based webapp ..."
    cd "${WORK_DIR}" || exit 1

@@ -10,6 +10,18 @@ if [ "true" == "$CERC_ENABLE_CORS" ]; then
    CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --cors"
fi

/scripts/apply-webapp-config.sh /config/config.yml ${CERC_WEBAPP_FILES_DIR}
LACONIC_HOSTED_CONFIG_FILE=${LACONIC_HOSTED_CONFIG_FILE}
if [ -z "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
    if [ -f "/config/laconic-hosted-config.yml" ]; then
        LACONIC_HOSTED_CONFIG_FILE="/config/laconic-hosted-config.yml"
    elif [ -f "/config/config.yml" ]; then
        LACONIC_HOSTED_CONFIG_FILE="/config/config.yml"
    fi
fi

if [ -f "${LACONIC_HOSTED_CONFIG_FILE}" ]; then
    /scripts/apply-webapp-config.sh $LACONIC_HOSTED_CONFIG_FILE ${CERC_WEBAPP_FILES_DIR}
fi

/scripts/apply-runtime-env.sh ${CERC_WEBAPP_FILES_DIR}
http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT:-80} ${CERC_WEBAPP_FILES_DIR}
@@ -0,0 +1,3 @@
# Container Registry Stack

Host a container image registry
@@ -0,0 +1,5 @@
version: "1.0"
name: container-registry
description: "Container registry stack"
pods:
  - container-registry
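Deploying this stack by hand follows the same laconic-so flow that the test script later in this diff automates; a minimal sketch, assuming a kind/k8s target and hypothetical spec/deployment directory names:

```
laconic-so --stack container-registry setup-repositories
laconic-so --stack container-registry build-containers
laconic-so --stack container-registry deploy --deploy-to k8s-kind init --output container-registry-spec.yml
laconic-so --stack container-registry deploy create --spec-file container-registry-spec.yml --deployment-dir container-registry-deployment
laconic-so deployment --dir container-registry-deployment start
```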
16
stack_orchestrator/data/stacks/mars-v2/README.md
Normal file
@@ -0,0 +1,16 @@
# mars

On a fresh Digital Ocean droplet with Ubuntu:

```
git clone https://github.com/cerc-io/stack-orchestrator
cd stack-orchestrator
./scripts/quick-install-linux.sh
```
Read and follow the instructions output by the script above to complete installation, then:

```
laconic-so --stack mars-v2 setup-repositories
laconic-so --stack mars-v2 build-containers
laconic-so --stack mars-v2 deploy up
```
8
stack_orchestrator/data/stacks/mars-v2/stack.yml
Normal file
@@ -0,0 +1,8 @@
version: "0.1"
name: mars-v2
repos:
  - github.com/mars-protocol/mars-v2-frontend
containers:
  - cerc/mars-v2
pods:
  - mars-v2
@@ -0,0 +1,10 @@
version: "1.0"
name: snowballtools-base-backend
description: "snowballtools-base-backend"
repos:
  - github.com/snowball-tools/snowballtools-base
containers:
  - cerc/webapp-base
  - cerc/snowballtools-base-backend
pods:
  - snowballtools-base-backend
@@ -33,6 +33,7 @@ from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient,


def process_app_deployment_request(
    run_id,
    ctx,
    laconic: LaconicRegistryClient,
    app_deployment_request,
@@ -42,18 +43,8 @@ def process_app_deployment_request(
    deployment_parent_dir,
    kube_config,
    image_registry,
    log_parent_dir
    log_file=None
):
    run_id = f"{app_deployment_request.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
    log_file = None
    if log_parent_dir:
        log_dir = os.path.join(log_parent_dir, app_deployment_request.id)
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        log_file_path = os.path.join(log_dir, f"{run_id}.log")
        print(f"Directing build logs to: {log_file_path}")
        log_file = open(log_file_path, "wt")

    # 1. look up application
    app = laconic.get_record(app_deployment_request.attributes.application, require=True)

@@ -291,10 +282,23 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
    for r in requests_to_execute:
        dump_known_requests(state_file, [r], "DEPLOYING")
        status = "ERROR"
        run_log_file = None
        run_reg_client = laconic
        try:
            run_id = f"{r.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
            if log_dir:
                run_log_dir = os.path.join(log_dir, r.id)
                if not os.path.exists(run_log_dir):
                    os.mkdir(run_log_dir)
                run_log_file_path = os.path.join(run_log_dir, f"{run_id}.log")
                print(f"Directing deployment logs to: {run_log_file_path}")
                run_log_file = open(run_log_file_path, "wt")
                run_reg_client = LaconicRegistryClient(laconic_config, log_file=run_log_file)

            process_app_deployment_request(
                run_id,
                ctx,
                laconic,
                run_reg_client,
                r,
                record_namespace_deployments,
                record_namespace_dns,
@@ -302,8 +306,12 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
                os.path.abspath(deployment_parent_dir),
                kube_config,
                image_registry,
                log_dir
                run_log_file
            )
            status = "DEPLOYED"
        except Exception as e:
            print("ERROR: " + str(e), file=run_log_file)
        finally:
            dump_known_requests(state_file, [r], status)
            if run_log_file:
                run_log_file.close()
@@ -39,13 +39,19 @@ class AttrDict(dict):
        return v


def cmd(*vargs):
def logged_cmd(log_file, *vargs):
    result = None
    try:
        if log_file:
            print(" ".join(vargs), file=log_file)
        result = subprocess.run(vargs, capture_output=True)
        result.check_returncode()
        return result.stdout.decode()
    except Exception as err:
        print(result.stderr.decode())
        if result:
            print(result.stderr.decode(), file=log_file)
        else:
            print(str(err), file=log_file)
        raise err

@@ -58,8 +64,9 @@ def match_owner(recordA, *records):


class LaconicRegistryClient:
    def __init__(self, config_file):
    def __init__(self, config_file, log_file=None):
        self.config_file = config_file
        self.log_file = log_file
        self.cache = AttrDict(
            {
                "name_or_id": {},
@@ -77,7 +84,7 @@ class LaconicRegistryClient:
            args.append("--%s" % k)
            args.append(str(v))

        results = [AttrDict(r) for r in json.loads(cmd(*args))]
        results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))]

        # Most recent records first
        results.sort(key=lambda r: r.createTime)
@@ -115,7 +122,7 @@ class LaconicRegistryClient:

        args = ["laconic", "-c", self.config_file, "cns", "name", "resolve", name]

        parsed = [AttrDict(r) for r in json.loads(cmd(*args))]
        parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))]
        if parsed:
            self._add_to_cache(parsed)
            return parsed[0]
@@ -145,7 +152,7 @@ class LaconicRegistryClient:
            name_or_id,
        ]

        parsed = [AttrDict(r) for r in json.loads(cmd(*args))]
        parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))]
        if len(parsed):
            self._add_to_cache(parsed)
            return parsed[0]
@@ -173,22 +180,22 @@ class LaconicRegistryClient:
            record_file = open(record_fname, 'w')
            yaml.dump(record, record_file)
            record_file.close()
            print(open(record_fname, 'r').read())
            print(open(record_fname, 'r').read(), file=self.log_file)

            new_record_id = json.loads(
                cmd("laconic", "-c", self.config_file, "cns", "record", "publish", "--filename", record_fname)
                logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "record", "publish", "--filename", record_fname)
            )["id"]
            for name in names:
                self.set_name(name, new_record_id)
            return new_record_id
        finally:
            cmd("rm", "-rf", tmpdir)
            logged_cmd(self.log_file, "rm", "-rf", tmpdir)

    def set_name(self, name, record_id):
        cmd("laconic", "-c", self.config_file, "cns", "name", "set", name, record_id)
        logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "name", "set", name, record_id)

    def delete_name(self, name):
        cmd("laconic", "-c", self.config_file, "cns", "name", "delete", name)
        logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "name", "delete", name)


def file_hash(filename):
@@ -227,8 +234,16 @@ def build_container_image(app_record, tag, extra_build_args=[], log_file=None):
        git_env = dict(os.environ.copy())
        # Never prompt
        git_env["GIT_TERMINAL_PROMPT"] = "0"
        try:
            subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env, stdout=log_file, stderr=log_file)
        except Exception as e:
            print(f"git clone failed. Is the repository {repo} private?", file=log_file)
            raise e
        try:
            subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env, stdout=log_file, stderr=log_file)
        except Exception as e:
            print(f"git checkout failed. Does ref {ref} exist?", file=log_file)
            raise e
    else:
        result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=log_file, stderr=log_file)
        result.check_returncode()
@@ -249,7 +264,7 @@ def build_container_image(app_record, tag, extra_build_args=[], log_file=None):
        result = subprocess.run(build_command, stdout=log_file, stderr=log_file)
        result.check_returncode()
    finally:
        cmd("rm", "-rf", tmpdir)
        logged_cmd(log_file, "rm", "-rf", tmpdir)


def push_container_image(deployment_dir, log_file=None):
146
tests/container-registry/run-test.sh
Executable file
@@ -0,0 +1,146 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
  # Dump environment variables for debugging
  echo "Environment variables:"
  env
fi

stack="container-registry"

# Helper functions: TODO move into a separate file
wait_for_pods_started () {
  for i in {1..50}
  do
    local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps )

    if [[ "$ps_output" == *"Running containers:"* ]]; then
      # if ready, return
      return
    else
      # if not ready, wait
      sleep 5
    fi
  done
  # Timed out, error exit
  echo "waiting for pods to start: FAILED"
  delete_cluster_exit
}

wait_for_log_output () {
  for i in {1..50}
  do

    local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )

    if [[ ! -z "$log_output" ]]; then
      # if ready, return
      return
    else
      # if not ready, wait
      sleep 5
    fi
  done
  # Timed out, error exit
  echo "waiting for pods log content: FAILED"
  delete_cluster_exit
}


delete_cluster_exit () {
  $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
  exit 1
}
# Note: eventually this test should be folded into ../deploy/
# but keeping it separate for now for convenience
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
$TEST_TARGET_SO --stack ${stack} setup-repositories
$TEST_TARGET_SO --stack ${stack} build-containers
# Test basic stack-orchestrator deploy to k8s
test_deployment_dir=$CERC_REPO_BASE_DIR/${stack}-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/${stack}-deployment-spec.yml
$TEST_TARGET_SO --stack ${stack} deploy --deploy-to k8s-kind init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
  echo "deploy init test: spec file not present"
  echo "deploy init test: FAILED"
  exit 1
fi
echo "deploy init test: passed"

# Switch to a full path for bind mount.
volume_name="registry-data"
sed -i "s|^\(\s*${volume_name}:$\)$|\1 ${test_deployment_dir}/data/${volume_name}|" $test_deployment_spec

# Add ingress config to the spec file
ed $test_deployment_spec <<IngressSpec
/network:/
a
  http-proxy:
    - host-name: localhost
      routes:
        - path: /
          proxy-to: registry:5000
.
w
q
IngressSpec

$TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
  echo "deploy create test: deployment directory not present"
  echo "deploy create test: FAILED"
  exit 1
fi
echo "deploy create test: passed"

# Note: this isn't strictly necessary, except that otherwise we end up trying to push the image into
# the kind cluster and that fails because it can't be found locally
docker pull registry:2.8

# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
# Check logs command works
wait_for_log_output
sleep 1
log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_3" == *"listening on"* ]]; then
  echo "deployment logs test: passed"
else
  echo "deployment logs test: FAILED"
  echo $log_output_3
  delete_cluster_exit
fi

# Check that we can use the registry
# Note: since this pulls from the DockerCo registry without auth it's possible it'll run into rate limiting issues
docker pull hello-world
docker tag hello-world localhost:80/hello-world
docker push localhost:80/hello-world
# Then do a quick check that we actually pushed something there
# See: https://stackoverflow.com/questions/31251356/how-to-get-a-list-of-images-on-docker-registry-v2
registry_response=$(curl -s -X GET http://localhost:80/v2/_catalog)
if [[ "$registry_response" == *"{\"repositories\":[\"hello-world\"]}"* ]]; then
  echo "registry content test: passed"
else
  echo "registry content test: FAILED"
  echo $registry_response
  delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"
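A further check that could be added here, using another standard registry v2 API endpoint, is listing the tags of the pushed image; a sketch (the expected shape of the response is an assumption based on the registry v2 API, not output captured from this test):

```
# List tags for the hello-world repository in the hosted registry
curl -s http://localhost:80/v2/hello-world/tags/list
# A successful push should yield something like: {"name":"hello-world","tags":["latest"]}
```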
@@ -83,7 +83,7 @@ $TEST_TARGET_SO --stack test deploy down --delete-volumes
# Basic test of creating a deployment
test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
$TEST_TARGET_SO --stack test deploy init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED
$TEST_TARGET_SO --stack test deploy init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED,CERC_TEST_PARAM_3=FAST
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
  echo "deploy init test: spec file not present"
@@ -141,6 +141,13 @@ else
  echo "deployment compose config test: FAILED"
  exit 1
fi
# Check the config variable CERC_TEST_PARAM_3 was passed correctly
if [[ "$log_output_3" == *"Test-param-3: FAST"* ]]; then
  echo "deployment config test: passed"
else
  echo "deployment config test: FAILED"
  exit 1
fi

# Check that the ConfigMap is mounted and contains the expected content.
log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )