forked from cerc-io/stack-orchestrator

Compare commits: zach/birbi...main (55 commits)
01f9fe67ed
049ffcff71
f5314a979b
39f4fa4487
0b0394a940
37b9500483
3c3e582939
26d265360d
f81b78cfbc
d9bb6b3588
b59beb66eb
65d67dba10
b22c72e715
c9444591f5
903f3b10e2
72ed2eb91a
2104eb5f30
afd6be3b13
f914baa913
8be1e684e8
5d16251ce9
3309782439
4b3b3478e7
2a9955055c
8964e1c0fe
d2ebb81d77
4a981d8d2e
88a0236ca9
937b983ec9
bfbcfb7904
3d5ececba5
6848fc33cf
36bb068983
25a2b70f2c
2fcd416e29
6629017d6a
1c30441000
b398050787
12ec1bec43
62af03077f
098567625a
428b05158e
a750b645b9
23ee3e19b7
2d764fc7d0
b7f215d9bf
eca52b10b7
b9128841e4
0a302ea555
aa0f60baa1
cef73d8de2
7d0f2adb46
5fdee25dc1
554f05de87
b4fbee9b13
@@ -6,6 +6,8 @@ on:
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '2 14 * * *'

# Needed until we can incorporate docker startup into the executor container
env:
@@ -6,11 +6,8 @@ on:
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-laconicd-test'

# Needed until we can incorporate docker startup into the executor container
env:
  DOCKER_HOST: unix:///var/run/dind.sock

  schedule:
    - cron: '1 13 * * *'

jobs:
  test:
@@ -47,9 +44,5 @@ jobs:
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: Start dockerd # Also needed until we can incorporate into the executor
        run: |
          dockerd -H $DOCKER_HOST --userland-proxy=false &
          sleep 5
      - name: "Run fixturenet-laconicd tests"
        run: ./tests/fixturenet-laconicd/run-test.sh
.gitea/workflows/lint.yml (new file, 21 lines)

@@ -0,0 +1,21 @@
name: Lint Checks

on:
  pull_request:
    branches: '*'
  push:
    branches: '*'

jobs:
  test:
    name: "Run linter"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      - name: "Install Python"
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Run flake8"
        uses: py-actions/flake8@v2
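To get the same signal locally before pushing, the lint job's check can be reproduced with a plain flake8 run. This is a rough local equivalent, assuming Python 3.8+ is available and that flake8 picks up any project configuration from the repository root:

```bash
# Run the same linter the CI job uses (py-actions/flake8 wraps flake8),
# against the working tree.
pip install flake8
flake8 .
```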
.gitea/workflows/test-container-registry.yml (new file, 54 lines)

@@ -0,0 +1,54 @@
name: Container Registry Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-container-registry'
      - '.gitea/workflows/test-container-registry.yml'
      - 'tests/container-registry/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '6 19 * * *'

jobs:
  test:
    name: "Run container registry hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below work around this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Install ed" # Only needed until we remove the need to edit the spec file
        run: apt update && apt install -y ed
      - name: "Run container registry deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/container-registry/run-test.sh
.gitea/workflows/test-database.yml (new file, 52 lines)

@@ -0,0 +1,52 @@
name: Database Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-database'
      - '.gitea/workflows/test-database.yml'
      - 'tests/database/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '5 18 * * *'

jobs:
  test:
    name: "Run database hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below work around this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Run database deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/database/run-test.sh
@@ -1,16 +1,22 @@
name: K8s Deploy Test

on:
  pull_request:
    branches: '*'
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-laconicd-test'
      - '.gitea/workflows/triggers/test-k8s-deploy'
      - '.gitea/workflows/test-k8s-deploy.yml'
      - 'tests/k8s-deploy/run-deploy-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '3 15 * * *'

jobs:
  test:
    name: "Run deploy test suite on kind/k8s"
    runs-on: ubuntu-22.04-with-syn-ethdb
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
@@ -34,9 +40,15 @@ jobs:
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Run k8s deployment test"
        run: ./tests/k8s-deploy/run-deploy-test.sh
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/k8s-deploy/run-deploy-test.sh
@@ -1,2 +1,3 @@
Change this file to trigger running the fixturenet-eth-plugeth-test CI job
trigger
trigger
@@ -1,2 +1,3 @@
Change this file to trigger running the fixturenet-laconicd-test CI job

Trigger
Trigger
.gitea/workflows/triggers/test-container-registry (new file, 1 line)

@@ -0,0 +1 @@
Change this file to trigger running the test-container-registry CI job
.gitea/workflows/triggers/test-database (new file, 2 lines)

@@ -0,0 +1,2 @@
Change this file to trigger running the test-database CI job
Trigger test run
@@ -1 +1,2 @@
Change this file to trigger running the test-k8s-deploy CI job
Trigger test on PR branch
@@ -29,10 +29,10 @@ chmod +x ~/.docker/cli-plugins/docker-compose
Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.

Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:

```bash
curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
```

Give it execute permissions:
@@ -52,7 +52,7 @@ Version: 1.1.0-7a607c2-202304260513
Save the distribution url to `~/.laconic-so/config.yml`:
```bash
mkdir ~/.laconic-so
echo "distribution-url: https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so" > ~/.laconic-so/config.yml
echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml
```

### Update
@@ -26,7 +26,7 @@ In addition to the pre-requisites listed in the [README](/README.md), the follow
1. Clone this repository:
   ```
   $ git clone https://github.com/cerc-io/stack-orchestrator.git
   $ git clone https://git.vdb.to/cerc-io/stack-orchestrator.git
   ```

2. Enter the project directory:
@@ -1,10 +1,10 @@
# Adding a new stack

See [this PR](https://github.com/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://github.com/cerc-io/stack-orchestrator/pull/435) is another good example.
See [this PR](https://git.vdb.to/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://git.vdb.to/cerc-io/stack-orchestrator/pull/435) is another good example.

For external developers, we recommend forking this repo and adding your stack directly to your fork. This initially requires running in "developer mode" as described [here](/docs/CONTRIBUTING.md). Check out the [Namada stack](https://github.com/vknowable/stack-orchestrator/blob/main/app/data/stacks/public-namada/digitalocean_quickstart.md) from Knowable to see how that is done.

Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://github.com/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack.
Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack.

## Example
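As a rough sketch of what a fork-based stack addition involves, the commands below scaffold a hypothetical stack named `my-stack`. The directory path mirrors where stack data lives in this repo, and every field except `containers` (which the build code reads from the parsed stack config) is an illustrative assumption rather than a documented schema:

```bash
# Hypothetical minimal stack definition inside a fork. The path and the
# fields other than "containers" are assumptions for illustration only.
mkdir -p stack_orchestrator/data/stacks/my-stack
cat > stack_orchestrator/data/stacks/my-stack/stack.yml <<EOF
name: my-stack
containers:
  - cerc/my-app
EOF
```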
@@ -1,6 +1,6 @@
# Specification

Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://github.com/cerc-io/stack-orchestrator/issues/315).
Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315).

## Implementation
@@ -10,3 +10,4 @@ pydantic==1.10.9
tomli==2.0.1
validators==0.22.0
kubernetes>=28.1.0
humanfriendly>=10.0
@@ -41,4 +41,4 @@ runcmd:
  - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
  - systemctl enable docker
  - systemctl start docker
  - git clone https://github.com/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator
  - git clone https://git.vdb.to/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator
@@ -31,5 +31,5 @@ runcmd:
  - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
  - systemctl enable docker
  - systemctl start docker
  - curl -L -o /usr/local/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
  - curl -L -o /usr/local/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
  - chmod +x /usr/local/bin/laconic-so
scripts/quick-deploy-test.sh (new executable file, 19 lines)

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Beginnings of a script to quickly spin up and test a deployment
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
    set -x
fi
if [[ -n "$1" ]]; then
    stack_name=$1
else
    stack_name="test"
fi
spec_file_name="${stack_name}-spec.yml"
deployment_dir_name="${stack_name}-deployment"
rm -f ${spec_file_name}
rm -rf ${deployment_dir_name}
laconic-so --stack ${stack_name} deploy --deploy-to compose init --output ${spec_file_name}
laconic-so --stack ${stack_name} deploy --deploy-to compose create --deployment-dir ${deployment_dir_name} --spec-file ${spec_file_name}
#laconic-so deployment --dir ${deployment_dir_name} start
#laconic-so deployment --dir ${deployment_dir_name} ps
#laconic-so deployment --dir ${deployment_dir_name} stop
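A possible way to exercise this script end to end, assuming `laconic-so` is on PATH and a stack named `test` is known to it (the script's default): run it, then drive the lifecycle with the commands it leaves commented out at the bottom.

```bash
# Generate a spec and create a deployment for the default "test" stack,
# then start, inspect, and stop it by hand.
./scripts/quick-deploy-test.sh test
laconic-so deployment --dir test-deployment start
laconic-so deployment --dir test-deployment ps
laconic-so deployment --dir test-deployment stop
```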
@@ -137,7 +137,7 @@ fi
echo "**************************************************************************************"
echo "Installing laconic-so"
# install latest `laconic-so`
distribution_url=https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
distribution_url=https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
install_filename=${install_dir}/laconic-so
mkdir -p ${install_dir}
curl -L -o ${install_filename} ${distribution_url}
setup.py (2 lines changed)

@@ -13,7 +13,7 @@ setup(
    description='Orchestrates deployment of the Laconic stack',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/cerc-io/stack-orchestrator',
    url='https://git.vdb.to/cerc-io/stack-orchestrator',
    py_modules=['stack_orchestrator'],
    packages=find_packages(),
    install_requires=[requirements],
@@ -27,12 +27,13 @@ import subprocess
import click
import importlib.resources
from pathlib import Path
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external
from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external, warn_exit
from stack_orchestrator.base import get_npm_registry_url

# TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"


def make_container_build_env(dev_root_path: str,
                             container_build_dir: str,
                             debug: bool,
@@ -104,6 +105,9 @@ def process_container(stack: str,
    build_command = os.path.join(container_build_dir,
                                 "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
    if not dry_run:
        # No PATH at all causes failures with podman.
        if "PATH" not in container_build_env:
            container_build_env["PATH"] = os.environ["PATH"]
        if verbose:
            print(f"Executing: {build_command} with environment: {container_build_env}")
        build_result = subprocess.run(build_command, shell=True, env=container_build_env)
@@ -119,6 +123,7 @@ def process_container(stack: str,
    else:
        print("Skipped")


@click.command()
@click.option('--include', help="only build these containers")
@click.option('--exclude', help="don\'t build these containers")
@@ -159,6 +164,8 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
    containers_in_scope = []
    if stack:
        stack_config = get_parsed_stack_config(stack)
        if "containers" not in stack_config or stack_config["containers"] is None:
            warn_exit(f"stack {stack} does not define any containers")
        containers_in_scope = stack_config['containers']
    else:
        containers_in_scope = all_containers
@@ -25,10 +25,11 @@ from decouple import config
import click
from pathlib import Path
from stack_orchestrator.build import build_containers
from stack_orchestrator.deploy.webapp.util import determine_base_container


@click.command()
@click.option('--base-container', default="cerc/nextjs-base")
@click.option('--base-container')
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
@click.option("--extra-build-args", help="Supply extra arguments to build")
@@ -57,6 +58,9 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
    if not quiet:
        print(f'Dev Root is: {dev_root_path}')

    if not base_container:
        base_container = determine_base_container(source_repo)

    # First build the base container.
    container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
                                                                    force_rebuild, extra_build_args)
@@ -64,13 +68,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
    build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet,
                                       verbose, dry_run, continue_on_error)


    # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
    container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
    container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
    container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
                                                                          base_container.replace("/", "-"),
                                                                          "Dockerfile.webapp")
    if not tag:
        webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
        container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local"
@@ -27,6 +27,12 @@ kube_config_key = "kube-config"
deploy_to_key = "deploy-to"
network_key = "network"
http_proxy_key = "http-proxy"
image_resigtry_key = "image-registry"
image_registry_key = "image-registry"
configmaps_key = "configmaps"
resources_key = "resources"
volumes_key = "volumes"
security_key = "security"
annotations_key = "annotations"
labels_key = "labels"
kind_config_filename = "kind-config.yml"
kube_config_filename = "kubeconfig.yml"
@@ -0,0 +1,13 @@
services:
  registry:
    image: registry:2.8
    restart: always
    environment:
      REGISTRY_LOG_LEVEL: ${REGISTRY_LOG_LEVEL}
    volumes:
      - registry-data:/var/lib/registry
    ports:
      - "5000"

volumes:
  registry-data:
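Because the compose file exposes port 5000 without pinning a host port, a quick smoke test has to resolve the ephemeral mapping first. The sketch below uses `docker compose port` and the registry's standard `/v2/_catalog` endpoint; the service name comes from the file above, and the empty-catalog response shown is only what a fresh registry would return:

```bash
# Resolve the host port mapped to the registry's container port 5000,
# then list repositories via the Docker Registry HTTP API v2.
hostport=$(docker compose port registry 5000)
curl -s "http://${hostport}/v2/_catalog"   # {"repositories":[]} when empty
```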
@@ -2,7 +2,7 @@ services:
  laconicd:
    restart: unless-stopped
    image: cerc/laconicd:local
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
    command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
    volumes:
      # The cosmos-sdk node's database directory:
      - laconicd-data:/root/.laconicd
@@ -15,12 +15,12 @@ services:
      - "6060"
      - "26657"
      - "26656"
      - "9473:9473"
      - "9473"
      - "8545"
      - "8546"
      - "9090"
      - "9091"
      - "1317:1317"
      - "1317"
  cli:
    image: cerc/laconic-registry-cli:local
    volumes:
@@ -1,8 +0,0 @@
version: "3.2"

services:
  geojson:
    image: cerc/geojson:local
    restart: always
    ports:
      - 0.0.0.0:8080:8080
stack_orchestrator/data/compose/docker-compose-mars-v2.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
version: "3.2"

services:
  mars:
    image: cerc/mars-v2:local
    restart: always
    ports:
      - "3000:3000"
    environment:
      - URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com
      - URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com
      - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
stack_orchestrator/data/compose/docker-compose-mars.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
version: "3.2"

services:
  mars:
    image: cerc/mars:local
    restart: always
    ports:
      - "3000:3000"
    environment:
      - URL_OSMOSIS_GQL=https://osmosis-node.marsprotocol.io/GGSFGSFGFG34/osmosis-hive-front/graphql
      - URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com
      - URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com
      - URL_NEUTRON_GQL=https://neutron.rpc.p2p.world/qgrnU6PsQZA8F9S5Fb8Fn3tV3kXmMBl2M9bcc9jWLjQy8p/hive/graphql
      - URL_NEUTRON_REST=https://rest-kralum.neutron-1.neutron.org
      - URL_NEUTRON_RPC=https://rpc-kralum.neutron-1.neutron.org
      - URL_NEUTRON_TEST_GQL=https://testnet-neutron-gql.marsprotocol.io/graphql
      - URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
      - URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
      - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
@@ -0,0 +1,8 @@
version: "3.2"

services:
  ping-pub:
    image: cerc/ping-pub:local
    restart: always
    ports:
      - "5173:5173"
@@ -0,0 +1,20 @@
services:

  database:
    image: cerc/test-database-container:local
    restart: always
    volumes:
      - db-data:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: "test-user"
      POSTGRES_DB: "test-db"
      POSTGRES_PASSWORD: "password"
      POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
    ports:
      - "5432"

  test-client:
    image: cerc/test-database-client:local

volumes:
  db-data:
@@ -5,10 +5,15 @@ services:
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
      CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
    volumes:
      - test-data:/data
      - test-data-bind:/data
      - test-data-auto:/data2
      - test-config:/config:ro
    ports:
      - "80"

volumes:
  test-data:
  test-data-bind:
  test-data-auto:
  test-config:
@@ -5,4 +5,4 @@ services:
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
    ports:
      - "3000"
      - "80"
@@ -1,4 +0,0 @@
#!/usr/bin/env bash
# Build the birbit image
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/birbit:local -f ${CERC_REPO_BASE_DIR}/birbit/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/birbit
@@ -10,9 +10,10 @@ COPY genesis /opt/genesis
COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
COPY --from=ethgen /apps /apps
RUN cd /apps/el-gen && pip3 install -r requirements.txt
RUN cd /apps/el-gen && pip3 install --break-system-packages -r requirements.txt
# web3==5.24.0 used by el-gen is broken on python 3.11
RUN pip3 install --upgrade "web3==6.5.0"
RUN pip3 install --break-system-packages --upgrade "web3==6.5.0"
RUN pip3 install --break-system-packages --upgrade "typing-extensions"

# Build genesis config
RUN apk add --no-cache make bash envsubst jq
@@ -1,11 +0,0 @@
FROM node:14

WORKDIR /app

COPY . .

RUN npm install && npm run build

EXPOSE 8080

CMD ["npm", "run", "serve"]
@@ -1,5 +0,0 @@
#!/usr/bin/env bash
# Build the geojson image
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/geojson:local -f ${CERC_CONTAINER_BASE_DIR}/cerc-geojson/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/geojson.io
#docker build -t cerc/geojson:local -f ${CERC_REPO_BASE_DIR}/geojson.io/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/geojson.io
stack_orchestrator/data/container-build/cerc-mars-v2/build.sh (new executable file, 4 lines)

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Build the mars-v2 image
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/mars-v2:local -f ${CERC_REPO_BASE_DIR}/mars-v2-frontend/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/mars-v2-frontend
stack_orchestrator/data/container-build/cerc-mars/build.sh (new executable file, 4 lines)

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Build the mars image
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/mars:local -f ${CERC_REPO_BASE_DIR}/mars-interface/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/mars-interface
@@ -30,13 +30,13 @@ RUN \

# [Optional] Uncomment this section to install additional OS packages.
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install --no-install-recommends jq gettext-base
    && apt-get -y install --no-install-recommends jq gettext-base procps

# [Optional] Uncomment if you want to install more global node modules
# RUN su node -c "npm install -g <your-package-list-here>"

# Expose port for http
EXPOSE 3000
EXPOSE 80

COPY /scripts /scripts
@@ -33,8 +33,8 @@ if [ -f ".env" ]; then
    rm -f $TMP_ENV
fi

for f in $(find "$TRG_DIR" -regex ".*.[tj]sx?$" -type f | grep -v 'node_modules'); do
    for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '[{},();]' '\n' | egrep -o '^"CERC_RUNTIME_ENV_[^\"]+"'); do
for f in $(find . -type f \( -regex '.*.html?' -or -regex ".*.[tj]s\(x\|on\)?$" \) | grep -v 'node_modules' | grep -v '.git'); do
    for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '["/\\{},();]' '\n' | tr -s "[']" '\n' | egrep -o -e '^CERC_RUNTIME_ENV_.+$' -e '^LACONIC_HOSTED_CONFIG_.+$'); do
        orig_name=$(echo -n "${e}" | sed 's/"//g')
        cur_name=$(echo -n "${orig_name}" | sed 's/CERC_RUNTIME_ENV_//g')
        cur_val=$(echo -n "\$${cur_name}" | envsubst)
@@ -58,4 +58,4 @@ if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
    fi
fi

$CERC_BUILD_TOOL start . -p ${CERC_LISTEN_PORT:-3000}
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
stack_orchestrator/data/container-build/cerc-ping-pub/build.sh (new executable file, 5 lines)

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Build the ping pub image
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

docker build -t cerc/ping-pub:local ${build_command_args} -f $CERC_REPO_BASE_DIR/explorer/Dockerfile $CERC_REPO_BASE_DIR/explorer
@@ -1,21 +1,58 @@
#!/usr/bin/env bash
set -e

if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi
# Test if the container's filesystem is old (run previously) or new
EXISTSFILENAME=/data/exists

echo "Test container starting"
if [[ -f "$EXISTSFILENAME" ]];
then
    TIMESTAMP=`cat $EXISTSFILENAME`
    echo "Filesystem is old, created: $TIMESTAMP"

DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
if [[ -n "$DATA_DEVICE" ]]; then
    echo "/data: MOUNTED dev=${DATA_DEVICE}"
else
    echo "Filesystem is fresh"
    echo `date` > $EXISTSFILENAME
    echo "/data: not mounted"
fi

DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
if [[ -n "$DATA2_DEVICE" ]]; then
    echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
else
    echo "/data2: not mounted"
fi

# Test if the container's filesystem is old (run previously) or new
for d in /data /data2; do
    if [[ -f "$d/exists" ]];
    then
        TIMESTAMP=`cat $d/exists`
        echo "$d filesystem is old, created: $TIMESTAMP"
    else
        echo "$d filesystem is fresh"
        echo `date` > $d/exists
    fi
done

if [ -n "$CERC_TEST_PARAM_1" ]; then
    echo "Test-param-1: ${CERC_TEST_PARAM_1}"
fi
if [ -n "$CERC_TEST_PARAM_2" ]; then
    echo "Test-param-2: ${CERC_TEST_PARAM_2}"
fi

if [ -d "/config" ]; then
    echo "/config: EXISTS"
    for f in /config/*; do
        if [[ -f "$f" ]] || [[ -L "$f" ]]; then
            echo "$f:"
            cat "$f"
            echo ""
            echo ""
        fi
    done
else
    echo "/config: does NOT EXIST"
fi

# Run nginx which will block here forever
/usr/sbin/nginx -g "daemon off;"
@@ -0,0 +1,12 @@
FROM ubuntu:latest

RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \
    apt-get install -y software-properties-common && \
    apt-get install -y postgresql-client && \
    apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

EXPOSE 80

COPY run.sh /app/run.sh

ENTRYPOINT ["/app/run.sh"]
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Build cerc/test-database-client
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
stack_orchestrator/data/container-build/cerc-test-database-client/run.sh (new executable file, 71 lines)

@@ -0,0 +1,71 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

# TODO derive this from config
database_url="postgresql://test-user:password@localhost:5432/test-db"
psql_command="psql ${database_url}"
program_name="Database test client:"

wait_for_database_up () {
    for i in {1..50}
    do
        # Run psql in a condition context so a failed probe doesn't trip set -e
        if ${psql_command} -c "select 1;"; then
            # if ready, return
            echo "${program_name} database up"
            return
        else
            # if not ready, wait
            echo "${program_name} waiting for database: ${i}"
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "${program_name} waiting for database: FAILED"
    exit 1
}

# Used to synchronize with the test runner
notify_test_complete () {
    echo "${program_name} test complete"
}

does_test_data_exist () {
    query_result=$(${psql_command} -t -c "select count(*) from test_table_1 where key_column = 'test_key_1';" | head -1 | tr -d ' ')
    if [[ "${query_result}" == "1" ]]; then
        return 0
    else
        return 1
    fi
}

create_test_data () {
    ${psql_command} -c "create table test_table_1 (key_column text, value_column text, primary key(key_column));"
    ${psql_command} -c "insert into test_table_1 values ('test_key_1', 'test_value_1');"
}

wait_forever() {
    # Loop to keep docker/k8s happy since this is the container entrypoint
    while :; do sleep 600; done
}

wait_for_database_up

# Check if the test database content exists already
if does_test_data_exist; then
    # If so, log saying so. Test harness will look for this log output
    echo "${program_name} test data already exists"
else
    # Otherwise log saying the content was not present
    echo "${program_name} test data does not exist"
    echo "${program_name} creating test data"
    # then create it
    create_test_data
fi

notify_test_complete
wait_forever
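The `notify_test_complete` line is the synchronization point the comments describe; a harness could poll for it along these lines (the container name is a placeholder, not one this stack defines):

```bash
# Wait for the client's completion marker in its logs, then check
# whether the data pre-existed. "test-client-1" is hypothetical;
# substitute the real container name from `docker ps`.
until docker logs test-client-1 2>&1 | grep -q "test complete"; do
    sleep 2
done
docker logs test-client-1 2>&1 | grep "test data"
```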
@@ -0,0 +1,3 @@
FROM postgres:16-bullseye

EXPOSE 5432
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Build cerc/test-database-container
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/test-database-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
@@ -1,6 +1,6 @@
# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile
# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster
ARG VARIANT=18-bullseye
ARG VARIANT=20-bullseye
FROM node:${VARIANT}

ARG USERNAME=node
@@ -28,7 +28,7 @@ RUN \

# [Optional] Uncomment this section to install additional OS packages.
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install --no-install-recommends jq
    && apt-get -y install --no-install-recommends jq gettext-base

# [Optional] Uncomment if you want to install an additional version of node using nvm
# ARG EXTRA_NODE_VERSION=10
@@ -37,9 +37,7 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# We do this to get a yq binary from the published container, for the correct architecture we're building here
COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq

RUN mkdir -p /scripts
COPY ./apply-webapp-config.sh /scripts
COPY ./start-serving-app.sh /scripts
COPY scripts /scripts

# [Optional] Uncomment if you want to install more global node modules
# RUN su node -c "npm install -g <your-package-list-here>"
@@ -0,0 +1,11 @@
FROM cerc/webapp-base:local as builder

ARG CERC_BUILD_TOOL

WORKDIR /app
COPY . .
RUN rm -rf node_modules build .next*
RUN /scripts/build-app.sh /app build /data

FROM cerc/webapp-base:local
COPY --from=builder /data /data
@@ -1,9 +1,29 @@
#!/usr/bin/env bash
# Build cerc/laconic-registry-cli
# Build cerc/webapp-base

source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

docker build -t cerc/webapp-base:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile ${SCRIPT_DIR}
CERC_CONTAINER_BUILD_WORK_DIR=${CERC_CONTAINER_BUILD_WORK_DIR:-$SCRIPT_DIR}
CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/Dockerfile}
CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/webapp-base:local}

docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR

if [ $? -eq 0 ] && [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/webapp-base:local" ]; then
    cat <<EOF

#################################################################

Built host container for $CERC_CONTAINER_BUILD_WORK_DIR with tag:

    $CERC_CONTAINER_BUILD_TAG

To test locally run:

    laconic-so run-webapp --image $CERC_CONTAINER_BUILD_TAG --env-file /path/to/environment.env

EOF
fi
@@ -0,0 +1,33 @@
#!/bin/bash

if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

WORK_DIR="${1:-./}"

cd "${WORK_DIR}" || exit 1

if [ -f ".env" ]; then
    TMP_ENV=`mktemp`
    declare -px > $TMP_ENV
    set -a
    source .env
    source $TMP_ENV
    set +a
    rm -f $TMP_ENV
fi

for f in $(find . -type f \( -regex '.*.html?' -or -regex ".*.[tj]s\(x\|on\)?$" \) | grep -v 'node_modules' | grep -v '.git'); do
    for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '["/\\{},();]' '\n' | tr -s "[']" '\n' | egrep -o -e '^CERC_RUNTIME_ENV_.+$' -e '^LACONIC_HOSTED_CONFIG_.+$'); do
        orig_name=$(echo -n "${e}" | sed 's/"//g')
        cur_name=$(echo -n "${orig_name}" | sed 's/CERC_RUNTIME_ENV_//g')
        cur_val=$(echo -n "\$${cur_name}" | envsubst)
        if [ "$CERC_RETAIN_ENV_QUOTES" != "true" ]; then
            cur_val=$(sed "s/^[\"']//" <<< "$cur_val" | sed "s/[\"']//")
        fi
        esc_val=$(sed 's/[&/\]/\\&/g' <<< "$cur_val")
        echo "$f: $cur_name=$cur_val"
        sed -i "s/$orig_name/$esc_val/g" $f
    done
done
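To see what the loop above actually does, here is a small demonstration under assumed paths: a `CERC_RUNTIME_ENV_<NAME>` token embedded in a source file is rewritten to the current value of `$<NAME>`:

```bash
# Assumes the script above is saved on PATH as apply-runtime-env.sh;
# in the webapp-base container it lives at /scripts/apply-runtime-env.sh.
mkdir -p /tmp/webapp
echo 'const url = "CERC_RUNTIME_ENV_API_URL";' > /tmp/webapp/config.js
export API_URL=https://example.com/api
apply-runtime-env.sh /tmp/webapp
cat /tmp/webapp/config.js   # const url = "https://example.com/api";
```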
@@ -0,0 +1,42 @@
#!/bin/bash

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

CERC_BUILD_TOOL="${CERC_BUILD_TOOL}"
WORK_DIR="${1:-/app}"
OUTPUT_DIR="${2:-build}"
DEST_DIR="${3:-/data}"

if [ -f "${WORK_DIR}/package.json" ]; then
    echo "Building node-based webapp ..."
    cd "${WORK_DIR}" || exit 1

    if [ -z "$CERC_BUILD_TOOL" ]; then
        if [ -f "yarn.lock" ]; then
            CERC_BUILD_TOOL=yarn
        else
            CERC_BUILD_TOOL=npm
        fi
    fi

    $CERC_BUILD_TOOL install || exit 1
    $CERC_BUILD_TOOL build || exit 1

    rm -rf "${DEST_DIR}"
    mv "${WORK_DIR}/${OUTPUT_DIR}" "${DEST_DIR}"
else
    echo "Copying static app ..."
    mv "${WORK_DIR}" "${DEST_DIR}"
fi

# One special fix ...
cd "${DEST_DIR}"
for f in $(find . -type f -name '*.htm*'); do
    sed -i -e 's#/LACONIC_HOSTED_CONFIG_homepage/#LACONIC_HOSTED_CONFIG_homepage/#g' "$f"
done

exit 0
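This is the script the `Dockerfile.webapp` builder stage above invokes as `/scripts/build-app.sh /app build /data`; a local invocation might look like the following, with illustrative paths:

```bash
# Build a webapp checkout the way the builder stage does: source tree,
# build output subdirectory, destination. The paths are illustrative.
./build-app.sh ~/src/my-webapp build /tmp/webapp-dist
```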
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"
CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"

if [ "true" == "$CERC_ENABLE_CORS" ]; then
    CERC_HTTP_EXTRA_ARGS="$CERC_HTTP_EXTRA_ARGS --cors"
fi

/scripts/apply-webapp-config.sh /config/config.yml ${CERC_WEBAPP_FILES_DIR}
/scripts/apply-runtime-env.sh ${CERC_WEBAPP_FILES_DIR}
http-server $CERC_HTTP_EXTRA_ARGS -p ${CERC_LISTEN_PORT:-80} ${CERC_WEBAPP_FILES_DIR}
@@ -1,9 +0,0 @@
#!/usr/bin/env bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"

/scripts/apply-webapp-config.sh /config/config.yml ${CERC_WEBAPP_FILES_DIR}
http-server -p 80 ${CERC_WEBAPP_FILES_DIR}
@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Build cerc/webapp-deployer-backend

source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

docker build -t cerc/webapp-deployer-backend:local ${build_command_args} ${CERC_REPO_BASE_DIR}/webapp-deployment-status-api
@@ -0,0 +1,673 @@
# from: https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
# via: https://kind.sigs.k8s.io/docs/user/ingress/#ingress-nginx
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resourceNames:
  - ingress-nginx-leader
  resources:
  - leases
  verbs:
  - get
  - update
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission
rules:
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - validatingwebhookconfigurations
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: v1
data:
  allow-snippet-annotations: "false"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-controller
  namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: NodePort
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: https
    name: https-webhook
    port: 443
    targetPort: webhook
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.9.6
    spec:
      containers:
      - args:
        - /nginx-ingress-controller
        - --election-id=ingress-nginx-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        - --watch-ingress-without-class=true
        - --publish-status-address=localhost
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: registry.k8s.io/ingress-nginx/controller:v1.9.6@sha256:1405cc613bd95b2c6edd8b2a152510ae91c7e62aea4698500d23b2145960ab9c
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          hostPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          hostPort: 443
          name: https
          protocol: TCP
        - containerPort: 8443
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: false
          runAsNonRoot: true
          runAsUser: 101
          seccompProfile:
            type: RuntimeDefault
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        ingress-ready: "true"
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 0
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Equal
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Equal
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.9.6
      name: ingress-nginx-admission-create
    spec:
      containers:
      - args:
        - create
        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
        - --namespace=$(POD_NAMESPACE)
        - --secret-name=ingress-nginx-admission
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06@sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084
        imagePullPolicy: IfNotPresent
        name: create
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 65532
          seccompProfile:
            type: RuntimeDefault
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.9.6
      name: ingress-nginx-admission-patch
    spec:
      containers:
      - args:
        - patch
        - --webhook-name=ingress-nginx-admission
        - --namespace=$(POD_NAMESPACE)
        - --patch-mutating=false
        - --secret-name=ingress-nginx-admission
        - --patch-failure-policy=Fail
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06@sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084
        imagePullPolicy: IfNotPresent
        name: patch
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 65532
          seccompProfile:
            type: RuntimeDefault
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.6
  name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
      path: /networking/v1/ingresses
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: validate.nginx.ingress.kubernetes.io
  rules:
  - apiGroups:
||||
- networking.k8s.io
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- ingresses
|
||||
sideEffects: None
|
@ -1,3 +0,0 @@
# birbit

takes geo-tagged images, publishes the metadata on chain, then generates a geojson front end from that geo data
@ -1,19 +0,0 @@
version: "0.1"
name: birbit
repos:
  #- github.com/zramsay/birbit
  - github.com/mapbox/geojson.io
containers:
  #- cerc/birbit
  - cerc/geojson
pods:
  # configurable process that reads from chain and generates a view for the front end
  # for the MVP it basically does nothing, b/c the FE view is reading all records
  # imagine a 'watcher' for a specific view
  #- birbit
  # front end, gets its file from kubo;
  # https://github.com/mapbox/geojson.io/blob/main/API.md#datadatatextx-url
  # deployed via kubernetes
  - geojson
  # hosts all the dot geojson files that are required to generate views
  - kubo
@ -0,0 +1,3 @@
# Container Registry Stack

Host a container image registry
@ -0,0 +1,5 @@
version: "1.0"
name: container-registry
description: "Container registry stack"
pods:
  - container-registry
@ -1,6 +1,6 @@
# fixturenet-eth

Instructions for deploying a local geth + lighthouse blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator)):
Instructions for deploying a local geth + lighthouse blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator (the installation of which is covered [here](https://git.vdb.to/cerc-io/stack-orchestrator)):

## Clone required repositories

@ -7,11 +7,11 @@ Instructions for deploying a local Laconic blockchain "fixturenet" for developme
**Note:** For building some NPMs, access to the @lirewine repositories is required. If you don't have access, see [this tutorial](/docs/laconicd-fixturenet.md) to run this stack.

## 1. Install Laconic Stack Orchestrator
Installation is covered in detail [here](https://github.com/cerc-io/stack-orchestrator#user-mode), but if you're on Linux and already have Docker installed it should be as simple as:
Installation is covered in detail [here](https://git.vdb.to/cerc-io/stack-orchestrator#user-mode), but if you're on Linux and already have Docker installed it should be as simple as:
```
$ mkdir my-working-dir
$ cd my-working-dir
$ curl -L -o ./laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
$ curl -L -o ./laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
$ chmod +x ./laconic-so
$ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory
```

@ -3,11 +3,11 @@
Instructions for deploying a local Laconic blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator.

## 1. Install Laconic Stack Orchestrator
Installation is covered in detail [here](https://github.com/cerc-io/stack-orchestrator#user-mode), but if you're on Linux and already have Docker installed it should be as simple as:
Installation is covered in detail [here](https://git.vdb.to/cerc-io/stack-orchestrator#user-mode), but if you're on Linux and already have Docker installed it should be as simple as:
```
$ mkdir my-working-dir
$ cd my-working-dir
$ curl -L -o ./laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
$ curl -L -o ./laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
$ chmod +x ./laconic-so
$ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory
```

@ -18,7 +18,7 @@ $ laconic-so --stack mainnet-eth build-containers

```
$ laconic-so --stack mainnet-eth deploy init --map-ports-to-host any-same --output mainnet-eth-spec.yml
$ laconic-so deploy create --spec-file mainnet-eth-spec.yml --deployment-dir mainnet-eth-deployment
$ laconic-so deploy --stack mainnet-eth create --spec-file mainnet-eth-spec.yml --deployment-dir mainnet-eth-deployment
```
## Start the stack
```

16
stack_orchestrator/data/stacks/mars-v2/README.md
Normal file
@ -0,0 +1,16 @@
# mars-v2

On a fresh Digital Ocean droplet with Ubuntu:

```
git clone https://github.com/cerc-io/stack-orchestrator
cd stack-orchestrator
./scripts/quick-install-linux.sh
```
Read and follow the instructions output by the script above to complete installation, then:

```
laconic-so --stack mars-v2 setup-repositories
laconic-so --stack mars-v2 build-containers
laconic-so --stack mars-v2 deploy up
```
8
stack_orchestrator/data/stacks/mars-v2/stack.yml
Normal file
@ -0,0 +1,8 @@
version: "0.1"
name: mars-v2
repos:
  - github.com/mars-protocol/mars-v2-frontend
containers:
  - cerc/mars-v2
pods:
  - mars-v2
16
stack_orchestrator/data/stacks/mars/README.md
Normal file
@ -0,0 +1,16 @@
# mars

On a fresh Digital Ocean droplet with Ubuntu:

```
git clone https://github.com/cerc-io/stack-orchestrator
cd stack-orchestrator
./scripts/quick-install-linux.sh
```
Read and follow the instructions output by the script above to complete installation, then:

```
laconic-so --stack mars setup-repositories
laconic-so --stack mars build-containers
laconic-so --stack mars deploy up
```
8
stack_orchestrator/data/stacks/mars/stack.yml
Normal file
@ -0,0 +1,8 @@
version: "0.1"
name: mars
repos:
  - github.com/cerc-io/mars-interface
containers:
  - cerc/mars
pods:
  - mars
@ -4,7 +4,7 @@ The MobyMask watcher is a Laconic Network component that provides efficient acce

## Deploy the MobyMask Watcher

The instructions below show how to deploy a MobyMask watcher using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator#install)).
The instructions below show how to deploy a MobyMask watcher using laconic-stack-orchestrator (the installation of which is covered [here](https://git.vdb.to/cerc-io/stack-orchestrator#install)).

This deployment expects that ipld-eth-server's endpoints are available on the local machine at http://ipld-eth-server.example.com:8083/graphql and http://ipld-eth-server.example.com:8082. More advanced configurations are supported by modifying the watcher's [config file](../../config/watcher-mobymask/mobymask-watcher.toml).

10
stack_orchestrator/data/stacks/ping-pub/README.md
Normal file
@ -0,0 +1,10 @@
# ping-pub
Experimental block explorer for laconic

```
laconic-so --stack ping-pub setup-repositories
laconic-so --stack ping-pub build-containers
laconic-so --stack ping-pub deploy init --output ping-pub-spec.yml --map-ports-to-host localhost-same
laconic-so --stack ping-pub deploy create --spec-file ping-pub-spec.yml --deployment-dir pp-deployment
laconic-so deployment --dir pp-deployment start
```
9
stack_orchestrator/data/stacks/ping-pub/stack.yml
Normal file
@ -0,0 +1,9 @@
version: "0.1"
name: ping-pub
repos:
  # fork, but only for config & Dockerfile reasons
  - github.com/LaconicNetwork/explorer@laconic
containers:
  - cerc/ping-pub
pods:
  - ping-pub
3
stack_orchestrator/data/stacks/test-database/README.md
Normal file
@ -0,0 +1,3 @@
# Test Database Stack

A stack with a database for test/demo purposes.
9
stack_orchestrator/data/stacks/test-database/stack.yml
Normal file
@ -0,0 +1,9 @@
version: "1.0"
name: test
description: "A test database stack"
repos:
containers:
  - cerc/test-database-container
  - cerc/test-database-client
pods:
  - test-database
@ -0,0 +1,11 @@
version: "1.0"
name: webapp-deployer-backend
description: "Deployer for webapps"
repos:
  - git.vdb.to/telackey/webapp-deployment-status-api
containers:
  - cerc/webapp-deployer-backend
pods:
  - name: webapp-deployer-backend
    repository: git.vdb.to/telackey/webapp-deployment-status-api
    path: ./
@ -17,6 +17,7 @@ from pathlib import Path
from python_on_whales import DockerClient, DockerException
from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from stack_orchestrator.opts import opts


class DockerDeployer(Deployer):
@ -29,60 +30,69 @@ class DockerDeployer(Deployer):
        self.type = type

    def up(self, detach, services):
        try:
            return self.docker.compose.up(detach=detach, services=services)
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.compose.up(detach=detach, services=services)
            except DockerException as e:
                raise DeployerException(e)

    def down(self, timeout, volumes):
        try:
            return self.docker.compose.down(timeout=timeout, volumes=volumes)
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.compose.down(timeout=timeout, volumes=volumes)
            except DockerException as e:
                raise DeployerException(e)

    def update(self):
        try:
            return self.docker.compose.restart()
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.compose.restart()
            except DockerException as e:
                raise DeployerException(e)

    def status(self):
        try:
            for p in self.docker.compose.ps():
                print(f"{p.name}\t{p.state.status}")
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                for p in self.docker.compose.ps():
                    print(f"{p.name}\t{p.state.status}")
            except DockerException as e:
                raise DeployerException(e)

    def ps(self):
        try:
            return self.docker.compose.ps()
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.compose.ps()
            except DockerException as e:
                raise DeployerException(e)

    def port(self, service, private_port):
        try:
            return self.docker.compose.port(service=service, private_port=private_port)
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.compose.port(service=service, private_port=private_port)
            except DockerException as e:
                raise DeployerException(e)

    def execute(self, service, command, tty, envs):
        try:
            return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
            except DockerException as e:
                raise DeployerException(e)

    def logs(self, services, tail, follow, stream):
        try:
            return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
            except DockerException as e:
                raise DeployerException(e)

    def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
        try:
            return self.docker.run(image=image, command=command, user=user, volumes=volumes,
                                   entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
        except DockerException as e:
            raise DeployerException(e)
        if not opts.o.dry_run:
            try:
                return self.docker.run(image=image, command=command, user=user, volumes=volumes,
                                       entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
            except DockerException as e:
                raise DeployerException(e)


class DockerDeployerConfigGenerator(DeployerConfigGenerator):

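Every Docker operation above now checks the dry-run flag first, so in dry-run mode each method is a no-op that implicitly returns None. A minimal sketch of that caller contract, with an illustrative stand-in for the real options object (not the actual `stack_orchestrator.opts` API):

```
class FakeOpts:
    """Illustrative stand-in for the global options object."""
    dry_run = True


def up(opts, compose_up, detach=True, services=None):
    # With dry_run set, nothing runs and the function implicitly
    # returns None, so callers must tolerate a missing result.
    if not opts.dry_run:
        return compose_up(detach=detach, services=services)


assert up(FakeOpts(), lambda **kwargs: "started") is None
```
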
@ -85,54 +85,39 @@ def create_deploy_context(
def up_operation(ctx, services_list, stay_attached=False):
    global_context = ctx.parent.parent.obj
    deploy_context = ctx.obj
    if not global_context.dry_run:
        cluster_context = deploy_context.cluster_context
        container_exec_env = _make_runtime_env(global_context)
        for attr, value in container_exec_env.items():
            os.environ[attr] = value
        if global_context.verbose:
            print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
        for pre_start_command in cluster_context.pre_start_commands:
            _run_command(global_context, cluster_context.cluster, pre_start_command)
        deploy_context.deployer.up(detach=not stay_attached, services=services_list)
        for post_start_command in cluster_context.post_start_commands:
            _run_command(global_context, cluster_context.cluster, post_start_command)
        _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
    cluster_context = deploy_context.cluster_context
    container_exec_env = _make_runtime_env(global_context)
    for attr, value in container_exec_env.items():
        os.environ[attr] = value
    if global_context.verbose:
        print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
    for pre_start_command in cluster_context.pre_start_commands:
        _run_command(global_context, cluster_context.cluster, pre_start_command)
    deploy_context.deployer.up(detach=not stay_attached, services=services_list)
    for post_start_command in cluster_context.post_start_commands:
        _run_command(global_context, cluster_context.cluster, post_start_command)
    _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)


def down_operation(ctx, delete_volumes, extra_args_list):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose down")
        timeout_arg = None
        if extra_args_list:
            timeout_arg = extra_args_list[0]
        # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
        ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
    timeout_arg = None
    if extra_args_list:
        timeout_arg = extra_args_list[0]
    # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
    ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)


def status_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose status")
        ctx.obj.deployer.status()
    ctx.obj.deployer.status()


def update_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose update")
        ctx.obj.deployer.update()
    ctx.obj.deployer.update()


def ps_operation(ctx):
    global_context = ctx.parent.parent.obj
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose ps")
        container_list = ctx.obj.deployer.ps()
        if len(container_list) > 0:
            print("Running containers:")
@ -187,15 +172,11 @@ def exec_operation(ctx, extra_args):


def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
    global_context = ctx.parent.parent.obj
    extra_args_list = list(extra_args) or None
    if not global_context.dry_run:
        if global_context.verbose:
            print("Running compose logs")
        services_list = extra_args_list if extra_args_list is not None else []
        logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
        for stream_type, stream_content in logs_stream:
            print(stream_content.decode("utf-8"), end="")
    services_list = extra_args_list if extra_args_list is not None else []
    logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
    for stream_type, stream_content in logs_stream:
        print(stream_content.decode("utf-8"), end="")


@command.command()
@ -347,8 +328,8 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
    else:
        if deployment:
            compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
            pod_pre_start_command = pod["pre_start_command"]
            pod_post_start_command = pod["post_start_command"]
            pod_pre_start_command = pod.get("pre_start_command")
            pod_post_start_command = pod.get("post_start_command")
            script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts")
            if pod_pre_start_command is not None:
                pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command))
@ -357,8 +338,8 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
        else:
            pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
            compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
            pod_pre_start_command = pod["pre_start_command"]
            pod_post_start_command = pod["post_start_command"]
            pod_pre_start_command = pod.get("pre_start_command")
            pod_post_start_command = pod.get("post_start_command")
            if pod_pre_start_command is not None:
                pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
            if pod_post_start_command is not None:
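The switch from `pod["pre_start_command"]` to `pod.get("pre_start_command")` is what makes these hook commands optional in a pod definition. With a hypothetical pod entry:

```
pod = {"name": "example-pod"}        # hypothetical pod entry with no hook commands
# pod["pre_start_command"]           # would raise KeyError
print(pod.get("pre_start_command"))  # None, so the `is not None` checks skip the hook
```
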
@ -463,7 +444,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
                tty=False,
                envs=container_exec_env)
            waiting_for_data = False
            if ctx.debug:
            if ctx.debug and not waiting_for_data:
                print(f"destination output: {destination_output}")


@ -18,11 +18,11 @@ from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerCon
from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator


def getDeployerConfigGenerator(type: str):
def getDeployerConfigGenerator(type: str, deployment_context):
    if type == "compose" or type is None:
        return DockerDeployerConfigGenerator(type)
    elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
        return K8sDeployerConfigGenerator(type)
        return K8sDeployerConfigGenerator(type, deployment_context)
    else:
        print(f"ERROR: deploy-to {type} is not valid")

@ -27,6 +27,7 @@ from stack_orchestrator.opts import opts
from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config,
                                     global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
                                     get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file)
from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
from stack_orchestrator.deploy.deployment_context import DeploymentContext
@ -54,19 +55,44 @@ def _get_ports(stack):

def _get_named_volumes(stack):
    # Parse the compose files looking for named volumes
    named_volumes = []
    named_volumes = {
        "rw": [],
        "ro": []
    }
    parsed_stack = get_parsed_stack_config(stack)
    pods = get_pod_list(parsed_stack)
    yaml = get_yaml()

    def find_vol_usage(parsed_pod_file, vol):
        ret = {}
        if "services" in parsed_pod_file:
            for svc_name, svc in parsed_pod_file["services"].items():
                if "volumes" in svc:
                    for svc_volume in svc["volumes"]:
                        parts = svc_volume.split(":")
                        if parts[0] == vol:
                            ret[svc_name] = {
                                "volume": parts[0],
                                "mount": parts[1],
                                "options": parts[2] if len(parts) == 3 else None
                            }
        return ret

    for pod in pods:
        pod_file_path = get_pod_file_path(parsed_stack, pod)
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
            for volume in volumes.keys():
                # Volume definition looks like:
                # 'laconicd-data': None
                named_volumes.append(volume)
                for vu in find_vol_usage(parsed_pod_file, volume).values():
                    read_only = vu["options"] == "ro"
                    if read_only:
                        if vu["volume"] not in named_volumes["rw"] and vu["volume"] not in named_volumes["ro"]:
                            named_volumes["ro"].append(vu["volume"])
                    else:
                        if vu["volume"] not in named_volumes["rw"]:
                            named_volumes["rw"].append(vu["volume"])

    return named_volumes

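To see how the rw/ro split behaves, here is a condensed, self-contained version of the same classification (the pod-file dict is illustrative; unlike the full function above, this simplified sketch never lists a volume in both buckets):

```
parsed_pod_file = {
    "services": {
        "db": {"volumes": ["db-data:/var/lib/db", "db-config:/etc/db:ro"]},
    },
    "volumes": {"db-data": None, "db-config": None},
}


def classify(parsed_pod_file):
    named = {"rw": [], "ro": []}
    for svc in parsed_pod_file["services"].values():
        for v in svc.get("volumes", []):
            parts = v.split(":")  # name:mount[:options]
            name = parts[0]
            options = parts[2] if len(parts) == 3 else None
            bucket = "ro" if options == "ro" else "rw"
            if name not in named["rw"] and name not in named["ro"]:
                named[bucket].append(name)
    return named


print(classify(parsed_pod_file))  # {'rw': ['db-data'], 'ro': ['db-config']}
```
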
@ -86,6 +112,7 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):

# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
def _fixup_pod_file(pod, spec, compose_dir):
    deployment_type = spec[constants.deploy_to_key]
    # Fix up volumes
    if "volumes" in spec:
        spec_volumes = spec["volumes"]
@ -94,16 +121,35 @@ def _fixup_pod_file(pod, spec, compose_dir):
        for volume in pod_volumes.keys():
            if volume in spec_volumes:
                volume_spec = spec_volumes[volume]
                volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
                _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
                new_volume_spec = {"driver": "local",
                                   "driver_opts": {
                                       "type": "none",
                                       "device": volume_spec_fixedup,
                                       "o": "bind"
                                   }
                                   }
                pod["volumes"][volume] = new_volume_spec
                if volume_spec:
                    volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
                    _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
                    # this is Docker specific
                    if spec.is_docker_deployment():
                        new_volume_spec = {
                            "driver": "local",
                            "driver_opts": {
                                "type": "none",
                                "device": volume_spec_fixedup,
                                "o": "bind"
                            }
                        }
                        pod["volumes"][volume] = new_volume_spec

    # Fix up configmaps
    if constants.configmaps_key in spec:
        if spec.is_kubernetes_deployment():
            spec_cfgmaps = spec[constants.configmaps_key]
            if "volumes" in pod:
                pod_volumes = pod[constants.volumes_key]
                for volume in pod_volumes.keys():
                    if volume in spec_cfgmaps:
                        volume_cfg = spec_cfgmaps[volume]
                        # Just make the dir (if necessary)
                        _create_bind_dir_if_relative(volume, volume_cfg, compose_dir)
        else:
            print(f"Warning: ConfigMaps not supported for {deployment_type}")

    # Fix up ports
    if "network" in spec and "ports" in spec["network"]:
        spec_ports = spec["network"]["ports"]
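For reference, this is what the Docker-specific branch above produces for a hypothetical spec entry `"db-data": "./data/db-data"`: a compose named volume backed by a bind mount, where the extra leading dot makes the device path relative to the directory holding the compose file:

```
from pathlib import Path

volume_spec = "./data/db-data"  # hypothetical value from the deployment spec
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
new_volume_spec = {
    "driver": "local",
    "driver_opts": {"type": "none", "device": volume_spec_fixedup, "o": "bind"},
}
print(new_volume_spec["driver_opts"]["device"])  # ../data/db-data, resolved from the compose dir
```
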
@ -286,7 +332,7 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
            if image_registry is None:
                error_exit("--image-registry must be supplied with --deploy-to k8s")
            spec_file_content.update({constants.kube_config_key: kube_config})
            spec_file_content.update({constants.image_resigtry_key: image_registry})
            spec_file_content.update({constants.image_registry_key: image_registry})
        else:
            # Check for --kube-config supplied for non-relevant deployer types
            if kube_config is not None:
@ -319,9 +365,24 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
    named_volumes = _get_named_volumes(stack)
    if named_volumes:
        volume_descriptors = {}
        for named_volume in named_volumes:
            volume_descriptors[named_volume] = f"./data/{named_volume}"
        spec_file_content["volumes"] = volume_descriptors
        configmap_descriptors = {}
        for named_volume in named_volumes["rw"]:
            if "k8s" in deployer_type:
                volume_descriptors[named_volume] = None
            else:
                volume_descriptors[named_volume] = f"./data/{named_volume}"
        for named_volume in named_volumes["ro"]:
            if "k8s" in deployer_type:
                if "config" in named_volume:
                    configmap_descriptors[named_volume] = f"./configmaps/{named_volume}"
                else:
                    volume_descriptors[named_volume] = None
            else:
                volume_descriptors[named_volume] = f"./data/{named_volume}"
        if volume_descriptors:
            spec_file_content["volumes"] = volume_descriptors
        if configmap_descriptors:
            spec_file_content["configmaps"] = configmap_descriptors

    if opts.o.debug:
        print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")
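A condensed sketch of the descriptor logic above, showing what it would put in the spec file for each deployer family (volume names are illustrative; the `"config" in named_volume` test is the heuristic from the code):

```
named_volumes = {"rw": ["db-data"], "ro": ["app-config"]}  # illustrative


def descriptors(named_volumes, deployer_type):
    volumes, configmaps = {}, {}
    for v in named_volumes["rw"]:
        volumes[v] = None if "k8s" in deployer_type else f"./data/{v}"
    for v in named_volumes["ro"]:
        if "k8s" in deployer_type and "config" in v:
            configmaps[v] = f"./configmaps/{v}"
        elif "k8s" in deployer_type:
            volumes[v] = None
        else:
            volumes[v] = f"./data/{v}"
    return volumes, configmaps


print(descriptors(named_volumes, "k8s"))
# ({'db-data': None}, {'app-config': './configmaps/app-config'})
print(descriptors(named_volumes, "compose"))
# ({'db-data': './data/db-data', 'app-config': './data/app-config'}, {})
```
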
@ -360,6 +421,17 @@ def _create_deployment_file(deployment_dir: Path):
        output_file.write(f"{constants.cluster_id_key}: {cluster}\n")


def _check_volume_definitions(spec):
    if spec.is_kubernetes_deployment():
        for volume_name, volume_path in spec.get_volumes().items():
            if volume_path:
                if not os.path.isabs(volume_path):
                    raise Exception(
                        f"Relative path {volume_path} for volume {volume_name} not "
                        f"supported for deployment type {spec.get_deployment_type()}"
                    )


@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@ -375,7 +447,8 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
# The init command's implementation is in a separate function so that we can
# call it from other commands, bypassing the click decoration stuff
def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers):
    parsed_spec = get_parsed_deployment_spec(spec_file)
    parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
    _check_volume_definitions(parsed_spec)
    stack_name = parsed_spec["stack"]
    deployment_type = parsed_spec[constants.deploy_to_key]
    stack_file = get_stack_file_path(stack_name)
@ -441,7 +514,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
    deployment_context = DeploymentContext()
    deployment_context.init(deployment_dir_path)
    # Call the deployer to generate any deployer-specific files (e.g. for kind)
    deployer_config_generator = getDeployerConfigGenerator(deployment_type)
    deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
    # TODO: make deployment_dir_path a Path above
    deployer_config_generator.generate(deployment_dir_path)
    call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context])

@ -31,7 +31,8 @@ def _image_needs_pushed(image: str):

def remote_tag_for_image(image: str, remote_repo_url: str):
    # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
    (org, image_name_with_version) = image.split("/")
    major_parts = image.split("/", 2)
    image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
    (image_name, image_version) = image_name_with_version.split(":")
    if image_version == "local":
        return f"{remote_repo_url}/{image_name}:deploy"
@ -45,7 +46,7 @@ def push_images_operation(command_context: DeployCommandContext, deployment_cont
    cluster_context = command_context.cluster_context
    images: Set[str] = images_for_deployment(cluster_context.compose_files)
    # Tag the images for the remote repo
    remote_repo_url = deployment_context.spec.obj[constants.image_resigtry_key]
    remote_repo_url = deployment_context.spec.obj[constants.image_registry_key]
    docker = DockerClient()
    for image in images:
        if _image_needs_pushed(image):

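The 3-way split above fixes the old two-element unpacking, which raised ValueError for image tags without an org component. A worked example covering only the `:local` rewrite path, with illustrative registry values:

```
def remote_tag_for_local_image(image, remote_repo_url):
    major_parts = image.split("/", 2)
    image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
    (image_name, image_version) = image_name_with_version.split(":")
    if image_version == "local":
        return f"{remote_repo_url}/{image_name}:deploy"


print(remote_tag_for_local_image("cerc/laconicd:local", "registry.example.com/org"))
# registry.example.com/org/laconicd:deploy
print(remote_tag_for_local_image("laconicd:local", "registry.example.com/org"))
# registry.example.com/org/laconicd:deploy  (no crash on the org-less form)
```
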
@ -13,19 +13,50 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.

import os

from kubernetes import client
from typing import Any, List, Set

from stack_orchestrator.opts import opts
from stack_orchestrator.util import env_var_map_from_file
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path
from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map, envs_from_compose_file, merge_envs
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
from stack_orchestrator.deploy.images import remote_tag_for_image

DEFAULT_VOLUME_RESOURCES = Resources({
    "reservations": {"storage": "2Gi"}
})

DEFAULT_CONTAINER_RESOURCES = Resources({
    "reservations": {"cpus": "0.1", "memory": "200M"},
    "limits": {"cpus": "1.0", "memory": "2000M"},
})


def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
    def to_dict(limits: ResourceLimits):
        if not limits:
            return None

        ret = {}
        if limits.cpus:
            ret["cpu"] = str(limits.cpus)
        if limits.memory:
            ret["memory"] = f"{int(limits.memory / (1000 * 1000))}M"
        if limits.storage:
            ret["storage"] = f"{int(limits.storage / (1000 * 1000))}M"
        return ret

    return client.V1ResourceRequirements(
        requests=to_dict(resources.reservations),
        limits=to_dict(resources.limits)
    )


class ClusterInfo:
    parsed_pod_yaml_map: Any
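The `1000 * 1000` division above implies that `ResourceLimits` stores memory and storage as raw byte counts while cpus is numeric; under that assumption, the conversion in isolation looks like this:

```
def limits_to_k8s_dict(cpus=None, memory=None, storage=None):
    # cpus: numeric; memory/storage: assumed byte counts (see the division above)
    ret = {}
    if cpus:
        ret["cpu"] = str(cpus)
    if memory:
        ret["memory"] = f"{int(memory / (1000 * 1000))}M"
    if storage:
        ret["storage"] = f"{int(storage / (1000 * 1000))}M"
    return ret


print(limits_to_k8s_dict(cpus=0.5, memory=512_000_000))  # {'cpu': '0.5', 'memory': '512M'}
```
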
@ -47,7 +78,30 @@ class ClusterInfo:
        if (opts.o.debug):
            print(f"Env vars: {self.environment_variables.map}")

    def get_ingress(self):
    def get_nodeport(self):
        for pod_name in self.parsed_pod_yaml_map:
            pod = self.parsed_pod_yaml_map[pod_name]
            services = pod["services"]
            for service_name in services:
                service_info = services[service_name]
                if "ports" in service_info:
                    port = int(service_info["ports"][0])
                    if opts.o.debug:
                        print(f"service port: {port}")
        service = client.V1Service(
            metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport"),
            spec=client.V1ServiceSpec(
                type="NodePort",
                ports=[client.V1ServicePort(
                    port=port,
                    target_port=port
                )],
                selector={"app": self.app_name}
            )
        )
        return service

    def get_ingress(self, use_tls=False):
        # No ingress for a deployment that has no http-proxy defined, for now
        http_proxy_info_list = self.spec.get_http_proxy()
        ingress = None
@ -62,7 +116,7 @@ class ClusterInfo:
            tls = [client.V1IngressTLS(
                hosts=[host_name],
                secret_name=f"{self.app_name}-tls"
            )]
            )] if use_tls else None
            paths = []
            for route in http_proxy_info["routes"]:
                path = route["path"]
@ -112,9 +166,10 @@ class ClusterInfo:
            services = pod["services"]
            for service_name in services:
                service_info = services[service_name]
                port = int(service_info["ports"][0])
                if opts.o.debug:
                    print(f"service port: {port}")
                if "ports" in service_info:
                    port = int(service_info["ports"][0])
                    if opts.o.debug:
                        print(f"service port: {port}")
        service = client.V1Service(
            metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
            spec=client.V1ServiceSpec(
@ -130,39 +185,112 @@ class ClusterInfo:

    def get_pvcs(self):
        result = []
        volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
        spec_volumes = self.spec.get_volumes()
        named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
        resources = self.spec.get_volume_resources()
        if not resources:
            resources = DEFAULT_VOLUME_RESOURCES
        if opts.o.debug:
            print(f"Volumes: {volumes}")
        for volume_name in volumes:
            print(f"Spec Volumes: {spec_volumes}")
            print(f"Named Volumes: {named_volumes}")
            print(f"Resources: {resources}")
        for volume_name, volume_path in spec_volumes.items():
            if volume_name not in named_volumes:
                if opts.o.debug:
                    print(f"{volume_name} not in pod files")
                continue

            labels = {
                "app": self.app_name,
                "volume-label": f"{self.app_name}-{volume_name}"
            }
            if volume_path:
                storage_class_name = "manual"
                k8s_volume_name = f"{self.app_name}-{volume_name}"
            else:
                # These will be auto-assigned.
                storage_class_name = None
                k8s_volume_name = None

            spec = client.V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                storage_class_name="manual",
                resources=client.V1ResourceRequirements(
                    requests={"storage": "2Gi"}
                ),
                volume_name=volume_name
                storage_class_name=storage_class_name,
                resources=to_k8s_resource_requirements(resources),
                volume_name=k8s_volume_name
            )
            pvc = client.V1PersistentVolumeClaim(
                metadata=client.V1ObjectMeta(name=volume_name,
                                             labels={"volume-label": volume_name}),
                spec=spec,
                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels),
                spec=spec
            )
            result.append(pvc)
        return result

    def get_configmaps(self):
        result = []
        spec_configmaps = self.spec.get_configmaps()
        named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
        for cfg_map_name, cfg_map_path in spec_configmaps.items():
            if cfg_map_name not in named_volumes:
                if opts.o.debug:
                    print(f"{cfg_map_name} not in pod files")
                continue

            if not cfg_map_path.startswith("/"):
                cfg_map_path = os.path.join(os.path.dirname(self.spec.file_path), cfg_map_path)

            # Read in all the files at a single-level of the directory. This mimics the behavior
            # of `kubectl create configmap foo --from-file=/path/to/dir`
            data = {}
            for f in os.listdir(cfg_map_path):
                full_path = os.path.join(cfg_map_path, f)
                if os.path.isfile(full_path):
                    data[f] = open(full_path, 'rt').read()

            spec = client.V1ConfigMap(
                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}",
                                             labels={"configmap-label": cfg_map_name}),
                data=data
            )
            result.append(spec)
        return result

    def get_pvs(self):
        result = []
        volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
        for volume_name in volumes:
        spec_volumes = self.spec.get_volumes()
        named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
        resources = self.spec.get_volume_resources()
        if not resources:
            resources = DEFAULT_VOLUME_RESOURCES
        for volume_name, volume_path in spec_volumes.items():
            # We only need to create a volume if it is fully qualified HostPath.
            # Otherwise, we create the PVC and expect the node to allocate the volume for us.
            if not volume_path:
                if opts.o.debug:
                    print(f"{volume_name} does not require an explicit PersistentVolume, since it is not a bind-mount.")
                continue

            if volume_name not in named_volumes:
                if opts.o.debug:
                    print(f"{volume_name} not in pod files")
                continue

            if not os.path.isabs(volume_path):
                print(f"WARNING: {volume_name}:{volume_path} is not absolute, cannot bind volume.")
                continue

            if self.spec.is_kind_deployment():
                host_path = client.V1HostPathVolumeSource(path=get_kind_pv_bind_mount_path(volume_name))
            else:
                host_path = client.V1HostPathVolumeSource(path=volume_path)
            spec = client.V1PersistentVolumeSpec(
                storage_class_name="manual",
                access_modes=["ReadWriteOnce"],
                capacity={"storage": "2Gi"},
                host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name))
                capacity=to_k8s_resource_requirements(resources).requests,
                host_path=host_path
            )
            pv = client.V1PersistentVolume(
                metadata=client.V1ObjectMeta(name=volume_name,
                                             labels={"volume-label": volume_name}),
                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
                                             labels={"volume-label": f"{self.app_name}-{volume_name}"}),
                spec=spec,
            )
            result.append(pv)
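The ConfigMap construction above loads one directory level into a `{filename: contents}` dict, mimicking `kubectl create configmap foo --from-file=<dir>`. The same loading step in isolation (text files only; binary content would need the ConfigMap `binaryData` field, which this code does not use):

```
import os


def configmap_data_from_dir(cfg_map_path):
    data = {}
    for f in os.listdir(cfg_map_path):
        full_path = os.path.join(cfg_map_path, f)
        if os.path.isfile(full_path):  # subdirectories are skipped
            with open(full_path, "rt") as fh:
                data[f] = fh.read()
    return data
```
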
@ -171,6 +299,9 @@ class ClusterInfo:
    # TODO: put things like image pull policy into an object-scope struct
    def get_deployment(self, image_pull_policy: str = None):
        containers = []
        resources = self.spec.get_container_resources()
        if not resources:
            resources = DEFAULT_CONTAINER_RESOURCES
        for pod_name in self.parsed_pod_yaml_map:
            pod = self.parsed_pod_yaml_map[pod_name]
            services = pod["services"]
@ -178,10 +309,18 @@ class ClusterInfo:
            container_name = service_name
            service_info = services[service_name]
            image = service_info["image"]
            port = int(service_info["ports"][0])
            if "ports" in service_info:
                port = int(service_info["ports"][0])
            if opts.o.debug:
                print(f"image: {image}")
                print(f"service port: {port}")
            merged_envs = merge_envs(
                envs_from_compose_file(
                    service_info["environment"]), self.environment_variables.map
            ) if "environment" in service_info else self.environment_variables.map
            envs = envs_from_environment_variables_map(merged_envs)
            if opts.o.debug:
                print(f"image: {image}")
                print(f"service port: {port}")
                print(f"Merged envs: {envs}")
            # Re-write the image tag for remote deployment
            image_to_use = remote_tag_for_image(
                image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image
@ -190,19 +329,40 @@ class ClusterInfo:
                name=container_name,
                image=image_to_use,
                image_pull_policy=image_pull_policy,
                env=envs_from_environment_variables_map(self.environment_variables.map),
                env=envs,
                ports=[client.V1ContainerPort(container_port=port)],
                volume_mounts=volume_mounts,
                resources=client.V1ResourceRequirements(
                    requests={"cpu": "100m", "memory": "200Mi"},
                    limits={"cpu": "500m", "memory": "500Mi"},
                security_context=client.V1SecurityContext(
                    privileged=self.spec.get_privileged(),
                    capabilities=client.V1Capabilities(
                        add=self.spec.get_capabilities()
                    ) if self.spec.get_capabilities() else None
                ),
                resources=to_k8s_resource_requirements(resources),
            )
            containers.append(container)
        volumes = volumes_for_pod_files(self.parsed_pod_yaml_map)
        volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name)
        image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")]

        annotations = None
        labels = {"app": self.app_name}

        if self.spec.get_annotations():
            annotations = {}
            for key, value in self.spec.get_annotations().items():
                for service_name in services:
                    annotations[key.replace("{name}", service_name)] = value

        if self.spec.get_labels():
            for key, value in self.spec.get_labels().items():
                for service_name in services:
                    labels[key.replace("{name}", service_name)] = value

        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": self.app_name}),
            metadata=client.V1ObjectMeta(
                annotations=annotations,
                labels=labels
            ),
            spec=client.V1PodSpec(containers=containers, image_pull_secrets=image_pull_secrets, volumes=volumes),
        )
        spec = client.V1DeploymentSpec(

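The `{name}` substitution above expands each spec-level annotation or label key once per service name. A tiny worked example with illustrative values:

```
spec_annotations = {"example.io/scrape-{name}": "true"}  # illustrative spec entry
services = ["api", "worker"]

annotations = {}
for key, value in spec_annotations.items():
    for service_name in services:
        annotations[key.replace("{name}", service_name)] = value

print(annotations)
# {'example.io/scrape-api': 'true', 'example.io/scrape-worker': 'true'}
```
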
@ -1,5 +1,4 @@
# Copyright © 2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
@ -21,7 +20,9 @@ from kubernetes import client, config
from stack_orchestrator import constants
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind
from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config
from stack_orchestrator.deploy.k8s.helpers import install_ingress_for_kind, wait_for_ingress_in_kind
from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, containers_in_pod, log_stream_from_string
from stack_orchestrator.deploy.k8s.helpers import generate_kind_config
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deployment_context import DeploymentContext
@ -82,99 +83,190 @@ class K8sDeployer(Deployer):
|
||||
self.apps_api = client.AppsV1Api()
|
||||
self.custom_obj_api = client.CustomObjectsApi()
|
||||
|
||||
def up(self, detach, services):
|
||||
|
||||
if self.is_kind():
|
||||
# Create the kind cluster
|
||||
create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
|
||||
# Ensure the referenced containers are copied into kind
|
||||
load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
|
||||
self.connect_api()
|
||||
|
||||
def _create_volume_data(self):
|
||||
# Create the host-path-mounted PVs for this deployment
|
||||
pvs = self.cluster_info.get_pvs()
|
||||
for pv in pvs:
|
||||
if opts.o.debug:
|
||||
print(f"Sending this pv: {pv}")
|
||||
pv_resp = self.core_api.create_persistent_volume(body=pv)
|
||||
if opts.o.debug:
|
||||
print("PVs created:")
|
||||
print(f"{pv_resp}")
|
||||
if not opts.o.dry_run:
|
||||
try:
|
||||
pv_resp = self.core_api.read_persistent_volume(name=pv.metadata.name)
|
||||
if pv_resp:
|
||||
if opts.o.debug:
|
||||
print("PVs already present:")
|
||||
print(f"{pv_resp}")
|
||||
continue
|
||||
except: # noqa: E722
|
||||
pass
|
||||
|
||||
pv_resp = self.core_api.create_persistent_volume(body=pv)
|
||||
if opts.o.debug:
|
||||
print("PVs created:")
|
||||
print(f"{pv_resp}")
|
||||
|
||||
# Figure out the PVCs for this deployment
|
||||
pvcs = self.cluster_info.get_pvcs()
|
||||
for pvc in pvcs:
|
||||
if opts.o.debug:
|
||||
print(f"Sending this pvc: {pvc}")
|
||||
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
|
||||
|
||||
if not opts.o.dry_run:
|
||||
try:
|
||||
pvc_resp = self.core_api.read_namespaced_persistent_volume_claim(
|
||||
name=pvc.metadata.name, namespace=self.k8s_namespace)
|
||||
if pvc_resp:
|
||||
if opts.o.debug:
|
||||
print("PVCs already present:")
|
||||
print(f"{pvc_resp}")
|
||||
continue
|
||||
except: # noqa: E722
|
||||
pass
|
||||
|
||||
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
|
||||
if opts.o.debug:
|
||||
print("PVCs created:")
|
||||
print(f"{pvc_resp}")
|
||||
|
||||
# Figure out the ConfigMaps for this deployment
|
||||
config_maps = self.cluster_info.get_configmaps()
|
||||
for cfg_map in config_maps:
|
||||
if opts.o.debug:
|
||||
print("PVCs created:")
|
||||
print(f"{pvc_resp}")
|
||||
print(f"Sending this ConfigMap: {cfg_map}")
|
||||
if not opts.o.dry_run:
|
||||
cfg_rsp = self.core_api.create_namespaced_config_map(
|
||||
body=cfg_map,
|
||||
namespace=self.k8s_namespace
|
||||
)
|
||||
if opts.o.debug:
|
||||
print("ConfigMap created:")
|
||||
print(f"{cfg_rsp}")
|
||||
|
||||
def _create_deployment(self):
|
||||
# Process compose files into a Deployment
|
||||
deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
|
||||
# Create the k8s objects
|
||||
if opts.o.debug:
|
||||
print(f"Sending this deployment: {deployment}")
|
||||
deployment_resp = self.apps_api.create_namespaced_deployment(
|
||||
body=deployment, namespace=self.k8s_namespace
|
||||
)
|
||||
if opts.o.debug:
|
||||
print("Deployment created:")
|
||||
print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
|
||||
{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
|
||||
|
||||
service: client.V1Service = self.cluster_info.get_service()
|
||||
service_resp = self.core_api.create_namespaced_service(
|
||||
namespace=self.k8s_namespace,
|
||||
body=service
|
||||
)
|
||||
if opts.o.debug:
|
||||
print("Service created:")
|
||||
print(f"{service_resp}")
|
||||
|
||||
if not self.is_kind():
|
||||
ingress: client.V1Ingress = self.cluster_info.get_ingress()
|
||||
|
||||
if opts.o.debug:
|
||||
print(f"Sending this ingress: {ingress}")
|
||||
ingress_resp = self.networking_api.create_namespaced_ingress(
|
||||
namespace=self.k8s_namespace,
|
||||
body=ingress
|
||||
if not opts.o.dry_run:
|
||||
deployment_resp = self.apps_api.create_namespaced_deployment(
|
||||
body=deployment, namespace=self.k8s_namespace
|
||||
)
|
||||
if opts.o.debug:
|
||||
print("Ingress created:")
|
||||
print(f"{ingress_resp}")
|
||||
print("Deployment created:")
|
||||
print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
|
||||
{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
|
||||
|
||||
def down(self, timeout, volumes):
|
||||
self.connect_api()
|
||||
# Delete the k8s objects
|
||||
# Create the host-path-mounted PVs for this deployment
|
||||
pvs = self.cluster_info.get_pvs()
|
||||
for pv in pvs:
|
||||
service: client.V1Service = self.cluster_info.get_service()
|
||||
if opts.o.debug:
|
||||
print(f"Sending this service: {service}")
|
||||
if not opts.o.dry_run:
|
||||
service_resp = self.core_api.create_namespaced_service(
|
||||
namespace=self.k8s_namespace,
|
||||
body=service
|
||||
)
|
||||
if opts.o.debug:
|
||||
print(f"Deleting this pv: {pv}")
|
||||
try:
|
||||
pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name)
|
||||
if opts.o.debug:
|
||||
print("PV deleted:")
|
||||
print(f"{pv_resp}")
|
||||
except client.exceptions.ApiException as e:
|
||||
_check_delete_exception(e)
|
||||
print("Service created:")
|
||||
print(f"{service_resp}")
|
||||
|
||||
# Figure out the PVCs for this deployment
|
||||
pvcs = self.cluster_info.get_pvcs()
|
||||
for pvc in pvcs:
|
||||
def up(self, detach, services):
|
||||
if not opts.o.dry_run:
|
||||
if self.is_kind():
|
||||
# Create the kind cluster
|
||||
create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
|
||||
# Ensure the referenced containers are copied into kind
|
||||
load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
|
||||
self.connect_api()
|
||||
if self.is_kind():
|
||||
# Now configure an ingress controller (not installed by default in kind)
|
||||
install_ingress_for_kind()
|
||||
# Wait for ingress to start (deployment provisioning will fail unless this is done)
|
||||
wait_for_ingress_in_kind()
|
||||
|
||||
else:
|
||||
print("Dry run mode enabled, skipping k8s API connect")
|
||||
|
||||
self._create_volume_data()
|
||||
self._create_deployment()
|
||||
|
||||
# Note: at present we don't support tls for kind (and enabling tls causes errors)
|
||||
ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
|
||||
if ingress:
|
||||
if opts.o.debug:
|
||||
print(f"Deleting this pvc: {pvc}")
|
||||
try:
|
||||
pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(
|
||||
name=pvc.metadata.name, namespace=self.k8s_namespace
|
||||
print(f"Sending this ingress: {ingress}")
|
||||
if not opts.o.dry_run:
|
||||
ingress_resp = self.networking_api.create_namespaced_ingress(
|
||||
namespace=self.k8s_namespace,
|
||||
body=ingress
|
||||
)
|
||||
if opts.o.debug:
|
||||
print("PVCs deleted:")
|
||||
print(f"{pvc_resp}")
|
||||
print("Ingress created:")
|
||||
print(f"{ingress_resp}")
|
||||
else:
|
||||
if opts.o.debug:
|
||||
print("No ingress configured")
|
||||
|
||||
nodeport: client.V1Service = self.cluster_info.get_nodeport()
|
||||
if nodeport:
|
||||
if opts.o.debug:
|
||||
print(f"Sending this nodeport: {nodeport}")
|
||||
if not opts.o.dry_run:
|
||||
nodeport_resp = self.core_api.create_namespaced_service(
|
||||
namespace=self.k8s_namespace,
|
||||
body=nodeport
|
||||
)
|
||||
if opts.o.debug:
|
||||
print("NodePort created:")
|
||||
print(f"{nodeport_resp}")
|
||||
|
||||
    def down(self, timeout, volumes):  # noqa: C901
        self.connect_api()
        # Delete the k8s objects

        if volumes:
            # Delete the host-path-mounted PVs for this deployment
            pvs = self.cluster_info.get_pvs()
            for pv in pvs:
                if opts.o.debug:
                    print(f"Deleting this pv: {pv}")
                try:
                    pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name)
                    if opts.o.debug:
                        print("PV deleted:")
                        print(f"{pv_resp}")
                except client.exceptions.ApiException as e:
                    _check_delete_exception(e)

            # Figure out the PVCs for this deployment
            pvcs = self.cluster_info.get_pvcs()
            for pvc in pvcs:
                if opts.o.debug:
                    print(f"Deleting this pvc: {pvc}")
                try:
                    pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(
                        name=pvc.metadata.name, namespace=self.k8s_namespace
                    )
                    if opts.o.debug:
                        print("PVCs deleted:")
                        print(f"{pvc_resp}")
                except client.exceptions.ApiException as e:
                    _check_delete_exception(e)

        # Figure out the ConfigMaps for this deployment
        cfg_maps = self.cluster_info.get_configmaps()
        for cfg_map in cfg_maps:
            if opts.o.debug:
                print(f"Deleting this ConfigMap: {cfg_map}")
            try:
                cfg_map_resp = self.core_api.delete_namespaced_config_map(
                    name=cfg_map.metadata.name, namespace=self.k8s_namespace
                )
                if opts.o.debug:
                    print("ConfigMap deleted:")
                    print(f"{cfg_map_resp}")
            except client.exceptions.ApiException as e:
                _check_delete_exception(e)

        deployment = self.cluster_info.get_deployment()
        if opts.o.debug:
            print(f"Deleting this deployment: {deployment}")
@ -196,8 +288,8 @@ class K8sDeployer(Deployer):
            except client.exceptions.ApiException as e:
                _check_delete_exception(e)

        if not self.is_kind():
            ingress: client.V1Ingress = self.cluster_info.get_ingress()
        ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
        if ingress:
            if opts.o.debug:
                print(f"Deleting this ingress: {ingress}")
            try:
@ -206,6 +298,24 @@ class K8sDeployer(Deployer):
                )
            except client.exceptions.ApiException as e:
                _check_delete_exception(e)
        else:
            if opts.o.debug:
                print("No ingress to delete")

        nodeport: client.V1Service = self.cluster_info.get_nodeport()
        if nodeport:
            if opts.o.debug:
                print(f"Deleting this nodeport: {nodeport}")
            try:
                self.core_api.delete_namespaced_service(
                    namespace=self.k8s_namespace,
                    name=nodeport.metadata.name
                )
            except client.exceptions.ApiException as e:
                _check_delete_exception(e)
        else:
            if opts.o.debug:
                print("No nodeport to delete")

        if self.is_kind():
            # Destroy the kind cluster
@ -306,9 +416,15 @@ class K8sDeployer(Deployer):
            log_data = "******* Pods not running ********\n"
        else:
            k8s_pod_name = pods[0]
            containers = containers_in_pod(self.core_api, k8s_pod_name)
            # If the pod is not yet started, the logs request below will throw an exception
            try:
                log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test")
                log_data = ""
                for container in containers:
                    container_log = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container=container)
                    container_log_lines = container_log.splitlines()
                    for line in container_log_lines:
                        log_data += f"{container}: {line}\n"
            except client.exceptions.ApiException as e:
                if opts.o.debug:
                    print(f"Error from read_namespaced_pod_log: {e}")
@ -353,8 +469,9 @@ class K8sDeployer(Deployer):
class K8sDeployerConfigGenerator(DeployerConfigGenerator):
    type: str

    def __init__(self, type: str) -> None:
    def __init__(self, type: str, deployment_context) -> None:
        self.type = type
        self.deployment_context = deployment_context
        super().__init__()

    def generate(self, deployment_dir: Path):
@ -362,7 +479,7 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator):
        if self.type == "k8s-kind":
            # Check the file isn't already there
            # Get the config file contents
            content = generate_kind_config(deployment_dir)
            content = generate_kind_config(deployment_dir, self.deployment_context)
            if opts.o.debug:
                print(f"kind config is: {content}")
            config_file = deployment_dir.joinpath(constants.kind_config_filename)

@ -13,12 +13,14 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from kubernetes import client
from kubernetes import client, utils, watch
import os
from pathlib import Path
import subprocess
import re
from typing import Set, Mapping, List

from stack_orchestrator.util import get_k8s_dir, error_exit
from stack_orchestrator.opts import opts
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names
from stack_orchestrator.deploy.deployer import DeployerException
@ -43,6 +45,33 @@ def destroy_cluster(name: str):
    _run_command(f"kind delete cluster --name {name}")


def wait_for_ingress_in_kind():
    core_v1 = client.CoreV1Api()
    for i in range(20):
        warned_waiting = False
        w = watch.Watch()
        for event in w.stream(func=core_v1.list_namespaced_pod,
                              namespace="ingress-nginx",
                              label_selector="app.kubernetes.io/component=controller",
                              timeout_seconds=30):
            if event['object'].status.container_statuses:
                if event['object'].status.container_statuses[0].ready is True:
                    if warned_waiting:
                        print("Ingress controller is ready")
                    return
            print("Waiting for ingress controller to become ready...")
            warned_waiting = True
    error_exit("ERROR: Timed out waiting for ingress to become ready")


def install_ingress_for_kind():
    api_client = client.ApiClient()
    ingress_install = os.path.abspath(get_k8s_dir().joinpath("components", "ingress", "ingress-nginx-kind-deploy.yaml"))
    if opts.o.debug:
        print("Installing nginx ingress controller in kind cluster")
    utils.create_from_yaml(api_client, yaml_file=ingress_install)


def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
    for image in image_set:
        result = _run_command(f"kind load docker-image {image} --name {kind_cluster_name}")
@ -61,6 +90,17 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
    return pods


def containers_in_pod(core_api: client.CoreV1Api, pod_name: str):
    containers = []
    pod_response = core_api.read_namespaced_pod(pod_name, namespace="default")
    if opts.o.debug:
        print(f"pod_response: {pod_response}")
    pod_containers = pod_response.spec.containers
    for pod_container in pod_containers:
        containers.append(pod_container.name)
    return containers


def log_stream_from_string(s: str):
    # Note response has to be UTF-8 encoded because the caller expects to decode it
    yield ("ignore", s.encode())
@ -73,14 +113,14 @@ def named_volumes_from_pod_files(parsed_pod_files):
        parsed_pod_file = parsed_pod_files[pod]
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
            for volume in volumes.keys():
            for volume, value in volumes.items():
                # Volume definition looks like:
                # 'laconicd-data': None
                named_volumes.append(volume)
    return named_volumes


def get_node_pv_mount_path(volume_name: str):
def get_kind_pv_bind_mount_path(volume_name: str):
    return f"/mnt/{volume_name}"

@ -97,37 +137,46 @@ def volume_mounts_for_service(parsed_pod_files, service):
            if "volumes" in service_obj:
                volumes = service_obj["volumes"]
                for mount_string in volumes:
                    # Looks like: test-data:/data
                    (volume_name, mount_path) = mount_string.split(":")
                    volume_device = client.V1VolumeMount(mount_path=mount_path, name=volume_name)
                    # Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw
                    if opts.o.debug:
                        print(f"mount_string: {mount_string}")
                    mount_split = mount_string.split(":")
                    volume_name = mount_split[0]
                    mount_path = mount_split[1]
                    mount_options = mount_split[2] if len(mount_split) == 3 else None
                    if opts.o.debug:
                        print(f"volume_name: {volume_name}")
                        print(f"mount path: {mount_path}")
                        print(f"mount options: {mount_options}")
                    volume_device = client.V1VolumeMount(
                        mount_path=mount_path,
                        name=volume_name,
                        read_only="ro" == mount_options
                    )
                    result.append(volume_device)
    return result
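For illustration, the three-part mount syntax handled above parses like this (a standalone sketch with hypothetical mount strings):

    # Hypothetical compose-style mount strings, split the same way as in volume_mounts_for_service()
    for mount_string in ["test-data:/data", "test-data:/data:ro", "test-data:/data:rw"]:
        mount_split = mount_string.split(":")
        volume_name = mount_split[0]
        mount_path = mount_split[1]
        mount_options = mount_split[2] if len(mount_split) == 3 else None
        # Only an explicit ":ro" suffix yields a read-only mount; ":rw" and no suffix are read-write
        print(volume_name, mount_path, "ro" == mount_options)
    # prints: test-data /data False
    #         test-data /data True
    #         test-data /data False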

def volumes_for_pod_files(parsed_pod_files):
def volumes_for_pod_files(parsed_pod_files, spec, app_name):
    result = []
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
            for volume_name in volumes.keys():
                claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=volume_name)
                volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim)
                result.append(volume)
                if volume_name in spec.get_configmaps():
                    config_map = client.V1ConfigMapVolumeSource(name=f"{app_name}-{volume_name}")
                    volume = client.V1Volume(name=volume_name, config_map=config_map)
                    result.append(volume)
                else:
                    claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=f"{app_name}-{volume_name}")
                    volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim)
                    result.append(volume)
    return result

def _get_host_paths_for_volumes(parsed_pod_files):
    result = {}
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
        if "volumes" in parsed_pod_file:
            volumes = parsed_pod_file["volumes"]
            for volume_name in volumes.keys():
                volume_definition = volumes[volume_name]
                host_path = volume_definition["driver_opts"]["device"]
                result[volume_name] = host_path
    return result
def _get_host_paths_for_volumes(deployment_context):
    return deployment_context.spec.get_volumes()


def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
@ -135,12 +184,12 @@ def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path:
        return data_mount_path
    else:
        # Python Path voodoo that looks pretty odd:
        return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve()
        return Path.cwd().joinpath(deployment_dir.joinpath(data_mount_path)).resolve()

def _generate_kind_mounts(parsed_pod_files, deployment_dir):
def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
    volume_definitions = []
    volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files)
    volume_host_path_map = _get_host_paths_for_volumes(deployment_context)
    # Note these paths are relative to the location of the pod files (at present)
    # So we need to fix them up to be correct and absolute, because kind assumes
    # they are relative to the cwd.
@ -153,12 +202,22 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir):
            if "volumes" in service_obj:
                volumes = service_obj["volumes"]
                for mount_string in volumes:
                    # Looks like: test-data:/data
                    (volume_name, mount_path) = mount_string.split(":")
                    volume_definitions.append(
                        f"  - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
                        f"    containerPath: {get_node_pv_mount_path(volume_name)}"
                    )
                    # Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw
                    if opts.o.debug:
                        print(f"mount_string: {mount_string}")
                    mount_split = mount_string.split(":")
                    volume_name = mount_split[0]
                    mount_path = mount_split[1]
                    if opts.o.debug:
                        print(f"volume_name: {volume_name}")
                        print(f"map: {volume_host_path_map}")
                        print(f"mount path: {mount_path}")
                    if volume_name not in deployment_context.spec.get_configmaps():
                        if volume_host_path_map[volume_name]:
                            volume_definitions.append(
                                f"  - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
                                f"    containerPath: {get_kind_pv_bind_mount_path(volume_name)}\n"
                            )
    return (
        "" if len(volume_definitions) == 0 else (
            "  extraMounts:\n"
@ -167,7 +226,8 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir):
    )

def _generate_kind_port_mappings(parsed_pod_files):
# TODO: decide if we need this functionality
def _generate_kind_port_mappings_from_services(parsed_pod_files):
    port_definitions = []
    for pod in parsed_pod_files:
        parsed_pod_file = parsed_pod_files[pod]
@ -180,7 +240,7 @@ def _generate_kind_port_mappings(parsed_pod_files):
            for port_string in ports:
                # TODO handle the complex cases
                # Looks like: 80 or something more complicated
                port_definitions.append(f"  - containerPort: {port_string}\n    hostPort: {port_string}")
                port_definitions.append(f"  - containerPort: {port_string}\n    hostPort: {port_string}\n")
    return (
        "" if len(port_definitions) == 0 else (
            "  extraPortMappings:\n"
@ -189,6 +249,46 @@ def _generate_kind_port_mappings(parsed_pod_files):
    )


def _generate_kind_port_mappings(parsed_pod_files):
    port_definitions = []
    # For now we just map port 80 for the nginx ingress controller we install in kind
    port_string = "80"
    port_definitions.append(f"  - containerPort: {port_string}\n    hostPort: {port_string}\n")
    return (
        "" if len(port_definitions) == 0 else (
            "  extraPortMappings:\n"
            f"{''.join(port_definitions)}"
        )
    )
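For reference, the YAML fragment returned by the new _generate_kind_port_mappings (assembled from the f-strings above) looks like:

      extraPortMappings:
      - containerPort: 80
        hostPort: 80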

# Note: this makes any duplicate definition in b overwrite a
def merge_envs(a: Mapping[str, str], b: Mapping[str, str]) -> Mapping[str, str]:
    result = {**a, **b}
    return result
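A quick illustration of those overwrite semantics, with hypothetical values:

    merge_envs({"A": "1", "B": "2"}, {"B": "3"})  # -> {'A': '1', 'B': '3'}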

def _expand_shell_vars(raw_val: str) -> str:
    # could be: <string> or ${<env-var-name>} or ${<env-var-name>:-<default-value>}
    # TODO: implement support for variable substitution and default values
    # if raw_val is like ${<something>} print a warning and substitute an empty string
    # otherwise return raw_val
    match = re.search(r"^\$\{(.*)\}$", raw_val)
    if match:
        print(f"WARNING: found unimplemented environment variable substitution: {raw_val}")
    else:
        return raw_val
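As an aside, a minimal sketch of what the TODO'd substitution could look like, assuming os.environ as the variable source and supporting only the ${VAR} and ${VAR:-default} forms (illustrative, not part of this change):

    import os
    import re

    def _expand_shell_vars_sketch(raw_val: str) -> str:
        # Handles ${VAR} and ${VAR:-default}; anything else passes through unchanged
        match = re.search(r"^\$\{([^}:]+)(?::-(.*))?\}$", raw_val)
        if match:
            var_name, default_value = match.group(1), match.group(2) or ""
            return os.environ.get(var_name, default_value)
        return raw_val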

# TODO: handle the case where the same env var is defined in multiple places
def envs_from_compose_file(compose_file_envs: Mapping[str, str]) -> Mapping[str, str]:
    result = {}
    for env_var, env_val in compose_file_envs.items():
        expanded_env_val = _expand_shell_vars(env_val)
        result.update({env_var: expanded_env_val})
    return result


def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]:
    result = []
    for env_var, env_val in map.items():
@ -214,18 +314,24 @@ def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]:
#   extraMounts:
#   - hostPath: /path/to/my/files
#     containerPath: /files
def generate_kind_config(deployment_dir: Path):
def generate_kind_config(deployment_dir: Path, deployment_context):
    compose_file_dir = deployment_dir.joinpath("compose")
    # TODO: this should come from the stack file, not this way
    pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
    parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
    port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
    mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir)
    mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir, deployment_context)
    return (
        "kind: Cluster\n"
        "apiVersion: kind.x-k8s.io/v1alpha4\n"
        "nodes:\n"
        "- role: control-plane\n"
        "  kubeadmConfigPatches:\n"
        "    - |\n"
        "      kind: InitConfiguration\n"
        "      nodeRegistration:\n"
        "        kubeletExtraArgs:\n"
        "          node-labels: \"ingress-ready=true\"\n"
        f"{port_mappings_yml}\n"
        f"{mounts_yml}\n"
    )
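Putting the pieces together: for a deployment with no extra bind mounts, the generated kind config comes out roughly as:

    kind: Cluster
    apiVersion: kind.x-k8s.io/v1alpha4
    nodes:
    - role: control-plane
      kubeadmConfigPatches:
        - |
          kind: InitConfiguration
          nodeRegistration:
            kubeletExtraArgs:
              node-labels: "ingress-ready=true"
      extraPortMappings:
      - containerPort: 80
        hostPort: 80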

@ -13,30 +13,130 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from pathlib import Path
import typing
import humanfriendly

from pathlib import Path

from stack_orchestrator.util import get_yaml
from stack_orchestrator import constants


class ResourceLimits:
    cpus: float = None
    memory: int = None
    storage: int = None

    def __init__(self, obj={}):
        if "cpus" in obj:
            self.cpus = float(obj["cpus"])
        if "memory" in obj:
            self.memory = humanfriendly.parse_size(obj["memory"])
        if "storage" in obj:
            self.storage = humanfriendly.parse_size(obj["storage"])

    def __len__(self):
        return len(self.__dict__)

    def __iter__(self):
        for k in self.__dict__:
            yield k, self.__dict__[k]

    def __repr__(self):
        return str(self.__dict__)


class Resources:
    limits: ResourceLimits = None
    reservations: ResourceLimits = None

    def __init__(self, obj={}):
        if "reservations" in obj:
            self.reservations = ResourceLimits(obj["reservations"])
        if "limits" in obj:
            self.limits = ResourceLimits(obj["limits"])

    def __len__(self):
        return len(self.__dict__)

    def __iter__(self):
        for k in self.__dict__:
            yield k, self.__dict__[k]

    def __repr__(self):
        return str(self.__dict__)
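For context on the parsing above, humanfriendly.parse_size() converts human-readable size strings to byte counts:

    humanfriendly.parse_size("512MB")  # -> 512000000 (decimal units)
    humanfriendly.parse_size("1GiB")   # -> 1073741824 (binary units)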


class Spec:

    obj: typing.Any
    file_path: Path

    def __init__(self) -> None:
        pass
    def __init__(self, file_path: Path = None, obj={}) -> None:
        self.file_path = file_path
        self.obj = obj

    def __getitem__(self, item):
        return self.obj[item]

    def __contains__(self, item):
        return item in self.obj

    def get(self, item, default=None):
        return self.obj.get(item, default)

    def init_from_file(self, file_path: Path):
        with file_path:
            self.obj = get_yaml().load(open(file_path, "r"))
            self.file_path = file_path

    def get_image_registry(self):
        return (self.obj[constants.image_resigtry_key]
                if self.obj and constants.image_resigtry_key in self.obj
        return (self.obj[constants.image_registry_key]
                if self.obj and constants.image_registry_key in self.obj
                else None)

    def get_volumes(self):
        return (self.obj["volumes"]
                if self.obj and "volumes" in self.obj
                else {})

    def get_configmaps(self):
        return (self.obj["configmaps"]
                if self.obj and "configmaps" in self.obj
                else {})

    def get_container_resources(self):
        return Resources(self.obj.get("resources", {}).get("containers", {}))

    def get_volume_resources(self):
        return Resources(self.obj.get("resources", {}).get("volumes", {}))

    def get_http_proxy(self):
        return (self.obj[constants.network_key][constants.http_proxy_key]
                if self.obj and constants.network_key in self.obj
                and constants.http_proxy_key in self.obj[constants.network_key]
                else None)

    def get_annotations(self):
        return self.obj.get("annotations", {})

    def get_labels(self):
        return self.obj.get("labels", {})

    def get_privileged(self):
        return "true" == str(self.obj.get("security", {}).get("privileged", "false")).lower()

    def get_capabilities(self):
        return self.obj.get("security", {}).get("capabilities", [])
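For orientation, a hypothetical spec-file fragment exercising the resource and security getters above (keys per the code; values illustrative):

    resources:
      containers:
        reservations:
          cpus: 1
          memory: 512MB
        limits:
          cpus: 2
          memory: 1GB
    security:
      privileged: true
      capabilities:
        - SYS_PTRACE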

    def get_deployment_type(self):
        return self.obj[constants.deploy_to_key]

    def is_kubernetes_deployment(self):
        return self.get_deployment_type() in [constants.k8s_kind_deploy_type, constants.k8s_deploy_type]

    def is_kind_deployment(self):
        return self.get_deployment_type() in [constants.k8s_kind_deploy_type]

    def is_docker_deployment(self):
        return self.get_deployment_type() in [constants.compose_deploy_type]

@ -44,7 +44,7 @@ def _fixup_url_spec(spec_file_name: str, url: str):
      - host-name: {parsed_url.hostname}
        routes:
          - path: '{parsed_url.path if parsed_url.path else "/"}'
            proxy-to: webapp:3000
            proxy-to: webapp:80
    '''
    spec_file_path = Path(spec_file_name)
    with open(spec_file_path) as rfile:

@ -19,6 +19,8 @@ import shlex
import shutil
import sys
import tempfile
import time
import uuid

import click

@ -27,7 +29,7 @@ from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient,
                                                   build_container_image, push_container_image,
                                                   file_hash, deploy_to_k8s, publish_deployment,
                                                   hostname_for_deployment_request, generate_hostname_for_app,
                                                   match_owner)
                                                   match_owner, skip_by_tag)


def process_app_deployment_request(
@ -39,8 +41,19 @@ def process_app_deployment_request(
    dns_suffix,
    deployment_parent_dir,
    kube_config,
    image_registry
    image_registry,
    log_parent_dir
):
    run_id = f"{app_deployment_request.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
    log_file = None
    if log_parent_dir:
        log_dir = os.path.join(log_parent_dir, app_deployment_request.id)
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        log_file_path = os.path.join(log_dir, f"{run_id}.log")
        print(f"Directing build logs to: {log_file_path}")
        log_file = open(log_file_path, "wt")

    # 1. look up application
    app = laconic.get_record(app_deployment_request.attributes.application, require=True)

@ -59,8 +72,8 @@ def process_app_deployment_request(
    dns_record = laconic.get_record(dns_crn)
    if dns_record:
        matched_owner = match_owner(app_deployment_request, dns_record)
        if not matched_owner and dns_record.request:
            matched_owner = match_owner(app_deployment_request, laconic.get_record(dns_record.request, require=True))
        if not matched_owner and dns_record.attributes.request:
            matched_owner = match_owner(app_deployment_request, laconic.get_record(dns_record.attributes.request, require=True))

        if matched_owner:
            print("Matched DnsRecord ownership:", matched_owner)
@ -102,8 +115,10 @@ def process_app_deployment_request(
    needs_k8s_deploy = False
    # 6. build container (if needed)
    if not deployment_record or deployment_record.attributes.application != app.id:
        build_container_image(app, deployment_container_tag)
        push_container_image(deployment_dir)
        # TODO: pull from request
        extra_build_args = []
        build_container_image(app, deployment_container_tag, extra_build_args, log_file)
        push_container_image(deployment_dir, log_file)
        needs_k8s_deploy = True

    # 7. update config (if needed)
@ -116,6 +131,7 @@ def process_app_deployment_request(
        deploy_to_k8s(
            deployment_record,
            deployment_dir,
            log_file
        )

    publish_deployment(
|
||||
return {}
|
||||
|
||||
|
||||
def dump_known_requests(filename, requests):
|
||||
def dump_known_requests(filename, requests, status="SEEN"):
|
||||
if not filename:
|
||||
return
|
||||
known_requests = load_known_requests(filename)
|
||||
for r in requests:
|
||||
known_requests[r.id] = r.createTime
|
||||
json.dump(known_requests, open(filename, "w"))
|
||||
known_requests[r.id] = {
|
||||
"createTime": r.createTime,
|
||||
"status": status
|
||||
}
|
||||
with open(filename, "w") as f:
|
||||
json.dump(known_requests, f)
|
||||
|
||||
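With this change the state file maps each request id to a small object instead of a bare timestamp, so a hypothetical entry looks like:

    {"bafyexamplerequestid": {"createTime": "2023-11-01T00:00:00Z", "status": "DEPLOYING"}}

(the status is later overwritten with "DEPLOYED" or "ERROR" once processing finishes, per the command loop further down).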


@click.command()
@ -158,10 +178,14 @@ def dump_known_requests(filename, requests):
@click.option("--record-namespace-dns", help="eg, crn://laconic/dns")
@click.option("--record-namespace-deployments", help="eg, crn://laconic/deployments")
@click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
@click.option("--include-tags", help="Only include requests with matching tags (comma-separated).", default="")
@click.option("--exclude-tags", help="Exclude requests with matching tags (comma-separated).", default="")
@click.option("--log-dir", help="Output build/deployment logs to directory.", default=None)
@click.pass_context
def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,
def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,  # noqa: C901
            request_id, discover, state_file, only_update_state,
            dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run):
            dns_suffix, record_namespace_dns, record_namespace_deployments, dry_run,
            include_tags, exclude_tags, log_dir):
    if request_id and discover:
        print("Cannot specify both --request-id and --discover", file=sys.stderr)
        sys.exit(2)
@ -179,6 +203,10 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,
        print("--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required", file=sys.stderr)
        sys.exit(2)

    # Split CSV and clean up values.
    include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
    exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]

    laconic = LaconicRegistryClient(laconic_config)

    # Find deployment requests.
@ -200,7 +228,9 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,
    requests.sort(key=lambda r: r.createTime)
    requests.reverse()
    requests_by_name = {}
    skipped_by_name = {}
    for r in requests:
        # TODO: Do this _after_ filtering deployments and cancellations to minimize round trips.
        app = laconic.get_record(r.attributes.application)
        if not app:
            print("Skipping request %s, cannot locate app." % r.id)
@ -211,17 +241,20 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,
            requested_name = generate_hostname_for_app(app)
            print("Generating name %s for request %s." % (requested_name, r.id))

        if requested_name not in requests_by_name:
            print(
                "Found request %s to run application %s on %s."
                % (r.id, r.attributes.application, requested_name)
            )
            requests_by_name[requested_name] = r
        else:
            print(
                "Ignoring request %s, it is superseded by %s."
                % (r.id, requests_by_name[requested_name].id)
            )
        if requested_name in skipped_by_name or requested_name in requests_by_name:
            print("Ignoring request %s, it has been superseded." % r.id)
            continue

        if skip_by_tag(r, include_tags, exclude_tags):
            print("Skipping request %s, filtered by tag (include %s, exclude %s, present %s)" % (r.id,
                                                                                                 include_tags,
                                                                                                 exclude_tags,
                                                                                                 r.attributes.tags))
            skipped_by_name[requested_name] = r
            continue

        print("Found request %s to run application %s on %s." % (r.id, r.attributes.application, requested_name))
        requests_by_name[requested_name] = r

    # Find deployments.
    deployments = laconic.app_deployments()
@ -256,6 +289,8 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,

    if not dry_run:
        for r in requests_to_execute:
            dump_known_requests(state_file, [r], "DEPLOYING")
            status = "ERROR"
            try:
                process_app_deployment_request(
                    ctx,
@ -266,7 +301,9 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,
                    dns_suffix,
                    os.path.abspath(deployment_parent_dir),
                    kube_config,
                    image_registry
                    image_registry,
                    log_dir
                )
                status = "DEPLOYED"
            finally:
                dump_known_requests(state_file, [r])
                dump_known_requests(state_file, [r], status)

@ -27,7 +27,7 @@ from dotenv import dotenv_values
from stack_orchestrator import constants
from stack_orchestrator.deploy.deployer_factory import getDeployer

WEBAPP_PORT = 3000
WEBAPP_PORT = 80


@click.command()

@ -20,7 +20,7 @@ import sys

import click

from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient, match_owner
from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient, match_owner, skip_by_tag


def process_app_removal_request(ctx,
@ -40,8 +40,8 @@ def process_app_removal_request(ctx,
    matched_owner = match_owner(app_removal_request, deployment_record, dns_record)

    # Or of the original deployment request.
    if not matched_owner and deployment_record.request:
        matched_owner = match_owner(app_removal_request, laconic.get_record(deployment_record.request, require=True))
    if not matched_owner and deployment_record.attributes.request:
        matched_owner = match_owner(app_removal_request, laconic.get_record(deployment_record.attributes.request, require=True))

    if matched_owner:
        print("Matched deployment ownership:", matched_owner)
@ -107,10 +107,12 @@ def dump_known_requests(filename, requests):
@click.option("--delete-names/--preserve-names", help="Delete all names associated with removed deployments.", default=True)
@click.option("--delete-volumes/--preserve-volumes", default=True, help="delete data volumes")
@click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
@click.option("--include-tags", help="Only include requests with matching tags (comma-separated).", default="")
@click.option("--exclude-tags", help="Exclude requests with matching tags (comma-separated).", default="")
@click.pass_context
def command(ctx, laconic_config, deployment_parent_dir,
            request_id, discover, state_file, only_update_state,
            delete_names, delete_volumes, dry_run):
            delete_names, delete_volumes, dry_run, include_tags, exclude_tags):
    if request_id and discover:
        print("Cannot specify both --request-id and --discover", file=sys.stderr)
        sys.exit(2)
@ -123,6 +125,10 @@ def command(ctx, laconic_config, deployment_parent_dir,
        print("--only-update-state requires --state-file", file=sys.stderr)
        sys.exit(2)

    # Split CSV and clean up values.
    include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
    exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]

    laconic = LaconicRegistryClient(laconic_config)

    # Find deployment removal requests.
@ -141,6 +147,7 @@ def command(ctx, laconic_config, deployment_parent_dir,

    previous_requests = load_known_requests(state_file)
    requests.sort(key=lambda r: r.createTime)
    requests.reverse()

    # Find deployments.
    deployments = {}
@ -155,10 +162,22 @@ def command(ctx, laconic_config, deployment_parent_dir,
            # TODO: should we handle CRNs?
            removals_by_deployment[r.attributes.deployment] = r

    requests_to_execute = []
    one_per_deployment = {}
    for r in requests:
        if not r.attributes.deployment:
            print(f"Skipping removal request {r.id} since it was a cancellation.")
        elif r.attributes.deployment in one_per_deployment:
            print(f"Skipping removal request {r.id} since it was superseded.")
        else:
            one_per_deployment[r.attributes.deployment] = r

    requests_to_execute = []
    for r in one_per_deployment.values():
        if skip_by_tag(r, include_tags, exclude_tags):
            print("Skipping removal request %s, filtered by tag (include %s, exclude %s, present %s)" % (r.id,
                                                                                                         include_tags,
                                                                                                         exclude_tags,
                                                                                                         r.attributes.tags))
        elif r.id in removals_by_request:
            print(f"Found satisfied request for {r.id} at {removals_by_request[r.id].id}")
        elif r.attributes.deployment in removals_by_deployment:

@ -195,7 +195,24 @@ def file_hash(filename):
    return hashlib.sha1(open(filename).read().encode()).hexdigest()


def build_container_image(app_record, tag, extra_build_args=[]):
def determine_base_container(clone_dir, app_type="webapp"):
    if not app_type or not app_type.startswith("webapp"):
        raise Exception(f"Unsupported app_type {app_type}")

    base_container = "cerc/webapp-base"
    if app_type == "webapp/next":
        base_container = "cerc/nextjs-base"
    elif app_type == "webapp":
        pkg_json_path = os.path.join(clone_dir, "package.json")
        if os.path.exists(pkg_json_path):
            pkg_json = json.load(open(pkg_json_path))
            if "next" in pkg_json.get("dependencies", {}):
                base_container = "cerc/nextjs-base"

    return base_container
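In short, "webapp/next" and any plain "webapp" whose package.json declares a "next" dependency build on the Next.js base image. For example (hypothetical clone directory):

    determine_base_container("/tmp/app-clone", "webapp/next")  # -> "cerc/nextjs-base"
    determine_base_container("/tmp/app-clone", "webapp")       # -> "cerc/nextjs-base" if package.json lists "next",
                                                               #    otherwise "cerc/webapp-base"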


def build_container_image(app_record, tag, extra_build_args=[], log_file=None):
    tmpdir = tempfile.mkdtemp()

    try:
@ -210,37 +227,46 @@ def build_container_image(app_record, tag, extra_build_args=[], log_file=None):
            git_env = dict(os.environ.copy())
            # Never prompt
            git_env["GIT_TERMINAL_PROMPT"] = "0"
            subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env)
            subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env)
            subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env, stdout=log_file, stderr=log_file)
            subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env, stdout=log_file, stderr=log_file)
        else:
            result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir])
            result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=log_file, stderr=log_file)
            result.check_returncode()

        base_container = determine_base_container(clone_dir, app_record.attributes.app_type)

        print("Building webapp ...")
        build_command = [sys.argv[0], "build-webapp", "--source-repo", clone_dir, "--tag", tag]
        build_command = [
            sys.argv[0], "build-webapp",
            "--source-repo", clone_dir,
            "--tag", tag,
            "--base-container", base_container
        ]
        if extra_build_args:
            build_command.append("--extra-build-args")
            build_command.append(" ".join(extra_build_args))

        result = subprocess.run(build_command)
        result = subprocess.run(build_command, stdout=log_file, stderr=log_file)
        result.check_returncode()
    finally:
        cmd("rm", "-rf", tmpdir)


def push_container_image(deployment_dir):
def push_container_image(deployment_dir, log_file=None):
    print("Pushing image ...")
    result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, "push-images"])
    result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, "push-images"],
                            stdout=log_file, stderr=log_file)
    result.check_returncode()


def deploy_to_k8s(deploy_record, deployment_dir):
def deploy_to_k8s(deploy_record, deployment_dir, log_file=None):
    if not deploy_record:
        command = "up"
    else:
        command = "update"

    result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command])
    result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command],
                            stdout=log_file, stderr=log_file)
    result.check_returncode()

@ -325,3 +351,17 @@ def generate_hostname_for_app(app):
    else:
        m.update(app.attributes.repository.encode())
    return "%s-%s" % (last_part, m.hexdigest()[0:10])


def skip_by_tag(r, include_tags, exclude_tags):
    for tag in exclude_tags:
        if r.attributes.tags and tag in r.attributes.tags:
            return True

    if include_tags:
        for tag in include_tags:
            if r.attributes.tags and tag in r.attributes.tags:
                return False
        return True

    return False
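So a request is skipped if it carries any excluded tag, or if an include list is given and the request matches none of its tags. With hypothetical tag lists:

    # assume r.attributes.tags == ["staging"]
    skip_by_tag(r, include_tags=[], exclude_tags=["broken"])    # -> False
    skip_by_tag(r, include_tags=["prod"], exclude_tags=[])      # -> True (not included)
    skip_by_tag(r, include_tags=["staging"], exclude_tags=[])   # -> False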

@ -26,7 +26,7 @@ import importlib.resources
from pathlib import Path
import yaml
from stack_orchestrator.constants import stack_file_name
from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit
from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit, warn_exit


class GitProgress(git.RemoteProgress):
@ -249,8 +249,8 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
        error_exit(f"stack {stack} does not exist")
    with stack_file_path:
        stack_config = yaml.safe_load(open(stack_file_path, "r"))
        if "repos" not in stack_config:
            error_exit(f"stack {stack} does not define any repositories")
        if "repos" not in stack_config or stack_config["repos"] is None:
            warn_exit(f"stack {stack} does not define any repositories")
        else:
            repos_in_scope = stack_config["repos"]
    else:
@ -19,7 +19,7 @@ import sys
import ruamel.yaml
from pathlib import Path
from dotenv import dotenv_values
from typing import Mapping
from typing import Mapping, Set, List


def include_exclude_check(s, include, exclude):
@ -81,17 +81,17 @@ def get_pod_list(parsed_stack):
    return result


def get_plugin_code_paths(stack):
def get_plugin_code_paths(stack) -> List[Path]:
    parsed_stack = get_parsed_stack_config(stack)
    pods = parsed_stack["pods"]
    result = []
    result: Set[Path] = set()
    for pod in pods:
        if type(pod) is str:
            result.append(get_stack_file_path(stack).parent)
            result.add(get_stack_file_path(stack).parent)
        else:
            pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
            result.append(Path(os.path.join(pod_root_dir, "stack")))
    return result
            result.add(Path(os.path.join(pod_root_dir, "stack")))
    return list(result)


def get_pod_file_path(parsed_stack, pod_name: str):
@ -139,6 +139,19 @@ def get_compose_file_dir():
    return source_compose_dir


def get_config_file_dir():
    # TODO: refactor to use common code with deploy command
    data_dir = Path(__file__).absolute().parent.joinpath("data")
    source_config_dir = data_dir.joinpath("config")
    return source_config_dir


def get_k8s_dir():
    data_dir = Path(__file__).absolute().parent.joinpath("data")
    source_config_dir = data_dir.joinpath("k8s")
    return source_config_dir


def get_parsed_deployment_spec(spec_file):
    spec_file_path = Path(spec_file)
    try:
@ -182,5 +195,10 @@ def error_exit(s):
    sys.exit(1)


def warn_exit(s):
    print(f"WARN: {s}")
    sys.exit(0)


def env_var_map_from_file(file: Path) -> Mapping[str, str]:
    return dotenv_values(file)

tests/container-registry/run-test.sh (new executable file, 146 lines)
@ -0,0 +1,146 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
    # Dump environment variables for debugging
    echo "Environment variables:"
    env
fi

stack="container-registry"

# Helper functions: TODO move into a separate file
wait_for_pods_started () {
    for i in {1..50}
    do
        local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps )

        if [[ "$ps_output" == *"Running containers:"* ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for pods to start: FAILED"
    delete_cluster_exit
}

wait_for_log_output () {
    for i in {1..50}
    do

        local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )

        if [[ ! -z "$log_output" ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for pods log content: FAILED"
    delete_cluster_exit
}


delete_cluster_exit () {
    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
    exit 1
}

# Note: eventually this test should be folded into ../deploy/
# but keeping it separate for now for convenience
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
$TEST_TARGET_SO --stack ${stack} setup-repositories
$TEST_TARGET_SO --stack ${stack} build-containers
# Test basic stack-orchestrator deploy to k8s
test_deployment_dir=$CERC_REPO_BASE_DIR/${stack}-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/${stack}-deployment-spec.yml
$TEST_TARGET_SO --stack ${stack} deploy --deploy-to k8s-kind init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
    echo "deploy init test: spec file not present"
    echo "deploy init test: FAILED"
    exit 1
fi
echo "deploy init test: passed"

# Switch to a full path for bind mount.
volume_name="registry-data"
sed -i "s|^\(\s*${volume_name}:$\)$|\1 ${test_deployment_dir}/data/${volume_name}|" $test_deployment_spec
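That sed swaps the bare named-volume entry in the spec for a host bind-mount path, i.e. it rewrites a spec line roughly like:

      registry-data:

into:

      registry-data: ~/stack-orchestrator-test/repo-base-dir/container-registry-deployment-dir/data/registry-data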

# Add ingress config to the spec file
ed $test_deployment_spec <<IngressSpec
/network:/
a
  http-proxy:
    - host-name: localhost
      routes:
        - path: /
          proxy-to: registry:5000
.
w
q
IngressSpec
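For readers unfamiliar with ed scripting: the heredoc above finds the first network: line in the spec and appends the http-proxy block right after it, so the spec's network section ends up roughly as:

    network:
      http-proxy:
        - host-name: localhost
          routes:
            - path: /
              proxy-to: registry:5000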

$TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
    echo "deploy create test: deployment directory not present"
    echo "deploy create test: FAILED"
    exit 1
fi
echo "deploy create test: passed"

# Note: this isn't strictly necessary, except that otherwise we end up trying to push the image into
# the kind cluster and that fails because the image can't be found locally
docker pull registry:2.8

# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
# Check logs command works
wait_for_log_output
sleep 1
log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_3" == *"listening on"* ]]; then
    echo "deployment logs test: passed"
else
    echo "deployment logs test: FAILED"
    echo $log_output_3
    delete_cluster_exit
fi

# Check that we can use the registry
# Note: since this pulls from the DockerCo registry without auth it's possible it'll run into rate limiting issues
docker pull hello-world
docker tag hello-world localhost:80/hello-world
docker push localhost:80/hello-world
# Then do a quick check that we actually pushed something there
# See: https://stackoverflow.com/questions/31251356/how-to-get-a-list-of-images-on-docker-registry-v2
registry_response=$(curl -s -X GET http://localhost:80/v2/_catalog)
if [[ "$registry_response" == *"{\"repositories\":[\"hello-world\"]}"* ]]; then
    echo "registry content test: passed"
else
    echo "registry content test: FAILED"
    echo $registry_response
    delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"

tests/database/run-test.sh (new executable file, 131 lines)
@ -0,0 +1,131 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
    # Dump environment variables for debugging
    echo "Environment variables:"
    env
fi

if [ "$1" == "from-path" ]; then
    TEST_TARGET_SO="laconic-so"
else
    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
fi

stack="test-database"
spec_file=${stack}-spec.yml
deployment_dir=${stack}-deployment

# Helper functions: TODO move into a separate file
wait_for_pods_started () {
    for i in {1..50}
    do
        local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps )

        if [[ "$ps_output" == *"Running containers:"* ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for pods to start: FAILED"
    delete_cluster_exit
}

wait_for_test_complete () {
    for i in {1..50}
    do

        local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )

        if [[ "${log_output}" == *"Database test client: test complete"* ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for test complete: FAILED"
    delete_cluster_exit
}


delete_cluster_exit () {
    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
    exit 1
}

# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
$TEST_TARGET_SO --stack ${stack} setup-repositories
$TEST_TARGET_SO --stack ${stack} build-containers
# Test basic stack-orchestrator deploy to k8s
test_deployment_dir=$CERC_REPO_BASE_DIR/${deployment_dir}
test_deployment_spec=$CERC_REPO_BASE_DIR/${spec_file}

$TEST_TARGET_SO --stack ${stack} deploy --deploy-to k8s-kind init --output $test_deployment_spec
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
    echo "deploy init test: spec file not present"
    echo "deploy init test: FAILED"
    exit 1
fi
echo "deploy init test: passed"

# Switch to a full path for the data dir so it gets provisioned as a host bind mounted volume and preserved beyond cluster lifetime
sed -i "s|^\(\s*db-data:$\)$|\1 ${test_deployment_dir}/data/db-data|" $test_deployment_spec

$TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
    echo "deploy create test: deployment directory not present"
    echo "deploy create test: FAILED"
    exit 1
fi
echo "deploy create test: passed"

# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
# Check logs command works
wait_for_test_complete
log_output_1=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_1" == *"Database test client: test data does not exist"* ]]; then
    echo "Create database content test: passed"
else
    echo "Create database content test: FAILED"
    delete_cluster_exit
fi

# Stop then start again and check the volume was preserved
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
# Sleep a bit just in case
sleep 20
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
wait_for_test_complete

log_output_2=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_2" == *"Database test client: test data already exists"* ]]; then
    echo "Retain database content test: passed"
else
    echo "Retain database content test: FAILED"
    delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"
@ -6,6 +6,12 @@ fi
# Dump environment variables for debugging
echo "Environment variables:"
env

delete_cluster_exit () {
    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
    exit 1
}

# Test basic stack-orchestrator deploy
echo "Running stack-orchestrator deploy test"
# Bit of a hack, test the most recent package
@ -57,7 +63,7 @@ $TEST_TARGET_SO --stack test deploy down
# The next time we bring the container up the volume will be old (from the previous run above)
$TEST_TARGET_SO --stack test deploy up
log_output_1=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_1" == *"Filesystem is old"* ]]; then
if [[ "$log_output_1" == *"filesystem is old"* ]]; then
    echo "Retain volumes test: passed"
else
    echo "Retain volumes test: FAILED"
@ -67,7 +73,7 @@ $TEST_TARGET_SO --stack test deploy down --delete-volumes
# Now when we bring the container up the volume will be new again
$TEST_TARGET_SO --stack test deploy up
log_output_2=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
if [[ "$log_output_2" == *"filesystem is fresh"* ]]; then
    echo "Delete volumes test: passed"
else
    echo "Delete volumes test: FAILED"
@ -106,12 +112,16 @@ if [ ! "$create_file_content" == "create-command-output-data" ]; then
    echo "deploy create test: FAILED"
    exit 1
fi

# Add a config file to be picked up by the ConfigMap before starting.
echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/data/test-config/test_config

echo "deploy create output file test: passed"
# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
# Check logs command works
log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then
if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
    echo "deployment logs test: passed"
else
    echo "deployment logs test: FAILED"
@ -124,6 +134,37 @@ else
    echo "deployment config test: FAILED"
    exit 1
fi
# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then
    echo "deployment compose config test: passed"
else
    echo "deployment compose config test: FAILED"
    exit 1
fi

# Check that the ConfigMap is mounted and contains the expected content.
log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
    echo "deployment ConfigMap test: passed"
else
    echo "deployment ConfigMap test: FAILED"
    delete_cluster_exit
fi

# Stop then start again and check the volume was preserved
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
# Sleep a bit just in case
# sleep for longer to check if that's why the subsequent create cluster fails
sleep 20
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_5" == *"filesystem is old"* ]]; then
    echo "Retain volumes test: passed"
else
    echo "Retain volumes test: FAILED"
    delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"

@ -9,7 +9,7 @@ fi
|
||||
|
||||
# Helper functions: TODO move into a separate file
|
||||
wait_for_pods_started () {
|
||||
for i in {1..5}
|
||||
for i in {1..50}
|
||||
do
|
||||
local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps )
|
||||
|
||||
@ -27,7 +27,7 @@ wait_for_pods_started () {
|
||||
}
|
||||
|
||||
wait_for_log_output () {
|
||||
for i in {1..5}
|
||||
for i in {1..50}
|
||||
do
|
||||
|
||||
local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
|
||||
@ -76,6 +76,10 @@ if [ ! -f "$test_deployment_spec" ]; then
|
||||
exit 1
|
||||
fi
|
||||
echo "deploy init test: passed"
|
||||
|
||||
# Switch to a full path for bind mount.
|
||||
sed -i "s|^\(\s*test-data-bind:$\)$|\1 ${test_deployment_dir}/data/test-data-bind|" $test_deployment_spec
|
||||
|
||||
$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
|
||||
# Check the deployment dir exists
|
||||
if [ ! -d "$test_deployment_dir" ]; then
|
||||
@ -97,19 +101,26 @@ if [ ! "$create_file_content" == "create-command-output-data" ]; then
|
||||
echo "deploy create test: FAILED"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Add a config file to be picked up by the ConfigMap before starting.
|
||||
echo "dbfc7a4d-44a7-416d-b5f3-29842cc47650" > $test_deployment_dir/configmaps/test-config/test_config
|
||||
|
||||
echo "deploy create output file test: passed"
|
||||
# Try to start the deployment
|
||||
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
|
||||
wait_for_pods_started
|
||||
# Check logs command works
|
||||
wait_for_log_output
|
||||
sleep 1
|
||||
log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
|
||||
if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then
|
||||
if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
|
||||
echo "deployment logs test: passed"
|
||||
else
|
||||
echo "deployment logs test: FAILED"
|
||||
echo $log_output_3
|
||||
delete_cluster_exit
|
||||
fi
|
||||
|
||||
# Check the config variable CERC_TEST_PARAM_1 was passed correctly
|
||||
if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
|
||||
echo "deployment config test: passed"
|
||||
@ -117,20 +128,72 @@ else
    echo "deployment config test: FAILED"
    delete_cluster_exit
fi

# Check the config variable CERC_TEST_PARAM_2 was passed correctly from the compose file
if [[ "$log_output_3" == *"Test-param-2: CERC_TEST_PARAM_2_VALUE"* ]]; then
    echo "deployment compose config test: passed"
else
    echo "deployment compose config test: FAILED"
    exit 1
fi

# Check that the ConfigMap is mounted and contains the expected content.
log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_4" == *"/config/test_config:"* ]] && [[ "$log_output_4" == *"dbfc7a4d-44a7-416d-b5f3-29842cc47650"* ]]; then
    echo "deployment ConfigMap test: passed"
else
    echo "deployment ConfigMap test: FAILED"
    delete_cluster_exit
fi

# Check that the bind-mount volume is mounted.
log_output_5=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_5" == *"/data: MOUNTED"* ]]; then
    echo "deployment bind volumes test: passed"
else
    echo "deployment bind volumes test: FAILED"
    echo $log_output_5
    delete_cluster_exit
fi

# Check that the provisioner managed volume is mounted.
log_output_6=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_6" == *"/data2: MOUNTED"* ]]; then
    echo "deployment provisioner volumes test: passed"
else
    echo "deployment provisioner volumes test: FAILED"
    echo $log_output_6
    delete_cluster_exit
fi

# Stop then start again and check the volume was preserved
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
# Sleep a bit just in case
sleep 2
# sleep for longer to check if that's why the subsequent create cluster fails
sleep 20
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
wait_for_log_output
log_output_4=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_4" == *"Filesystem is old"* ]]; then
    echo "Retain volumes test: passed"
sleep 1

log_output_10=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_10" == *"/data filesystem is old"* ]]; then
    echo "Retain bind volumes test: passed"
else
    echo "Retain volumes test: FAILED"
    echo "Retain bind volumes test: FAILED"
    delete_cluster_exit
fi

# These volumes will be completely destroyed by the kind delete/create, because they lived inside
# the kind container. So, unlike the bind-mount case, they will appear fresh after the restart.
log_output_11=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_11" == *"/data2 filesystem is fresh"* ]]; then
    echo "Fresh provisioner volumes test: passed"
else
    echo "Fresh provisioner volumes test: FAILED"
    delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"
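The same stop/start/grep cycle can be reproduced by hand against a local deployment. Assuming the shiv-built CLI is on PATH as `laconic-so` and the deployment lives in `./test-deployment` (both assumptions here, not taken from this diff):

laconic-so deployment --dir ./test-deployment stop
laconic-so deployment --dir ./test-deployment start
# The bind-mounted volume survives the restart; the provisioner-managed one does not
laconic-so deployment --dir ./test-deployment logs | grep -E "/data2? filesystem is (old|fresh)"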
@ -30,14 +30,14 @@ CHECK="SPECIAL_01234567890_TEST_STRING"

set +e

CONTAINER_ID=$(docker run -p 3000:3000 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local)
CONTAINER_ID=$(docker run -p 3000:80 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local)
sleep 3
wget -t 7 -O test.before -m http://localhost:3000

docker logs $CONTAINER_ID
docker remove -f $CONTAINER_ID

CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local)
CONTAINER_ID=$(docker run -p 3000:80 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local)
sleep 3
wget -t 7 -O test.after -m http://localhost:3000
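The port mapping change from `3000:3000` to `3000:80` suggests the app image now serves on port 80 inside the container. The hunk ends before the actual assertion; a plausible continuation, assuming the test simply checks that `$CHECK` appears in the mirrored page only after being injected via `CERC_WEBAPP_DEBUG`, would be:

# Hypothetical assertion (not shown in this hunk)
if ! grep -q "$CHECK" test.before && grep -q "$CHECK" test.after; then
    echo "Test passed"
else
    echo "Test failed"
    exit 1
fi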