forked from cerc-io/stack-orchestrator
parent b59beb66eb
commit d9bb6b3588
.gitea/workflows/test-database.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
name: Database Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-database'
      - '.gitea/workflows/test-database.yml'
      - 'tests/database/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '5 18 * * *'

jobs:
  test:
    name: "Run database hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Run database deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/database/run-test.sh
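For reference, the job's steps can be approximated locally with the commands below (a sketch only, using the scripts named in the workflow above; it assumes Python 3.8+, Docker, and the repository root as the working directory, and omits the cgroup-helper sourcing, which appears specific to the CI runner image):

    pip install shiv
    ./scripts/create_build_tag_file.sh
    ./scripts/build_shiv_package.sh
    ./tests/scripts/install-kind.sh
    ./tests/scripts/install-kubectl.sh
    ./tests/database/run-test.sh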
.gitea/workflows/triggers/test-database (new file, 1 line)
@@ -0,0 +1 @@
Change this file to trigger running the test-database CI job
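Given the `paths` filter in the workflow above, pushing any edit to this trigger file starts the job; a minimal sketch (the branch name is a placeholder):

    date >> .gitea/workflows/triggers/test-database
    git add .gitea/workflows/triggers/test-database
    git commit -m "Trigger test-database CI job"
    git push origin <your-branch>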
@@ -0,0 +1,20 @@
services:

  database:
    image: cerc/test-database-container:local
    restart: always
    volumes:
      - db-data:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: "test-user"
      POSTGRES_DB: "test-db"
      POSTGRES_PASSWORD: "password"
      POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
    ports:
      - "5432"

  test-client:
    image: cerc/test-database-client:local

volumes:
  db-data:
@@ -0,0 +1,12 @@
FROM ubuntu:latest

RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \
    apt-get install -y software-properties-common && \
    apt-get install -y postgresql-client && \
    apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

EXPOSE 80

COPY run.sh /app/run.sh

ENTRYPOINT ["/app/run.sh"]
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Build cerc/test-database-client
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
stack_orchestrator/data/container-build/cerc-test-database-client/run.sh (new executable file, 71 lines)
@@ -0,0 +1,71 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

# TODO derive this from config
database_url="postgresql://test-user:password@localhost:5432/test-db"
psql_command="psql ${database_url}"
program_name="Database test client:"

wait_for_database_up () {
    for i in {1..50}
    do
        psql_succeeded=0
        ${psql_command} -c "select 1;" || psql_succeeded=$?  # don't let a failed probe trip set -e
        if [[ ${psql_succeeded} == 0 ]]; then
            # if ready, return
            echo "${program_name} database up"
            return
        else
            # if not ready, wait
            echo "${program_name} waiting for database: ${i}"
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "${program_name} waiting for database: FAILED"
    exit 1
}

# Used to synchronize with the test runner
notify_test_complete () {
    echo "${program_name} test complete"
}

does_test_data_exist () {
    query_result=$(${psql_command} -t -c "select count(*) from test_table_1 where key_column = 'test_key_1';" | head -1 | tr -d ' ')
    if [[ "${query_result}" == "1" ]]; then
        return 0
    else
        return 1
    fi
}

create_test_data () {
    ${psql_command} -c "create table test_table_1 (key_column text, value_column text, primary key(key_column));"
    ${psql_command} -c "insert into test_table_1 values ('test_key_1', 'test_value_1');"
}

wait_forever() {
    # Loop to keep docker/k8s happy since this is the container entrypoint
    while :; do sleep 600; done
}

wait_for_database_up

# Check if the test database content exists already
if does_test_data_exist; then
    # If so, log saying so. Test harness will look for this log output
    echo "${program_name} test data already exists"
else
    # Otherwise log saying the content was not present
    echo "${program_name} test data does not exist"
    echo "${program_name} creating test data"
    # then create it
    create_test_data
fi

notify_test_complete
wait_forever
@@ -0,0 +1,3 @@
FROM postgres:16-bullseye

EXPOSE 5432
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Build cerc/test-database-container
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/test-database-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
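For a quick manual check of the database image outside the stack (a sketch, not part of this change; it assumes the image has been built locally, a psql client on the host, and a free port 5432), the container can be run standalone with the same credentials the compose file sets:

    docker run --rm -d --name test-db \
      -e POSTGRES_USER=test-user \
      -e POSTGRES_DB=test-db \
      -e POSTGRES_PASSWORD=password \
      -p 5432:5432 cerc/test-database-container:local
    sleep 5   # give postgres a moment to initialize
    # same connection string the test client uses
    psql postgresql://test-user:password@localhost:5432/test-db -c "select 1;"
    docker rm -f test-db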
stack_orchestrator/data/stacks/test-database/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Test Database Stack

A stack with a database for test/demo purposes.
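As a usage sketch for this stack (the commands mirror tests/database/run-test.sh later in this change; the spec-file and deployment-dir names here are placeholders):

    laconic-so --stack test-database setup-repositories
    laconic-so --stack test-database build-containers
    laconic-so --stack test-database deploy --deploy-to k8s-kind init --output test-database-spec.yml
    laconic-so --stack test-database deploy create --spec-file test-database-spec.yml --deployment-dir test-database-deployment
    laconic-so deployment --dir test-database-deployment start
    laconic-so deployment --dir test-database-deployment logs
    laconic-so deployment --dir test-database-deployment stop --delete-volumes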
stack_orchestrator/data/stacks/test-database/stack.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
version: "1.0"
name: test
description: "A test database stack"
repos:
containers:
  - cerc/test-database-container
  - cerc/test-database-client
pods:
  - test-database
@@ -20,7 +20,8 @@ from kubernetes import client, config
 from stack_orchestrator import constants
 from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
 from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind
-from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config
+from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, containers_in_pod, log_stream_from_string
+from stack_orchestrator.deploy.k8s.helpers import generate_kind_config
 from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
 from stack_orchestrator.opts import opts
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
@@ -382,9 +383,15 @@ class K8sDeployer(Deployer):
             log_data = "******* Pods not running ********\n"
         else:
             k8s_pod_name = pods[0]
+            containers = containers_in_pod(self.core_api, k8s_pod_name)
             # If the pod is not yet started, the logs request below will throw an exception
             try:
-                log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test")
+                log_data = ""
+                for container in containers:
+                    container_log = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container=container)
+                    container_log_lines = container_log.splitlines()
+                    for line in container_log_lines:
+                        log_data += f"{container}: {line}\n"
             except client.exceptions.ApiException as e:
                 if opts.o.debug:
                     print(f"Error from read_namespaced_pod_log: {e}")
@@ -62,6 +62,17 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
     return pods


+def containers_in_pod(core_api: client.CoreV1Api, pod_name: str):
+    containers = []
+    pod_response = core_api.read_namespaced_pod(pod_name, namespace="default")
+    if opts.o.debug:
+        print(f"pod_response: {pod_response}")
+    pod_containers = pod_response.spec.containers
+    for pod_container in pod_containers:
+        containers.append(pod_container.name)
+    return containers
+
+
 def log_stream_from_string(s: str):
     # Note response has to be UTF-8 encoded because the caller expects to decode it
     yield ("ignore", s.encode())
@@ -26,7 +26,7 @@ import importlib.resources
 from pathlib import Path
 import yaml
 from stack_orchestrator.constants import stack_file_name
-from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit
+from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit, warn_exit


 class GitProgress(git.RemoteProgress):
@@ -249,8 +249,8 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches
             error_exit(f"stack {stack} does not exist")
         with stack_file_path:
             stack_config = yaml.safe_load(open(stack_file_path, "r"))
-            if "repos" not in stack_config:
-                error_exit(f"stack {stack} does not define any repositories")
+            if "repos" not in stack_config or stack_config["repos"] is None:
+                warn_exit(f"stack {stack} does not define any repositories")
             else:
                 repos_in_scope = stack_config["repos"]
     else:
@@ -189,5 +189,10 @@ def error_exit(s):
     sys.exit(1)


+def warn_exit(s):
+    print(f"WARN: {s}")
+    sys.exit(0)
+
+
 def env_var_map_from_file(file: Path) -> Mapping[str, str]:
     return dotenv_values(file)
tests/database/run-test.sh (new executable file, 128 lines)
@@ -0,0 +1,128 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
    # Dump environment variables for debugging
    echo "Environment variables:"
    env
fi

if [ "$1" == "from-path" ]; then
    TEST_TARGET_SO="laconic-so"
else
    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
fi

stack="test-database"
spec_file=${stack}-spec.yml
deployment_dir=${stack}-deployment

# Helper functions: TODO move into a separate file
wait_for_pods_started () {
    for i in {1..50}
    do
        local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps )

        if [[ "$ps_output" == *"Running containers:"* ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for pods to start: FAILED"
    delete_cluster_exit
}

wait_for_test_complete () {
    for i in {1..50}
    do
        local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )

        if [[ "${log_output}" == *"Database test client: test complete"* ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for test complete: FAILED"
    delete_cluster_exit
}

delete_cluster_exit () {
    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
    exit 1
}

# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
$TEST_TARGET_SO --stack ${stack} setup-repositories
$TEST_TARGET_SO --stack ${stack} build-containers
# Test basic stack-orchestrator deploy to k8s
test_deployment_dir=$CERC_REPO_BASE_DIR/test-${deployment_dir}
test_deployment_spec=$CERC_REPO_BASE_DIR/test-${spec_file}

$TEST_TARGET_SO --stack ${stack} deploy --deploy-to k8s-kind init --output $test_deployment_spec
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
    echo "deploy init test: spec file not present"
    echo "deploy init test: FAILED"
    exit 1
fi
echo "deploy init test: passed"

$TEST_TARGET_SO --stack ${stack} deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
    echo "deploy create test: deployment directory not present"
    echo "deploy create test: FAILED"
    exit 1
fi
echo "deploy create test: passed"

# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
# Check logs command works
wait_for_test_complete
log_output_1=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_1" == *"Database test client: test data does not exist"* ]]; then
    echo "Create database content test: passed"
else
    echo "Create database content test: FAILED"
    delete_cluster_exit
fi

# Stop then start again and check the volume was preserved
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop
# Sleep a bit just in case
sleep 20
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
wait_for_test_complete

log_output_2=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_2" == *"Database test client: test data already exists"* ]]; then
    echo "Retain database content test: passed"
else
    echo "Retain database content test: FAILED"
    delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"
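The CI job above runs this script against the freshly built shiv package in ./package; passing `from-path` instead selects whatever laconic-so is already on PATH (a sketch; Docker, kind, and kubectl are still required):

    ./tests/database/run-test.sh from-path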