From 66ee66b7f58ae427cbedc55f14201d37e7db1f8b Mon Sep 17 00:00:00 2001
From: David Boreham
Date: Wed, 14 Aug 2024 06:46:09 -0600
Subject: [PATCH] Add beginning of test

---
 tests/k8s-deployment-control/run-test.sh | 197 +++++++++++++++++++++++
 1 file changed, 197 insertions(+)
 create mode 100755 tests/k8s-deployment-control/run-test.sh

diff --git a/tests/k8s-deployment-control/run-test.sh b/tests/k8s-deployment-control/run-test.sh
new file mode 100755
index 00000000..ac9bf004
--- /dev/null
+++ b/tests/k8s-deployment-control/run-test.sh
@@ -0,0 +1,197 @@
+#!/usr/bin/env bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+    set -x
+    # Dump environment variables for debugging
+    echo "Environment variables:"
+    env
+fi
+
+if [ "$1" == "from-path" ]; then
+    TEST_TARGET_SO="laconic-so"
+else
+    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+fi
+
+# Helper functions: TODO move into a separate file
+wait_for_pods_started () {
+    for i in {1..50}
+    do
+        local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps )
+
+        if [[ "$ps_output" == *"Running containers:"* ]]; then
+            # if ready, return
+            return
+        else
+            # if not ready, wait
+            sleep 5
+        fi
+    done
+    # Timed out, error exit
+    echo "waiting for pods to start: FAILED"
+    delete_cluster_exit
+}
+
+wait_for_log_output () {
+    for i in {1..50}
+    do
+        local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+
+        if [[ ! -z "$log_output" ]]; then
+            # if ready, return
+            return
+        else
+            # if not ready, wait
+            sleep 5
+        fi
+    done
+    # Timed out, error exit
+    echo "waiting for pod log content: FAILED"
+    delete_cluster_exit
+}
+
+delete_cluster_exit () {
+    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+    exit 1
+}
+
+# Set a non-default repo dir
+export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
+echo "Testing this package: $TEST_TARGET_SO"
+echo "Test version command"
+reported_version_string=$( $TEST_TARGET_SO version )
+echo "Version reported is: ${reported_version_string}"
+echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+rm -rf $CERC_REPO_BASE_DIR
+mkdir -p $CERC_REPO_BASE_DIR
+$TEST_TARGET_SO --stack test setup-repositories
+$TEST_TARGET_SO --stack test build-containers
+# Test basic stack-orchestrator deploy to k8s
+test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
+test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
+
+# Create a deployment that we can use to check our test cases
+$TEST_TARGET_SO --stack test deploy --deploy-to k8s-kind init --output $test_deployment_spec
+# Check the file now exists
+if [ ! -f "$test_deployment_spec" ]; then
+    echo "deploy init test: spec file not present"
+    echo "deploy init test: FAILED"
+    exit 1
+fi
+echo "deploy init test: passed"
+
+$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
+# Check the deployment dir exists
+if [ ! -d "$test_deployment_dir" ]; then
+    echo "deploy create test: deployment directory not present"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+echo "deploy create test: passed"
+# Check the file written by the create command in the stack now exists
+if [ ! -f "$test_deployment_dir/create-file" ]; then
+    echo "deploy create test: create output file not present"
+    echo "deploy create test: FAILED"
+    exit 1
+fi
+echo "deploy create output file test: passed"
+
+# At this point the deployment's kind-config.yml will look like this:
+# kind: Cluster
+# apiVersion: kind.x-k8s.io/v1alpha4
+# nodes:
+# - role: control-plane
+#   kubeadmConfigPatches:
+#   - |
+#     kind: InitConfiguration
+#     nodeRegistration:
+#       kubeletExtraArgs:
+#         node-labels: "ingress-ready=true"
+#   extraPortMappings:
+#   - containerPort: 80
+#     hostPort: 80
+
+# We need to change it to this:
+kind_config_file=${test_deployment_dir}/kind-config.yml
+cat << EOF > ${kind_config_file}
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  kubeadmConfigPatches:
+  - |
+    kind: InitConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        node-labels: "ingress-ready=true"
+  extraPortMappings:
+  - containerPort: 80
+    hostPort: 80
+- role: worker
+  labels:
+    nodetype: a
+- role: worker
+  labels:
+    nodetype: b
+- role: worker
+  labels:
+    nodetype: c
+  kubeadmConfigPatches:
+  - |
+    kind: JoinConfiguration
+    nodeRegistration:
+      taints:
+      - key: "nodeavoid"
+        value: "a"
+        effect: "NoSchedule"
+EOF
+
+# At this point we should have 4 nodes, three labeled like this:
+# $ kubectl get nodes --show-labels=true
+# NAME                                     STATUS   ROLES           AGE     VERSION   LABELS
+# laconic-3af549a3ba0e3a3c-control-plane   Ready    control-plane   2m37s   v1.30.0   ...,ingress-ready=true
+# laconic-3af549a3ba0e3a3c-worker          Ready                    2m18s   v1.30.0   ...,nodetype=a
+# laconic-3af549a3ba0e3a3c-worker2         Ready                    2m18s   v1.30.0   ...,nodetype=b
+# laconic-3af549a3ba0e3a3c-worker3         Ready                    2m18s   v1.30.0   ...,nodetype=c
+
+# And with taints like this:
+# $ kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints --no-headers
+# laconic-3af549a3ba0e3a3c-control-plane   [map[effect:NoSchedule key:node-role.kubernetes.io/control-plane]]
+# laconic-3af549a3ba0e3a3c-worker
+# laconic-3af549a3ba0e3a3c-worker2
+# laconic-3af549a3ba0e3a3c-worker3         [map[effect:NoSchedule key:nodeavoid value:a]]
+
+# We can now modify the deployment spec file to require a set of affinity and/or taint combinations,
+# then bring up the deployment and check that the pod is scheduled to an expected node.
+
+# Add a requirement to schedule on a node labeled nodetype=c
+deployment_spec_file=${test_deployment_dir}/spec.yml
+cat << EOF >> ${deployment_spec_file}
+node-affinities:
+  - label: nodetype
+    value: c
+EOF
+
+# Try to start the deployment
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+wait_for_pods_started
+# Check logs command works
+wait_for_log_output
+sleep 1
+log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_3" == *"filesystem is fresh"* ]]; then
+    echo "deployment logs test: passed"
+else
+    echo "deployment logs test: FAILED"
+    echo $log_output_3
+    delete_cluster_exit
+fi
+
+# The deployment's pod should be scheduled onto node: worker3
+# TODO: verify which node the pod was scheduled onto (one possible check is
+# sketched at the end of this file); the test is not yet complete, so fail here for now.
+exit 1
+
+# Stop and clean up
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+echo "Test passed"