For some reason, Kubernetes tests haven't been running correctly. This
(hopefully) fixes it on Semaphore.

Signed-off-by: Charlie Drage <charlie@charliedrage.com>
This commit is contained in:
Charlie Drage 2020-04-23 13:56:02 -04:00 committed by GitHub
parent 88dde83044
commit 6b018fab7b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 32 additions and 56 deletions

View File

@@ -129,7 +129,6 @@ vendor-update:
# a field does not has json tag defined. # a field does not has json tag defined.
cp script/vendor-sync/types.go.txt vendor/k8s.io/kubernetes/pkg/api/types.go cp script/vendor-sync/types.go.txt vendor/k8s.io/kubernetes/pkg/api/types.go
.PHONY: test-k8s .PHONY: test-k8s
test-k8s: test-k8s:
./script/test_k8s/test.sh ./script/test_k8s/test.sh

View File

@@ -16,40 +16,20 @@ start_k8s() {
return 1 return 1
fi fi
# Uses https://github.com/kubernetes/minikube/tree/master/deploy/docker if [ ! -f /usr/bin/kind ] && [ ! -f /usr/local/bin/kind ]; then
# In which we have to git clone and create the image.. echo "No kind bin exists? Please install."
# https://github.com/kubernetes/minikube return 1
# Thus we are using a public docker image fi
IMAGE=calpicow/localkube-image:v1.5.3 kind create cluster --name kompose-test
docker run -d \
--volume=/:/rootfs:ro \
--volume=/sys:/sys:rw \
--volume=/var/lib/docker:/var/lib/docker:rw \
--volume=/var/lib/kubelet:/var/lib/kubelet:rw \
--volume=/var/run:/var/run:rw \
--net=host \
--pid=host \
--privileged \
--name=minikube \
$IMAGE \
/localkube start \
--apiserver-insecure-address=0.0.0.0 \
--apiserver-insecure-port=8080 \
--logtostderr=true \
--containerized
until curl 127.0.0.1:8080 &>/dev/null; kubectl cluster-info --context kind-kompose-test
do
echo ...
sleep 1
done
# Set the appropriate .kube/config configuration # Set the appropriate .kube/config configuration
kubectl config set-cluster dev --server=http://localhost:8080 kubectl config set-cluster kind-kompose-test
kubectl config set-context dev --cluster=dev --user=default kubectl config use-context kind-kompose-test
kubectl config use-context dev
kubectl config set-credentials default --token=foobar kubectl proxy --port=6443 &
# Debug info: # Debug info:
# cat ~/.kube/config # cat ~/.kube/config
@@ -65,16 +45,8 @@ stop_k8s() {
STOPPING KUBERNETES STOPPING KUBERNETES
########## ##########
" "
docker rm -f minikube
# Delete via image name k8s.gcr.io kind delete cluster --name kompose-test
# Delete all containers started (names start with k8s_)
# Run twice in-case a container is replicated during that time
for run in {0..2}
do
docker ps -a | grep 'k8s_' | awk '{print $1}' | xargs --no-run-if-empty docker rm -f
docker ps -a | grep 'k8s.gcr.io/hyperkube-amd64' | awk '{print $1}' | xargs --no-run-if-empty docker rm -f
done
} }
wait_k8s() { wait_k8s() {
@@ -98,32 +70,32 @@ wait_k8s() {
test_k8s() { test_k8s() {
for f in examples/*.yaml for f in examples/*.yaml
do do
echo -e "\n${RED}kompose up -f $f ${NC}\n" echo -e "\n${RED}kompose up --server http://127.0.0.1:6443 -f $f ${NC}\n"
./kompose up -f $f ./kompose up --server http://127.0.0.1:6443 -f $f
sleep 2 # Sleep for k8s to catch up to deployment sleep 2 # Sleep for k8s to catch up to deployment
echo -e "\n${RED}kompose down -f $f ${NC}\n" echo -e "\n${RED}kompose down --server http://127.0.0.1:6443 -f $f ${NC}\n"
./kompose down -f $f ./kompose down --server http://127.0.0.1:6443 -f $f
echo -e "\nTesting controller=daemonset key\n" echo -e "\nTesting controller=daemonset key\n"
echo -e "\n${RED}kompose up -f $f --controller=daemonset ${NC}\n" echo -e "\n${RED}kompose up --server http://127.0.0.1:6443 -f $f --controller=daemonset ${NC}\n"
./kompose up -f $f --controller=daemonset ./kompose up --server http://127.0.0.1:6443 -f $f --controller=daemonset
sleep 2 # Sleep for k8s to catch up to deployment sleep 2 # Sleep for k8s to catch up to deployment
echo -e "\n${RED}kompose down -f $f --controller=daemonset ${NC}\n" echo -e "\n${RED}kompose down --server http://127.0.0.1:6443 -f $f --controller=daemonset ${NC}\n"
./kompose down -f $f --controller=daemonset ./kompose down --server http://127.0.0.1:6443 -f $f --controller=daemonset
echo -e "\nTesting controller=replicationcontroller key\n" echo -e "\nTesting controller=replicationcontroller key\n"
echo -e "\n${RED}kompose up -f $f --controller=replicationcontroller ${NC}\n" echo -e "\n${RED}kompose up --server http://127.0.0.1:6443 -f $f --controller=replicationcontroller ${NC}\n"
./kompose up -f $f --controller=replicationcontroller ./kompose up --server http://127.0.0.1:6443 -f $f --controller=replicationcontroller
sleep 2 # Sleep for k8s to catch up to deployment sleep 2 # Sleep for k8s to catch up to deployment
echo -e "\n${RED}kompose down -f $f --controller=replicationcontroller ${NC}\n" echo -e "\n${RED}kompose down --server http://127.0.0.1:6443 -f $f --controller=replicationcontroller ${NC}\n"
./kompose down -f $f --controller=replicationcontroller ./kompose down --server http://127.0.0.1:6443 -f $f --controller=replicationcontroller
done done
echo -e "\nTesting stdin to kompose\n" echo -e "\nTesting stdin to kompose\n"
echo -e "\n${RED}cat examples/docker-compose.yaml | ./kompose up -f -${NC}\n" echo -e "\n${RED}cat examples/docker-compose.yaml | ./kompose up --server http://127.0.0.1:6443 -f -${NC}\n"
cat examples/docker-compose.yaml | ./kompose up -f - cat examples/docker-compose.yaml | ./kompose up --server http://127.0.0.1:6443 -f -
sleep 2 # Sleep for k8s to catch up to deployment sleep 2 # Sleep for k8s to catch up to deployment
echo -e "\n${RED}cat examples/docker-compose.yaml | ./kompose down -f - ${NC}\n" echo -e "\n${RED}cat examples/docker-compose.yaml | ./kompose down --server http://127.0.0.1:6443 -f - ${NC}\n"
cat examples/docker-compose.yaml | ./kompose down -f - cat examples/docker-compose.yaml | ./kompose down --server http://127.0.0.1:6443 -f -
} }
if [[ $1 == "start" ]]; then if [[ $1 == "start" ]]; then

View File

@@ -21,6 +21,11 @@ if ! hash kubectl 2>/dev/null; then
exit 1 exit 1
fi fi
if ! hash kind 2>/dev/null; then
echo "ERROR: kind required"
exit 1
fi
# First off, we have to compile the latest binary # First off, we have to compile the latest binary
# We *assume* that the binary has already been built # We *assume* that the binary has already been built
# make bin # make bin