initial role commit

srwadleigh 2024-04-27 01:33:23 +00:00
commit ee36e21a9d
29 changed files with 1187 additions and 0 deletions

20
LICENSE Normal file

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2024 Shane Wadleigh
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

225
README.md Normal file

@ -0,0 +1,225 @@
# ansible-roles-k8s
- https://docs.k3s.io/
- https://docs.rke2.io/
- https://kube-vip.io/
- https://github.com/sbstp/kubie
- https://kubernetes.io/docs/tasks/tools/
## Requirements
Install `yq` on the local system; it is required by the kubeconfig formatting handler, which places an updated kubeconfig in the local `~/.kube` directory.
`kubie` is recommended for context management after deployment.
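If the tools are missing, the bundled `files/get-kube-tools.sh` fetches static binaries; a minimal sketch of the same approach, assuming linux/amd64 and `/usr/local/bin`:
```
wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
wget -qO /usr/local/bin/kubie https://github.com/sbstp/kubie/releases/latest/download/kubie-linux-amd64
chmod a+x /usr/local/bin/yq /usr/local/bin/kubie
```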
## Cluster Example
cluster hosts
```
[k8s_somecluster]
somecluster_control k8s_node_type=bootstrap
somecluster_agent_smith k8s_node_type=agent k8s_external_ip=x.x.x.x
somecluster_agent_jones k8s_node_type=agent k8s_external_ip=x.x.x.x
```
cluster tasks
```
- name: Setup k8s server node
hosts: somehost
become: true
roles:
- role: k8s
k8s_type: rke2
k8s_cluster_name: somecluster
k8s_cluster_url: somecluster.somewhere
k8s_cni_interface: enp1s0
k8s_selinux: true
- role: firewalld
firewalld_add:
- name: internal
masquerade: true
forward: true
interfaces:
- enp1s0
services:
- dhcpv6-client
- ssh
- http
- https
ports:
- 6443/tcp # kubernetes API
- 9345/tcp # supervisor API
- 10250/tcp # kubelet metrics
- 2379/tcp # etcd client
- 2380/tcp # etcd peer
- 30000-32767/tcp # NodePort range
- 8472/udp # canal/flannel vxlan
- 9099/tcp # canal health checks
- name: trusted
sources:
- 10.42.0.0/16
- 10.43.0.0/16
- name: public
masquerade: true
forward: true
interfaces:
- enp7s0
services:
- http
- https
firewalld_remove:
- name: public
interfaces:
- enp1s0
services:
- dhcpv6-client
- ssh
```
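The role also expects a shared `k8s_cluster_token` (see `defaults/main.yml`). A sketch of generating one with the bundled `files/token.sh` helper, assuming a per-cluster vault file (the path below is only an example):
```
# write an ansible-vault encrypted vars file defining k8s_cluster_token
./files/token.sh group_vars/k8s_somecluster/vault.yml
# or, with no argument, print an encrypt_string block to paste into existing vars
./files/token.sh
```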
## Retrieve kube config from an existing cluster
This task retrieves and formats the kubectl config for an existing cluster; it also runs automatically during cluster creation.
`k8s_cluster_name` sets the cluster context
`k8s_cluster_url` sets the server address
```
ansible-playbook -i prod/ site.yml --tags=k8s-get-config --limit=k8s_somecluster
```
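For reference, the local formatting handler (`handlers/main.yml`) applies roughly the following `yq` edits to the fetched file, shown here with the example values from above substituted:
```
yq e '.clusters[].name = "somecluster"' -i ~/.kube/config-somecluster.yaml
yq e '.contexts[].name = "somecluster"' -i ~/.kube/config-somecluster.yaml
yq e '(.clusters[] | select(.name == "somecluster")).cluster.server = "https://somecluster.somewhere:6443"' -i ~/.kube/config-somecluster.yaml
```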
## Basic Cluster Interaction
```
kubie ctx <cluster-name>
kubectl get node -o wide
kubectl get pods,svc,ds --all-namespaces
```
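If you prefer plain `kubectl` without `kubie`, point `KUBECONFIG` at the fetched config (the path below is where this role's fetch task places it):
```
export KUBECONFIG=~/.kube/config-somecluster.yaml
kubectl get node -o wide
```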
## Deployment and Removal
Deploy
```
ansible-playbook -i hosts site.yml --tags=firewalld,k8s --limit=somehost
```
Remove firewall role
```
ansible-playbook -i hosts site.yml --tags=firewalld,k8s --extra-vars "firewall_action=remove" --limit=somehost
```
There is a task to completely destroy an existing cluster; it asks for interactive confirmation and should be used with caution.
```
ansible-playbook -i prod/ site.yml --tags=k8s --extra-vars 'k8s_action=destroy' --limit=some_innocent_cluster
```
Manual removal commands
```
/usr/local/bin/k3s-uninstall.sh
/usr/local/bin/k3s-agent-uninstall.sh
/usr/local/bin/rke2-uninstall.sh
/usr/local/bin/rke2-agent-uninstall.sh
```
## Managing K3S Services
servers
```
systemctl status k3s.service
journalctl -u k3s.service -f
```
agents
```
systemctl status k3s-agent.service
journalctl -u k3s-agent -f
```
uninstall servers
```
/usr/local/bin/k3s-uninstall.sh
```
uninstall agents
```
/usr/local/bin/k3s-agent-uninstall.sh
```
## Managing RKE2 Services
servers
```
systemctl status rke2-server.service
journalctl -u rke2-server -f
```
agents
```
systemctl status rke2-agent.service
journalctl -u rke2-agent -f
```
uninstall servers
```
/usr/bin/rke2-uninstall.sh
```
uninstall agents
```
/usr/local/bin/rke2-uninstall.sh
```
override default canal options
```
# /var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-canal
namespace: kube-system
spec:
valuesContent: |-
flannel:
iface: "eth1"
```
Enable Flannel's WireGuard support under Canal by applying the manifest below, then restart the DaemonSet:
`kubectl rollout restart ds rke2-canal -n kube-system`
```
# /var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-canal
namespace: kube-system
spec:
valuesContent: |-
flannel:
backend: "wireguard"
```

83
defaults/main.yml Normal file

@ -0,0 +1,83 @@
---
# this toggle provides a dangerous way to quickly destroy an entire cluster
# ansible-playbook -i prod/ site.yml --tags=k8s --extra-vars 'k8s_action=destroy' --limit=k3s_innocent_cluster
# create | destroy
k8s_action: create
# k3s | rke2
k8s_type: k3s
k8s_channel: stable
k8s_cluster_name: default
k8s_cluster_url: localhost
# bootstrap | server | agent
k8s_node_type: bootstrap
k8s_node_ip: "{{ ansible_host }}"
# sysctl set fs.inotify.max_user_instances
k8s_inotify_max: 1024
# hardcoded kubelet default value is 110
k8s_pod_limit: 110
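# (rendered as maxPods in templates/k3s-kubelet.config.j2 for k3s, and as the
#  max-pods kubelet-arg in vars/types/rke2.yml for rke2)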
# we can set this by platform later
k8s_selinux: false
# if the host is using network manager, see vars/sys/ for overrides
k8s_has_nm: false
# if the host is using an http proxy
k8s_http_proxy: false
# cni
# k8s_cni_type:
k8s_config_mode: 600
k8s_api_port: 6443
k8s_debug: false
k8s_skip_start: false
k8s_taint_servers: false
k8s_flannel_wireguard: false
k8s_disable_kube_proxy: false
# paths
k8s_install_script: /usr/local/bin/{{ k8s_type }}-install.sh
k8s_config_path: "/etc/rancher/{{ k8s_type }}"
k8s_cmd_path: /usr/local/bin
k8s_nm_path: /etc/NetworkManager/conf.d
k8s_manifests_path: "/var/lib/rancher/{{ k8s_type }}/server/manifests/"
# if defined, install manifests
# k8s_manifests:
# - name: cert-manager
# path: https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
# Override
# k8s_cluster_name
# k8s_cluster_url
# Define
# k8s_cluster_token
# you can pre-generate this in a vault with the token.sh script
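#   e.g. ./files/token.sh group_vars/k8s_somecluster/vault.yml  (example path)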
# k8s_node_taints
# --node-taint CriticalAddonsOnly=true:NoExecute
# k8s_node_taints:
# - name: CriticalAddonsOnly
# value: true
# effect: NoExecute
# these are provided simply as an opportunity to override in cases where some adjustment isn't supported by the config templates
# k8s_install_bootstrap: >-
# server --cluster-init --tls-san {{ k8s_cluster_url }} --node-taint CriticalAddonsOnly=true:NoExecute
# {% if k8s_disable is defined %}
# {% for disable in k8s_disable %}
# --disable={{ disable }}
# {% endfor %}
# {% endif %}
# k8s_install_agent: >-
# agent --kubelet-arg=config=/etc/rancher/k3s/kubelet.config --node-ip={{ ansible_host }}
# {% if k8s_external_ip is defined %}--node-external-ip={{ k8s_external_ip }}{% endif %}

4
files/config.sh Normal file

@ -0,0 +1,4 @@
#!/bin/sh
# rename kubeconfig contexts to the value of the K3S_CONTEXT environment variable
yq e '.contexts[].name = strenv(K3S_CONTEXT)' -i config


@ -0,0 +1,68 @@
---
firewalld_add:
- name: internal
masquerade: false
forward: true
interfaces:
- eth0
services:
- dhcpv6-client
- ssh
- http
- https
ports:
- 9100/tcp # node exporter
- 6443/tcp # kubernetes API
- 9345/tcp # supervisor API
- 10250/tcp # kubelet metrics
- 2379/tcp # etcd client
- 2380/tcp # etcd peer
- 30000-32767/tcp # NodePort range
# Spegel
- 5001/tcp # embedded distributed registry
# Flannel CNI
- 8472/udp # flannel vxlan
- 51820/udp # wireguard ipv4
- 51821/udp # wireguard ipv6
# Canal CNI
# - 8472/udp # canal vxlan
# - 9099/tcp # canal health checks
# - 51820/udp # canal WireGuard IPv4
# - 51821/udp # canal WireGuard IPv6/dual-stack
# Cilium CNI
#- 8472/udp # cilium vxlan
#- 4240/tcp # cilium health checks
#- 8/0/icmp # cilium health checks
#- 51871/udp # cilium wireguard
#- 4244/tcp # hubble relay
#- 4245/tcp # hubble relay
#- 9962/tcp # cilium agent prometheus
#- 9963/tcp # cilium operator prometheus
#- 9964/tcp # cilium proxy prometheus
#- 2379-2380/tcp # etcd access
# Calico CNI
# - 179/tcp # calico bgp
# - 4789/udp # calico vxlan
# - 5473/tcp # calico typha
# - 9098/tcp # calico typha health checks
# - 9099/tcp # calico health checks
# - 51820/udp # calico WireGuard IPv4
# - 51821/udp # calico WireGuard IPv6/dual-stack
- name: trusted
sources:
- 10.42.0.0/16
- 10.43.0.0/16
- 10.0.0.0/16
firewalld_remove:
- name: public
services:
- dhcpv6-client
- ssh


@ -0,0 +1,64 @@
---
firewalld_add:
- name: internal
masquerade: false
forward: true
interfaces:
- eth0
services:
- dhcpv6-client
- ssh
- http
- https
ports:
- 9100/tcp # node exporter
- 6443/tcp # kubernetes API
- 9345/tcp # supervisor API
- 10250/tcp # kubelet metrics
- 2379/tcp # etcd client
- 2380/tcp # etcd peer
- 2381/tcp # etcd metrics
- 30000-32767/tcp # NodePort range
# Canal CNI (default)
- 8472/udp # canal vxlan
- 9099/tcp # canal health checks
- 51820/udp # canal WireGuard IPv4
- 51821/udp # canal WireGuard IPv6/dual-stack
# Cilium CNI
#- 8472/udp # cilium vxlan
#- 4240/tcp # cilium health checks
#- 8/0/icmp # cilium health checks
#- 51871/udp # cilium wireguard
#- 4244/tcp # hubble relay
#- 4245/tcp # hubble relay
#- 9962/tcp # cilium agent prometheus
#- 9963/tcp # cilium operator prometheus
#- 9964/tcp # cilium proxy prometheus
#- 2379-2380/tcp # etcd access
# Calico CNI
# - 179/tcp # calico bgp
# - 4789/udp # calico vxlan
# - 5473/tcp # calico typha
# - 9098/tcp # calico typha health checks
# - 9099/tcp # calico health checks
# - 51820/udp # calico WireGuard IPv4
# - 51821/udp # calico WireGuard IPv6/dual-stack
# Flannel CNI
#- 8472/udp # flannel vxlan
#- 4789/udp
- name: trusted
sources:
- 10.42.0.0/16
- 10.43.0.0/16
- 10.0.0.0/16
firewalld_remove:
- name: public
services:
- dhcpv6-client
- ssh

17
files/get-kube-tools.sh Executable file

@ -0,0 +1,17 @@
#!/bin/sh
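# fetch static kubectl, kubie, and yq binaries into INSTALL_PATH (see README requirements)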
INSTALL_PATH="/usr/local/bin"
INSTALL_ARCH="amd64"
KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt)
KUBIE_VERSION="latest"
YQ_VERSION="latest"
wget -qO ${INSTALL_PATH}/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${INSTALL_ARCH}/kubectl
chmod a+x ${INSTALL_PATH}/kubectl
wget -qO ${INSTALL_PATH}/kubie https://github.com/sbstp/kubie/releases/${KUBIE_VERSION}/download/kubie-linux-${INSTALL_ARCH}
chmod a+x ${INSTALL_PATH}/kubie
wget -qO ${INSTALL_PATH}/yq https://github.com/mikefarah/yq/releases/${YQ_VERSION}/download/yq_linux_${INSTALL_ARCH}
chmod a+x ${INSTALL_PATH}/yq

38
files/token.sh Executable file

@ -0,0 +1,38 @@
#!/bin/bash
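# generate a random cluster token and output it ansible-vault encrypted:
#   with a *.yml argument, write an encrypted vars file defining k8s_cluster_token
#   with no argument, print an encrypt_string block for pasting into existing vars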
vault_output="$1"
vault_regex=".*\.yml$"
vault_var_name="k8s_cluster_token"
token="$(openssl rand -hex 16)"
print_token() {
echo "$token"
}
print_yaml() {
printf -- "---\n$vault_var_name: %s\n" "$token"
}
encrypt_token() {
ansible-vault encrypt_string "$token" --name "$vault_var_name"
}
encrypt_yaml() {
print_yaml | ansible-vault encrypt
}
if [ -n "$vault_output" ]; then
if [[ $vault_output =~ $vault_regex ]]; then
if [ -f "$vault_output" ]; then
echo "output file already exists, no token generated"
exit 0
else
encrypt_yaml > "$vault_output"
fi
else
echo "supplied output file should end with .yml"
exit 1
fi
else
encrypt_token
fi

11
handlers/main.yml Normal file

@ -0,0 +1,11 @@
---
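# rewrite the kubeconfig fetched to ~/.kube/config-<cluster>.yaml so the cluster and
# context names match k8s_cluster_name and the server points at k8s_cluster_url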
- name: Update k8s Local Config
listen: "k8s-update-local-config"
delegate_to: localhost
become: false
ansible.builtin.shell: |
yq e '.clusters[].name = "{{ k8s_cluster_name }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml
yq e '.contexts[].name = "{{ k8s_cluster_context | d(k8s_cluster_name) }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml
yq e '(.clusters[] | select(.name == "{{ k8s_cluster_name }}")).cluster.server = "https://{{ k8s_cluster_url }}:{{ k8s_api_port }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml
yq e '(.contexts[] | select(.name == "{{ k8s_cluster_name }}")).context.cluster = "{{ k8s_cluster_name }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml

38
meta/main.yml Normal file

@ -0,0 +1,38 @@
---
dependencies: []
galaxy_info:
role_name: k8s
author: srw
description: Ansible role for configuring k3s and rke2 kubernetes clusters
company: "NMD, LLC"
license: MIT
min_ansible_version: "2.10"
platforms:
- name: Fedora
versions:
- all
- name: Debian
versions:
- buster
- bullseye
- bookworm
- name: Ubuntu
versions:
- bionic
- focal
- jammy
- name: Alpine
versions:
- all
- name: ArchLinux
versions:
- all
galaxy_tags:
- server
- system
- containers
- kubernetes
- k8s
- k3s
- rke2

8
tasks/k3s/config.yml Normal file

@ -0,0 +1,8 @@
---
# PRE-DEPLOY
- name: template k3s kubelet config
ansible.builtin.template:
src: "templates/k3s-kubelet.config.j2"
dest: "/etc/rancher/k3s/kubelet.config"
mode: 0644

22
tasks/k3s/main.yml Normal file

@ -0,0 +1,22 @@
---
# BOOTSTRAP
- name: k3s bootstrap initial server node
ansible.builtin.shell: "{{ k8s_install_script }}"
environment: "{{ k8s_env | combine({'INSTALL_K3S_EXEC': k8s_install_bootstrap}) }}"
when:
- k8s_node_type == "bootstrap"
# ADD SERVERS
- name: k3s add additional server nodes
ansible.builtin.shell: "{{ k8s_install_script }}"
environment: "{{ k8s_env | combine({'INSTALL_K3S_EXEC': k8s_install_server}) }}"
when:
- k8s_node_type == "server"
# ADD AGENTS
- name: k3s add agent nodes
ansible.builtin.shell: "{{ k8s_install_script }}"
environment: "{{ k8s_env | combine({'INSTALL_K3S_EXEC': k8s_install_agent}) }}"
when:
- k8s_node_type == "agent"

152
tasks/main.yml Normal file

@ -0,0 +1,152 @@
---
- name: Setup Environment
tags:
- k8s
- k8s-config
block:
- name: gather local facts
set_fact:
local_user: "{{ lookup('env', 'USER') }}"
delegate_to: localhost
# useful to set this because k8s_node_type has a special value "bootstrap" which is not an actual type
- name: set effective node type
set_fact:
node_type: "{{ 'agent' if k8s_node_type == 'agent' else 'server' }}"
- name: load type specific values
ansible.builtin.include_vars:
file: "types/{{ k8s_type }}.yml"
- name: load system specific values
ansible.builtin.include_vars: "{{ item }}"
with_first_found:
- files:
- "systems/{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml"
- "systems/{{ ansible_os_family }}.yml"
- "systems/{{ ansible_distribution }}.yml"
- "systems/{{ ansible_system }}.yml"
skip: true
#
# CREATE CLUSTER
#
- name: Cluster Creation
tags: k8s
block:
- name: load server node taints
ansible.builtin.include_vars:
file: "server-taint.yml"
when:
- k8s_taint_servers and k8s_node_type != "agent"
- name: increase inotify instance limit
ansible.posix.sysctl:
name: fs.inotify.max_user_instances
value: "{{ k8s_inotify_max }}"
state: present
- name: download install script
ansible.builtin.get_url:
url: "{{ k8s_install_url }}"
timeout: 120
dest: "{{ k8s_install_script }}"
owner: root
group: root
mode: 0755
# CLUSTER CONFIG
- name: check config paths
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: 0755
loop:
- "{{ k8s_config_path }}"
- "{{ k8s_manifests_path }}"
tags:
- k8s-config
- name: template cluster config
ansible.builtin.template:
src: "templates/{{ k8s_type }}-config.yaml.j2"
dest: "{{ k8s_config_path }}/config.yaml"
mode: 0600
tags:
- k8s-config
- name: type specific configuration
ansible.builtin.include_tasks: "{{ k8s_type }}/config.yml"
tags:
- k8s-config
# DEPLOY CLUSTER
- name: begin cluster creation
ansible.builtin.include_tasks: "{{ k8s_type }}/main.yml"
# KUBE CONFIG
- name: fetch kube config
ansible.builtin.fetch:
src: "{{ k8s_config_path }}/{{ k8s_type }}.yaml"
dest: "~/.kube/config-{{ k8s_cluster_name }}.yaml"
flat: yes
notify:
- k8s-update-local-config
when:
- k8s_node_type == "bootstrap"
tags:
- k8s-get-kubeconf
# DEPLOY MANIFESTS
# END Cluster Creation
when:
- k8s_action == "create"
#
# DESTROY CLUSTER
#
# this is very dangerous; handle with care unless you are actively testing with disposable clusters
- name: Destroy K8s cluster
tags: k8s
block:
- name: confirm cluster destruction
delegate_to: localhost
run_once: true
become: false
pause:
prompt: "=== WARNING === Are you sure you want to DESTROY the cluster: {{ k8s_cluster_name | string | upper }}? (yes/no)"
register: destroy_confirmation
- name: set confirmation fact
set_fact:
cluster_destruction: "{{ destroy_confirmation.user_input | bool }}"
- name: delete cluster config
delegate_to: localhost
run_once: true
become: false
file:
path: "~/.kube/config-{{ k8s_cluster_name }}.yaml"
state: absent
when:
- cluster_destruction
- name: destroy nodes
ansible.builtin.shell: "{{ k8s_cmd_path }}/{{ k8s_type }}-uninstall.sh"
when:
- k8s_node_type != "agent" or k8s_type == "rke2"
- cluster_destruction
- name: destroy k3s agent nodes
ansible.builtin.shell: "{{ k8s_cmd_path }}/{{ k8s_type }}-agent-uninstall.sh"
when:
- k8s_node_type == "agent" and k8s_type == "k3s"
- cluster_destruction
# END Cluster Destruction
when:
- k8s_action == "destroy"

19
tasks/rke2/config.yml Normal file

@ -0,0 +1,19 @@
---
# HTTP PROXY
- name: http proxy tasks
ansible.builtin.include_tasks: "{{ k8s_type }}/proxy.yml"
tags:
- k8s-config
# CANAL NM CONFIG
- name: template nm canal config
ansible.builtin.template:
src: "templates/{{ k8s_type }}-canal.conf.j2"
dest: "{{ k8s_nm_path }}/{{ k8s_type }}-canal.conf"
mode: 0600
when:
- k8s_cni_type == "canal"
- k8s_has_nm
tags:
- k8s-config

41
tasks/rke2/main.yml Normal file

@ -0,0 +1,41 @@
---
# BOOTSTRAP
- name: rke2 bootstrap initial server node
ansible.builtin.shell: "{{ k8s_install_script }}"
environment: "{{ k8s_env | combine({'INSTALL_RKE2_TYPE': 'server'}) }}"
when:
- k8s_node_type == "bootstrap"
- name: rke2 template cni manifests
ansible.builtin.template:
src: "templates/{{ k8s_type }}-{{ k8s_cni_type }}-config.yaml.j2"
dest: "{{ k8s_manifests_path }}/{{ k8s_type }}-{{ k8s_cni_type }}-config.yaml"
mode: 0600
when:
- k8s_node_type == "bootstrap"
- name: rke2 start bootstrap node
ansible.builtin.include_tasks: start.yml
when:
- k8s_node_type == "bootstrap"
# ADD SERVERS
- name: rke2 add additional server nodes
ansible.builtin.shell: "{{ k8s_install_script }}"
environment: "{{ k8s_env | combine({'INSTALL_RKE2_TYPE': 'server'}) }}"
when:
- k8s_node_type == "server"
# ADD AGENTS
- name: rke2 add agent nodes
ansible.builtin.shell: "{{ k8s_install_script }}"
environment: "{{ k8s_env | combine({'INSTALL_RKE2_TYPE': 'agent'}) }}"
when:
- k8s_node_type == "agent"
# POST-DEPLOY
- name: rke2 start additional nodes
ansible.builtin.include_tasks: start.yml
when:
- k8s_node_type != "bootstrap"

50
tasks/rke2/proxy.yml Normal file

@ -0,0 +1,50 @@
---
- name: http proxy detection and setup
tags:
- k8s
- k8s-config
block:
- name: check for existing http_proxy
shell: echo $http_proxy
register: http_proxy
ignore_errors: true
changed_when: false
- name: check for existing https_proxy
shell: echo $https_proxy
register: https_proxy
ignore_errors: true
changed_when: false
- name: check for existing no_proxy
shell: echo $no_proxy
register: no_proxy
ignore_errors: true
changed_when: false
- name: Set fact for HTTP_PROXY
set_fact:
k8s_http_proxy: "{{ http_proxy.stdout | default('') }}"
when:
- http_proxy.stdout != ""
- name: Set fact for HTTPS_PROXY
set_fact:
k8s_https_proxy: "{{ https_proxy.stdout | default('') }}"
when:
- https_proxy.stdout != ""
- name: Set fact for NO_PROXY
set_fact:
k8s_no_proxy: "{{ no_proxy.stdout | default('') }}"
when: no_proxy.stdout != ""
- name: template rke2 http proxy
ansible.builtin.template:
src: "templates/{{ k8s_type }}-proxy.j2"
dest: "/etc/default/rke2-{{ node_type }}"
mode: 0644
when:
- http_proxy.stdout != ""
- https_proxy.stdout != ""

8
tasks/start.yml Normal file

@ -0,0 +1,8 @@
---
# handlers don't execute in time, so we include this as a task
- name: enable "{{ k8s_type }}" service
ansible.builtin.systemd:
name: "{{ k8s_type }}-{{ node_type }}"
state: restarted
enabled: true


@ -0,0 +1,78 @@
# template generated via ansible by {{ local_user }} at {{ ansible_date_time.date }} {{ ansible_date_time.time }}
token: {{ k8s_cluster_token }}
{% if k8s_cluster_url is defined and k8s_node_type != "bootstrap" -%}
server: https://{{ k8s_cluster_url }}:{{ k8s_api_port }}
{% endif -%}
{% if k8s_node_type == "bootstrap" -%}
cluster-init: true
{% endif -%}
debug: {{ k8s_debug | string | lower }}
{% if k8s_node_type != "agent" -%}
write-kubeconfig-mode: {{ k8s_config_mode }}
{% if k8s_tls_san is defined and k8s_node_type != "agent" -%}
tls-san:
{% for san in k8s_tls_san -%}
- "{{ san }}"
{% endfor -%}
{% elif k8s_cluster_url is defined and k8s_node_type != "agent" -%}
tls-san: {{ k8s_cluster_url }}
{% endif %}
{% if k8s_selinux -%}
selinux: true
{% endif -%}
{% if k8s_disable is defined and k8s_node_type != "agent" %}
# disable builtin services
disable:
{% for disable in k8s_disable %}
- {{ disable }}
{% endfor -%}
{% endif -%}
{% endif %}
# node network
{% if k8s_node_ip is defined -%}
node-ip: {{ k8s_node_ip }}
{% endif -%}
{% if k8s_external_ip is defined -%}
node-external-ip: {{ k8s_external_ip }}
{% endif -%}
{% if k8s_flannel_backend is defined and k8s_node_type != "agent" -%}
# configure or disable flannel cni
flannel-backend: {{ k8s_flannel_backend }}
flannel-ipv6-masq: {{ k8s_flannel_ipv6_masq }}
flannel-external-ip: {{ k8s_flannel_external_ip }}
{% endif %}
{% if k8s_node_taints is defined -%}
# initial node taints
node-taint:
{% for taint in k8s_node_taints -%}
- "{{ taint.name }}={{ taint.value }}:{{ taint.effect }}"
{% endfor -%}
{% endif %}
{% if k8s_node_lables is defined -%}
# initial node labels
node-label:
{% for label in k8s_node_lables -%}
- "{{ label.name }}={{ label.value }}"
{% endfor -%}
{% endif %}
{% if k8s_kubelet_args is defined %}
# kubelet configuration
kubelet-arg:
{% for kublet_arg in k8s_kubelet_args %}
- "{{ kublet_arg }}"
{% endfor -%}
{% endif %}
{% if k8s_additional_configs is defined %}
{% for k8s_config in k8s_additional_configs %}
{{ k8s_config.name }}:
- "{{ k8s_config.value }}"
{% endfor -%}
{% endif %}


@ -0,0 +1,5 @@
# template generated via ansible by {{ local_user }} at {{ ansible_date_time.date }} {{ ansible_date_time.time }}
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
maxPods: {{ k8s_pod_limit }}


@ -0,0 +1,13 @@
# template generated via ansible by {{ local_user }} at {{ ansible_date_time.date }} {{ ansible_date_time.time }}
# /var/lib/rancher/rke2/server/manifests/rke2-calico-config.yaml
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-calico
namespace: kube-system
spec:
valuesContent: |-
installation:
calicoNetwork:
mtu: 9000


@ -0,0 +1,17 @@
# template generated via ansible by {{ local_user }} at {{ ansible_date_time.date }} {{ ansible_date_time.time }}
# /var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-canal
namespace: kube-system
spec:
valuesContent: |-
flannel:
{% if k8s_flannel_wireguard %}
backend: "wireguard"
{% else %}
iface: "{{ k8s_cni_interface }}"
{% endif %}


@ -0,0 +1,2 @@
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:flannel*


@ -0,0 +1,25 @@
# template generated via ansible by {{ local_user }} at {{ ansible_date_time.date }} {{ ansible_date_time.time }}
# /var/lib/rancher/rke2/server/manifests/rke2-cilium-config.yaml
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-cilium
namespace: kube-system
spec:
valuesContent: |-
eni:
enabled: true
{% if k8s_disable_kube_proxy %}
kubeProxyReplacement: true
k8sServiceHost: {{ k8s_cluster_url }}
k8sServicePort: {{ k8s_api_port }}
{% endif %}
{% if k8s_cilium_hubble | default(false) %}
hubble:
enabled: true
relay:
enabled: true
ui:
enabled: true
{% endif %}


@ -0,0 +1,81 @@
# template generated via ansible by {{ local_user }} at {{ ansible_date_time.date }} {{ ansible_date_time.time }}
token: {{ k8s_cluster_token }}
{% if k8s_cluster_url is defined and k8s_node_type != "bootstrap" -%}
server: https://{{ k8s_cluster_url }}:{{ k8s_supervisor_port }}
{% endif -%}
debug: {{ k8s_debug | string | lower }}
{% if k8s_node_type != "agent" -%}
write-kubeconfig-mode: {{ k8s_config_mode }}
{% if k8s_tls_san is defined and k8s_node_type != "agent" -%}
tls-san:
{% for san in k8s_tls_san -%}
- "{{ san }}"
{% endfor -%}
{% elif k8s_cluster_url is defined and k8s_node_type != "agent" -%}
tls-san: {{ k8s_cluster_url }}
{% endif %}
{% if k8s_selinux -%}
selinux: true
{% endif -%}
{% if k8s_cni_type is defined -%}
cni: {{ k8s_cni_type }}
{% endif -%}
{% if k8s_disable_kube_proxy %}
disable-kube-proxy: true
{% endif -%}
{% if k8s_disable is defined and k8s_node_type != "agent" %}
# disable builtin services
disable:
{% for disable in k8s_disable %}
- {{ disable }}
{% endfor -%}
{% endif -%}
{% endif %}
# node network
{% if k8s_node_ip is defined -%}
node-ip: {{ k8s_node_ip }}
{% endif -%}
{% if k8s_external_ip is defined -%}
node-external-ip: {{ k8s_external_ip }}
{% endif -%}
{% if k8s_flannel_backend is defined and k8s_node_type != "agent" -%}
# configure or disable flannel cni
flannel-backend: {{ k8s_flannel_backend }}
flannel-ipv6-masq: {{ k8s_flannel_ipv6_masq }}
flannel-external-ip: {{ k8s_flannel_external_ip }}
{% endif %}
{% if k8s_node_taints is defined -%}
# initial node taints
node-taint:
{% for taint in k8s_node_taints -%}
- "{{ taint.name }}={{ taint.value }}:{{ taint.effect }}"
{% endfor -%}
{% endif %}
{% if k8s_node_lables is defined -%}
# initial node labels
node-label:
{% for label in k8s_node_lables -%}
- "{{ label.name }}={{ label.value }}"
{% endfor -%}
{% endif %}
{% if k8s_kubelet_args is defined %}
# kubelet configuration
kubelet-arg:
{% for kublet_arg in k8s_kubelet_args %}
- "{{ kublet_arg }}"
{% endfor -%}
{% endif %}
{% if k8s_additional_configs is defined %}
{% for k8s_config in k8s_additional_configs %}
{{ k8s_config.name }}:
- "{{ k8s_config.value }}"
{% endfor -%}
{% endif %}

3
templates/rke2-proxy.j2 Normal file

@ -0,0 +1,3 @@
HTTP_PROXY={{ k8s_http_proxy | d() }}
HTTPS_PROXY={{ k8s_https_proxy | d() }}
NO_PROXY={{ k8s_no_proxy | d() }}

5
vars/server-taint.yml Normal file

@ -0,0 +1,5 @@
---
k8s_node_taints:
- name: CriticalAddonsOnly
value: true
effect: NoExecute

4
vars/systems/RedHat.yml Normal file

@ -0,0 +1,4 @@
---
k8s_selinux: true
k8s_has_nm: true
k8s_cmd_path: /usr/bin

45
vars/types/k3s.yml Normal file

@ -0,0 +1,45 @@
---
# See https://docs.k3s.io/
# define k8s_version to deploy a specific version
# channel: stable, latest, testing
k8s_install_url: https://get.k3s.io
k8s_channel_url: https://update.k3s.io/v1-release/channels
# cluster network (cni)
# flannel-backend: 'vxlan', 'host-gw', 'wireguard-native', 'none'
k8s_flannel_backend: vxlan
k8s_flannel_ipv6_masq: false
k8s_flannel_external_ip: false
# disable builtin services
k8s_disable:
- "traefik"
# kubelet configs
# - "kube-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "system-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "eviction-hard=memory.available<500Mi,nodefs.available<10%"
k8s_kubelet_args:
- config=/etc/rancher/k3s/kubelet.config
k8s_env:
INSTALL_K3S_CHANNEL_URL: "{{ k8s_channel_url }}"
INSTALL_K3S_CHANNEL: "{{ k8s_channel }}"
INSTALL_K3S_SKIP_START: "{{ k8s_skip_start }}"
# will attempt to download from channel if not specified
INSTALL_K3S_VERSION: "{{ k8s_version | d() }}"
# there is some consideration for where the token lives after initial node creation; this could most likely be pruned from env or config
#K3S_TOKEN: "{{ k8s_cluster_token }}"
K3S_KUBECONFIG_MODE: "{{ k8s_config_mode }}"
k8s_install_bootstrap: >-
server
k8s_install_server: >-
server
k8s_install_agent: >-
agent

41
vars/types/rke2.yml Normal file

@ -0,0 +1,41 @@
---
# See https://docs.rke2.io/
# define k8s_version to deploy a specific version
# channel: stable, latest, testing
k8s_install_url: https://get.rke2.io
k8s_channel_url: https://update.rke2.io/v1-release/channels
k8s_cmd_path: /usr/bin
# rke2 server listens on a dedicated port for new nodes to register
k8s_supervisor_port: 9345
# canal, cilium, calico, flannel
k8s_cni_type: canal
# disable builtin services
# k8s_disable:
# - rke2-coredns
# - rke2-ingress-nginx
# - rke2-metrics-server
# - rke2-snapshot-controller
# - rke2-snapshot-controller-crd
# - rke2-snapshot-validation-webhook
# kubelet configs
# - "kube-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "system-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "eviction-hard=memory.available<500Mi,nodefs.available<10%"
k8s_kubelet_args:
- "max-pods={{ k8s_pod_limit }}"
k8s_env:
INSTALL_RKE2_CHANNEL_URL: "{{ k8s_channel_url }}"
INSTALL_RKE2_CHANNEL: "{{ k8s_channel }}"
# will attempt to download from channel if not specified
INSTALL_RKE2_VERSION: "{{ k8s_version | d() }}"
# server or agent (set per node by tasks/rke2/main.yml)
#INSTALL_RKE2_TYPE: "{{ node_type }}"