sync role with upstream

This commit is contained in:
srw 2025-06-13 05:08:55 +00:00
parent 7ffd4393ff
commit 192c151635
12 changed files with 697 additions and 215 deletions

View File

@ -1,7 +1,7 @@
---
# this toggle provides a dangerous way to quickly destroy an entire cluster
# ansible-playbook -i prod/ site.yml --tags=k8s --extra-vars 'k8s_action=destroy' --limit=k3s_innocent_cluster
# create | destroy
# create | destroy
k8s_action: create
# k3s | rke2
@ -10,9 +10,6 @@ k8s_type: k3s
k8s_cluster_name: default
k8s_cluster_url: localhost
# Additionally define k8s_external_ip to provide a specific node an external route
k8s_node_ip: "{{ ansible_host }}"
# paths
# used for placing nm related configs
k8s_nm_path: /etc/NetworkManager/conf.d
@ -30,6 +27,15 @@ k8s_config_path: "/etc/rancher/{{ k8s_type }}"
k8s_helm_install_url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
k8s_helm_install_script: "{{ k8s_install_path }}/get_helm.sh"
# settings
k8s_disable_kube_proxy: false
k8s_debug: false
k8s_kublet_log: false
k8s_kubelet_args:
- "max-pods={{ k8s_pod_limit }}"
- "alsologtostderr={{ k8s_kublet_log }}"
# automatically fetch kubeconfig and update context according to k8s_cluster_name
k8s_kubeconfig_fetch: true
k8s_kubeconfig_update_context: true
@ -52,125 +58,113 @@ k8s_inotify_max: 1024
# hardcoded kubelet default value is 110
k8s_pod_limit: 110
# if the host is using an http proxy for external access
k8s_http_proxy: false
# kubeconfig chmod
k8s_config_mode: 600
k8s_disable_kube_proxy: false
k8s_debug: false
# if the host is using an http proxy for external access
#k8s_http_proxy: false
k8s_kubelet_args:
- "max-pods={{ k8s_pod_limit }}"
# nodes with multiple interfaces may need to specify the interface to use for cluster communication
# k8s_node_ip: "{{ ansible_host }}"
# local-path-storage default settings, see templates/shared/local-path-storage.yaml.j2
# k8s_local_path_image: rancher/local-path-provisioner:master-head
# k8s_local_path_image_pull_policy: IfNotPresent
# k8s_local_path_default_class: true
# k8s_local_path_reclaim_policy: Retain
# k8s_local_path_bind_mode: WaitForFirstConsumer
# k8s_local_path_priority_class: system-node-critical
# k8s_local_path_dir: /opt/local-path-provisioner
# http proxy settings
# by default values are automatically detected from /etc/environment
# define the proxy here if you need to override or /etc/environment is not used
# currently these settings are only relevant for rke2
# k8s_http_proxy: http://proxy.example.com:8080
# k8s_https_proxy: http://proxy.example.com:8080
# k8s_no_proxy: "10.42.0.0/16,10.43.0.0/16,localhost"
# cluster issuers
# k8s_cluster_issuers:
# - name: letsencrypt-prod
# url: https://acme-v02.api.letsencrypt.org/directory
# solvers:
# - type: http
# ingress: nginx
# - type: dns
# provider: cloudflare
# tokenref: apiTokenSecretRef
# secret_name: cloudflare-api-token
# secret_key: api-token
# KUBECONFIG
k8s_kubeconfig: "~/.kube/config-{{ k8s_cluster_name }}.yaml"
k8s_kubeconfig_context: "{{ k8s_cluster_name }}"
# cluster secrets
# k8s_secrets:
# - name: cloudflare-api-token
# namespace: cert-manager
# data: api-token
# value: ZG9wX3Y...
# fetch yq to the control host for kubeconfig editing
k8s_yq_fetch: true
k8s_yq_version: latest
# NOTE: GitHub serves "releases/latest/download/<asset>" for the literal tag
# "latest", but pinned tags use "releases/download/<tag>/<asset>" — the two
# path orders are not interchangeable (same fix applied to the install script
# in this commit).
k8s_yq_url: "https://github.com/mikefarah/yq/releases/{% if k8s_yq_version == 'latest' %}latest/download{% else %}download/{{ k8s_yq_version }}{% endif %}/yq_linux_{{ k8s_arch }}"
# k8s_kubelet_args
# - "kube-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "system-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "eviction-hard=memory.available<500Mi,nodefs.available<10%"
# - "max-pods={{ k8s_pod_limit }}"
# - "v=2"
# MANIFEST DEFAULTS
# Define
# jetstack/cert-manager
# ansible helm manages the chart_ref differently than the cli helm command, may need to remove chart prefixes
# by default chart_ref is the same as the chart name
# k8s_cm_chart_ref: cert-manager
k8s_cm_version: v1.16.3
k8s_cm_manifest_url: https://github.com/cert-manager/cert-manager/releases/download/{{ k8s_cm_version }}/cert-manager.yaml
k8s_cm_chart_repo: jetstack
k8s_cm_chart_repo_url: https://charts.jetstack.io
# Default is assumed false, set by vars/systems/
# k8s_selinux: false
# nginx-ingress
# nginx-ingress controller release tag and manifest URL
k8s_ingress_ngx_version: controller-v1.10.1
# was "{{ k8s_ingress_version }}" — undefined; the variable declared above is
# k8s_ingress_ngx_version, so rendering would fail with an undefined-variable error
k8s_ingress_ngx_manifest_url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/{{ k8s_ingress_ngx_version }}/deploy/static/provider/cloud/deploy.yaml
# k8s_acme_email
# metal-lb
k8s_mlb_version: v0.14.8
k8s_mlb_manifest_url: https://raw.githubusercontent.com/metallb/metallb/{{ k8s_mlb_version }}/manifests/metallb-native.yaml
# you can pre-generate this in a vault with the token.sh script
# k8s_cluster_token
# local-path-provisioner
# k8s_lpp_version: v0.0.11
k8s_lpp_manifest_url: https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
# stable, latest, testing, ...
# k8s_channel: stable
# longhorn.io
k8s_lh_version: v1.7.2
k8s_lh_manifest_url: https://raw.githubusercontent.com/longhorn/longhorn/{{ k8s_lh_version }}/deploy/longhorn.yaml
# k8s_version to deploy a specific version
# k8s_version: v1.27.7+k3s2
# TEMPLATE DEFAULTS
# define k8s_*_image_pull_policy to override for specific templates
k8s_image_pull_policy: IfNotPresent
# bootstrap | server | agent
# k8s_node_type: bootstrap
# local-path-provisioner
k8s_local_path_template: shared/local-path-provisioner.yaml
k8s_local_path_image: rancher/local-path-provisioner:master-head
k8s_local_path_bind_mode: WaitForFirstConsumer
k8s_local_path_reclaim_policy: Retain
k8s_local_path_priority_class: system-node-critical
k8s_local_path_dir: /opt/local-path-provisioner
k8s_local_path_default_class: true
# if defined, install manifests from the supplied url, currently this task only supports fetching from a url
# k8s_manifests:
# - name: cert-manager
# url: https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
# namespace for local-path-provisioner: kube-system, local-path-storage
# k8s_local_path_namespace: local-path-storage
# k8s_node_taints
# --node-taint CriticalAddonsOnly=true:NoExecute
# k8s_node_taints:
# - name: CriticalAddonsOnly
# value: true
# effect: NoExecute
# clusterissuer
k8s_ci_template: shared/clusterissuer-acme.yaml
k8s_ci_le_prod_url: https://acme-v02.api.letsencrypt.org/directory
k8s_ci_le_staging_url: https://acme-staging-v02.api.letsencrypt.org/directory
# fluent-bit
k8s_fb_template: shared/fluent-bit.yaml
k8s_fb_image: fluent/fluent-bit:latest
k8s_fb_journal_path: /run/log/journal
k8s_fb_log_path: /var/log
k8s_fb_log_files: /var/log/containers/*.log
# K3S
# namespace for fluent-bit: kube-system, fluent-bit, victoria-logs, loki, logging
k8s_fb_namespace: logging
# flannel-backend: 'vxlan', 'host-gw', 'wireguard-native', 'none'
# k8s_flannel_backend: vxlan
# k8s_flannel_ipv6_masq: false
# k8s_flannel_external_ip: false
# settings for logging, this would typically be victoria-logs or loki
# define: k8s_fb_log_host, k8s_fb_log_port, k8s_fb_log_user, k8s_fb_log_password
# user and password may need to be base64 encoded
k8s_fb_log_port: 443
# k8s_disable_network_policy: true
# kube-vip
k8s_vip_template: shared/kube-vip-ds.yaml
k8s_vip_rbac_manifest: https://kube-vip.io/manifests/rbac.yaml
k8s_vip_image: ghcr.io/kube-vip/kube-vip
k8s_vip_version: v0.8.4
k8s_vip_capabilities:
- NET_ADMIN
- NET_RAW
# disable builtin services
# k8s_disable:
# - traefik
# - servicelb
# define k8s_vip_interface, k8s_vip_address
k8s_vip_cidr: 32
k8s_vip_leader_election: true
k8s_vip_control_plane: true
k8s_vip_services: false
# arp mode
k8s_vip_arp: true
# RKE2
# Default is false, if the host is using network manager, overridden by vars/systems/
# k8s_has_nm: true
# canal, cilium, calico, flannel
# k8s_cni_type: canal
# apply cni custom template
# canal-config.yaml | cilium-config.yaml | calico-config.yaml
# k8s_cni_custom_template: canal-config.yaml
# when using canal enable wg backend
# k8s_canal_wireguard: true
# cilium
# k8s_cilium_hubble: true
# k8s_cilium_eni: true
# disable builtin services
# k8s_disable:
# - rke2-coredns
# - rke2-ingress-nginx
# - rke2-metrics-server
# - rke2-snapshot-controller
# - rke2-snapshot-controller-crd
# - rke2-snapshot-validation-webhook
# bgp mode
# define k8s_vip_bgp_router_id, k8s_vip_bgp_as, k8s_vip_bgp_peer_as, k8s_vip_bgp_address
# define k8s_vip_bgp_peers: 192.168.0.10:65000::false,192.168.0.11:65000::false
k8s_vip_bgp: false

113
defaults/reference.yml Normal file
View File

@ -0,0 +1,113 @@
---
# Additionally define node addresses as needed
# k8s_node_ip: "{{ ansible_host }}"
# k8s_external_ip:
# local-path-storage default settings, see templates/shared/local-path-storage.yaml.j2
# k8s_local_path_image: rancher/local-path-provisioner:master-head
# k8s_local_path_image_pull_policy: IfNotPresent
# k8s_local_path_default_class: true
# k8s_local_path_reclaim_policy: Retain
# k8s_local_path_bind_mode: WaitForFirstConsumer
# k8s_local_path_priority_class: system-node-critical
# k8s_local_path_dir: /opt/local-path-provisioner
# cluster issuers
# k8s_cluster_issuers:
# - name: letsencrypt-prod
# url: https://acme-v02.api.letsencrypt.org/directory
# solvers:
# - type: http
# ingress: nginx
# - type: dns
# provider: cloudflare
# tokenref: apiTokenSecretRef
# secret_name: cloudflare-api-token
# secret_key: api-token
# cluster secrets
# k8s_secrets:
# - name: cloudflare-api-token
# namespace: cert-manager
# data: api-token
# value: ZG9wX3Y...
# k8s_kubelet_args
# - "kube-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "system-reserved=cpu=500m,memory=1Gi,ephemeral-storage=2Gi"
# - "eviction-hard=memory.available<500Mi,nodefs.available<10%"
# - "max-pods={{ k8s_pod_limit }}"
# - "v=2"
# Define
# Default is assumed false, set by vars/systems/
# k8s_selinux: false
# k8s_acme_email
# you can pre-generate this in a vault with the token.sh script
# k8s_cluster_token
# stable, latest, testing, ...
# k8s_channel: stable
# k8s_version to deploy a specific version
# k8s_version: v1.27.7+k3s2
# bootstrap | server | agent
# k8s_node_type: bootstrap
# if defined, install manifests from the supplied url, currently this task only supports fetching from a url
# k8s_manifests:
# - name: cert-manager
# url: https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
# k8s_node_taints
# --node-taint CriticalAddonsOnly=true:NoExecute
# k8s_node_taints:
# - name: CriticalAddonsOnly
# value: true
# effect: NoExecute
# K3S
# flannel-backend: 'vxlan', 'host-gw', 'wireguard-native', 'none'
# k8s_flannel_backend: vxlan
# k8s_flannel_ipv6_masq: false
# k8s_flannel_external_ip: false
# k8s_disable_network_policy: true
# disable builtin services
# k8s_disable:
# - traefik
# - servicelb
# RKE2
# Default is false, if the host is using network manager, overridden by vars/systems/
# k8s_has_nm: true
# canal, cilium, calico, flannel
# k8s_cni_type: canal
# apply cni custom template
# canal-config.yaml | cilium-config.yaml | calico-config.yaml
# k8s_cni_custom_template: canal-config.yaml
# when using canal enable wg backend
# k8s_canal_wireguard: true
# cilium
# k8s_cilium_hubble: true
# k8s_cilium_eni: true
# disable builtin services
# k8s_disable:
# - rke2-coredns
# - rke2-ingress-nginx
# - rke2-metrics-server
# - rke2-snapshot-controller
# - rke2-snapshot-controller-crd
# - rke2-snapshot-validation-webhook

View File

@ -1,21 +1,47 @@
#!/bin/sh
INSTALL_PATH="/usr/local/bin"
INSTALL_ARCH="amd64"
# Fetch latest versions dynamically
KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt)
KUBIE_VERSION="latest"
YQ_VERSION="latest"
KUBIE_VERSION=$(curl -s https://api.github.com/repos/sbstp/kubie/releases/latest | grep -Po '"tag_name": "\K.*?(?=")')
KTAIL_VERSION=$(curl -s https://api.github.com/repos/atombender/ktail/releases/latest | grep -Po '"tag_name": "\K.*?(?=")')
K9S_VERSION=$(curl -s https://api.github.com/repos/derailed/k9s/releases/latest | grep -Po '"tag_name": "\K.*?(?=")')
YQ_VERSION=$(curl -s https://api.github.com/repos/mikefarah/yq/releases/latest | grep -Po '"tag_name": "\K.*?(?=")')
HELM_URL="https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3"
HELM_CMD="/tmp/get_helm.sh"
sudo wget -qO ${INSTALL_PATH}/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${INSTALL_ARCH}/kubectl
sudo chmod a+x ${INSTALL_PATH}/kubectl
# Detect sudo or doas
SUDO_CMD=$(command -v sudo || command -v doas) || { echo "Neither sudo nor doas found. Please install one of them."; exit 1; }
sudo wget -qO ${INSTALL_PATH}/kubie https://github.com/sbstp/kubie/releases/${KUBIE_VERSION}/download/kubie-linux-${INSTALL_ARCH}
sudo chmod a+x ${INSTALL_PATH}/kubie
# Detect system architecture
INSTALL_ARCH=$(uname -m | sed -E 's/x86_64/amd64/; s/aarch64/arm64/; s/armv7l/arm/; s/ppc64le/ppc64le/; s/s390x/s390x/')
if [ -z "$INSTALL_ARCH" ]; then
echo "Unsupported architecture: $(uname -m)"
exit 1
fi
sudo wget -qO ${INSTALL_PATH}/yq https://github.com/mikefarah/yq/releases/${YQ_VERSION}/download/yq_linux_${INSTALL_ARCH}
sudo chmod a+x ${INSTALL_PATH}/yq
# Fetch binaries
$SUDO_CMD wget -qO ${INSTALL_PATH}/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${INSTALL_ARCH}/kubectl
$SUDO_CMD wget -qO ${INSTALL_PATH}/kubie https://github.com/sbstp/kubie/releases/download/${KUBIE_VERSION}/kubie-linux-${INSTALL_ARCH}
$SUDO_CMD wget -qO ${INSTALL_PATH}/yq https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_linux_${INSTALL_ARCH}
$SUDO_CMD wget -qO ${INSTALL_PATH}/ktail https://github.com/atombender/ktail/releases/download/${KTAIL_VERSION}/ktail-linux-${INSTALL_ARCH}
curl -fsSL -o get_helm.sh ${HELM_URL}
chmod 700 get_helm.sh
./get_helm.sh
# k9s (tar.gz format)
$SUDO_CMD wget -qO ${INSTALL_PATH}/k9s_Linux_${INSTALL_ARCH}.tar.gz https://github.com/derailed/k9s/releases/download/${K9S_VERSION}/k9s_linux_${INSTALL_ARCH}.tar.gz
$SUDO_CMD tar -xzf ${INSTALL_PATH}/k9s_Linux_${INSTALL_ARCH}.tar.gz -C ${INSTALL_PATH}/ k9s
$SUDO_CMD chown root:root ${INSTALL_PATH}/k9s
$SUDO_CMD rm -f ${INSTALL_PATH}/k9s_Linux_${INSTALL_ARCH}.tar.gz
# Set execution permissions
$SUDO_CMD chmod a+x ${INSTALL_PATH}/kubectl ${INSTALL_PATH}/kubie ${INSTALL_PATH}/yq ${INSTALL_PATH}/k9s ${INSTALL_PATH}/ktail
# Install Helm
echo "Installing Helm..."
curl -fsSL -o ${HELM_CMD} ${HELM_URL}
chmod 700 ${HELM_CMD}
$SUDO_CMD ${HELM_CMD} --no-sudo
rm -f ${HELM_CMD}
echo "Installation complete"

View File

@ -1,7 +1,7 @@
#!/bin/bash
vault_output="$1"
vault_regex=".*\.yml$"
vault_regex=".*\.(yml|yaml)$"
vault_var_name="k8s_cluster_token"
token="$(openssl rand -hex 16)"

View File

@ -4,47 +4,55 @@
- k8s
- k8s-config
block:
- name: check for existing http_proxy
shell: echo $http_proxy
register: http_proxy
ignore_errors: true
changed_when: false
when:
- k8s_http_proxy is not defined
- name: check for existing http_proxy
shell: echo $http_proxy
register: http_proxy
ignore_errors: true
changed_when: false
- name: check for existing https_proxy
shell: echo $https_proxy
register: https_proxy
ignore_errors: true
changed_when: false
- name: check for existing https_proxy
shell: echo $https_proxy
register: https_proxy
ignore_errors: true
changed_when: false
when:
- k8s_https_proxy is not defined
- name: check for existing no_proxy
shell: echo $no_proxy
register: no_proxy
ignore_errors: true
changed_when: false
- name: Set fact for HTTP_PROXY
set_fact:
k8s_http_proxy: "{{ http_proxy.stdout | default('') }}"
when:
- http_proxy.stdout != ""
- name: check for existing no_proxy
shell: echo $no_proxy
register: no_proxy
ignore_errors: true
changed_when: false
when:
- k8s_no_proxy is not defined
- name: Set fact for HTTPS_PROXY
set_fact:
k8s_https_proxy: "{{ https_proxy.stdout | default('') }}"
when:
- https_proxy.stdout != ""
- name: set fact for HTTP_PROXY
set_fact:
k8s_http_proxy: "{{ http_proxy.stdout | default('') }}"
when:
- k8s_http_proxy is not defined
- http_proxy.stdout != ""
- name: Set fact for NO_PROXY
set_fact:
k8s_no_proxy: "{{ no_proxy.stdout | default('') }}"
when: no_proxy.stdout != ""
- name: set fact for HTTPS_PROXY
set_fact:
k8s_https_proxy: "{{ https_proxy.stdout | default('') }}"
when:
- k8s_https_proxy is not defined
- https_proxy.stdout != ""
- name: template rke2 http proxy
ansible.builtin.template:
src: "templates/{{ k8s_type }}/proxy.j2"
dest: "/etc/default/{{ k8s_type }}-{{ node_type }}"
mode: 0644
when:
- http_proxy.stdout != ""
- https_proxy.stdout != ""
- name: set fact for NO_PROXY
set_fact:
k8s_no_proxy: "{{ no_proxy.stdout | default('') }}"
when:
- k8s_no_proxy is not defined
- no_proxy.stdout != ""
- name: template rke2 http proxy
ansible.builtin.template:
src: "templates/{{ k8s_type }}/proxy.j2"
dest: "/etc/default/{{ k8s_type }}-{{ node_type }}"
mode: 0644
when:
- k8s_http_proxy is defined or k8s_https_proxy is defined or k8s_no_proxy is defined

View File

@ -1,43 +1,74 @@
---
- name: begining chart deployments
run_once: true
tags:
tags:
- k8s
- k8s-apply-charts
block:
- name: download helm install script
ansible.builtin.get_url:
url: "{{ k8s_helm_install_url }}"
timeout: 120
dest: "{{ k8s_helm_install_script }}"
owner: root
group: root
mode: 0700
- name: download helm install script
ansible.builtin.get_url:
url: "{{ k8s_helm_install_url }}"
timeout: 120
dest: "{{ k8s_helm_install_script }}"
owner: root
group: root
mode: 0700
- name: install helm
ansible.builtin.shell: "{{ k8s_helm_install_script }}"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
- name: install helm
ansible.builtin.shell: "{{ k8s_helm_install_script }}"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
- name: add chart repos
kubernetes.core.helm_repository:
name: "{{ item.repo_name }}"
repo_url: "{{ item.repo_url }}"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
loop: "{{ k8s_charts }}"
when:
- item.repo_name is defined
- item.repo_url is defined
- name: apply helm charts
ansible.builtin.shell: |
helm repo update
helm upgrade --kubeconfig {{ k8s_config_path }}/{{ k8s_type }}.yaml --namespace {{ item.namespace | d('default') }} --create-namespace --install {{ item.name }} {{ item.chart }} {% if item.chart_version is defined %}--version {{ item.chart_version }}{% endif %} {% if item.settings is defined %}{% for setting in item.settings %}--set {{ setting.key }}={{ setting.value }} {% endfor %}{% endif %}
environment:
PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
loop: "{{ k8s_charts }}"
when:
- item.name is defined
- item.chart is defined
- name: add chart repos
kubernetes.core.helm_repository:
name: "{{ item.repo_name }}"
repo_url: "{{ item.repo_url }}"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
loop: "{{ k8s_charts }}"
when:
- item.repo_name is defined
- item.repo_url is defined
- name: apply helm charts
kubernetes.core.helm:
kubeconfig_path: "{{ k8s_config_path }}/{{ k8s_type }}.yaml"
name: "{{ item.name }}"
namespace: "{{ item.namespace | d('default') }}"
chart_ref: "{{ item.chart_ref | d(item.name) }}"
chart_repo_url: "{{ item.repo_url }}"
chart_version: "{{ item.chart_version }}"
values: "{{ item.chart_values | d({}) }}"
create_namespace: true
update_repo_cache: true
environment:
PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
loop: "{{ k8s_charts }}"
when:
- item.name is defined
#- name: debug helm command
# debug:
# msg: "helm upgrade --kubeconfig {{ k8s_config_path }}/{{ k8s_type }}.yaml --namespace {{ item.namespace | d('default') }} --create-namespace --install {{ item.name }} {{ item.chart_ref | d(item.name) }}{% if item.chart_version is defined %} --version {{ item.chart_version }}{% endif %}{% if item.settings is defined %}{% for setting in item.settings %} --set {{ setting }}{% endfor %}{% endif %}"
# loop: "{{ k8s_charts }}"
#- name: update helm cache
# ansible.builtin.command:
# cmd: helm repo update
# environment:
# PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
#- name: apply helm charts
# ansible.builtin.shell: |
# helm repo update
# helm upgrade --kubeconfig {{ k8s_config_path }}/{{ k8s_type }}.yaml \
# --namespace {{ item.namespace | d('default') }} --create-namespace \
# --install {{ item.name }} {{ item.chart }} \
# {% if item.chart_version is defined %}--version {{ item.chart_version }}{% endif %} \
# {% if item.settings is defined %}{% for setting in item.settings %}--set {{ setting }} {% endfor %}{% endif %}
# environment:
# PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
# loop: "{{ k8s_charts }}"
# when:
# - item.name is defined
# - item.chart is defined

View File

@ -1,25 +1,66 @@
---
- name: fetch and update kubeconf
run_once: true
tags:
tags:
- k8s
- k8s-get-kubeconf
block:
- name: fetching yq-- check local path
delegate_to: localhost
connection: local
become: false
ansible.builtin.file:
path: "~/.local/bin"
state: directory
mode: "0755"
when:
- k8s_yq_fetch
- name: fetch kubeconfig
ansible.builtin.fetch:
src: "{{ k8s_config_path }}/{{ k8s_type }}.yaml"
dest: "~/.kube/config-{{ k8s_cluster_name }}.yaml"
flat: yes
- name: fetching yq-- set local architecture
delegate_to: localhost
connection: local
become: false
ansible.builtin.set_fact:
k8s_arch: "{{ {'x86_64': 'amd64', 'aarch64': 'arm64', 'armv7l': 'arm', 'ppc64le': 'ppc64le', 's390x': 's390x'}.get(ansible_architecture, 'amd64') }}"
when:
- k8s_yq_fetch
- name: update local kubeconfig
delegate_to: localhost
connection: local
become: false
ansible.builtin.shell: |
yq e '.clusters[].name = "{{ k8s_cluster_name }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml
yq e '.contexts[].name = "{{ k8s_cluster_context | d(k8s_cluster_name) }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml
yq e '(.clusters[] | select(.name == "{{ k8s_cluster_name }}")).cluster.server = "https://{{ k8s_cluster_url }}:{{ k8s_api_port }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml
yq e '(.contexts[] | select(.name == "{{ k8s_cluster_name }}")).context.cluster = "{{ k8s_cluster_name }}"' -i ~/.kube/config-{{ k8s_cluster_name }}.yaml
when:
- k8s_kubeconfig_update_context
- name: fetching yq-- set command
delegate_to: localhost
connection: local
become: false
ansible.builtin.set_fact:
k8s_yq_bin: "~/.local/bin/yq"
when:
- k8s_yq_fetch
- name: fetching yq-- download binary
delegate_to: localhost
connection: local
become: false
ansible.builtin.get_url:
url: "{{ k8s_yq_url }}"
dest: "~/.local/bin/yq"
mode: "0755"
when:
- k8s_yq_fetch
- name: fetch kubeconfig
ansible.builtin.fetch:
src: "{{ k8s_config_path }}/{{ k8s_type }}.yaml"
dest: "{{ k8s_kubeconfig }}"
flat: yes
- name: update local kubeconfig
delegate_to: localhost
connection: local
become: false
ansible.builtin.shell: |
{{ k8s_yq_bin | d('yq') }} e '.clusters[].name = "{{ k8s_cluster_name }}"' -i {{ k8s_kubeconfig }}
{{ k8s_yq_bin | d('yq') }} e '.contexts[].name = "{{ k8s_cluster_context | d(k8s_cluster_name) }}"' -i {{ k8s_kubeconfig }}
{{ k8s_yq_bin | d('yq') }} e '(.clusters[] | select(.name == "{{ k8s_cluster_name }}")).cluster.server = "https://{{ k8s_cluster_url }}:{{ k8s_api_port }}"' -i {{ k8s_kubeconfig }}
{{ k8s_yq_bin | d('yq') }} e '(.contexts[] | select(.name == "{{ k8s_cluster_name }}")).context.cluster = "{{ k8s_cluster_name }}"' -i {{ k8s_kubeconfig }}
environment:
PATH: "~/.local/bin:{{ ansible_env.PATH }}"
when:
- k8s_kubeconfig_update_context

View File

@ -0,0 +1,161 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ k8s_fb_namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluent-bit
namespace: {{ k8s_fb_namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluent-bit-read
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fluent-bit-read
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fluent-bit-read
subjects:
- kind: ServiceAccount
name: fluent-bit
namespace: {{ k8s_fb_namespace }}
---
apiVersion: v1
kind: Secret
metadata:
name: fluent-bit-auth
namespace: {{ k8s_fb_namespace }}
type: Opaque
data:
username: {{ k8s_fb_log_user }}
password: {{ k8s_fb_log_password }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: {{ k8s_fb_namespace }}
data:
fluent-bit.conf: |
[SERVICE]
Flush 1
Log_Level error
Parsers_File parsers.conf
[INPUT]
Name tail
Path {{ k8s_fb_log_path }}
Parser docker
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.journald
Path {{ k8s_fb_journal_path }}
DB /var/log/flb_journald.db
Read_From_Tail On
[FILTER]
Name kubernetes
Match kube.*
Kube_URL https://kubernetes.default.svc:443
Merge_Log On
K8S-Logging.Exclude Off
Labels On
Annotations On
[FILTER]
Name modify
Match host.journald
Add log_type host
Add environment production
[OUTPUT]
Name http
Match *
Host {{ k8s_fb_log_host }}
Port {{ k8s_fb_log_port }}
URI /insert/jsonline?_stream_fields=stream&_msg_field=log&_time_field=date
Format json_lines
json_date_format iso8601
HTTP_User ${FLUENTBIT_USER}
HTTP_Passwd ${FLUENTBIT_PASSWORD}
tls On
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluent-bit
namespace: {{ k8s_fb_namespace }}
labels:
k8s-app: fluent-bit
spec:
selector:
matchLabels:
k8s-app: fluent-bit
template:
metadata:
labels:
k8s-app: fluent-bit
spec:
serviceAccountName: fluent-bit
containers:
- name: fluent-bit
image: {{ k8s_fb_image }}
volumeMounts:
- name: varlog
mountPath: {{ k8s_fb_log_path }}
- name: journal
mountPath: {{ k8s_fb_journal_path }}
- name: config
mountPath: /fluent-bit/etc/
env:
- name: FLUENTBIT_USER
valueFrom:
secretKeyRef:
name: fluent-bit-auth
key: username
- name: FLUENTBIT_PASSWORD
valueFrom:
secretKeyRef:
name: fluent-bit-auth
key: password
terminationGracePeriodSeconds: 10
volumes:
- name: varlog
hostPath:
path: {{ k8s_fb_log_path }}
- name: journal
hostPath:
path: {{ k8s_fb_journal_path }}
- name: config
configMap:
name: fluent-bit-config

View File

@ -0,0 +1,108 @@
---
# kube-vip DaemonSet (ARP or BGP mode).
# Requires: k8s_vip_interface, k8s_vip_address, k8s_api_port, k8s_vip_version.
# Mode selection: k8s_vip_arp (default true) or k8s_vip_bgp — see defaults/main.yml.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: kube-vip-ds
    app.kubernetes.io/version: "{{ k8s_vip_version }}"
  name: kube-vip-ds
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kube-vip-ds
  template:
    metadata:
      creationTimestamp: null
      labels:
        app.kubernetes.io/name: kube-vip-ds
        app.kubernetes.io/version: "{{ k8s_vip_version }}"
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: Exists
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
      containers:
        - args:
            - manager
          env:
{#- was "if vip_arp": undefined variable, so the ARP env was never emitted #}
{%- if k8s_vip_arp %}
            - name: vip_arp
              value: "{{ k8s_vip_arp | lower }}"
{#- was "else if": invalid Jinja2 syntax (must be "elif"), template failed to render #}
{%- elif k8s_vip_bgp %}
            - name: bgp_enable
              value: "{{ k8s_vip_bgp | lower }}"
            - name: bgp_routerid
{#- was k8s_vip_bgp_routerid; the documented variable is k8s_vip_bgp_router_id #}
              value: "{{ k8s_vip_bgp_router_id }}"
            - name: bgp_as
              value: "{{ k8s_vip_bgp_as | d('65000') }}"
            - name: bgp_peeraddress
            - name: bgp_peerpass
            - name: bgp_peeras
              value: "{{ k8s_vip_bgp_peer_as | d('65000') }}"
            - name: bgp_peers
{#- quoted: peer strings contain colons (ip:as:pass:multihop) #}
              value: "{{ k8s_vip_bgp_peers }}"
{#- "address" removed from this branch: it is set unconditionally below and a
    duplicate env name in one container is rejected/last-wins by the API server #}
{%- endif %}
            - name: port
              value: "{{ k8s_api_port }}"
            - name: vip_nodename
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: vip_interface
              value: "{{ k8s_vip_interface }}"
            - name: vip_cidr
              value: "{{ k8s_vip_cidr }}"
            - name: dns_mode
              value: first
            - name: cp_enable
              value: "{{ k8s_vip_control_plane | lower }}"
            - name: cp_namespace
              value: kube-system
            - name: svc_enable
              value: "{{ k8s_vip_services | lower }}"
            - name: svc_leasename
              value: plndr-svcs-lock
            - name: vip_leaderelection
              value: "{{ k8s_vip_leader_election | lower }}"
            - name: vip_leasename
              value: plndr-cp-lock
            - name: vip_leaseduration
              value: "5"
            - name: vip_renewdeadline
              value: "3"
            - name: vip_retryperiod
              value: "1"
            - name: address
              value: "{{ k8s_vip_address }}"
            - name: prometheus_server
{#- quoted: a plain scalar starting with ":" is fragile across YAML parsers #}
              value: ":2112"
          image: "{{ k8s_vip_image }}:{{ k8s_vip_version }}"
{#- allow per-template override, consistent with the other shared templates #}
          imagePullPolicy: {{ k8s_vip_image_pull_policy | default(k8s_image_pull_policy) }}
          name: kube-vip
          resources: {}
          securityContext:
{%- if k8s_vip_capabilities is defined %}
            capabilities:
              add:
{%- for capability in k8s_vip_capabilities %}
                - {{ capability }}
{%- endfor %}
{%- endif %}
      hostNetwork: true
      serviceAccountName: kube-vip
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
  updateStrategy: {}

View File

@ -8,14 +8,14 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
namespace: {{ k8s_local_path_namespace | default('local-path-storage') }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: local-path-provisioner-role
namespace: local-path-storage
namespace: {{ k8s_local_path_namespace | default('local-path-storage') }}
rules:
- apiGroups: [""]
resources: ["pods"]
@ -45,7 +45,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: local-path-provisioner-bind
namespace: local-path-storage
namespace: {{ k8s_local_path_namespace | default('local-path-storage') }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@ -53,7 +53,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
namespace: {{ k8s_local_path_namespace | default('local-path-storage') }}
---
apiVersion: rbac.authorization.k8s.io/v1
@ -67,14 +67,14 @@ roleRef:
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
namespace: {{ k8s_local_path_namespace | default('local-path-storage') }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-storage
namespace: {{ k8s_local_path_namespace | default('local-path-storage') }}
spec:
replicas: 1
selector:
@ -89,7 +89,7 @@ spec:
containers:
- name: local-path-provisioner
image: {{ k8s_local_path_image | default('rancher/local-path-provisioner:master-head') }}
imagePullPolicy: {{ k8s_local_path_image_pull_policy | default('IfNotPresent') }}
imagePullPolicy: {{ k8s_local_path_image_pull_policy | default(k8s_image_pull_policy) }}
command:
- local-path-provisioner
- --debug
@ -125,7 +125,7 @@ kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: local-path-storage
namespace: {{ k8s_local_path_namespace | default('local-path-storage') }}
data:
config.json: |-
{
@ -158,4 +158,4 @@ data:
containers:
- name: helper-pod
image: busybox
imagePullPolicy: {{ k8s_local_path_image_pull_policy | default('IfNotPresent') }}
imagePullPolicy: {{ k8s_local_path_image_pull_policy | default(k8s_image_pull_policy) }}