project: fix var-spacing ansible rule (#10266)

* project: fix var-spacing ansible rule

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing on the beginning/end of jinja template

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing of default filter

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing between filter arguments

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix double space at beginning/end of jinja

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix remaining jinja[spacing] ansible-lint warning

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
Author: Arthur Outhenin-Chalandre
Date: 2023-07-05 05:36:54 +02:00
Committed by: GitHub
Parent: f8b93fa88a
Commit: 5d00b851ce
178 changed files with 767 additions and 733 deletions
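
Context for the diffs that follow: the convention applied throughout this commit, enforced by ansible-lint's jinja[spacing] rule, is a single space just inside the {{ }} delimiters, around each | filter pipe, and after commas between filter arguments. A minimal before/after sketch of the pattern, reusing expressions that appear in the diffs below:

    # flagged: no space around the pipe, none after the argument comma, uneven padding inside the parentheses
    shell: "{{ user.shell|default(omit) }}"
    path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"

    # accepted: one space inside {{ }}, around every |, and after each comma
    shell: "{{ user.shell | default(omit) }}"
    path: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf', '/etc/dnf/dnf.conf') }}"

A few multi-line Jinja blocks are suppressed with "# noqa: jinja[spacing]" comments instead of being reflowed, as in the check_certs and etcd peer-address tasks further down.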

View File

@@ -20,8 +20,8 @@ addusers:
adduser:
name: "{{ user.name }}"
group: "{{ user.name|default(None) }}"
comment: "{{ user.comment|default(None) }}"
shell: "{{ user.shell|default(None) }}"
system: "{{ user.system|default(None) }}"
create_home: "{{ user.create_home|default(None) }}"
group: "{{ user.name | default(None) }}"
comment: "{{ user.comment | default(None) }}"
shell: "{{ user.shell | default(None) }}"
system: "{{ user.system | default(None) }}"
create_home: "{{ user.create_home | default(None) }}"

View File

@@ -1,16 +1,16 @@
---
- name: User | Create User Group
group:
name: "{{ user.group|default(user.name) }}"
system: "{{ user.system|default(omit) }}"
name: "{{ user.group | default(user.name) }}"
system: "{{ user.system | default(omit) }}"
- name: User | Create User
user:
comment: "{{ user.comment|default(omit) }}"
create_home: "{{ user.create_home|default(omit) }}"
group: "{{ user.group|default(user.name) }}"
home: "{{ user.home|default(omit) }}"
shell: "{{ user.shell|default(omit) }}"
comment: "{{ user.comment | default(omit) }}"
create_home: "{{ user.create_home | default(omit) }}"
group: "{{ user.group | default(user.name) }}"
home: "{{ user.home | default(omit) }}"
shell: "{{ user.shell | default(omit) }}"
name: "{{ user.name }}"
system: "{{ user.system|default(omit) }}"
system: "{{ user.system | default(omit) }}"
when: user.name != "root"

View File

@@ -6,7 +6,7 @@
- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
community.general.ini_file:
path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
path: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf', '/etc/dnf/dnf.conf') }}"
section: main
option: proxy
value: "{{ http_proxy | default(omit) }}"
@@ -23,7 +23,7 @@
dest: /etc/yum.repos.d/public-yum-ol7.repo
mode: 0644
when:
- use_oracle_public_repo|default(true)
- use_oracle_public_repo | default(true)
- '''ID="ol"'' in os_release.stdout_lines'
- (ansible_distribution_version | float) < 7.6
environment: "{{ proxy_env }}"
@@ -40,7 +40,7 @@
- ol7_addons
- ol7_developer_EPEL
when:
- use_oracle_public_repo|default(true)
- use_oracle_public_repo | default(true)
- '''ID="ol"'' in os_release.stdout_lines'
- (ansible_distribution_version | float) < 7.6
@@ -49,7 +49,7 @@
name: "oracle-epel-release-el{{ ansible_distribution_major_version }}"
state: present
when:
- use_oracle_public_repo|default(true)
- use_oracle_public_repo | default(true)
- '''ID="ol"'' in os_release.stdout_lines'
- (ansible_distribution_version | float) >= 7.6
@@ -65,7 +65,7 @@
- { option: "enabled", value: "1" }
- { option: "baseurl", value: "http://yum.oracle.com/repo/OracleLinux/OL{{ ansible_distribution_major_version }}/addons/$basearch/" }
when:
- use_oracle_public_repo|default(true)
- use_oracle_public_repo | default(true)
- '''ID="ol"'' in os_release.stdout_lines'
- (ansible_distribution_version | float) >= 7.6
@@ -80,9 +80,9 @@
- { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" }
- { option: "enabled", value: "1" }
- { option: "gpgcheck", value: "0" }
- { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version|int > 7 %}os/{% endif %}" }
- { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" }
when:
- use_oracle_public_repo|default(true)
- use_oracle_public_repo | default(true)
- '''ID="ol"'' in os_release.stdout_lines'
- (ansible_distribution_version | float) >= 7.6
- (ansible_distribution_version | float) < 9
@@ -113,6 +113,6 @@
# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements
- name: Install libselinux python package
package:
name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}"
name: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
state: present
become: true

View File

@@ -20,7 +20,7 @@
when: need_bootstrap.rc != 0
- name: Install required packages on fedora coreos
raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages|join(' ') }}"
raw: "export http_proxy={{ http_proxy | default('') }};rpm-ostree install --allow-inactive {{ fedora_coreos_packages | join(' ') }}"
become: true
when: need_bootstrap.rc != 0

View File

@@ -6,7 +6,7 @@
- name: Add proxy to yum.conf or dnf.conf if http_proxy is defined
community.general.ini_file:
path: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf','/etc/dnf/dnf.conf') }}"
path: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('/etc/yum.conf', '/etc/dnf/dnf.conf') }}"
section: main
option: proxy
value: "{{ http_proxy | default(omit) }}"
@@ -57,7 +57,7 @@
sync: true
notify: RHEL auto-attach subscription
become: true
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
when:
- rh_subscription_username is defined
- rh_subscription_status.changed
@@ -108,6 +108,6 @@
# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements
- name: Install libselinux python package
package:
name: "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}"
name: "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
state: present
become: true

View File

@@ -89,7 +89,7 @@
name:
- ceph-common
state: present
when: rbd_provisioner_enabled|default(false)
when: rbd_provisioner_enabled | default(false)
- name: Ensure bash_completion.d folder exists
file:

View File

@@ -15,14 +15,14 @@
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution | lower }}.yml"
- "{{ ansible_os_family | lower }}-{{ host_architecture }}.yml"
- "{{ ansible_os_family | lower }}.yml"
- defaults.yml
paths:
- ../vars

View File

@@ -36,7 +36,7 @@ containerd_default_base_runtime_spec_patch:
soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
containerd_base_runtime_specs:
cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch,recursive=1) }}"
cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch, recursive=1) }}"
containerd_grpc_max_recv_message_size: 16777216
containerd_grpc_max_send_message_size: 16777216

View File

@@ -130,7 +130,7 @@
capabilities = ["pull", "resolve", "push"]
skip_verify = true
with_dict: "{{ containerd_insecure_registries }}"
when: containerd_use_config_path is defined and containerd_use_config_path|bool and containerd_insecure_registries is defined
when: containerd_use_config_path is defined and containerd_use_config_path | bool and containerd_insecure_registries is defined
# you can sometimes end up in a state where everything is installed
# but containerd was not started / enabled

View File

@@ -3,5 +3,5 @@ containerd_repo_info:
repos:
- >
deb {{ containerd_debian_repo_base_url }}
{{ ansible_distribution_release|lower }}
{{ ansible_distribution_release | lower }}
{{ containerd_debian_repo_component }}

View File

@@ -3,5 +3,5 @@ containerd_repo_info:
repos:
- >
deb {{ containerd_ubuntu_repo_base_url }}
{{ ansible_distribution_release|lower }}
{{ ansible_distribution_release | lower }}
{{ containerd_ubuntu_repo_component }}

View File

@@ -27,7 +27,7 @@ crio_registry_auth: []
# password: pass
crio_seccomp_profile: ""
crio_selinux: "{{ (preinstall_selinux_state == 'enforcing')|lower }}"
crio_selinux: "{{ (preinstall_selinux_state == 'enforcing') | lower }}"
crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}"
# Override system default for storage driver

View File

@@ -2,7 +2,7 @@
# TODO(cristicalin): drop this file after 2.21
- name: CRI-O kubic repo name for debian os family
set_fact:
crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
when: ansible_os_family == "Debian"
- name: Remove legacy CRI-O kubic apt repo key

View File

@@ -32,7 +32,7 @@
- name: cri-o | build a list of crio runtimes with Katacontainers runtimes
set_fact:
crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
when:
- kata_containers_enabled

View File

@@ -1,7 +1,7 @@
---
- name: CRI-O | Kubic repo name for debian os family
set_fact:
crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
when: ansible_os_family == "Debian"
tags:
- reset_crio

View File

@@ -22,16 +22,16 @@
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_distribution.split(' ')[0]|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ host_architecture }}.yml"
- "{{ ansible_distribution | lower }}.yml"
- "{{ ansible_distribution.split(' ')[0] | lower }}.yml"
- "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_os_family | lower }}-{{ host_architecture }}.yml"
- "{{ ansible_os_family | lower }}.yml"
- defaults.yml
paths:
- ../vars
@@ -121,7 +121,7 @@
when:
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- not is_ostree
- docker_package_info.pkgs|length > 0
- docker_package_info.pkgs | length > 0
# This is required to ensure any apt upgrade will not break kubernetes
- name: Tell Debian hosts not to change the docker version with apt upgrade

View File

@@ -19,7 +19,7 @@
changed_when: true
delay: 5
ignore_errors: true # noqa ignore-errors
when: docker_packages_list|length>0
when: docker_packages_list | length>0
- name: reset | remove all containers
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
@@ -29,7 +29,7 @@
retries: 4
until: remove_all_containers.rc == 0
delay: 5
when: docker_packages_list|length>0
when: docker_packages_list | length>0
- name: Docker | Stop docker service
service:
@@ -40,7 +40,7 @@
- docker
- docker.socket
- containerd
when: docker_packages_list|length>0
when: docker_packages_list | length>0
- name: Docker | Remove dpkg hold
dpkg_selections:
@@ -63,7 +63,7 @@
when:
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- not is_ostree
- docker_packages_list|length > 0
- docker_packages_list | length > 0
- name: Docker | ensure docker-ce repository is removed
apt_repository:

View File

@@ -10,12 +10,12 @@
- name: add upstream dns servers
set_fact:
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers | default([]) }}"
when: dns_mode in ['coredns', 'coredns_dual']
- name: add global searchdomains
set_fact:
docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"
docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains | default([]) }}"
- name: check system nameservers
shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
@@ -42,25 +42,25 @@
- name: add system search domains to docker options
set_fact:
docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split()|default([])) | unique }}"
docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split() | default([])) | unique }}"
when: system_search_domains.stdout
- name: check number of nameservers
fail:
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in docker.yml and we will only use the first 3."
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
when: docker_dns_servers | length > 3 and docker_dns_servers_strict | bool
- name: rtrim number of nameservers to 3
set_fact:
docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool
when: docker_dns_servers | length > 3 and not docker_dns_servers_strict | bool
- name: check number of search domains
fail:
msg: "Too many search domains"
when: docker_dns_search_domains|length > 6
when: docker_dns_search_domains | length > 6
- name: check length of search domains
fail:
msg: "Search domains exceeded limit of 256 characters"
when: docker_dns_search_domains|join(' ')|length > 256
when: docker_dns_search_domains | join(' ') | length > 256

View File

@@ -17,17 +17,17 @@ containerd_versioned_pkg:
# https://download.docker.com/linux/debian/
docker_versioned_pkg:
'latest': docker-ce
'23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
'24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release|lower }}
'23.0': docker-ce=5:23.0.6-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
'24.0': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
'stable': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
'edge': docker-ce=5:24.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
docker_package_info:
pkgs:
@@ -44,5 +44,5 @@ docker_repo_info:
repos:
- >
deb {{ docker_debian_repo_base_url }}
{{ ansible_distribution_release|lower }}
{{ ansible_distribution_release | lower }}
stable

View File

@@ -16,19 +16,19 @@ containerd_versioned_pkg:
# https://download.docker.com/linux/debian/
docker_versioned_pkg:
'latest': docker-ce
'18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
'19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
'20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'18.09': docker-ce=5:18.09.9~3-0~debian-{{ ansible_distribution_release | lower }}
'19.03': docker-ce=5:19.03.15~3-0~debian-{{ ansible_distribution_release | lower }}
'20.10': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
'stable': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
'edge': docker-ce=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release|lower }}
'20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release|lower }}
'18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release | lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~debian-{{ ansible_distribution_release | lower }}
'20.10': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
'stable': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
'edge': docker-ce-cli=5:20.10.20~3-0~debian-{{ ansible_distribution_release | lower }}
docker_package_info:
pkgs:
@@ -45,5 +45,5 @@ docker_repo_info:
repos:
- >
deb {{ docker_debian_repo_base_url }}
{{ ansible_distribution_release|lower }}
{{ ansible_distribution_release | lower }}
stable

View File

@@ -16,19 +16,19 @@ containerd_versioned_pkg:
# https://download.docker.com/linux/ubuntu/
docker_versioned_pkg:
'latest': docker-ce
'18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'18.09': docker-ce=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'19.03': docker-ce=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'20.10': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'stable': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'edge': docker-ce=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'19.03': docker-ce-cli=5:19.03.15~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'20.10': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'stable': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
'edge': docker-ce-cli=5:20.10.20~3-0~ubuntu-{{ ansible_distribution_release | lower }}
docker_package_info:
pkgs:
@@ -45,5 +45,5 @@ docker_repo_info:
repos:
- >
deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
{{ ansible_distribution_release|lower }}
{{ ansible_distribution_release | lower }}
stable

View File

@@ -70,10 +70,10 @@ image_pull_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localh
image_info_command_on_localhost: "{{ lookup('vars', image_command_tool_on_localhost + '_image_info_command') }}"
# Arch of Docker images and needed packages
image_arch: "{{host_architecture | default('amd64')}}"
image_arch: "{{ host_architecture | default('amd64') }}"
# Nerdctl insecure flag set
nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 -%}--insecure-registry{%- else -%}{%- endif -%}'
nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and containerd_insecure_registries | length > 0 -%}--insecure-registry{%- else -%}{%- endif -%}'
# Versions
kubeadm_version: "{{ kube_version }}"
@@ -277,10 +277,10 @@ haproxy_image_tag: 2.6.6-alpine
# Coredns version should be supported by corefile-migration (or at least work with)
# bundle with kubeadm; if not 'basic' upgrade can sometimes fail
coredns_version: "{{ 'v1.10.1' if (kube_version is version('v1.27.0','>=')) else 'v1.9.3' }}"
coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}"
coredns_version: "{{ 'v1.10.1' if (kube_version is version('v1.27.0', '>=')) else 'v1.9.3' }}"
coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1', '>=')) }}"
coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}"
coredns_image_repo: "{{ kube_image_repo }}{{ '/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}"
coredns_image_tag: "{{ coredns_version if (coredns_image_is_namespaced | bool) else (coredns_version | regex_replace('^v', '')) }}"
nodelocaldns_version: "1.22.20"
@@ -389,7 +389,7 @@ downloads:
container: true
repo: "{{ netcheck_server_image_repo }}"
tag: "{{ netcheck_server_image_tag }}"
sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
sha256: "{{ netcheck_server_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -398,7 +398,7 @@ downloads:
container: true
repo: "{{ netcheck_agent_image_repo }}"
tag: "{{ netcheck_agent_image_tag }}"
sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
sha256: "{{ netcheck_agent_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -412,7 +412,7 @@ downloads:
tag: "{{ etcd_image_tag }}"
sha256: >-
{{ etcd_binary_checksum if (etcd_deployment_type == 'host')
else etcd_digest_checksum|d(None) }}
else etcd_digest_checksum | d(None) }}
url: "{{ etcd_download_url }}"
unarchive: "{{ etcd_deployment_type == 'host' }}"
owner: "root"
@@ -635,7 +635,7 @@ downloads:
container: true
repo: "{{ cilium_image_repo }}"
tag: "{{ cilium_image_tag }}"
sha256: "{{ cilium_digest_checksum|default(None) }}"
sha256: "{{ cilium_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -644,7 +644,7 @@ downloads:
container: true
repo: "{{ cilium_operator_image_repo }}"
tag: "{{ cilium_operator_image_tag }}"
sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
sha256: "{{ cilium_operator_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -653,7 +653,7 @@ downloads:
container: true
repo: "{{ cilium_hubble_relay_image_repo }}"
tag: "{{ cilium_hubble_relay_image_tag }}"
sha256: "{{ cilium_hubble_relay_digest_checksum|default(None) }}"
sha256: "{{ cilium_hubble_relay_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -662,7 +662,7 @@ downloads:
container: true
repo: "{{ cilium_hubble_certgen_image_repo }}"
tag: "{{ cilium_hubble_certgen_image_tag }}"
sha256: "{{ cilium_hubble_certgen_digest_checksum|default(None) }}"
sha256: "{{ cilium_hubble_certgen_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -671,7 +671,7 @@ downloads:
container: true
repo: "{{ cilium_hubble_ui_image_repo }}"
tag: "{{ cilium_hubble_ui_image_tag }}"
sha256: "{{ cilium_hubble_ui_digest_checksum|default(None) }}"
sha256: "{{ cilium_hubble_ui_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -680,7 +680,7 @@ downloads:
container: true
repo: "{{ cilium_hubble_ui_backend_image_repo }}"
tag: "{{ cilium_hubble_ui_backend_image_tag }}"
sha256: "{{ cilium_hubble_ui_backend_digest_checksum|default(None) }}"
sha256: "{{ cilium_hubble_ui_backend_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -689,7 +689,7 @@ downloads:
container: true
repo: "{{ cilium_hubble_envoy_image_repo }}"
tag: "{{ cilium_hubble_envoy_image_tag }}"
sha256: "{{ cilium_hubble_envoy_digest_checksum|default(None) }}"
sha256: "{{ cilium_hubble_envoy_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -711,7 +711,7 @@ downloads:
container: true
repo: "{{ multus_image_repo }}"
tag: "{{ multus_image_tag }}"
sha256: "{{ multus_digest_checksum|default(None) }}"
sha256: "{{ multus_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -720,7 +720,7 @@ downloads:
container: true
repo: "{{ flannel_image_repo }}"
tag: "{{ flannel_image_tag }}"
sha256: "{{ flannel_digest_checksum|default(None) }}"
sha256: "{{ flannel_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -729,7 +729,7 @@ downloads:
container: true
repo: "{{ flannel_init_image_repo }}"
tag: "{{ flannel_init_image_tag }}"
sha256: "{{ flannel_init_digest_checksum|default(None) }}"
sha256: "{{ flannel_init_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -753,7 +753,7 @@ downloads:
container: true
repo: "{{ calico_node_image_repo }}"
tag: "{{ calico_node_image_tag }}"
sha256: "{{ calico_node_digest_checksum|default(None) }}"
sha256: "{{ calico_node_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -762,7 +762,7 @@ downloads:
container: true
repo: "{{ calico_cni_image_repo }}"
tag: "{{ calico_cni_image_tag }}"
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
sha256: "{{ calico_cni_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -771,7 +771,7 @@ downloads:
container: true
repo: "{{ calico_flexvol_image_repo }}"
tag: "{{ calico_flexvol_image_tag }}"
sha256: "{{ calico_flexvol_digest_checksum|default(None) }}"
sha256: "{{ calico_flexvol_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -780,7 +780,7 @@ downloads:
container: true
repo: "{{ calico_policy_image_repo }}"
tag: "{{ calico_policy_image_tag }}"
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
sha256: "{{ calico_policy_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -789,7 +789,7 @@ downloads:
container: true
repo: "{{ calico_typha_image_repo }}"
tag: "{{ calico_typha_image_tag }}"
sha256: "{{ calico_typha_digest_checksum|default(None) }}"
sha256: "{{ calico_typha_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -798,7 +798,7 @@ downloads:
container: true
repo: "{{ calico_apiserver_image_repo }}"
tag: "{{ calico_apiserver_image_tag }}"
sha256: "{{ calico_apiserver_digest_checksum|default(None) }}"
sha256: "{{ calico_apiserver_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -811,9 +811,9 @@ downloads:
url: "{{ calico_crds_download_url }}"
unarchive: true
unarchive_extra_opts:
- "{{ '--strip=6' if (calico_version is version('v3.22.3','<')) else '--strip=3' }}"
- "{{ '--strip=6' if (calico_version is version('v3.22.3', '<')) else '--strip=3' }}"
- "--wildcards"
- "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3','<')) else '*/libcalico-go/config/crd/' }}"
- "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3', '<')) else '*/libcalico-go/config/crd/' }}"
owner: "root"
mode: "0755"
groups:
@@ -824,7 +824,7 @@ downloads:
container: true
repo: "{{ weave_kube_image_repo }}"
tag: "{{ weave_kube_image_tag }}"
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
sha256: "{{ weave_kube_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -833,7 +833,7 @@ downloads:
container: true
repo: "{{ weave_npc_image_repo }}"
tag: "{{ weave_npc_image_tag }}"
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
sha256: "{{ weave_npc_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -842,7 +842,7 @@ downloads:
container: true
repo: "{{ kube_ovn_container_image_repo }}"
tag: "{{ kube_ovn_container_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
sha256: "{{ kube_ovn_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -851,7 +851,7 @@ downloads:
container: true
repo: "{{ kube_router_image_repo }}"
tag: "{{ kube_router_image_tag }}"
sha256: "{{ kube_router_digest_checksum|default(None) }}"
sha256: "{{ kube_router_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -860,7 +860,7 @@ downloads:
container: true
repo: "{{ pod_infra_image_repo }}"
tag: "{{ pod_infra_image_tag }}"
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
sha256: "{{ pod_infra_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -869,7 +869,7 @@ downloads:
container: true
repo: "{{ kube_vip_image_repo }}"
tag: "{{ kube_vip_image_tag }}"
sha256: "{{ kube_vip_digest_checksum|default(None) }}"
sha256: "{{ kube_vip_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -878,7 +878,7 @@ downloads:
container: true
repo: "{{ nginx_image_repo }}"
tag: "{{ nginx_image_tag }}"
sha256: "{{ nginx_digest_checksum|default(None) }}"
sha256: "{{ nginx_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -887,7 +887,7 @@ downloads:
container: true
repo: "{{ haproxy_image_repo }}"
tag: "{{ haproxy_image_tag }}"
sha256: "{{ haproxy_digest_checksum|default(None) }}"
sha256: "{{ haproxy_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -896,7 +896,7 @@ downloads:
container: true
repo: "{{ coredns_image_repo }}"
tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum|default(None) }}"
sha256: "{{ coredns_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -905,7 +905,7 @@ downloads:
container: true
repo: "{{ nodelocaldns_image_repo }}"
tag: "{{ nodelocaldns_image_tag }}"
sha256: "{{ nodelocaldns_digest_checksum|default(None) }}"
sha256: "{{ nodelocaldns_digest_checksum | default(None) }}"
groups:
- k8s_cluster
@@ -914,7 +914,7 @@ downloads:
container: true
repo: "{{ dnsautoscaler_image_repo }}"
tag: "{{ dnsautoscaler_image_tag }}"
sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}"
sha256: "{{ dnsautoscaler_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -949,7 +949,7 @@ downloads:
container: true
repo: "{{ registry_image_repo }}"
tag: "{{ registry_image_tag }}"
sha256: "{{ registry_digest_checksum|default(None) }}"
sha256: "{{ registry_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -958,7 +958,7 @@ downloads:
container: true
repo: "{{ metrics_server_image_repo }}"
tag: "{{ metrics_server_image_tag }}"
sha256: "{{ metrics_server_digest_checksum|default(None) }}"
sha256: "{{ metrics_server_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -967,7 +967,7 @@ downloads:
container: true
repo: "{{ local_volume_provisioner_image_repo }}"
tag: "{{ local_volume_provisioner_image_tag }}"
sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
sha256: "{{ local_volume_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -976,7 +976,7 @@ downloads:
container: true
repo: "{{ cephfs_provisioner_image_repo }}"
tag: "{{ cephfs_provisioner_image_tag }}"
sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
sha256: "{{ cephfs_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -985,7 +985,7 @@ downloads:
container: true
repo: "{{ rbd_provisioner_image_repo }}"
tag: "{{ rbd_provisioner_image_tag }}"
sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}"
sha256: "{{ rbd_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -994,7 +994,7 @@ downloads:
container: true
repo: "{{ local_path_provisioner_image_repo }}"
tag: "{{ local_path_provisioner_image_tag }}"
sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}"
sha256: "{{ local_path_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1003,7 +1003,7 @@ downloads:
container: true
repo: "{{ ingress_nginx_controller_image_repo }}"
tag: "{{ ingress_nginx_controller_image_tag }}"
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
sha256: "{{ ingress_nginx_controller_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1012,7 +1012,7 @@ downloads:
container: true
repo: "{{ alb_ingress_image_repo }}"
tag: "{{ alb_ingress_image_tag }}"
sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}"
sha256: "{{ ingress_alb_controller_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1021,7 +1021,7 @@ downloads:
container: true
repo: "{{ cert_manager_controller_image_repo }}"
tag: "{{ cert_manager_controller_image_tag }}"
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
sha256: "{{ cert_manager_controller_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1030,7 +1030,7 @@ downloads:
container: true
repo: "{{ cert_manager_cainjector_image_repo }}"
tag: "{{ cert_manager_cainjector_image_tag }}"
sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}"
sha256: "{{ cert_manager_cainjector_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1039,7 +1039,7 @@ downloads:
container: true
repo: "{{ cert_manager_webhook_image_repo }}"
tag: "{{ cert_manager_webhook_image_tag }}"
sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}"
sha256: "{{ cert_manager_webhook_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1048,7 +1048,7 @@ downloads:
container: true
repo: "{{ csi_attacher_image_repo }}"
tag: "{{ csi_attacher_image_tag }}"
sha256: "{{ csi_attacher_digest_checksum|default(None) }}"
sha256: "{{ csi_attacher_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1057,7 +1057,7 @@ downloads:
container: true
repo: "{{ csi_provisioner_image_repo }}"
tag: "{{ csi_provisioner_image_tag }}"
sha256: "{{ csi_provisioner_digest_checksum|default(None) }}"
sha256: "{{ csi_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1066,7 +1066,7 @@ downloads:
container: true
repo: "{{ csi_snapshotter_image_repo }}"
tag: "{{ csi_snapshotter_image_tag }}"
sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}"
sha256: "{{ csi_snapshotter_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1075,7 +1075,7 @@ downloads:
container: true
repo: "{{ snapshot_controller_image_repo }}"
tag: "{{ snapshot_controller_image_tag }}"
sha256: "{{ snapshot_controller_digest_checksum|default(None) }}"
sha256: "{{ snapshot_controller_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1084,7 +1084,7 @@ downloads:
container: true
repo: "{{ csi_resizer_image_repo }}"
tag: "{{ csi_resizer_image_tag }}"
sha256: "{{ csi_resizer_digest_checksum|default(None) }}"
sha256: "{{ csi_resizer_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1093,7 +1093,7 @@ downloads:
container: true
repo: "{{ csi_node_driver_registrar_image_repo }}"
tag: "{{ csi_node_driver_registrar_image_tag }}"
sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}"
sha256: "{{ csi_node_driver_registrar_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1102,7 +1102,7 @@ downloads:
container: true
repo: "{{ cinder_csi_plugin_image_repo }}"
tag: "{{ cinder_csi_plugin_image_tag }}"
sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}"
sha256: "{{ cinder_csi_plugin_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1111,7 +1111,7 @@ downloads:
container: true
repo: "{{ aws_ebs_csi_plugin_image_repo }}"
tag: "{{ aws_ebs_csi_plugin_image_tag }}"
sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}"
sha256: "{{ aws_ebs_csi_plugin_digest_checksum | default(None) }}"
groups:
- kube_node
@@ -1120,7 +1120,7 @@ downloads:
container: true
repo: "{{ dashboard_image_repo }}"
tag: "{{ dashboard_image_tag }}"
sha256: "{{ dashboard_digest_checksum|default(None) }}"
sha256: "{{ dashboard_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -1129,7 +1129,7 @@ downloads:
container: true
repo: "{{ dashboard_metrics_scraper_repo }}"
tag: "{{ dashboard_metrics_scraper_tag }}"
sha256: "{{ dashboard_digest_checksum|default(None) }}"
sha256: "{{ dashboard_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -1138,7 +1138,7 @@ downloads:
container: true
repo: "{{ metallb_speaker_image_repo }}"
tag: "{{ metallb_version }}"
sha256: "{{ metallb_speaker_digest_checksum|default(None) }}"
sha256: "{{ metallb_speaker_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -1147,7 +1147,7 @@ downloads:
container: true
repo: "{{ metallb_controller_image_repo }}"
tag: "{{ metallb_version }}"
sha256: "{{ metallb_controller_digest_checksum|default(None) }}"
sha256: "{{ metallb_controller_digest_checksum | default(None) }}"
groups:
- kube_control_plane
@@ -1156,7 +1156,7 @@ downloads:
file: true
version: "{{ yq_version }}"
dest: "{{ local_release_dir }}/yq-{{ yq_version }}-{{ image_arch }}"
sha256: "{{ yq_binary_checksum|default(None) }}"
sha256: "{{ yq_binary_checksum | default(None) }}"
url: "{{ yq_download_url }}"
unarchive: false
owner: "root"

View File

@@ -11,7 +11,7 @@
- name: check_pull_required | Set pull_required if the desired image is not yet loaded
set_fact:
pull_required: >-
{%- if image_reponame | regex_replace('^docker\.io/(library/)?','') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
{%- if image_reponame | regex_replace('^docker\.io/(library/)?', '') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
when: not download_always_pull
- name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag

View File

@@ -68,7 +68,7 @@
retries: "{{ download_retries }}"
delay: "{{ retry_stagger | default(5) }}"
environment: "{{ proxy_env }}"
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
loop: "{{ download.mirrors | default([download.url]) }}"
loop_control:
loop_var: mirror
@@ -102,7 +102,7 @@
retries: "{{ download_retries }}"
delay: "{{ retry_stagger | default(5) }}"
environment: "{{ proxy_env }}"
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
- name: download_file | Copy file back to ansible host file cache
ansible.posix.synchronize:

View File

@@ -6,6 +6,6 @@
owner: "{{ download.owner | default(omit) }}"
mode: "{{ download.mode | default(omit) }}"
copy: no
extra_opts: "{{ download.unarchive_extra_opts|default(omit) }}"
extra_opts: "{{ download.unarchive_extra_opts | default(omit) }}"
when:
- download.unarchive | default(false)

View File

@@ -2,7 +2,7 @@
- name: download | Prepare working directories and variables
import_tasks: prep_download.yml
when:
- not skip_downloads|default(false)
- not skip_downloads | default(false)
tags:
- download
- upload
@@ -10,7 +10,7 @@
- name: download | Get kubeadm binary and list of required images
include_tasks: prep_kubeadm_images.yml
when:
- not skip_downloads|default(false)
- not skip_downloads | default(false)
- inventory_hostname in groups['kube_control_plane']
tags:
- download

View File

@@ -58,7 +58,7 @@
- name: prep_download | Register docker images info
shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
register: docker_images
failed_when: false
changed_when: false

View File

@@ -20,7 +20,7 @@
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
mode: 0644
when:
- not skip_kubeadm_images|default(false)
- not skip_kubeadm_images | default(false)
- name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path
copy:
@@ -36,36 +36,36 @@
state: file
- name: prep_kubeadm_images | Generate list of required images
shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns | pause'"
args:
executable: /bin/bash
register: kubeadm_images_raw
run_once: true
changed_when: false
when:
- not skip_kubeadm_images|default(false)
- not skip_kubeadm_images | default(false)
- name: prep_kubeadm_images | Parse list of images
vars:
kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
set_fact:
kubeadm_image:
key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*','')).split(':')[0] }}"
key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}"
value:
enabled: true
container: true
repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}"
tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}"
repo: "{{ item | regex_replace('^(.*):.*$', '\\1') }}"
tag: "{{ item | regex_replace('^.*:(.*)$', '\\1') }}"
groups: k8s_cluster
loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
register: kubeadm_images_cooked
run_once: true
when:
- not skip_kubeadm_images|default(false)
- not skip_kubeadm_images | default(false)
- name: prep_kubeadm_images | Convert list of images to dict for later use
set_fact:
kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
run_once: true
when:
- not skip_kubeadm_images|default(false)
- not skip_kubeadm_images | default(false)

View File

@@ -21,5 +21,5 @@ etcd:
{% endif %}
dns:
type: CoreDNS
imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }}
imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$', '') }}
imageTag: {{ coredns_image_tag }}

View File

@@ -42,7 +42,7 @@
- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)"
set_fact:
gen_certs: true
when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
when: force_etcd_cert_refresh or not item in etcdcert_master.files | map(attribute='path') | list
run_once: true
with_items: "{{ expected_files }}"
vars:
@@ -59,7 +59,7 @@
{% for host in k8s_nodes %}
'{{ etcd_cert_dir }}/node-{{ host }}.pem',
'{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% if not loop.last %}{{ ',' }}{% endif %}
{% endfor %}]
- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)"
@@ -77,28 +77,29 @@
'{{ etcd_cert_dir }}/member-{{ host }}.pem',
'{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
{% endfor %}
{% set k8s_nodes = groups['k8s_cluster']|unique|sort %}
{% set k8s_nodes = groups['k8s_cluster'] | unique | sort %}
{% for host in k8s_nodes %}
'{{ etcd_cert_dir }}/node-{{ host }}.pem',
'{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% if not loop.last %}{{ ',' }}{% endif %}
{% endfor %}]
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
- force_etcd_cert_refresh or not item in etcdcert_master.files | map(attribute='path') | list
- name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node"
set_fact:
# noqa: jinja[spacing]
gen_master_certs: |-
{
{% set etcd_members = groups['etcd'] -%}
{% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
{% set existing_certs = etcdcert_master.files | map(attribute='path') | list | sort %}
{% for host in etcd_members -%}
{% set member_cert = "%s/member-%s.pem"|format(etcd_cert_dir, host) %}
{% set member_key = "%s/member-%s-key.pem"|format(etcd_cert_dir, host) %}
{% set admin_cert = "%s/admin-%s.pem"|format(etcd_cert_dir, host) %}
{% set admin_key = "%s/admin-%s-key.pem"|format(etcd_cert_dir, host) %}
{% set member_cert = "%s/member-%s.pem" | format(etcd_cert_dir, host) %}
{% set member_key = "%s/member-%s-key.pem" | format(etcd_cert_dir, host) %}
{% set admin_cert = "%s/admin-%s.pem" | format(etcd_cert_dir, host) %}
{% set admin_key = "%s/admin-%s-key.pem" | format(etcd_cert_dir, host) %}
{% if force_etcd_cert_refresh -%}
"{{ host }}": True,
{% elif member_cert in existing_certs and member_key in existing_certs and admin_cert in existing_certs and admin_key in existing_certs -%}
@@ -112,13 +113,14 @@
- name: "Check_certs | Set 'gen_node_certs' object to track whether node certs exist on first etcd node"
set_fact:
# noqa: jinja[spacing]
gen_node_certs: |-
{
{% set k8s_nodes = groups['k8s_cluster'] -%}
{% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
{% set existing_certs = etcdcert_master.files | map(attribute='path') | list | sort %}
{% for host in k8s_nodes -%}
{% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
{% set host_key = "%s/node-%s-key.pem"|format(etcd_cert_dir, host) %}
{% set host_cert = "%s/node-%s.pem" | format(etcd_cert_dir, host) %}
{% set host_key = "%s/node-%s-key.pem" | format(etcd_cert_dir, host) %}
{% if force_etcd_cert_refresh -%}
"{{ host }}": True,
{% elif host_cert in existing_certs and host_key in existing_certs -%}
@@ -135,16 +137,16 @@
etcd_member_requires_sync: true
when:
- inventory_hostname in groups['etcd']
- (not etcd_member_certs.results[0].stat.exists|default(false)) or
(not etcd_member_certs.results[1].stat.exists|default(false)) or
(not etcd_member_certs.results[2].stat.exists|default(false)) or
(not etcd_member_certs.results[3].stat.exists|default(false)) or
(not etcd_member_certs.results[4].stat.exists|default(false)) or
(etcd_member_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or
(etcd_member_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or
(etcd_member_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[2].stat.path)|map(attribute="checksum")|first|default('')) or
(etcd_member_certs.results[3].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[3].stat.path)|map(attribute="checksum")|first|default('')) or
(etcd_member_certs.results[4].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_member_certs.results[4].stat.path)|map(attribute="checksum")|first|default(''))
- (not etcd_member_certs.results[0].stat.exists | default(false)) or
(not etcd_member_certs.results[1].stat.exists | default(false)) or
(not etcd_member_certs.results[2].stat.exists | default(false)) or
(not etcd_member_certs.results[3].stat.exists | default(false)) or
(not etcd_member_certs.results[4].stat.exists | default(false)) or
(etcd_member_certs.results[0].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[0].stat.path) | map(attribute="checksum") | first | default('')) or
(etcd_member_certs.results[1].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[1].stat.path) | map(attribute="checksum") | first | default('')) or
(etcd_member_certs.results[2].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[2].stat.path) | map(attribute="checksum") | first | default('')) or
(etcd_member_certs.results[3].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[3].stat.path) | map(attribute="checksum") | first | default('')) or
(etcd_member_certs.results[4].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_member_certs.results[4].stat.path) | map(attribute="checksum") | first | default(''))
- name: "Check_certs | Set 'kubernetes_host_requires_sync' to true if ca or node cert and key don't exist on kubernetes host or checksum doesn't match"
set_fact:
@@ -152,18 +154,18 @@
when:
- inventory_hostname in groups['k8s_cluster'] and
inventory_hostname not in groups['etcd']
- (not etcd_node_certs.results[0].stat.exists|default(false)) or
(not etcd_node_certs.results[1].stat.exists|default(false)) or
(not etcd_node_certs.results[2].stat.exists|default(false)) or
(etcd_node_certs.results[0].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[0].stat.path)|map(attribute="checksum")|first|default('')) or
(etcd_node_certs.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[1].stat.path)|map(attribute="checksum")|first|default('')) or
(etcd_node_certs.results[2].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcd_node_certs.results[2].stat.path)|map(attribute="checksum")|first|default(''))
- (not etcd_node_certs.results[0].stat.exists | default(false)) or
(not etcd_node_certs.results[1].stat.exists | default(false)) or
(not etcd_node_certs.results[2].stat.exists | default(false)) or
(etcd_node_certs.results[0].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[0].stat.path) | map(attribute="checksum") | first | default('')) or
(etcd_node_certs.results[1].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[1].stat.path) | map(attribute="checksum") | first | default('')) or
(etcd_node_certs.results[2].stat.checksum | default('') != etcdcert_master.files | selectattr("path", "equalto", etcd_node_certs.results[2].stat.path) | map(attribute="checksum") | first | default(''))
- name: "Check_certs | Set 'sync_certs' to true"
set_fact:
sync_certs: true
when:
- etcd_member_requires_sync|default(false) or
kubernetes_host_requires_sync|default(false) or
- etcd_member_requires_sync | default(false) or
kubernetes_host_requires_sync | default(false) or
(inventory_hostname in gen_master_certs and gen_master_certs[inventory_hostname]) or
(inventory_hostname in gen_node_certs and gen_node_certs[inventory_hostname])

View File

@@ -25,7 +25,7 @@
run_once: yes
delegate_to: "{{ groups['etcd'][0] }}"
when:
- gen_certs|default(false)
- gen_certs | default(false)
- inventory_hostname == groups['etcd'][0]
- name: Gen_certs | copy certs generation script
@@ -35,7 +35,7 @@
mode: 0700
run_once: yes
when:
- gen_certs|default(false)
- gen_certs | default(false)
- inventory_hostname == groups['etcd'][0]
- name: Gen_certs | run cert generation script for etcd and kube control plane nodes
@@ -55,7 +55,7 @@
{% endfor %}
run_once: yes
delegate_to: "{{ groups['etcd'][0] }}"
when: gen_certs|default(false)
when: gen_certs | default(false)
notify: set etcd_secret_changed
- name: Gen_certs | run cert generation script for all clients
@@ -72,7 +72,7 @@
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- gen_certs|default(false)
- gen_certs | default(false)
notify: set etcd_secret_changed
- name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
@@ -95,7 +95,7 @@
delegate_to: "{{ groups['etcd'][0] }}"
when:
- inventory_hostname in groups['etcd']
- sync_certs|default(false)
- sync_certs | default(false)
- inventory_hostname != groups['etcd'][0]
notify: set etcd_secret_changed
@@ -109,7 +109,7 @@
with_items: "{{ etcd_master_certs.results }}"
when:
- inventory_hostname in groups['etcd']
- sync_certs|default(false)
- sync_certs | default(false)
- inventory_hostname != groups['etcd'][0]
loop_control:
label: "{{ item.item }}"
@@ -150,14 +150,14 @@
- include_tasks: gen_nodes_certs_script.yml
when:
- inventory_hostname in groups['kube_control_plane'] and
sync_certs|default(false) and inventory_hostname not in groups['etcd']
sync_certs | default(false) and inventory_hostname not in groups['etcd']
- include_tasks: gen_nodes_certs_script.yml
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster'] and
sync_certs|default(false) and inventory_hostname not in groups['etcd']
sync_certs | default(false) and inventory_hostname not in groups['etcd']
- name: Gen_certs | check certificate permissions
file:

View File

@@ -14,18 +14,18 @@
- "{{ my_etcd_node_certs }}"
- name: Gen_certs | Gather node certs
shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs | join(' ') }} | base64 --wrap=0"
args:
executable: /bin/bash
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
register: etcd_node_certs
check_mode: no
delegate_to: "{{ groups['etcd'][0] }}"
changed_when: false
- name: Gen_certs | Copy certs on nodes
shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout | quote }}' | tar xz -C {{ etcd_cert_dir }}"
args:
executable: /bin/bash
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
changed_when: false

View File

@@ -17,14 +17,14 @@
notify: restart etcd
when:
- etcd_cluster_setup
- etcd_image_tag not in etcd_current_docker_image.stdout|default('')
- etcd_image_tag not in etcd_current_docker_image.stdout | default('')
- name: Restart etcd-events if necessary
command: /bin/true
notify: restart etcd-events
when:
- etcd_events_cluster_setup
- etcd_image_tag not in etcd_events_current_docker_image.stdout|default('')
- etcd_image_tag not in etcd_events_current_docker_image.stdout | default('')
- name: Install etcd launch script
template:

View File

@@ -11,14 +11,14 @@
notify: restart etcd
when:
- etcd_cluster_setup
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('')
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
- name: Restart etcd-events if necessary
command: /bin/true
notify: restart etcd-events
when:
- etcd_events_cluster_setup
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('')
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
- name: install | Download etcd and etcdctl
include_tasks: "../../download/tasks/download_file.yml"

View File

@@ -14,10 +14,11 @@
- include_tasks: refresh_config.yml
vars:
# noqa: jinja[spacing]
etcd_events_peer_addresses: >-
{% for host in groups['etcd'] -%}
{%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
{{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382,
{{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2382,
{%- endif -%}
{%- if loop.last -%}
{{ etcd_member_name }}={{ etcd_events_peer_url }}

View File

@@ -15,10 +15,11 @@
- include_tasks: refresh_config.yml
vars:
# noqa: jinja[spacing]
etcd_peer_addresses: >-
{% for host in groups['etcd'] -%}
{%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
{{ "etcd"+loop.index|string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2380,
{{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(fallback_ips[host])) }}:2380,
{%- endif -%}
{%- if loop.last -%}
{{ etcd_member_name }}={{ etcd_peer_url }}
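
The block above uses a folded scalar ('>-') plus a # noqa: jinja[spacing] marker to assemble one comma-separated peer string; in "etcd" + loop.index | string the filter applies to loop.index before the concatenation. A hypothetical, self-contained sketch of the same pattern with placeholder host addresses:

- name: Example | build a comma-separated peer list
  ansible.builtin.debug:
    msg: >-
      {% for host in ['10.0.0.1', '10.0.0.2'] -%}
      {{ "etcd" + loop.index | string }}=https://{{ host }}:2380{{ "," if not loop.last else "" }}
      {%- endfor -%}

With these placeholder hosts the message renders as etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380.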

View File

@@ -7,13 +7,13 @@
- include_tasks: "gen_certs_script.yml"
when:
- cert_management |d('script') == "script"
- cert_management | d('script') == "script"
tags:
- etcd-secrets
- include_tasks: upd_ca_trust.yml
when:
- inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort
- inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
tags:
- etcd-secrets
@@ -63,12 +63,12 @@
- name: Restart etcd if certs changed
command: /bin/true
notify: restart etcd
when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed|default(false)
when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
- name: Restart etcd-events if certs changed
command: /bin/true
notify: restart etcd-events
when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
# After the etcd cluster is assembled, make sure that
# the initial state of the cluster is `existing`

View File

@@ -6,10 +6,10 @@
dns_memory_limit: 300Mi
dns_cpu_requests: 100m
dns_memory_requests: 70Mi
dns_min_replicas: "{{ [ 2, groups['k8s_cluster'] | length ] | min }}"
dns_min_replicas: "{{ [2, groups['k8s_cluster'] | length] | min }}"
dns_nodes_per_replica: 16
dns_cores_per_replica: 256
dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas|int > 1 else 'false' }}"
dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas | int > 1 else 'false' }}"
enable_coredns_reverse_dns_lookups: true
coredns_ordinal_suffix: ""
# dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
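
As a worked example of the two expressions above, using hypothetical cluster sizes: a single-node group gives [2, 1] | min = 1 replica and the guard renders 'false', while three nodes give [2, 3] | min = 2 and 'true'. A minimal check:

- name: Example | evaluate the dns replica defaults for a 3-node cluster
  ansible.builtin.debug:
    msg: "min_replicas={{ [2, 3] | min }} prevent_spof={{ 'true' if ([2, 3] | min) | int > 1 else 'false' }}"
  # prints: min_replicas=2 prevent_spof=true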

View File

@@ -1,6 +1,7 @@
---
- name: Kubernetes Apps | set up necessary nodelocaldns parameters
set_fact:
# noqa: jinja[spacing]
primaryClusterIP: >-
{%- if dns_mode in ['coredns', 'coredns_dual'] -%}
{{ skydns_server }}
@@ -26,6 +27,7 @@
- { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset }
register: nodelocaldns_manifests
vars:
# noqa: jinja[spacing]
forwardTarget: >-
{%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
{{ primaryClusterIP }} {{ secondaryclusterIP }}
@@ -33,8 +35,8 @@
{{ primaryClusterIP }}
{%- endif -%}
upstreamForwardTarget: >-
{%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%}
{{ upstream_dns_servers | join(' ') }}
{%- else -%}
/etc/resolv.conf
{%- endif -%}
@@ -54,15 +56,17 @@
- { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset }
register: nodelocaldns_second_manifests
vars:
# noqa: jinja[spacing]
forwardTarget: >-
{%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
{{ primaryClusterIP }} {{ secondaryclusterIP }}
{%- else -%}
{{ primaryClusterIP }}
{%- endif -%}
# noqa: jinja[spacing]
upstreamForwardTarget: >-
{%- if upstream_dns_servers is defined and upstream_dns_servers|length > 0 -%}
{{ upstream_dns_servers|join(' ') }}
{%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%}
{{ upstream_dns_servers | join(' ') }}
{%- else -%}
/etc/resolv.conf
{%- endif -%}

View File

@@ -8,12 +8,12 @@ metadata:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
{% if coredns_external_zones is defined and coredns_external_zones|length > 0 %}
{% if coredns_external_zones is defined and coredns_external_zones | length > 0 %}
{% for block in coredns_external_zones %}
{{ block['zones'] | join(' ') }} {
log
errors
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% if block['rewrite'] is defined and block['rewrite'] | length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
@@ -57,7 +57,7 @@ data:
{% endif %}
}
prometheus :9153
forward . {{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} {
forward . {{ upstream_dns_servers | join(' ') if upstream_dns_servers is defined and upstream_dns_servers | length > 0 else '/etc/resolv.conf' }} {
prefer_udp
max_concurrent 1000
{% if dns_upstream_forward_extra_opts is defined %}

View File

@@ -32,7 +32,7 @@ spec:
annotations:
spec:
nodeSelector:
{{ dns_autoscaler_deployment_nodeselector}}
{{ dns_autoscaler_deployment_nodeselector }}
priorityClassName: system-cluster-critical
securityContext:
seccompProfile:

View File

@@ -15,7 +15,7 @@ spec:
labels:
app: netchecker-agent
spec:
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
tolerations:
- effect: NoSchedule
operator: Exists
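
The trailing {{ '' }} after {% endif %} in the priorityClassName line above is easy to misread as noise. It appears to be there because the template module renders with trim_blocks enabled, so a line ending on a bare block tag would lose its newline and swallow the next line. A minimal sketch of the idiom; some_namespace is a placeholder:

{# Without the trailing expression, trim_blocks would drop the newline after endif
   and 'tolerations:' would be folded onto the priorityClassName line. #}
priorityClassName: {% if some_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
tolerations:
- effect: NoSchedule
  operator: Exists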

View File

@@ -19,7 +19,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
tolerations:
- effect: NoSchedule
operator: Exists

View File

@@ -16,7 +16,7 @@ spec:
labels:
app: netchecker-server
spec:
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
volumes:
- name: etcd-data
emptyDir: {}

View File

@@ -8,13 +8,13 @@ metadata:
data:
Corefile: |
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones | length > 0 %}
{% for block in nodelocaldns_external_zones %}
{{ block['zones'] | join(' ') }} {
errors
cache {{ block['cache'] | default(30) }}
reload
{% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
{% if block['rewrite'] is defined and block['rewrite'] | length > 0 %}
{% for rewrite_match in block['rewrite'] %}
rewrite {{ rewrite_match }}
{% endfor %}
@@ -95,7 +95,7 @@ data:
}
{% if enable_nodelocaldns_secondary %}
Corefile-second: |
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones|length > 0 %}
{% if nodelocaldns_external_zones is defined and nodelocaldns_external_zones | length > 0 %}
{% for block in nodelocaldns_external_zones %}
{{ block['zones'] | join(' ') }} {
errors

View File

@@ -63,7 +63,7 @@ loadBalancer:
# inbound traffic to load balancers.
securityListManagementMode: {{ oci_security_list_management }}
{% if oci_security_lists is defined and oci_security_lists|length > 0 %}
{% if oci_security_lists is defined and oci_security_lists | length > 0 %}
# Optional specification of which security lists to modify per subnet. This does not apply if security list management is off.
securityLists:
{% for subnet_ocid, list_ocid in oci_security_lists.items() %}
@@ -71,7 +71,7 @@ loadBalancer:
{% endfor %}
{% endif %}
{% if oci_rate_limit is defined and oci_rate_limit|length > 0 %}
{% if oci_rate_limit is defined and oci_rate_limit | length > 0 %}
# Optional rate limit controls for accessing OCI API
rateLimiter:
{% if oci_rate_limit.rate_limit_qps_read %}

View File

@@ -30,7 +30,7 @@ spec:
spec:
{% if oci_cloud_controller_pull_secret is defined %}
imagePullSecrets:
- name: {{oci_cloud_controller_pull_secret}}
- name: {{ oci_cloud_controller_pull_secret }}
{% endif %}
serviceAccountName: cloud-controller-manager
hostNetwork: true
@@ -56,7 +56,7 @@ spec:
path: /etc/kubernetes
containers:
- name: oci-cloud-controller-manager
image: {{oci_cloud_controller_pull_source}}:{{oci_cloud_controller_version}}
image: {{ oci_cloud_controller_pull_source }}:{{ oci_cloud_controller_version }}
command: ["/usr/local/bin/oci-cloud-controller-manager"]
args:
- --cloud-config=/etc/oci/cloud-provider.yaml

View File

@@ -70,7 +70,7 @@
src: k8s-cluster-critical-pc.yml
dest: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
mode: 0640
when: inventory_hostname == groups['kube_control_plane']|last
when: inventory_hostname == groups['kube_control_plane'] | last
- name: PriorityClass | Create k8s-cluster-critical
kube:
@@ -79,4 +79,4 @@
resource: "PriorityClass"
filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
state: latest
when: inventory_hostname == groups['kube_control_plane']|last
when: inventory_hostname == groups['kube_control_plane'] | last

View File

@@ -1,25 +1,25 @@
---
- name: Container Engine Acceleration Nvidia GPU| gather os specific variables
- name: Container Engine Acceleration Nvidia GPU | gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}.yml"
- "{{ ansible_os_family | lower }}.yml"
skip: true
- name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla
set_fact:
nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}"
when: nvidia_gpu_flavor|lower == "tesla"
when: nvidia_gpu_flavor | lower == "tesla"
- name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX
set_fact:
nvidia_driver_download_url_default: "{{ nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}"
when: nvidia_gpu_flavor|lower == "gtx"
when: nvidia_gpu_flavor | lower == "gtx"
- name: Container Engine Acceleration Nvidia GPU | Create addon dir
file:

View File

@@ -2,18 +2,18 @@
# To access Cinder, the CSI controller will need credentials to access
# openstack apis. By default these values will be
# read from the environment.
cinder_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
cinder_username: "{{ lookup('env','OS_USERNAME') }}"
cinder_password: "{{ lookup('env','OS_PASSWORD') }}"
cinder_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}"
cinder_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}"
cinder_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}"
cinder_region: "{{ lookup('env','OS_REGION_NAME') }}"
cinder_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}"
cinder_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}"
cinder_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
cinder_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
cinder_cacert: "{{ lookup('env','OS_CACERT') }}"
cinder_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
cinder_username: "{{ lookup('env', 'OS_USERNAME') }}"
cinder_password: "{{ lookup('env', 'OS_PASSWORD') }}"
cinder_application_credential_id: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_ID') }}"
cinder_application_credential_name: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_NAME') }}"
cinder_application_credential_secret: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_SECRET') }}"
cinder_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
cinder_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID'), true) }}"
cinder_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') | default(lookup('env', 'OS_PROJECT_NAME'), true) }}"
cinder_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
cinder_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}"
cinder_cacert: "{{ lookup('env', 'OS_CACERT') }}"
# For now, only Cinder v3 is supported in Cinder CSI driver
cinder_blockstorage_version: "v3"

View File

@@ -16,7 +16,7 @@
msg: "cinder_application_credential_id is missing"
when:
- cinder_application_credential_name is defined
- cinder_application_credential_name|length > 0
- cinder_application_credential_name | length > 0
- cinder_application_credential_id is not defined or not cinder_application_credential_id
- name: Cinder CSI Driver | check cinder_application_credential_secret value
@@ -24,7 +24,7 @@
msg: "cinder_application_credential_secret is missing"
when:
- cinder_application_credential_name is defined
- cinder_application_credential_name|length > 0
- cinder_application_credential_name | length > 0
- cinder_application_credential_secret is not defined or not cinder_application_credential_secret
- name: Cinder CSI Driver | check cinder_password value
@@ -32,7 +32,7 @@
msg: "cinder_password is missing"
when:
- cinder_username is defined
- cinder_username|length > 0
- cinder_username | length > 0
- cinder_application_credential_name is not defined or not cinder_application_credential_name
- cinder_application_credential_secret is not defined or not cinder_application_credential_secret
- cinder_password is not defined or not cinder_password

View File

@@ -133,7 +133,7 @@ spec:
- name: ca-certs
mountPath: /etc/ssl/certs
readOnly: true
{% if ssl_ca_dirs|length %}
{% if ssl_ca_dirs | length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
mountPath: {{ dir }}
@@ -155,7 +155,7 @@ spec:
hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
{% if ssl_ca_dirs|length %}
{% if ssl_ca_dirs | length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
hostPath:

View File

@@ -89,7 +89,7 @@ spec:
- name: ca-certs
mountPath: /etc/ssl/certs
readOnly: true
{% if ssl_ca_dirs|length %}
{% if ssl_ca_dirs | length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
mountPath: {{ dir }}
@@ -125,7 +125,7 @@ spec:
hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
{% if ssl_ca_dirs|length %}
{% if ssl_ca_dirs | length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
hostPath:

View File

@@ -5,12 +5,12 @@ upcloud_csi_attacher_image_tag: "v3.4.0"
upcloud_csi_resizer_image_tag: "v1.4.0"
upcloud_csi_plugin_image_tag: "v0.3.3"
upcloud_csi_node_image_tag: "v2.5.0"
upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}"
upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}"
upcloud_username: "{{ lookup('env', 'UPCLOUD_USERNAME') }}"
upcloud_password: "{{ lookup('env', 'UPCLOUD_PASSWORD') }}"
upcloud_tolerations: []
upcloud_csi_enable_volume_snapshot: false
upcloud_csi_snapshot_controller_replicas: 2
upcloud_csi_snapshotter_image_tag: "v4.2.1"
upcloud_csi_snapshot_controller_image_tag: "v4.2.1"
upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1"
upcloud_cacert: "{{ lookup('env','OS_CACERT') }}"
upcloud_cacert: "{{ lookup('env', 'OS_CACERT') }}"

View File

@@ -9,7 +9,7 @@
msg: "UpCloud password is missing. Env UPCLOUD_PASSWORD is mandatory"
when:
- upcloud_username is defined
- upcloud_username|length > 0
- upcloud_username | length > 0
- upcloud_password is not defined or not upcloud_password
- name: UpCloud CSI Driver | Generate Manifests

View File

@@ -36,8 +36,8 @@ unsafe_show_logs: false
# according to the above link, we can control the block-volume-snapshot parameter
vsphere_csi_block_volume_snapshot: false
external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}"
external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}"
external_vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
external_vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
# Controller resources
vsphere_csi_snapshotter_resources: {}

View File

@@ -44,11 +44,11 @@
command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
- name: vSphere CSI Driver | Apply a CSI secret manifest
command:
cmd: "{{ kubectl }} apply -f -"
stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: "{{ not (unsafe_show_logs|bool) }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"

View File

@@ -9,7 +9,7 @@
- {name: external-hcloud-cloud-secret, file: external-hcloud-cloud-secret.yml}
- {name: external-hcloud-cloud-service-account, file: external-hcloud-cloud-service-account.yml}
- {name: external-hcloud-cloud-role-bindings, file: external-hcloud-cloud-role-bindings.yml}
- {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds.yml' }}"}
- {name: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds' }}", file: "{{ 'external-hcloud-cloud-controller-manager-ds-with-networks.yml' if external_hcloud_cloud.with_networks else 'external-hcloud-cloud-controller-manager-ds.yml' }}"}
register: external_hcloud_manifests
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -7,5 +7,5 @@ metadata:
data:
token: "{{ external_hcloud_cloud.hcloud_api_token | b64encode }}"
{% if external_hcloud_cloud.with_networks %}
network: "{{ network_id|b64encode }}"
network: "{{ network_id | b64encode }}"
{% endif %}

View File

@@ -2,18 +2,18 @@
# The external cloud controller will need credentials to access
# openstack apis. By default these values will be
# read from the environment.
external_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
external_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
external_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
external_openstack_application_credential_id: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_ID') }}"
external_openstack_application_credential_name: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_NAME') }}"
external_openstack_application_credential_secret: "{{ lookup('env','OS_APPLICATION_CREDENTIAL_SECRET') }}"
external_openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
external_openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID'),true) }}"
external_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME')| default(lookup('env','OS_PROJECT_NAME'),true) }}"
external_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
external_openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
external_openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
external_openstack_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
external_openstack_username: "{{ lookup('env', 'OS_USERNAME') }}"
external_openstack_password: "{{ lookup('env', 'OS_PASSWORD') }}"
external_openstack_application_credential_id: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_ID') }}"
external_openstack_application_credential_name: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_NAME') }}"
external_openstack_application_credential_secret: "{{ lookup('env', 'OS_APPLICATION_CREDENTIAL_SECRET') }}"
external_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
external_openstack_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID'), true) }}"
external_openstack_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') | default(lookup('env', 'OS_PROJECT_NAME'), true) }}"
external_openstack_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
external_openstack_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}"
external_openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}"
## A dictionary of extra arguments to add to the openstack cloud controller manager daemonset
## Format:

View File

@@ -18,7 +18,7 @@
msg: "external_openstack_application_credential_id is missing"
when:
- external_openstack_application_credential_name is defined
- external_openstack_application_credential_name|length > 0
- external_openstack_application_credential_name | length > 0
- external_openstack_application_credential_id is not defined or not external_openstack_application_credential_id
@@ -27,7 +27,7 @@
msg: "external_openstack_application_credential_secret is missing"
when:
- external_openstack_application_credential_name is defined
- external_openstack_application_credential_name|length > 0
- external_openstack_application_credential_name | length > 0
- external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret
@@ -36,7 +36,7 @@
msg: "external_openstack_password is missing"
when:
- external_openstack_username is defined
- external_openstack_username|length > 0
- external_openstack_username | length > 0
- external_openstack_application_credential_name is not defined or not external_openstack_application_credential_name
- external_openstack_application_credential_secret is not defined or not external_openstack_application_credential_secret
- external_openstack_password is not defined or not external_openstack_password

View File

@@ -57,7 +57,7 @@ spec:
- mountPath: /etc/ssl/certs
name: ca-certs
readOnly: true
{% if ssl_ca_dirs|length %}
{% if ssl_ca_dirs | length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
mountPath: {{ dir }}
@@ -98,7 +98,7 @@ spec:
hostPath:
path: /etc/ssl/certs
type: DirectoryOrCreate
{% if ssl_ca_dirs|length %}
{% if ssl_ca_dirs | length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
hostPath:

View File

@@ -10,5 +10,5 @@ external_vsphere_insecure: "true"
external_vsphere_cloud_controller_extra_args: {}
external_vsphere_cloud_controller_image_tag: "latest"
external_vsphere_user: "{{ lookup('env','VSPHERE_USER') }}"
external_vsphere_password: "{{ lookup('env','VSPHERE_PASSWORD') }}"
external_vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
external_vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"

View File

@@ -19,7 +19,7 @@ spec:
app: cephfs-provisioner
version: {{ cephfs_provisioner_image_tag }}
spec:
priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
serviceAccount: cephfs-provisioner
containers:
- name: cephfs-provisioner

View File

@@ -24,7 +24,7 @@ spec:
- start
- --config
- /etc/config/config.json
{% if local_path_provisioner_debug|default(false) %}
{% if local_path_provisioner_debug | default(false) %}
- --debug
{% endif %}
volumeMounts:

View File

@@ -12,7 +12,7 @@ local_volume_provisioner_use_node_name_only: false
local_volume_provisioner_storage_classes: |
{
"{{ local_volume_provisioner_storage_class | default('local-storage') }}": {
"host_dir": "{{ local_volume_provisioner_base_dir | default ('/mnt/disks') }}",
"host_dir": "{{ local_volume_provisioner_base_dir | default('/mnt/disks') }}",
"mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}",
"volume_mode": "Filesystem",
"fs_type": "ext4"

View File

@@ -1,8 +1,8 @@
# Macro to convert snake_case dictionary keys to camelCase keys
{% macro convert_keys(mydict) -%}
{% for key in mydict.keys()|list -%}
{% for key in mydict.keys() | list -%}
{% set key_split = key.split('_') -%}
{% set new_key = key_split[0] + key_split[1:]|map('capitalize')|join -%}
{% set new_key = key_split[0] + key_split[1:] | map('capitalize') | join -%}
{% set value = mydict.pop(key) -%}
{{ mydict.__setitem__(new_key, value) -}}
{{ convert_keys(value) if value is mapping else None -}}
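
The macro above rewrites each snake_case key of the given mapping into its camelCase equivalent in place. A hypothetical one-key illustration of the core expression, with 'mount_dir' as a placeholder key:

{% set key = 'mount_dir' %}
{% set key_split = key.split('_') %}
{{ key_split[0] + key_split[1:] | map('capitalize') | join }}{# renders: mountDir #}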

View File

@@ -18,7 +18,7 @@ spec:
k8s-app: local-volume-provisioner
version: {{ local_volume_provisioner_image_tag }}
spec:
priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
serviceAccountName: local-volume-provisioner
nodeSelector:
kubernetes.io/os: linux

View File

@@ -21,7 +21,7 @@ spec:
app: rbd-provisioner
version: {{ rbd_provisioner_image_tag }}
spec:
priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if rbd_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
serviceAccount: rbd-provisioner
containers:
- name: rbd-provisioner

View File

@@ -3,11 +3,11 @@
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}.yml"
- "{{ ansible_os_family | lower }}.yml"
- defaults.yml
paths:
- ../vars

View File

@@ -10,9 +10,9 @@ cert_manager_controller_extra_args: []
## Allow http_proxy, https_proxy and no_proxy environment variables
## Details https://github.com/kubernetes-sigs/kubespray/blob/master/docs/proxy.md
cert_manager_http_proxy: "{{ http_proxy|default('') }}"
cert_manager_https_proxy: "{{ https_proxy|default('') }}"
cert_manager_no_proxy: "{{ no_proxy|default('') }}"
cert_manager_http_proxy: "{{ http_proxy | default('') }}"
cert_manager_https_proxy: "{{ https_proxy | default('') }}"
cert_manager_no_proxy: "{{ no_proxy | default('') }}"
## Change leader election namespace when deploying on GKE Autopilot that forbid the changes on kube-system namespace.
## See https://github.com/jetstack/cert-manager/issues/3717

View File

@@ -35,7 +35,7 @@ spec:
tolerations:
{{ ingress_nginx_tolerations | to_nice_yaml(indent=2) | indent(width=8) }}
{% endif %}
priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
containers:
- name: ingress-nginx-controller
image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}

View File

@@ -41,7 +41,7 @@
name: "MetalLB"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/metallb.yaml"
state: "{{ metallb_rendering.changed | ternary('latest','present') }}"
state: "{{ metallb_rendering.changed | ternary('latest', 'present') }}"
wait: true
become: true
when:
@@ -67,7 +67,7 @@
name: "MetalLB"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/pools.yaml"
state: "{{ pools_rendering.changed | ternary('latest','present') }}"
state: "{{ pools_rendering.changed | ternary('latest', 'present') }}"
become: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -87,7 +87,7 @@
name: "MetalLB"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/layer2.yaml"
state: "{{ layer2_rendering.changed | ternary('latest','present') }}"
state: "{{ layer2_rendering.changed | ternary('latest', 'present') }}"
become: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -107,7 +107,7 @@
name: "MetalLB"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/layer3.yaml"
state: "{{ layer3_rendering.changed | ternary('latest','present') }}"
state: "{{ layer3_rendering.changed | ternary('latest', 'present') }}"
become: true
when:
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -57,7 +57,7 @@ spec:
aggregationLengthV6: 128
communities:
- no-advertise
localpref: "{{ peer.localpref | default ("100") }}"
localpref: "{{ peer.localpref | default("100") }}"
ipAddressPools:
{% for address_pool in peer.address_pool %}
- "{{ address_pool }}"

View File

@@ -9,10 +9,10 @@
state: "latest"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
with_items: "{{ multus_manifest_1.results + (multus_nodes_list|map('extract', hostvars, 'multus_manifest_2')|list|json_query('[].results')) }}"
with_items: "{{ multus_manifest_1.results + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | list | json_query('[].results')) }}"
loop_control:
label: "{{ item.item.name }}"
vars:
multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch|length == ansible_play_hosts_all|length else ansible_play_batch }}"
multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}"
when:
- not item is skipped

View File

@@ -8,21 +8,21 @@
fail:
msg: "registry_service_cluster_ip support only compatible with ClusterIP."
when:
- registry_service_cluster_ip is defined and registry_service_cluster_ip|length > 0
- registry_service_cluster_ip is defined and registry_service_cluster_ip | length > 0
- registry_service_type != "ClusterIP"
- name: Registry | Stop if registry_service_loadbalancer_ip is defined when registry_service_type is not 'LoadBalancer'
fail:
msg: "registry_service_loadbalancer_ip support only compatible with LoadBalancer."
when:
- registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip|length > 0
- registry_service_loadbalancer_ip is defined and registry_service_loadbalancer_ip | length > 0
- registry_service_type != "LoadBalancer"
- name: Registry | Stop if registry_service_nodeport is defined when registry_service_type is not 'NodePort'
fail:
msg: "registry_service_nodeport support only compatible with NodePort."
when:
- registry_service_nodeport is defined and registry_service_nodeport|length > 0
- registry_service_nodeport is defined and registry_service_nodeport | length > 0
- registry_service_type != "NodePort"
- name: Registry | Create addon dir

View File

@@ -24,7 +24,7 @@ spec:
k8s-app: registry
version: v{{ registry_image_tag }}
spec:
priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
serviceAccountName: registry
securityContext:
fsGroup: 1000

View File

@@ -1,6 +1,7 @@
---
- name: Set external kube-apiserver endpoint
set_fact:
# noqa: jinja[spacing]
external_apiserver_address: >-
{%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%}
{{ loadbalancer_apiserver.address }}
@@ -9,9 +10,10 @@
{%- else -%}
{{ kube_apiserver_access_address }}
{%- endif -%}
# noqa: jinja[spacing]
external_apiserver_port: >-
{%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%}
{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{%- else -%}
{{ kube_apiserver_port }}
{%- endif -%}
@@ -69,9 +71,9 @@
user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
username: "kubernetes-admin-{{ cluster_name }}"
context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
override_cluster_name: "{{ { 'clusters': [ { 'cluster': (cluster_infos|combine({'server': 'https://'+external_apiserver_address+':'+(external_apiserver_port|string)})), 'name': cluster_name } ] } }}"
override_context: "{{ { 'contexts': [ { 'context': { 'user': username, 'cluster': cluster_name }, 'name': context } ], 'current-context': context } }}"
override_user: "{{ { 'users': [ { 'name': username, 'user': user_certs } ] } }}"
override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + external_apiserver_address + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
override_context: "{{ {'contexts': [{'context': {'user': username, 'cluster': cluster_name}, 'name': context}], 'current-context': context} }}"
override_user: "{{ {'users': [{'name': username, 'user': user_certs}]} }}"
when: kubeconfig_localhost
- name: Write admin kubeconfig on ansible host
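
The reformatted override_* facts above are plain inline dict literals; the only non-obvious part is the combine call that swaps in the external apiserver URL. A hypothetical standalone sketch of that piece, with made-up address and port values:

- name: Example | override the server field of a cluster entry
  ansible.builtin.set_fact:
    cluster_with_external_server: "{{ cluster_infos | combine({'server': 'https://' + external_address + ':' + (external_port | string)}) }}"
  vars:
    cluster_infos:
      server: "https://127.0.0.1:6443"
    external_address: "192.0.2.10"
    external_port: 6443

The resulting fact is {'server': 'https://192.0.2.10:6443'}.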

View File

@@ -111,4 +111,4 @@ kube_proxy_oom_score_adj: -999
# portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
# in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen.
kube_proxy_port_range: ''
kube_proxy_port_range: ''

View File

@@ -5,7 +5,7 @@ upgrade_cluster_setup: false
# By default the external API listens on all interfaces, this can be changed to
# listen on a specific address/interface.
# NOTE: If you set a specific address/interface and use loadbalancer_apiserver_localhost
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }} too.
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0
# A port range to reserve for services with NodePort visibility.
@@ -181,12 +181,12 @@ kube_encryption_resources: [secrets]
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
{%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
{%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
{%- else -%}
{{ inventory_hostname }}
{%- endif -%}
secrets_encryption_query: "resources[*].providers[0].{{kube_encryption_algorithm}}.keys[0].secret"
secrets_encryption_query: "resources[*].providers[0].{{ kube_encryption_algorithm }}.keys[0].secret"
## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""

View File

@@ -8,7 +8,7 @@
- name: Set fact joined_control_planes
set_fact:
joined_control_planes: "{{ ((kube_control_planes_raw.stdout| from_json)['items'])| default([]) | map (attribute='metadata') | map (attribute='name') | list }}"
joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
delegate_to: item
loop: "{{ groups['kube_control_plane'] }}"
when: kube_control_planes_raw is succeeded
@@ -16,4 +16,4 @@
- name: Set fact first_kube_control_plane
set_fact:
first_kube_control_plane: "{{ joined_control_planes|default([]) | first | default(groups['kube_control_plane']|first) }}"
first_kube_control_plane: "{{ joined_control_planes | default([]) | first | default(groups['kube_control_plane'] | first) }}"

View File

@@ -1,6 +1,7 @@
---
- name: Set kubeadm_discovery_address
set_fact:
# noqa: jinja[spacing]
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}

View File

@@ -52,26 +52,26 @@
path: "{{ audit_policy_file | dirname }}"
state: directory
mode: 0640
when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
when: kubernetes_audit | default(false) or kubernetes_audit_webhook | default(false)
- name: Write api audit policy yaml
template:
src: apiserver-audit-policy.yaml.j2
dest: "{{ audit_policy_file }}"
mode: 0640
when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
when: kubernetes_audit | default(false) or kubernetes_audit_webhook | default(false)
- name: Write api audit webhook config yaml
template:
src: apiserver-audit-webhook-config.yaml.j2
dest: "{{ audit_webhook_config_file }}"
mode: 0640
when: kubernetes_audit_webhook|default(false)
when: kubernetes_audit_webhook | default(false)
# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
- name: set kubeadm_config_api_fqdn
set_fact:
kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}"
when: loadbalancer_apiserver is defined
- name: Set kubeadm api version to v1beta3
@@ -100,8 +100,8 @@
- name: kubeadm | Push admission control config files
template:
src: "{{ item|lower }}.yaml.j2"
dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml"
src: "{{ item | lower }}.yaml.j2"
dest: "{{ kube_config_dir }}/admission-controls/{{ item | lower }}.yaml"
mode: 0640
when:
- kube_apiserver_admission_control_config_file
@@ -123,8 +123,8 @@
register: apiserver_sans_host_check
changed_when: apiserver_sans_host_check.stdout is not search('does match certificate')
vars:
apiserver_ips: "{{ apiserver_sans|map('ipaddr')|reject('equalto', False)|list }}"
apiserver_hosts: "{{ apiserver_sans|difference(apiserver_ips) }}"
apiserver_ips: "{{ apiserver_sans | map('ipaddr') | reject('equalto', False) | list }}"
apiserver_hosts: "{{ apiserver_sans | difference(apiserver_ips) }}"
when:
- kubeadm_already_run.stat.exists
- not kube_external_ca_mode
@@ -186,7 +186,7 @@
- name: set kubeadm certificate key
set_fact:
kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}"
with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
when:
- kubeadm_certificate_key is not defined

View File

@@ -8,14 +8,14 @@
src: webhook-token-auth-config.yaml.j2
dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
mode: 0640
when: kube_webhook_token_auth|default(false)
when: kube_webhook_token_auth | default(false)
- name: Create webhook authorization config
template:
src: webhook-authorization-config.yaml.j2
dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
mode: 0640
when: kube_webhook_authorization|default(false)
when: kube_webhook_authorization | default(false)
- name: Create kube-scheduler config
template:

View File

@@ -6,7 +6,7 @@
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
register: kube_apiserver_manifest_replaced
when: etcd_secret_changed|default(false)
when: etcd_secret_changed | default(false)
- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler
shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"

View File

@@ -4,6 +4,6 @@ plugins:
{% for plugin in kube_apiserver_enable_admission_plugins %}
{% if plugin in kube_apiserver_admission_plugins_needs_configuration %}
- name: {{ plugin }}
path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml
path: {{ kube_config_dir }}/{{ plugin | lower }}.yaml
{% endif %}
{% endfor %}

View File

@@ -13,7 +13,7 @@ localAPIEndpoint:
certificateKey: {{ kubeadm_certificate_key }}
{% endif %}
nodeRegistration:
{% if kube_override_hostname|default('') %}
{% if kube_override_hostname | default('') %}
name: {{ kube_override_hostname }}
{% endif %}
{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
@@ -89,7 +89,7 @@ etcd:
{% endfor %}
{% endif %}
dns:
imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$','') }}
imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$', '') }}
imageTag: {{ coredns_image_tag }}
networking:
dnsDomain: {{ dns_domain }}
@@ -100,7 +100,7 @@ networking:
{% if kubeadm_feature_gates %}
featureGates:
{% for feature in kubeadm_feature_gates %}
{{ feature|replace("=", ": ") }}
{{ feature | replace("=", ": ") }}
{% endfor %}
{% endif %}
kubernetesVersion: {{ kube_version }}
@@ -124,13 +124,13 @@ apiServer:
{% endif %}
authorization-mode: {{ authorization_modes | join(',') }}
bind-address: {{ kube_apiserver_bind_address }}
{% if kube_apiserver_enable_admission_plugins|length > 0 %}
{% if kube_apiserver_enable_admission_plugins | length > 0 %}
enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
{% if kube_apiserver_admission_control_config_file %}
admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml
{% endif %}
{% if kube_apiserver_disable_admission_plugins|length > 0 %}
{% if kube_apiserver_disable_admission_plugins | length > 0 %}
disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
{% endif %}
apiserver-count: "{{ kube_apiserver_count }}"
@@ -144,13 +144,13 @@ apiServer:
profiling: "{{ kube_profiling }}"
request-timeout: "{{ kube_apiserver_request_timeout }}"
enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
{% if kube_token_auth|default(true) %}
{% if kube_token_auth | default(true) %}
token-auth-file: {{ kube_token_dir }}/known_tokens.csv
{% endif %}
{% if kube_apiserver_service_account_lookup %}
service-account-lookup: "{{ kube_apiserver_service_account_lookup }}"
{% endif %}
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
{% if kube_oidc_auth | default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
oidc-issuer-url: "{{ kube_oidc_url }}"
oidc-client-id: "{{ kube_oidc_client_id }}"
{% if kube_oidc_ca_file is defined %}
@@ -169,17 +169,17 @@ apiServer:
oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
{% if kube_webhook_token_auth | default(false) %}
authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_webhook_authorization|default(false) %}
{% if kube_webhook_authorization | default(false) %}
authorization-webhook-config-file: {{ kube_config_dir }}/webhook-authorization-config.yaml
{% endif %}
{% if kube_encrypt_secret_data %}
encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml
{% endif %}
storage-backend: {{ kube_apiserver_storage_backend }}
{% if kube_api_runtime_config|length > 0 %}
{% if kube_api_runtime_config | length > 0 %}
runtime-config: {{ kube_api_runtime_config | join(',') }}
{% endif %}
allow-privileged: "true"
@@ -223,24 +223,24 @@ apiServer:
{% if kubelet_rotate_server_certificates %}
kubelet-certificate-authority: {{ kube_cert_dir }}/ca.crt
{% endif %}
{% if kubernetes_audit or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs|length %}
{% if kubernetes_audit or kube_token_auth | default(true) or kube_webhook_token_auth | default(false) or ( cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] ) or apiserver_extra_volumes or ssl_ca_dirs | length %}
extraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "gce"] %}
- name: cloud-config
hostPath: {{ kube_config_dir }}/cloud_config
mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_token_auth|default(true) %}
{% if kube_token_auth | default(true) %}
- name: token-auth-config
hostPath: {{ kube_token_dir }}
mountPath: {{ kube_token_dir }}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
{% if kube_webhook_token_auth | default(false) %}
- name: webhook-token-auth-config
hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_webhook_authorization|default(false) %}
{% if kube_webhook_authorization | default(false) %}
- name: webhook-authorization-config
hostPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
mountPath: {{ kube_config_dir }}/webhook-authorization-config.yaml
@@ -269,7 +269,7 @@ apiServer:
mountPath: {{ volume.mountPath }}
readOnly: {{ volume.readOnly | d(not (volume.writable | d(false))) }}
{% endfor %}
{% if ssl_ca_dirs|length %}
{% if ssl_ca_dirs | length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
hostPath: {{ dir }}
@@ -316,7 +316,7 @@ controllerManager:
configure-cloud-routes: "false"
{% endif %}
{% if kubelet_flexvolumes_plugins_dir is defined %}
flex-volume-plugin-dir: {{kubelet_flexvolumes_plugins_dir}}
flex-volume-plugin-dir: {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
{% if tls_min_version is defined %}
tls-min-version: {{ tls_min_version }}
@@ -352,7 +352,7 @@ scheduler:
feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}"
{% endif %}
profiling: "{{ kube_profiling }}"
{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
{% if kube_kubeadm_scheduler_extra_args | length > 0 %}
{% for key in kube_kubeadm_scheduler_extra_args %}
{{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
{% endfor %}
@@ -422,7 +422,7 @@ portRange: {{ kube_proxy_port_range }}
{% set feature_gates = ( kube_proxy_feature_gates | default(kube_feature_gates, true) ) %}
featureGates:
{% for feature in feature_gates %}
{{ feature|replace("=", ": ") }}
{{ feature | replace("=", ": ") }}
{% endfor %}
{% endif %}
{# DNS settings for kubelet #}
@@ -448,6 +448,6 @@ clusterDNS:
{% set feature_gates = ( kubelet_feature_gates | default(kube_feature_gates, true) ) %}
featureGates:
{% for feature in feature_gates %}
{{ feature|replace("=", ": ") }}
{{ feature | replace("=", ": ") }}
{% endfor %}
{% endif %}
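
The featureGates loops above rely on a single replace filter to turn 'Name=value' gate strings into YAML mappings. A hypothetical rendering for one gate:

{% for feature in ["RotateKubeletServerCertificate=true"] %}
{{ feature | replace("=", ": ") }}
{% endfor %}

which emits the single line RotateKubeletServerCertificate: true.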

View File

@@ -17,7 +17,7 @@ controlPlane:
bindPort: {{ kube_apiserver_port }}
certificateKey: {{ kubeadm_certificate_key }}
nodeRegistration:
name: {{ kube_override_hostname|default(inventory_hostname) }}
name: {{ kube_override_hostname | default(inventory_hostname) }}
criSocket: {{ cri_socket }}
{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
taints:

View File

@@ -1,5 +1,5 @@
{% set kubescheduler_config_api_version = "v1beta3" %}
apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version|d('v1') }}
apiVersion: kubescheduler.config.k8s.io/{{ kubescheduler_config_api_version | d('v1') }}
kind: KubeSchedulerConfiguration
clientConnection:
kubeconfig: "{{ kube_config_dir }}/scheduler.conf"

View File

@@ -9,9 +9,9 @@ defaults:
warn: "{{ kube_pod_security_default_warn }}"
warn-version: "{{ kube_pod_security_default_warn_version }}"
exemptions:
usernames: {{ kube_pod_security_exemptions_usernames|to_json }}
runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names|to_json }}
namespaces: {{ kube_pod_security_exemptions_namespaces|to_json }}
usernames: {{ kube_pod_security_exemptions_usernames | to_json }}
runtimeClasses: {{ kube_pod_security_exemptions_runtime_class_names | to_json }}
namespaces: {{ kube_pod_security_exemptions_namespaces | to_json }}
{% else %}
# This file is intentionally left empty as kube_pod_security_use_default={{ kube_pod_security_use_default }}
{% endif %}

View File

@@ -2,7 +2,7 @@ apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
{{ kube_encryption_resources|to_nice_yaml|indent(4, True) }}
{{ kube_encryption_resources | to_nice_yaml | indent(4, True) }}
providers:
- {{ kube_encryption_algorithm }}:
keys:

View File

@@ -6,7 +6,7 @@ kubeadm_join_timeout: 120s
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
{%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
{%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
{%- else -%}
{{ inventory_hostname }}
{%- endif -%}

View File

@@ -51,7 +51,7 @@
register: "etcd_client_cert_serial_result"
changed_when: false
when:
- inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
- inventory_hostname in groups['k8s_cluster'] | union(groups['calico_rr'] | default([])) | unique | sort
tags:
- network

View File

@@ -1,6 +1,7 @@
---
- name: Set kubeadm_discovery_address
set_fact:
# noqa: jinja[spacing]
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
@@ -138,7 +139,7 @@
args:
executable: /bin/bash
run_once: true
delegate_to: "{{ groups['kube_control_plane']|first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
delegate_facts: false
when:
- kubeadm_config_api_fqdn is not defined
@@ -158,7 +159,7 @@
- name: Restart all kube-proxy pods to ensure that they load the new configmap
command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
run_once: true
delegate_to: "{{ groups['kube_control_plane']|first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
delegate_facts: false
when:
- kubeadm_config_api_fqdn is not defined

View File

@@ -17,10 +17,10 @@
- name: Node label for nvidia GPU nodes
set_fact:
role_node_labels: "{{ role_node_labels + [ 'nvidia.com/gpu=true' ] }}"
role_node_labels: "{{ role_node_labels + ['nvidia.com/gpu=true'] }}"
when:
- nvidia_gpu_nodes is defined
- nvidia_accelerator_enabled|bool
- nvidia_accelerator_enabled | bool
- inventory_hostname in nvidia_gpu_nodes
- name: Set inventory node label to empty list
@@ -29,8 +29,8 @@
- name: Populate inventory node label
set_fact:
inventory_node_labels: "{{ inventory_node_labels + [ '%s=%s'|format(item.key, item.value) ] }}"
loop: "{{ node_labels|d({})|dict2items }}"
inventory_node_labels: "{{ inventory_node_labels + ['%s=%s' | format(item.key, item.value)] }}"
loop: "{{ node_labels | d({}) | dict2items }}"
when:
- node_labels is defined
- node_labels is mapping

View File

@@ -141,7 +141,7 @@ kubelet_node_custom_flags: []
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
{%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
{%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
{%- else -%}
{{ inventory_hostname }}
{%- endif -%}
@@ -161,14 +161,14 @@ sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
# For the openstack integration kubelet will need credentials to access
# openstack apis like nova and cinder. By default these values will be
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true),true) }}"
openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
openstack_auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env', 'OS_USERNAME') }}"
openstack_password: "{{ lookup('env', 'OS_PASSWORD') }}"
openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env', 'OS_TENANT_ID') | default(lookup('env', 'OS_PROJECT_ID') | default(lookup('env', 'OS_PROJECT_NAME'), true), true) }}"
openstack_tenant_name: "{{ lookup('env', 'OS_TENANT_NAME') }}"
openstack_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env', 'OS_USER_DOMAIN_ID') }}"
# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
@@ -186,7 +186,7 @@ vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
vsphere_scsi_controller_type: pvscsi
# vsphere_public_network is name of the network the VMs are joined to
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default('') }}"
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values

View File

@@ -61,15 +61,15 @@
- name: "check azure_exclude_master_from_standard_lb is a bool"
assert:
that: azure_exclude_master_from_standard_lb |type_debug == 'bool'
that: azure_exclude_master_from_standard_lb | type_debug == 'bool'
- name: "check azure_disable_outbound_snat is a bool"
assert:
that: azure_disable_outbound_snat |type_debug == 'bool'
that: azure_disable_outbound_snat | type_debug == 'bool'
- name: "check azure_use_instance_metadata is a bool"
assert:
that: azure_use_instance_metadata |type_debug == 'bool'
that: azure_use_instance_metadata | type_debug == 'bool'
- name: check azure_vmtype value
fail:

View File

@@ -52,9 +52,9 @@
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}.yml"
- "{{ ansible_os_family | lower }}.yml"
skip: true

View File

@@ -151,7 +151,7 @@
- name: Test if openstack_cacert is a base64 string
set_fact:
openstack_cacert_is_base64: "{% if openstack_cacert is search ('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}"
openstack_cacert_is_base64: "{% if openstack_cacert is search('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}"
when:
- cloud_provider is defined
- cloud_provider == 'openstack'

View File

@@ -34,13 +34,13 @@ healthzPort: {{ kubelet_healthz_port }}
healthzBindAddress: {{ kubelet_healthz_bind_address }}
kubeletCgroups: {{ kubelet_kubelet_cgroups }}
clusterDomain: {{ dns_domain }}
{% if kubelet_protect_kernel_defaults|bool %}
{% if kubelet_protect_kernel_defaults | bool %}
protectKernelDefaults: true
{% endif %}
{% if kubelet_rotate_certificates|bool %}
{% if kubelet_rotate_certificates | bool %}
rotateCertificates: true
{% endif %}
{% if kubelet_rotate_server_certificates|bool %}
{% if kubelet_rotate_server_certificates | bool %}
serverTLSBootstrap: true
{% endif %}
{# DNS settings for kubelet #}
@@ -60,10 +60,10 @@ clusterDNS:
- {{ dns_address }}
{% endfor %}
{# Node reserved CPU/memory #}
{% if kube_reserved|bool %}
{% if kube_reserved | bool %}
kubeReservedCgroup: {{ kube_reserved_cgroups }}
kubeReserved:
{% if is_kube_master|bool %}
{% if is_kube_master | bool %}
cpu: {{ kube_master_cpu_reserved }}
memory: {{ kube_master_memory_reserved }}
{% if kube_master_ephemeral_storage_reserved is defined %}
@@ -83,10 +83,10 @@ kubeReserved:
{% endif %}
{% endif %}
{% endif %}
{% if system_reserved|bool %}
{% if system_reserved | bool %}
systemReservedCgroup: {{ system_reserved_cgroups }}
systemReserved:
{% if is_kube_master|bool %}
{% if is_kube_master | bool %}
cpu: {{ system_master_cpu_reserved }}
memory: {{ system_master_memory_reserved }}
{% if system_master_ephemeral_storage_reserved is defined %}
@@ -106,10 +106,10 @@ systemReserved:
{% endif %}
{% endif %}
{% endif %}
{% if is_kube_master|bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
{% if is_kube_master | bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
evictionHard:
{{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
{% elif not is_kube_master|bool and eviction_hard is defined and eviction_hard %}
{% elif not is_kube_master | bool and eviction_hard is defined and eviction_hard %}
evictionHard:
{{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
{% endif %}
@@ -123,7 +123,7 @@ resolvConf: "{{ kube_resolv_conf }}"
{% if kubelet_feature_gates or kube_feature_gates %}
featureGates:
{% for feature in (kubelet_feature_gates | default(kube_feature_gates, true)) %}
{{ feature|replace("=", ": ") }}
{{ feature | replace("=", ": ") }}
{% endfor %}
{% endif %}
{% if tls_min_version is defined %}

Some files were not shown because too many files have changed in this diff.