Resolve ansible-lint name errors (#10253)

* project: fix ansible-lint name

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: ignore jinja template error in names

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: capitalize ansible name

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: update notify after name capitalization

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
This commit is contained in:
Arthur Outhenin-Chalandre
2023-07-26 16:36:22 +02:00
committed by GitHub
parent b9e3861385
commit 36e5d742dc
162 changed files with 842 additions and 675 deletions

View File

@@ -1,5 +1,5 @@
---
- name: set bastion host IP and port
- name: Set bastion host IP and port
set_fact:
bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}"
bastion_port: "{{ hostvars[groups['bastion'][0]]['ansible_port'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_port']) | d(22) }}"
@@ -12,7 +12,7 @@
set_fact:
real_user: "{{ ansible_user }}"
- name: create ssh bastion conf
- name: Create ssh bastion conf
become: false
delegate_to: localhost
connection: local

View File

@@ -6,37 +6,46 @@
# This command should always run, even in check mode
check_mode: false
- include_tasks: bootstrap-centos.yml
- name: Bootstrap CentOS
include_tasks: bootstrap-centos.yml
when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines'
- include_tasks: bootstrap-amazon.yml
- name: Bootstrap Amazon
include_tasks: bootstrap-amazon.yml
when: '''ID="amzn"'' in os_release.stdout_lines'
- include_tasks: bootstrap-redhat.yml
- name: Bootstrap RedHat
include_tasks: bootstrap-redhat.yml
when: '''ID="rhel"'' in os_release.stdout_lines'
- include_tasks: bootstrap-clearlinux.yml
- name: Bootstrap Clear Linux
include_tasks: bootstrap-clearlinux.yml
when: '''ID=clear-linux-os'' in os_release.stdout_lines'
# Fedora CoreOS
- include_tasks: bootstrap-fedora-coreos.yml
- name: Bootstrap Fedora CoreOS
include_tasks: bootstrap-fedora-coreos.yml
when:
- '''ID=fedora'' in os_release.stdout_lines'
- '''VARIANT_ID=coreos'' in os_release.stdout_lines'
- include_tasks: bootstrap-flatcar.yml
- name: Bootstrap Flatcar
include_tasks: bootstrap-flatcar.yml
when: '''ID=flatcar'' in os_release.stdout_lines'
- include_tasks: bootstrap-debian.yml
- name: Bootstrap Debian
include_tasks: bootstrap-debian.yml
when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines'
# Fedora "classic"
- include_tasks: bootstrap-fedora.yml
- name: Bootstrap Fedora
include_tasks: bootstrap-fedora.yml
when:
- '''ID=fedora'' in os_release.stdout_lines'
- '''VARIANT_ID=coreos'' not in os_release.stdout_lines'
- include_tasks: bootstrap-opensuse.yml
- name: Bootstrap OpenSUSE
include_tasks: bootstrap-opensuse.yml
when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines'
- name: Create remote_tmp for it is used by another module

View File

@@ -1,5 +1,5 @@
---
- name: containerd-common | check if fedora coreos
- name: Containerd-common | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
@@ -7,11 +7,11 @@
get_mime: no
register: ostree
- name: containerd-common | set is_ostree
- name: Containerd-common | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
- name: containerd-common | gather os specific variables
- name: Containerd-common | gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:

View File

@@ -1,5 +1,5 @@
---
- name: restart containerd
- name: Restart containerd
command: /bin/true
notify:
- Containerd | restart containerd

View File

@@ -12,7 +12,8 @@
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"

View File

@@ -5,33 +5,33 @@
when:
- not (allow_unsupported_distribution_setup | default(false)) and (ansible_distribution not in containerd_supported_distributions)
- name: containerd | Remove any package manager controlled containerd package
- name: Containerd | Remove any package manager controlled containerd package
package:
name: "{{ containerd_package }}"
state: absent
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- name: containerd | Remove containerd repository
- name: Containerd | Remove containerd repository
file:
path: "{{ yum_repo_dir }}/containerd.repo"
state: absent
when:
- ansible_os_family in ['RedHat']
- name: containerd | Remove containerd repository
- name: Containerd | Remove containerd repository
apt_repository:
repo: "{{ item }}"
state: absent
with_items: "{{ containerd_repo_info.repos }}"
when: ansible_pkg_mgr == 'apt'
- name: containerd | Download containerd
- name: Containerd | Download containerd
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.containerd) }}"
- name: containerd | Unpack containerd archive
- name: Containerd | Unpack containerd archive
unarchive:
src: "{{ downloads.containerd.dest }}"
dest: "{{ containerd_bin_dir }}"
@@ -39,9 +39,9 @@
remote_src: yes
extra_opts:
- --strip-components=1
notify: restart containerd
notify: Restart containerd
- name: containerd | Remove orphaned binary
- name: Containerd | Remove orphaned binary
file:
path: "/usr/bin/{{ item }}"
state: absent
@@ -56,14 +56,14 @@
- containerd-shim-runc-v2
- ctr
- name: containerd | Generate systemd service for containerd
- name: Containerd | Generate systemd service for containerd
template:
src: containerd.service.j2
dest: /etc/systemd/system/containerd.service
mode: 0644
notify: restart containerd
notify: Restart containerd
- name: containerd | Ensure containerd directories exist
- name: Containerd | Ensure containerd directories exist
file:
dest: "{{ item }}"
state: directory
@@ -76,50 +76,51 @@
- "{{ containerd_storage_dir }}"
- "{{ containerd_state_dir }}"
- name: containerd | Write containerd proxy drop-in
- name: Containerd | Write containerd proxy drop-in
template:
src: http-proxy.conf.j2
dest: "{{ containerd_systemd_dir }}/http-proxy.conf"
mode: 0644
notify: restart containerd
notify: Restart containerd
when: http_proxy is defined or https_proxy is defined
- name: containerd | Generate default base_runtime_spec
- name: Containerd | Generate default base_runtime_spec
register: ctr_oci_spec
command: "{{ containerd_bin_dir }}/ctr oci spec"
check_mode: false
changed_when: false
- name: containerd | Store generated default base_runtime_spec
- name: Containerd | Store generated default base_runtime_spec
set_fact:
containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}"
- name: containerd | Write base_runtime_specs
- name: Containerd | Write base_runtime_specs
copy:
content: "{{ item.value }}"
dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
owner: "root"
mode: 0644
with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
notify: restart containerd
notify: Restart containerd
- name: containerd | Copy containerd config file
- name: Containerd | Copy containerd config file
template:
src: config.toml.j2
dest: "{{ containerd_cfg_dir }}/config.toml"
owner: "root"
mode: 0640
notify: restart containerd
notify: Restart containerd
- block:
- name: containerd Create registry directories
- name: Containerd | Configure containerd registries
block:
- name: Containerd | Create registry directories
file:
path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}"
state: directory
mode: 0755
recurse: true
with_dict: "{{ containerd_insecure_registries }}"
- name: containerd Write hosts.toml file
- name: Containerd | Write hosts.toml file
blockinfile:
path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}/hosts.toml"
mode: 0640
@@ -134,10 +135,10 @@
# you can sometimes end up in a state where everything is installed
# but containerd was not started / enabled
- name: containerd | Flush handlers
- name: Containerd | Flush handlers
meta: flush_handlers
- name: containerd | Ensure containerd is started and enabled
- name: Containerd | Ensure containerd is started and enabled
systemd:
name: containerd
daemon_reload: yes

View File

@@ -1,5 +1,5 @@
---
- name: containerd | Remove containerd repository for RedHat os family
- name: Containerd | Remove containerd repository for RedHat os family
file:
path: "{{ yum_repo_dir }}/containerd.repo"
state: absent
@@ -8,7 +8,7 @@
tags:
- reset_containerd
- name: containerd | Remove containerd repository for Debian os family
- name: Containerd | Remove containerd repository for Debian os family
apt_repository:
repo: "{{ item }}"
state: absent
@@ -17,7 +17,7 @@
tags:
- reset_containerd
- name: containerd | Stop containerd service
- name: Containerd | Stop containerd service
service:
name: containerd
daemon_reload: true
@@ -26,7 +26,7 @@
tags:
- reset_containerd
- name: containerd | Remove configuration files
- name: Containerd | Remove configuration files
file:
path: "{{ item }}"
state: absent

View File

@@ -1,35 +1,35 @@
---
- name: restart and enable cri-dockerd
- name: Restart and enable cri-dockerd
command: /bin/true
notify:
- cri-dockerd | reload systemd
- cri-dockerd | restart docker.service
- cri-dockerd | reload cri-dockerd.socket
- cri-dockerd | reload cri-dockerd.service
- cri-dockerd | enable cri-dockerd service
- Cri-dockerd | reload systemd
- Cri-dockerd | restart docker.service
- Cri-dockerd | reload cri-dockerd.socket
- Cri-dockerd | reload cri-dockerd.service
- Cri-dockerd | enable cri-dockerd service
- name: cri-dockerd | reload systemd
- name: Cri-dockerd | reload systemd
systemd:
name: cri-dockerd
daemon_reload: true
masked: no
- name: cri-dockerd | restart docker.service
- name: Cri-dockerd | restart docker.service
service:
name: docker.service
state: restarted
- name: cri-dockerd | reload cri-dockerd.socket
- name: Cri-dockerd | reload cri-dockerd.socket
service:
name: cri-dockerd.socket
state: restarted
- name: cri-dockerd | reload cri-dockerd.service
- name: Cri-dockerd | reload cri-dockerd.service
service:
name: cri-dockerd.service
state: restarted
- name: cri-dockerd | enable cri-dockerd service
- name: Cri-dockerd | enable cri-dockerd service
service:
name: cri-dockerd.service
enabled: yes

View File

@@ -8,7 +8,8 @@
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"

View File

@@ -1,5 +1,5 @@
---
- name: runc | Download cri-dockerd binary
- name: Runc | Download cri-dockerd binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cri_dockerd) }}"
@@ -11,7 +11,7 @@
mode: 0755
remote_src: true
notify:
- restart and enable cri-dockerd
- Restart and enable cri-dockerd
- name: Generate cri-dockerd systemd unit files
template:
@@ -22,7 +22,7 @@
- cri-dockerd.service
- cri-dockerd.socket
notify:
- restart and enable cri-dockerd
- Restart and enable cri-dockerd
- name: Flush handlers
meta: flush_handlers

View File

@@ -1,5 +1,5 @@
---
- name: restart crio
- name: Restart crio
command: /bin/true
notify:
- CRI-O | reload systemd

View File

@@ -12,7 +12,8 @@
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"

View File

@@ -109,7 +109,7 @@
- 1.23
- 1.24
- name: cri-o | remove installed packages
- name: Cri-o | remove installed packages
package:
name: "{{ item }}"
state: absent

View File

@@ -1,5 +1,5 @@
---
- name: cri-o | check if fedora coreos
- name: Cri-o | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
@@ -7,48 +7,48 @@
get_mime: no
register: ostree
- name: cri-o | set is_ostree
- name: Cri-o | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
- name: cri-o | get ostree version
- name: Cri-o | get ostree version
shell: "set -o pipefail && rpm-ostree --version | awk -F\\' '/Version/{print $2}'"
args:
executable: /bin/bash
register: ostree_version
when: is_ostree
- name: cri-o | Download cri-o
- name: Cri-o | Download cri-o
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.crio) }}"
- name: cri-o | special handling for amazon linux
- name: Cri-o | special handling for amazon linux
import_tasks: "setup-amazon.yaml"
when: ansible_distribution in ["Amazon"]
- name: cri-o | clean up reglacy repos
- name: Cri-o | clean up legacy repos
import_tasks: "cleanup.yaml"
- name: cri-o | build a list of crio runtimes with Katacontainers runtimes
- name: Cri-o | build a list of crio runtimes with Katacontainers runtimes
set_fact:
crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
when:
- kata_containers_enabled
- name: cri-o | build a list of crio runtimes with crun runtime
- name: Cri-o | build a list of crio runtimes with crun runtime
set_fact:
crio_runtimes: "{{ crio_runtimes + [crun_runtime] }}"
when:
- crun_enabled
- name: cri-o | build a list of crio runtimes with youki runtime
- name: Cri-o | build a list of crio runtimes with youki runtime
set_fact:
crio_runtimes: "{{ crio_runtimes + [youki_runtime] }}"
when:
- youki_enabled
- name: cri-o | make sure needed folders exist in the system
- name: Cri-o | make sure needed folders exist in the system
with_items:
- /etc/crio
- /etc/containers
@@ -58,21 +58,21 @@
state: directory
mode: 0755
- name: cri-o | install cri-o config
- name: Cri-o | install cri-o config
template:
src: crio.conf.j2
dest: /etc/crio/crio.conf
mode: 0644
register: config_install
- name: cri-o | install config.json
- name: Cri-o | install config.json
template:
src: config.json.j2
dest: /etc/crio/config.json
mode: 0644
register: reg_auth_install
- name: cri-o | copy binaries
- name: Cri-o | copy binaries
copy:
src: "{{ local_release_dir }}/cri-o/bin/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"
@@ -80,48 +80,48 @@
remote_src: true
with_items:
- "{{ crio_bin_files }}"
notify: restart crio
notify: Restart crio
- name: cri-o | copy service file
- name: Cri-o | copy service file
copy:
src: "{{ local_release_dir }}/cri-o/contrib/crio.service"
dest: /etc/systemd/system/crio.service
mode: 0755
remote_src: true
notify: restart crio
notify: Restart crio
- name: cri-o | update the bin dir for crio.service file
- name: Cri-o | update the bin dir for crio.service file
replace:
dest: /etc/systemd/system/crio.service
regexp: "/usr/local/bin/crio"
replace: "{{ bin_dir }}/crio"
notify: restart crio
notify: Restart crio
- name: cri-o | copy default policy
- name: Cri-o | copy default policy
copy:
src: "{{ local_release_dir }}/cri-o/contrib/policy.json"
dest: /etc/containers/policy.json
mode: 0755
remote_src: true
notify: restart crio
notify: Restart crio
- name: cri-o | copy mounts.conf
- name: Cri-o | copy mounts.conf
copy:
src: mounts.conf
dest: /etc/containers/mounts.conf
mode: 0644
when:
- ansible_os_family == 'RedHat'
notify: restart crio
notify: Restart crio
- name: cri-o | create directory for oci hooks
- name: Cri-o | create directory for oci hooks
file:
path: /etc/containers/oci/hooks.d
state: directory
owner: root
mode: 0755
- name: cri-o | set overlay driver
- name: Cri-o | set overlay driver
community.general.ini_file:
dest: /etc/containers/storage.conf
section: storage
@@ -135,7 +135,7 @@
value: '"/var/lib/containers/storage"'
# metacopy=on is available since 4.19 and was backported to RHEL 4.18 kernel
- name: cri-o | set metacopy mount options correctly
- name: Cri-o | set metacopy mount options correctly
community.general.ini_file:
dest: /etc/containers/storage.conf
section: storage.options.overlay
@@ -143,37 +143,37 @@
value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
mode: 0644
- name: cri-o | create directory registries configs
- name: Cri-o | create directory registries configs
file:
path: /etc/containers/registries.conf.d
state: directory
owner: root
mode: 0755
- name: cri-o | write registries configs
- name: Cri-o | write registries configs
template:
src: registry.conf.j2
dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':', '_') }}.conf"
mode: 0644
loop: "{{ crio_registries }}"
notify: restart crio
notify: Restart crio
- name: cri-o | configure unqualified registry settings
- name: Cri-o | configure unqualified registry settings
template:
src: unqualified.conf.j2
dest: "/etc/containers/registries.conf.d/01-unqualified.conf"
mode: 0644
notify: restart crio
notify: Restart crio
- name: cri-o | write cri-o proxy drop-in
- name: Cri-o | write cri-o proxy drop-in
template:
src: http-proxy.conf.j2
dest: /etc/systemd/system/crio.service.d/http-proxy.conf
mode: 0644
notify: restart crio
notify: Restart crio
when: http_proxy is defined or https_proxy is defined
- name: cri-o | configure the uid/gid space for user namespaces
- name: Cri-o | configure the uid/gid space for user namespaces
lineinfile:
path: '{{ item.path }}'
line: '{{ item.entry }}'
@@ -187,7 +187,7 @@
loop_control:
label: '{{ item.path }}'
- name: cri-o | ensure crio service is started and enabled
- name: Cri-o | ensure crio service is started and enabled
service:
name: crio
daemon_reload: true
@@ -195,7 +195,7 @@
state: started
register: service_start
- name: cri-o | trigger service restart only when needed
- name: Cri-o | trigger service restart only when needed
service:
name: crio
state: restarted
@@ -203,7 +203,7 @@
- config_install.changed or reg_auth_install.changed
- not service_start.changed
- name: cri-o | verify that crio is running
- name: Cri-o | verify that crio is running
command: "{{ bin_dir }}/crio-status info"
register: get_crio_info
until: get_crio_info is succeeded

View File

@@ -1,5 +1,5 @@
---
- name: crictl | Download crictl
- name: Crictl | Download crictl
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.crictl) }}"

View File

@@ -1,3 +1,3 @@
---
- name: install crictl
- name: Install crictl
include_tasks: crictl.yml

View File

@@ -1,5 +1,5 @@
---
- name: crun | Download crun binary
- name: Crun | Download crun binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.crun) }}"

View File

@@ -1,18 +1,18 @@
---
- name: docker-storage-setup | install git and make
- name: Docker-storage-setup | install git and make
with_items: [git, make]
package:
pkg: "{{ item }}"
state: present
- name: docker-storage-setup | docker-storage-setup sysconfig template
- name: Docker-storage-setup | docker-storage-setup sysconfig template
template:
src: docker-storage-setup.j2
dest: /etc/sysconfig/docker-storage-setup
mode: 0644
- name: docker-storage-override-directory | docker service storage-setup override dir
- name: Docker-storage-override-directory | docker service storage-setup override dir
file:
dest: /etc/systemd/system/docker.service.d
mode: 0755
@@ -20,7 +20,7 @@
group: root
state: directory
- name: docker-storage-override | docker service storage-setup override file
- name: Docker-storage-override | docker service storage-setup override file
copy:
dest: /etc/systemd/system/docker.service.d/override.conf
content: |-
@@ -33,12 +33,12 @@
mode: 0644
# https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository
- name: docker-storage-setup | install lvm2
- name: Docker-storage-setup | install lvm2
package:
name: lvm2
state: present
- name: docker-storage-setup | install and run container-storage-setup
- name: Docker-storage-setup | install and run container-storage-setup
become: yes
script: |
install_container_storage_setup.sh \

View File

@@ -1,5 +1,5 @@
---
- name: restart docker
- name: Restart docker
command: /bin/true
notify:
- Docker | reload systemd

View File

@@ -1,5 +1,5 @@
---
- name: check if fedora coreos
- name: Check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
@@ -7,18 +7,18 @@
get_mime: no
register: ostree
- name: set is_ostree
- name: Set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
- name: set docker_version for openEuler
- name: Set docker_version for openEuler
set_fact:
docker_version: '19.03'
when: ansible_distribution == "openEuler"
tags:
- facts
- name: gather os specific variables
- name: Gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
@@ -44,14 +44,16 @@
msg: "SUSE distributions always install Docker from the distro repos"
when: ansible_pkg_mgr == 'zypper'
- include_tasks: set_facts_dns.yml
- name: Gather DNS facts
include_tasks: set_facts_dns.yml
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
tags:
- facts
- import_tasks: pre-upgrade.yml
- name: Pre-upgrade docker
import_tasks: pre-upgrade.yml
- name: ensure docker-ce repository public key is installed
- name: Ensure docker-ce repository public key is installed
apt_key:
id: "{{ item }}"
url: "{{ docker_repo_key_info.url }}"
@@ -64,7 +66,7 @@
environment: "{{ proxy_env }}"
when: ansible_pkg_mgr == 'apt'
- name: ensure docker-ce repository is enabled
- name: Ensure docker-ce repository is enabled
apt_repository:
repo: "{{ item }}"
state: present
@@ -99,7 +101,7 @@
- docker-ce
- docker-ce-cli
- name: ensure docker packages are installed
- name: Ensure docker packages are installed
package:
name: "{{ docker_package_info.pkgs }}"
state: "{{ docker_package_info.state | default('present') }}"
@@ -117,7 +119,7 @@
until: docker_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | d(3) }}"
notify: restart docker
notify: Restart docker
when:
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- not is_ostree
@@ -135,9 +137,9 @@
- docker-ce
- docker-ce-cli
- name: ensure docker started, remove our config if docker start failed and try again
- name: Ensure docker started, remove our config if docker start failed and try again
block:
- name: ensure service is started if docker packages are already present
- name: Ensure service is started if docker packages are already present
service:
name: docker
state: started
@@ -145,7 +147,7 @@
rescue:
- debug: # noqa name[missing]
msg: "Docker start failed. Try to remove our config"
- name: remove kubespray generated config
- name: Remove kubespray generated config
file:
path: "{{ item }}"
state: absent
@@ -154,13 +156,14 @@
- /etc/systemd/system/docker.service.d/docker-options.conf
- /etc/systemd/system/docker.service.d/docker-dns.conf
- /etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf
notify: restart docker
notify: Restart docker
- name: flush handlers so we can wait for docker to come up
- name: Flush handlers so we can wait for docker to come up
meta: flush_handlers
# Install each plugin using a looped include to make error handling in the included task simpler.
- include_tasks: docker_plugin.yml
- name: Install docker plugin
include_tasks: docker_plugin.yml
loop: "{{ docker_plugins }}"
loop_control:
loop_var: docker_plugin
@@ -168,7 +171,7 @@
- name: Set docker systemd config
import_tasks: systemd.yml
- name: ensure docker service is started and enabled
- name: Ensure docker service is started and enabled
service:
name: "{{ item }}"
enabled: yes

View File

@@ -21,7 +21,7 @@
ignore_errors: true # noqa ignore-errors
when: docker_packages_list | length>0
- name: reset | remove all containers
- name: Reset | remove all containers
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
args:
executable: /bin/bash

View File

@@ -1,23 +1,23 @@
---
- name: set dns server for docker
- name: Set dns server for docker
set_fact:
docker_dns_servers: "{{ dns_servers }}"
- name: show docker_dns_servers
- name: Show docker_dns_servers
debug:
msg: "{{ docker_dns_servers }}"
- name: add upstream dns servers
- name: Add upstream dns servers
set_fact:
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers | default([]) }}"
when: dns_mode in ['coredns', 'coredns_dual']
- name: add global searchdomains
- name: Add global searchdomains
set_fact:
docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains | default([]) }}"
- name: check system nameservers
- name: Check system nameservers
shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
args:
executable: /bin/bash
@@ -25,7 +25,7 @@
register: system_nameservers
check_mode: no
- name: check system search domains
- name: Check system search domains
# noqa risky-shell-pipe - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
# Therefore -o pipefail is not applicable in this specific instance
shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
@@ -35,32 +35,32 @@
register: system_search_domains
check_mode: no
- name: add system nameservers to docker options
- name: Add system nameservers to docker options
set_fact:
docker_dns_servers: "{{ docker_dns_servers | union(system_nameservers.stdout_lines) | unique }}"
when: system_nameservers.stdout
- name: add system search domains to docker options
- name: Add system search domains to docker options
set_fact:
docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split() | default([])) | unique }}"
when: system_search_domains.stdout
- name: check number of nameservers
- name: Check number of nameservers
fail:
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in docker.yml and we will only use the first 3."
when: docker_dns_servers | length > 3 and docker_dns_servers_strict | bool
- name: rtrim number of nameservers to 3
- name: Rtrim number of nameservers to 3
set_fact:
docker_dns_servers: "{{ docker_dns_servers[0:3] }}"
when: docker_dns_servers | length > 3 and not docker_dns_servers_strict | bool
- name: check number of search domains
- name: Check number of search domains
fail:
msg: "Too many search domains"
when: docker_dns_search_domains | length > 6
- name: check length of search domains
- name: Check length of search domains
fail:
msg: "Search domains exceeded limit of 256 characters"
when: docker_dns_search_domains | join(' ') | length > 256

View File

@@ -10,10 +10,10 @@
src: http-proxy.conf.j2
dest: /etc/systemd/system/docker.service.d/http-proxy.conf
mode: 0644
notify: restart docker
notify: Restart docker
when: http_proxy is defined or https_proxy is defined
- name: get systemd version
- name: Get systemd version
# noqa command-instead-of-module - systemctl is called intentionally here
shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
args:
@@ -29,7 +29,7 @@
dest: /etc/systemd/system/docker.service
mode: 0644
register: docker_service_file
notify: restart docker
notify: Restart docker
when:
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- not is_fedora_coreos
@@ -39,14 +39,14 @@
src: docker-options.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
mode: 0644
notify: restart docker
notify: Restart docker
- name: Write docker dns systemd drop-in
template:
src: docker-dns.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
mode: 0644
notify: restart docker
notify: Restart docker
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
- name: Copy docker orphan clean up script to the node
@@ -61,7 +61,7 @@
src: docker-orphan-cleanup.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
mode: 0644
notify: restart docker
notify: Restart docker
when: docker_orphan_clean_up | bool
- name: Flush handlers

View File

@@ -8,7 +8,8 @@
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"

View File

@@ -1,15 +1,15 @@
---
- name: gVisor | Download runsc binary
- name: GVisor | Download runsc binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.gvisor_runsc) }}"
- name: gVisor | Download containerd-shim-runsc-v1 binary
- name: GVisor | Download containerd-shim-runsc-v1 binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.gvisor_containerd_shim) }}"
- name: gVisor | Copy binaries
- name: GVisor | Copy binaries
copy:
src: "{{ item.src }}"
dest: "{{ bin_dir }}/{{ item.dest }}"

View File

@@ -8,7 +8,8 @@
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"

View File

@@ -1,23 +1,23 @@
---
- name: kata-containers | Download kata binary
- name: Kata-containers | Download kata binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.kata_containers) }}"
- name: kata-containers | Copy kata-containers binary
- name: Kata-containers | Copy kata-containers binary
unarchive:
src: "{{ downloads.kata_containers.dest }}"
dest: "/"
mode: 0755
remote_src: yes
- name: kata-containers | Create config directory
- name: Kata-containers | Create config directory
file:
path: "{{ kata_containers_config_dir }}"
state: directory
mode: 0755
- name: kata-containers | Set configuration
- name: Kata-containers | Set configuration
template:
src: "{{ item }}.j2"
dest: "{{ kata_containers_config_dir }}/{{ item }}"
@@ -25,7 +25,7 @@
with_items:
- configuration-qemu.toml
- name: kata-containers | Set containerd bin
- name: Kata-containers | Set containerd bin
vars:
shim: "{{ item }}"
template:
@@ -35,7 +35,7 @@
with_items:
- qemu
- name: kata-containers | Load vhost kernel modules
- name: Kata-containers | Load vhost kernel modules
community.general.modprobe:
state: present
name: "{{ item }}"
@@ -43,7 +43,7 @@
- vhost_vsock
- vhost_net
- name: kata-containers | Persist vhost kernel modules
- name: Kata-containers | Persist vhost kernel modules
copy:
dest: /etc/modules-load.d/kubespray-kata-containers.conf
mode: 0644

View File

@@ -1,10 +1,10 @@
---
- name: nerdctl | Download nerdctl
- name: Nerdctl | Download nerdctl
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.nerdctl) }}"
- name: nerdctl | Copy nerdctl binary from download dir
- name: Nerdctl | Copy nerdctl binary from download dir
copy:
src: "{{ local_release_dir }}/nerdctl"
dest: "{{ bin_dir }}/nerdctl"
@@ -17,7 +17,7 @@
- Get nerdctl completion
- Install nerdctl completion
- name: nerdctl | Create configuration dir
- name: Nerdctl | Create configuration dir
file:
path: /etc/nerdctl
state: directory
@@ -26,7 +26,7 @@
group: root
become: true
- name: nerdctl | Install nerdctl configuration
- name: Nerdctl | Install nerdctl configuration
template:
src: nerdctl.toml.j2
dest: /etc/nerdctl/nerdctl.toml

View File

@@ -1,5 +1,5 @@
---
- name: runc | check if fedora coreos
- name: Runc | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
@@ -7,18 +7,18 @@
get_mime: no
register: ostree
- name: runc | set is_ostree
- name: Runc | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
- name: runc | Uninstall runc package managed by package manager
- name: Runc | Uninstall runc package managed by package manager
package:
name: "{{ runc_package_name }}"
state: absent
when:
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
- name: runc | Download runc binary
- name: Runc | Download runc binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.runc) }}"
@@ -30,7 +30,7 @@
mode: 0755
remote_src: true
- name: runc | Remove orphaned binary
- name: Runc | Remove orphaned binary
file:
path: /usr/bin/runc
state: absent

View File

@@ -1,5 +1,5 @@
---
- name: skopeo | check if fedora coreos
- name: Skopeo | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
@@ -7,11 +7,11 @@
get_mime: no
register: ostree
- name: skopeo | set is_ostree
- name: Skopeo | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
- name: skopeo | Uninstall skopeo package managed by package manager
- name: Skopeo | Uninstall skopeo package managed by package manager
package:
name: skopeo
state: absent
@@ -19,7 +19,7 @@
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
ignore_errors: true # noqa ignore-errors
- name: skopeo | Download skopeo binary
- name: Skopeo | Download skopeo binary
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.skopeo) }}"

View File

@@ -1,5 +1,5 @@
---
- name: validate-container-engine | check if fedora coreos
- name: Validate-container-engine | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
@@ -9,7 +9,7 @@
tags:
- facts
- name: validate-container-engine | set is_ostree
- name: Validate-container-engine | set is_ostree
set_fact:
is_ostree: "{{ ostree.stat.exists }}"
tags:

View File

@@ -8,7 +8,8 @@
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
- name: Download CNI
include_tasks: "../../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.cni) }}"

View File

@@ -1,10 +1,10 @@
---
- name: youki | Download youki
- name: Youki | Download youki
include_tasks: "../../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.youki) }}"
- name: youki | Copy youki binary from download dir
- name: Youki | Copy youki binary from download dir
copy:
src: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux/youki-v{{ youki_version }}/youki"
dest: "{{ youki_bin_dir }}/youki"

View File

@@ -1,20 +1,20 @@
---
# The image_info_command depends on the Container Runtime and will output something like the following:
# nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
- name: check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
- name: Check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
shell: "{{ image_info_command }}"
register: docker_images
changed_when: false
check_mode: no
when: not download_always_pull
- name: check_pull_required | Set pull_required if the desired image is not yet loaded
- name: Check_pull_required | Set pull_required if the desired image is not yet loaded
set_fact:
pull_required: >-
{%- if image_reponame | regex_replace('^docker\.io/(library/)?', '') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
when: not download_always_pull
- name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag
- name: Check_pull_required | Check that the local digest sha256 corresponds to the given image tag
assert:
that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')"
when:

View File

@@ -1,6 +1,6 @@
---
- block:
- name: set default values for flag variables
- name: Set default values for flag variables
set_fact:
image_is_cached: false
image_changed: false
@@ -8,12 +8,12 @@
tags:
- facts
- name: download_container | Set a few facts
- name: Download_container | Set a few facts
import_tasks: set_container_facts.yml
tags:
- facts
- name: download_container | Prepare container download
- name: Download_container | Prepare container download
include_tasks: check_pull_required.yml
when:
- not download_always_pull
@@ -21,7 +21,7 @@
- debug: # noqa name[missing]
msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
- name: download_container | Determine if image is in cache
- name: Download_container | Determine if image is in cache
stat:
path: "{{ image_path_cached }}"
get_attributes: no
@@ -36,7 +36,7 @@
when:
- download_force_cache
- name: download_container | Set fact indicating if image is in cache
- name: Download_container | Set fact indicating if image is in cache
set_fact:
image_is_cached: "{{ cache_image.stat.exists }}"
tags:
@@ -52,7 +52,7 @@
- download_force_cache
- not download_run_once
- name: download_container | Download image if required
- name: Download_container | Download image if required
command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}"
delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}"
delegate_facts: yes
@@ -67,7 +67,7 @@
- pull_required or download_run_once
- not image_is_cached
- name: download_container | Save and compress image
- name: Download_container | Save and compress image
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
delegate_to: "{{ download_delegate }}"
delegate_facts: no
@@ -79,7 +79,7 @@
- not image_is_cached
- download_run_once
- name: download_container | Copy image to ansible host cache
- name: Download_container | Copy image to ansible host cache
ansible.posix.synchronize:
src: "{{ image_path_final }}"
dest: "{{ image_path_cached }}"
@@ -91,7 +91,7 @@
- not download_localhost
- download_delegate == inventory_hostname
- name: download_container | Upload image to node if it is cached
- name: Download_container | Upload image to node if it is cached
ansible.posix.synchronize:
src: "{{ image_path_cached }}"
dest: "{{ image_path_final }}"
@@ -107,7 +107,7 @@
- pull_required
- download_force_cache
- name: download_container | Load image into the local container registry
- name: Download_container | Load image into the local container registry
shell: "{{ image_load_command }}" # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
register: container_load_status
failed_when: container_load_status is failed
@@ -115,7 +115,7 @@
- pull_required
- download_force_cache
- name: download_container | Remove container image from cache
- name: Download_container | Remove container image from cache
file:
state: absent
path: "{{ image_path_final }}"

View File

@@ -1,21 +1,22 @@
---
- block:
- name: prep_download | Set a few facts
- name: "Download_file | download {{ download.dest }}"
block:
- name: Prep_download | Set a few facts
set_fact:
download_force_cache: "{{ true if download_run_once else download_force_cache }}"
- name: download_file | Starting download of file
- name: Download_file | Starting download of file
debug:
msg: "{{ download.url }}"
run_once: "{{ download_run_once }}"
- name: download_file | Set pathname of cached file
- name: Download_file | Set pathname of cached file
set_fact:
file_path_cached: "{{ download_cache_dir }}/{{ download.dest | basename }}"
tags:
- facts
- name: download_file | Create dest directory on node
- name: Download_file | Create dest directory on node
file:
path: "{{ download.dest | dirname }}"
owner: "{{ download.owner | default(omit) }}"
@@ -23,7 +24,7 @@
state: directory
recurse: yes
- name: download_file | Create local cache directory
- name: Download_file | Create local cache directory
file:
path: "{{ file_path_cached | dirname }}"
state: directory
@@ -38,7 +39,7 @@
tags:
- localhost
- name: download_file | Create cache directory on download_delegate host
- name: Download_file | Create cache directory on download_delegate host
file:
path: "{{ file_path_cached | dirname }}"
state: directory
@@ -52,7 +53,7 @@
# We check a number of mirrors that may hold the file and pick a working one at random
# This task will avoid logging it's parameters to not leak environment passwords in the log
- name: download_file | Validate mirrors
- name: Download_file | Validate mirrors
uri:
url: "{{ mirror }}"
method: HEAD
@@ -75,14 +76,14 @@
ignore_errors: true
# Ansible 2.9 requires we convert a generator to a list
- name: download_file | Get the list of working mirrors
- name: Download_file | Get the list of working mirrors
set_fact:
valid_mirror_urls: "{{ uri_result.results | selectattr('failed', 'eq', False) | map(attribute='mirror') | list }}"
delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}"
# This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
# This task will avoid logging it's parameters to not leak environment passwords in the log
- name: download_file | Download item
- name: Download_file | Download item
get_url:
url: "{{ valid_mirror_urls | random }}"
dest: "{{ file_path_cached if download_force_cache else download.dest }}"
@@ -104,7 +105,7 @@
environment: "{{ proxy_env }}"
no_log: "{{ not (unsafe_show_logs | bool) }}"
- name: download_file | Copy file back to ansible host file cache
- name: Download_file | Copy file back to ansible host file cache
ansible.posix.synchronize:
src: "{{ file_path_cached }}"
dest: "{{ file_path_cached }}"
@@ -115,7 +116,7 @@
- not download_localhost
- download_delegate == inventory_hostname
- name: download_file | Copy file from cache to nodes, if it is available
- name: Download_file | Copy file from cache to nodes, if it is available
ansible.posix.synchronize:
src: "{{ file_path_cached }}"
dest: "{{ download.dest }}"
@@ -128,7 +129,7 @@
when:
- download_force_cache
- name: download_file | Set mode and owner
- name: Download_file | Set mode and owner
file:
path: "{{ download.dest }}"
mode: "{{ download.mode | default(omit) }}"
@@ -136,7 +137,7 @@
when:
- download_force_cache
- name: "download_file | Extract file archives"
- name: "Download_file | Extract file archives"
include_tasks: "extract_file.yml"
tags:

View File

@@ -1,5 +1,5 @@
---
- name: extract_file | Unpacking archive
- name: Extract_file | Unpacking archive
unarchive:
src: "{{ download.dest }}"
dest: "{{ download.dest | dirname }}"

View File

@@ -1,5 +1,5 @@
---
- name: download | Prepare working directories and variables
- name: Download | Prepare working directories and variables
import_tasks: prep_download.yml
when:
- not skip_downloads | default(false)
@@ -7,7 +7,7 @@
- download
- upload
- name: download | Get kubeadm binary and list of required images
- name: Download | Get kubeadm binary and list of required images
include_tasks: prep_kubeadm_images.yml
when:
- not skip_downloads | default(false)
@@ -16,7 +16,7 @@
- download
- upload
- name: download | Download files / images
- name: Download | Download files / images
include_tasks: "{{ include_file }}"
loop: "{{ downloads | combine(kubeadm_images) | dict2items }}"
vars:

View File

@@ -1,11 +1,11 @@
---
- name: prep_download | Set a few facts
- name: Prep_download | Set a few facts
set_fact:
download_force_cache: "{{ true if download_run_once else download_force_cache }}"
tags:
- facts
- name: prep_download | On localhost, check if passwordless root is possible
- name: Prep_download | On localhost, check if passwordless root is possible
command: "true"
delegate_to: localhost
connection: local
@@ -20,7 +20,7 @@
- localhost
- asserts
- name: prep_download | On localhost, check if user has access to the container runtime without using sudo
- name: Prep_download | On localhost, check if user has access to the container runtime without using sudo
shell: "{{ image_info_command_on_localhost }}" # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
delegate_to: localhost
connection: local
@@ -35,7 +35,7 @@
- localhost
- asserts
- name: prep_download | Parse the outputs of the previous commands
- name: Prep_download | Parse the outputs of the previous commands
set_fact:
user_in_docker_group: "{{ not test_docker.failed }}"
user_can_become_root: "{{ not test_become.failed }}"
@@ -45,7 +45,7 @@
- localhost
- asserts
- name: prep_download | Check that local user is in group or can become root
- name: Prep_download | Check that local user is in group or can become root
assert:
that: "user_in_docker_group or user_can_become_root"
msg: >-
@@ -56,7 +56,7 @@
- localhost
- asserts
- name: prep_download | Register docker images info
- name: Prep_download | Register docker images info
shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
no_log: "{{ not (unsafe_show_logs | bool) }}"
register: docker_images
@@ -65,7 +65,7 @@
check_mode: no
when: download_container
- name: prep_download | Create staging directory on remote node
- name: Prep_download | Create staging directory on remote node
file:
path: "{{ local_release_dir }}/images"
state: directory
@@ -75,7 +75,7 @@
when:
- ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: prep_download | Create local cache for files and images on control node
- name: Prep_download | Create local cache for files and images on control node
file:
path: "{{ download_cache_dir }}/images"
state: directory

View File

@@ -1,12 +1,12 @@
---
- name: prep_kubeadm_images | Check kubeadm version matches kubernetes version
- name: Prep_kubeadm_images | Check kubeadm version matches kubernetes version
fail:
msg: "Kubeadm version {{ kubeadm_version }} does not match kubernetes {{ kube_version }}"
when:
- not skip_downloads | default(false)
- not kubeadm_version == downloads.kubeadm.version
- name: prep_kubeadm_images | Download kubeadm binary
- name: Prep_kubeadm_images | Download kubeadm binary
include_tasks: "download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.kubeadm) }}"
@@ -14,7 +14,7 @@
- not skip_downloads | default(false)
- downloads.kubeadm.enabled
- name: prep_kubeadm_images | Create kubeadm config
- name: Prep_kubeadm_images | Create kubeadm config
template:
src: "kubeadm-images.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
@@ -22,21 +22,21 @@
when:
- not skip_kubeadm_images | default(false)
- name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path
- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path
copy:
src: "{{ downloads.kubeadm.dest }}"
dest: "{{ bin_dir }}/kubeadm"
mode: 0755
remote_src: true
- name: prep_kubeadm_images | Set kubeadm binary permissions
- name: Prep_kubeadm_images | Set kubeadm binary permissions
file:
path: "{{ bin_dir }}/kubeadm"
mode: "0755"
state: file
- name: prep_kubeadm_images | Generate list of required images
shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns | pause'"
- name: Prep_kubeadm_images | Generate list of required images
shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
args:
executable: /bin/bash
register: kubeadm_images_raw
@@ -45,7 +45,7 @@
when:
- not skip_kubeadm_images | default(false)
- name: prep_kubeadm_images | Parse list of images
- name: Prep_kubeadm_images | Parse list of images
vars:
kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
set_fact:
@@ -63,7 +63,7 @@
when:
- not skip_kubeadm_images | default(false)
- name: prep_kubeadm_images | Convert list of images to dict for later use
- name: Prep_kubeadm_images | Convert list of images to dict for later use
set_fact:
kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
run_once: true

View File

@@ -1,22 +1,22 @@
---
- name: set_container_facts | Display the name of the image being processed
- name: Set_container_facts | Display the name of the image being processed
debug:
msg: "{{ download.repo }}"
- name: set_container_facts | Set if containers should be pulled by digest
- name: Set_container_facts | Set if containers should be pulled by digest
set_fact:
pull_by_digest: "{{ download.sha256 is defined and download.sha256 }}"
- name: set_container_facts | Define by what name to pull the image
- name: Set_container_facts | Define by what name to pull the image
set_fact:
image_reponame: >-
{%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}
- name: set_container_facts | Define file name of image
- name: Set_container_facts | Define file name of image
set_fact:
image_filename: "{{ image_reponame | regex_replace('/|\0|:', '_') }}.tar"
- name: set_container_facts | Define path of image
- name: Set_container_facts | Define path of image
set_fact:
image_path_cached: "{{ download_cache_dir }}/images/{{ image_filename }}"
image_path_final: "{{ local_release_dir }}/images/{{ image_filename }}"

View File

@@ -1,39 +1,40 @@
---
- name: restart etcd
- name: Restart etcd
command: /bin/true
notify:
- Backup etcd data
- etcd | reload systemd
- reload etcd
- wait for etcd up
- Etcd | reload systemd
- Reload etcd
- Wait for etcd up
- Cleanup etcd backups
- name: restart etcd-events
- name: Restart etcd-events
command: /bin/true
notify:
- etcd | reload systemd
- reload etcd-events
- wait for etcd-events up
- Etcd | reload systemd
- Reload etcd-events
- Wait for etcd-events up
- import_tasks: backup.yml
- name: Backup etcd
import_tasks: backup.yml
- name: etcd | reload systemd
- name: Etcd | reload systemd
systemd:
daemon_reload: true
- name: reload etcd
- name: Reload etcd
service:
name: etcd
state: restarted
when: is_etcd_master
- name: reload etcd-events
- name: Reload etcd-events
service:
name: etcd-events
state: restarted
when: is_etcd_master
- name: wait for etcd up
- name: Wait for etcd up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
validate_certs: no
@@ -44,9 +45,10 @@
retries: 60
delay: 1
- import_tasks: backup_cleanup.yml
- name: Cleanup etcd backups
import_tasks: backup_cleanup.yml
- name: wait for etcd-events up
- name: Wait for etcd-events up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
validate_certs: no
@@ -57,6 +59,6 @@
retries: 60
delay: 1
- name: set etcd_secret_changed
- name: Set etcd_secret_changed
set_fact:
etcd_secret_changed: true

View File

@@ -41,7 +41,8 @@
ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
- include_tasks: refresh_config.yml
- name: Configure | Refresh etcd config
include_tasks: refresh_config.yml
when: is_etcd_master
- name: Configure | Copy etcd.service systemd file

View File

@@ -56,7 +56,7 @@
run_once: yes
delegate_to: "{{ groups['etcd'][0] }}"
when: gen_certs | default(false)
notify: set etcd_secret_changed
notify: Set etcd_secret_changed
- name: Gen_certs | run cert generation script for all clients
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
@@ -73,7 +73,7 @@
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- gen_certs | default(false)
notify: set etcd_secret_changed
notify: Set etcd_secret_changed
- name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
slurp:
@@ -97,7 +97,7 @@
- inventory_hostname in groups['etcd']
- sync_certs | default(false)
- inventory_hostname != groups['etcd'][0]
notify: set etcd_secret_changed
notify: Set etcd_secret_changed
- name: Gen_certs | Write etcd member/admin and kube_control_plane client certs to other etcd nodes
copy:
@@ -129,7 +129,7 @@
- inventory_hostname != groups['etcd'][0]
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
notify: set etcd_secret_changed
notify: Set etcd_secret_changed
- name: Gen_certs | Write node certs to other etcd nodes
copy:
@@ -147,12 +147,14 @@
loop_control:
label: "{{ item.item }}"
- include_tasks: gen_nodes_certs_script.yml
- name: Gen_certs | Generate etcd certs
include_tasks: gen_nodes_certs_script.yml
when:
- inventory_hostname in groups['kube_control_plane'] and
sync_certs | default(false) and inventory_hostname not in groups['etcd']
- include_tasks: gen_nodes_certs_script.yml
- name: Gen_certs | Generate etcd certs on nodes if needed
include_tasks: gen_nodes_certs_script.yml
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"

View File

@@ -1,5 +1,7 @@
---
- import_tasks: install_etcdctl_docker.yml
- name: Install etcdctl from docker
import_tasks: install_etcdctl_docker.yml
when: etcd_cluster_setup
- name: Get currently-deployed etcd version
@@ -14,14 +16,14 @@
- name: Restart etcd if necessary
command: /bin/true
notify: restart etcd
notify: Restart etcd
when:
- etcd_cluster_setup
- etcd_image_tag not in etcd_current_docker_image.stdout | default('')
- name: Restart etcd-events if necessary
command: /bin/true
notify: restart etcd-events
notify: Restart etcd-events
when:
- etcd_events_cluster_setup
- etcd_image_tag not in etcd_events_current_docker_image.stdout | default('')

View File

@@ -8,19 +8,19 @@
- name: Restart etcd if necessary
command: /bin/true
notify: restart etcd
notify: Restart etcd
when:
- etcd_cluster_setup
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
- name: Restart etcd-events if necessary
command: /bin/true
notify: restart etcd-events
notify: Restart etcd-events
when:
- etcd_events_cluster_setup
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')
- name: install | Download etcd and etcdctl
- name: Install | Download etcd and etcdctl
include_tasks: "../../download/tasks/download_file.yml"
vars:
download: "{{ download_defaults | combine(downloads.etcd) }}"
@@ -29,7 +29,7 @@
- never
- etcd
- name: install | Copy etcd and etcdctl binary from download dir
- name: Install | Copy etcd and etcdctl binary from download dir
copy:
src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"

View File

@@ -12,7 +12,8 @@
ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"
- include_tasks: refresh_config.yml
- name: Join Member | Refresh etcd config
include_tasks: refresh_config.yml
vars:
# noqa: jinja[spacing]
etcd_events_peer_addresses: >-

View File

@@ -13,7 +13,8 @@
ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"
- include_tasks: refresh_config.yml
- name: Join Member | Refresh etcd config
include_tasks: refresh_config.yml
vars:
# noqa: jinja[spacing]
etcd_peer_addresses: >-

View File

@@ -1,23 +1,27 @@
---
- include_tasks: check_certs.yml
- name: Check etcd certs
include_tasks: check_certs.yml
when: cert_management == "script"
tags:
- etcd-secrets
- facts
- include_tasks: "gen_certs_script.yml"
- name: Generate etcd certs
include_tasks: "gen_certs_script.yml"
when:
- cert_management | d('script') == "script"
tags:
- etcd-secrets
- include_tasks: upd_ca_trust.yml
- name: Trust etcd CA
include_tasks: upd_ca_trust.yml
when:
- inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
tags:
- etcd-secrets
- include_tasks: upd_ca_trust.yml
- name: Trust etcd CA on nodes if needed
include_tasks: upd_ca_trust.yml
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
@@ -49,29 +53,33 @@
- master
- network
- include_tasks: "install_{{ etcd_deployment_type }}.yml"
- name: Install etcd
include_tasks: "install_{{ etcd_deployment_type }}.yml"
when: is_etcd_master
tags:
- upgrade
- include_tasks: configure.yml
- name: Configure etcd
include_tasks: configure.yml
when: is_etcd_master
- include_tasks: refresh_config.yml
- name: Refresh etcd config
include_tasks: refresh_config.yml
when: is_etcd_master
- name: Restart etcd if certs changed
command: /bin/true
notify: restart etcd
notify: Restart etcd
when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
- name: Restart etcd-events if certs changed
command: /bin/true
notify: restart etcd
notify: Restart etcd
when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
# After etcd cluster is assembled, make sure that
# initial state of the cluster is in `existing`
# state instead of `new`.
- include_tasks: refresh_config.yml
- name: Refresh etcd config again for idempotency
include_tasks: refresh_config.yml
when: is_etcd_master

View File

@@ -4,7 +4,7 @@
src: etcd.env.j2
dest: /etc/etcd.env
mode: 0640
notify: restart etcd
notify: Restart etcd
when: is_etcd_master and etcd_cluster_setup
- name: Refresh config | Create etcd-events config file
@@ -12,5 +12,5 @@
src: etcd-events.env.j2
dest: /etc/etcd-events.env
mode: 0640
notify: restart etcd-events
notify: Restart etcd-events
when: is_etcd_master and etcd_events_cluster_setup

View File

@@ -14,7 +14,8 @@
get_mime: no
register: stat_etcdctl
- block:
- name: Remove old etcd binary
block:
- name: Check version
command: "{{ bin_dir }}/etcdctl version"
register: etcdctl_version
@@ -36,7 +37,8 @@
get_mime: no
register: stat_etcdctl
- block:
- name: Copy etcdctl script to host
block:
- name: Copy etcdctl script to host
shell: "{{ docker_bin_dir }}/docker cp \"$({{ docker_bin_dir }}/docker ps -qf ancestor={{ etcd_image_repo }}:{{ etcd_image_tag }})\":/usr/local/bin/etcdctl {{ etcd_data_dir }}/etcdctl"
when: container_manager == "docker"

View File

@@ -1,6 +1,7 @@
---
- import_tasks: credentials-check.yml
- name: OCI Cloud Controller | Check Oracle Cloud credentials
import_tasks: credentials-check.yml
- name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
template:

View File

@@ -59,7 +59,8 @@
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- include_tasks: oci.yml
- name: Configure Oracle Cloud provider
include_tasks: oci.yml
tags: oci
when:
- cloud_provider is defined

View File

@@ -1,6 +1,6 @@
---
- name: crun | Copy runtime class manifest
- name: Crun | Copy runtime class manifest
template:
src: runtimeclass-crun.yml
dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
@@ -8,7 +8,7 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: crun | Apply manifests
- name: Crun | Apply manifests
kube:
name: "runtimeclass-crun"
kubectl: "{{ bin_dir }}/kubectl"

View File

@@ -1,5 +1,5 @@
---
- name: gVisor | Create addon dir
- name: GVisor | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/gvisor"
owner: root
@@ -7,12 +7,12 @@
mode: 0755
recurse: true
- name: gVisor | Templates List
- name: GVisor | Templates List
set_fact:
gvisor_templates:
- { name: runtimeclass-gvisor, file: runtimeclass-gvisor.yml, type: runtimeclass }
- name: gVisort | Create manifests
- name: GVisor | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/gvisor/{{ item.file }}"
@@ -22,7 +22,7 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: gVisor | Apply manifests
- name: GVisor | Apply manifests
kube:
name: "{{ item.item.name }}"
kubectl: "{{ bin_dir }}/kubectl"

View File

@@ -1,6 +1,6 @@
---
- name: youki | Copy runtime class manifest
- name: Youki | Copy runtime class manifest
template:
src: runtimeclass-youki.yml
dest: "{{ kube_config_dir }}/runtimeclass-youki.yml"
@@ -8,7 +8,7 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: youki | Apply manifests
- name: Youki | Apply manifests
kube:
name: "runtimeclass-youki"
kubectl: "{{ bin_dir }}/kubectl"

View File

@@ -1,5 +1,6 @@
---
- include_tasks: azure-credential-check.yml
- name: Azure CSI Driver | Check Azure credentials
include_tasks: azure-credential-check.yml
- name: Azure CSI Driver | Write Azure CSI cloud-config
template:

View File

@@ -1,5 +1,6 @@
---
- include_tasks: cinder-credential-check.yml
- name: Cinder CSI Driver | Check Cinder credentials
include_tasks: cinder-credential-check.yml
- name: Cinder CSI Driver | Write cacert file
include_tasks: cinder-write-cacert.yml

View File

@@ -1,7 +1,8 @@
---
- include_tasks: vsphere-credentials-check.yml
- name: VSphere CSI Driver | Check vsphere credentials
include_tasks: vsphere-credentials-check.yml
- name: vSphere CSI Driver | Generate CSI cloud-config
- name: VSphere CSI Driver | Generate CSI cloud-config
template:
src: "{{ item }}.j2"
dest: "{{ kube_config_dir }}/{{ item }}"
@@ -10,7 +11,7 @@
- vsphere-csi-cloud-config
when: inventory_hostname == groups['kube_control_plane'][0]
- name: vSphere CSI Driver | Generate Manifests
- name: VSphere CSI Driver | Generate Manifests
template:
src: "{{ item }}.j2"
dest: "{{ kube_config_dir }}/{{ item }}"
@@ -27,7 +28,7 @@
register: vsphere_csi_manifests
when: inventory_hostname == groups['kube_control_plane'][0]
- name: vSphere CSI Driver | Apply Manifests
- name: VSphere CSI Driver | Apply Manifests
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item }}"
@@ -40,13 +41,13 @@
loop_control:
label: "{{ item.item }}"
- name: vSphere CSI Driver | Generate a CSI secret manifest
- name: VSphere CSI Driver | Generate a CSI secret manifest
command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: "{{ not (unsafe_show_logs | bool) }}"
- name: vSphere CSI Driver | Apply a CSI secret manifest
- name: VSphere CSI Driver | Apply a CSI secret manifest
command:
cmd: "{{ kubectl }} apply -f -"
stdin: "{{ vsphere_csi_secret_manifest.stdout }}"

View File

@@ -1,5 +1,6 @@
---
- include_tasks: openstack-credential-check.yml
- name: External OpenStack Cloud Controller | Check OpenStack credentials
include_tasks: openstack-credential-check.yml
tags: external-openstack
- name: External OpenStack Cloud Controller | Get base64 cacert

View File

@@ -1,5 +1,6 @@
---
- include_tasks: vsphere-credentials-check.yml
- name: External vSphere Cloud Controller | Check vsphere credentials
include_tasks: vsphere-credentials-check.yml
- name: External vSphere Cloud Controller | Generate CPI cloud-config
template:

View File

@@ -1,6 +1,6 @@
---
- name: kube-router | Start Resources
- name: Kube-router | Start Resources
kube:
name: "kube-router"
kubectl: "{{ bin_dir }}/kubectl"
@@ -11,7 +11,7 @@
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true
- name: kube-router | Wait for kube-router pods to be ready
- name: Kube-router | Wait for kube-router pods to be ready
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors
register: pods_not_ready
until: pods_not_ready.stdout.find("kube-router")==-1

View File

@@ -1,5 +1,5 @@
---
- name: check if snapshot namespace exists
- name: Check if snapshot namespace exists
register: snapshot_namespace_exists
kube:
kubectl: "{{ bin_dir }}/kubectl"

View File

@@ -100,7 +100,7 @@
run_once: yes
when: kubectl_localhost
- name: create helper script kubectl.sh on ansible host
- name: Create helper script kubectl.sh on ansible host
copy:
content: |
#!/bin/bash

View File

@@ -47,7 +47,7 @@
timeout: 180
- name: check already run
- name: Check already run
debug:
msg: "{{ kubeadm_already_run.stat.exists }}"

View File

@@ -10,7 +10,7 @@
- kube_oidc_auth
- kube_oidc_ca_cert is defined
- name: kubeadm | Check if kubeadm has already run
- name: Kubeadm | Check if kubeadm has already run
stat:
path: "/var/lib/kubelet/config.yaml"
get_attributes: no
@@ -18,12 +18,12 @@
get_mime: no
register: kubeadm_already_run
- name: kubeadm | Backup kubeadm certs / kubeconfig
- name: Kubeadm | Backup kubeadm certs / kubeconfig
import_tasks: kubeadm-backup.yml
when:
- kubeadm_already_run.stat.exists
- name: kubeadm | aggregate all SANs
- name: Kubeadm | aggregate all SANs
set_fact:
apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
vars:
@@ -69,7 +69,7 @@
when: kubernetes_audit_webhook | default(false)
# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
- name: set kubeadm_config_api_fqdn define
- name: Set kubeadm_config_api_fqdn define
set_fact:
kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}"
when: loadbalancer_apiserver is defined
@@ -78,27 +78,27 @@
set_fact:
kubeadmConfig_api_version: v1beta3
- name: kubeadm | Create kubeadm config
- name: Kubeadm | Create kubeadm config
template:
src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
mode: 0640
- name: kubeadm | Create directory to store admission control configurations
- name: Kubeadm | Create directory to store admission control configurations
file:
path: "{{ kube_config_dir }}/admission-controls"
state: directory
mode: 0640
when: kube_apiserver_admission_control_config_file
- name: kubeadm | Push admission control config file
- name: Kubeadm | Push admission control config file
template:
src: "admission-controls.yaml.j2"
dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml"
mode: 0640
when: kube_apiserver_admission_control_config_file
- name: kubeadm | Push admission control config files
- name: Kubeadm | Push admission control config files
template:
src: "{{ item | lower }}.yaml.j2"
dest: "{{ kube_config_dir }}/admission-controls/{{ item | lower }}.yaml"
@@ -108,15 +108,15 @@
- item in kube_apiserver_admission_plugins_needs_configuration
loop: "{{ kube_apiserver_enable_admission_plugins }}"
- name: kubeadm | Check apiserver.crt SANs
- name: Kubeadm | Check apiserver.crt SANs
block:
- name: kubeadm | Check apiserver.crt SAN IPs
- name: Kubeadm | Check apiserver.crt SAN IPs
command:
cmd: "openssl x509 -noout -in {{ kube_cert_dir }}/apiserver.crt -checkip {{ item }}"
loop: "{{ apiserver_ips }}"
register: apiserver_sans_ip_check
changed_when: apiserver_sans_ip_check.stdout is not search('does match certificate')
- name: kubeadm | Check apiserver.crt SAN hosts
- name: Kubeadm | Check apiserver.crt SAN hosts
command:
cmd: "openssl x509 -noout -in {{ kube_cert_dir }}/apiserver.crt -checkhost {{ item }}"
loop: "{{ apiserver_hosts }}"
@@ -129,7 +129,7 @@
- kubeadm_already_run.stat.exists
- not kube_external_ca_mode
- name: kubeadm | regenerate apiserver cert 1/2
- name: Kubeadm | regenerate apiserver cert 1/2
file:
state: absent
path: "{{ kube_cert_dir }}/{{ item }}"
@@ -141,7 +141,7 @@
- apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed
- not kube_external_ca_mode
- name: kubeadm | regenerate apiserver cert 2/2
- name: Kubeadm | regenerate apiserver cert 2/2
command: >-
{{ bin_dir }}/kubeadm
init phase certs apiserver
@@ -151,14 +151,14 @@
- apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed
- not kube_external_ca_mode
- name: kubeadm | Create directory to store kubeadm patches
- name: Kubeadm | Create directory to store kubeadm patches
file:
path: "{{ kubeadm_patches.dest_dir }}"
state: directory
mode: 0640
when: kubeadm_patches is defined and kubeadm_patches.enabled
- name: kubeadm | Copy kubeadm patches from inventory files
- name: Kubeadm | Copy kubeadm patches from inventory files
copy:
src: "{{ kubeadm_patches.source_dir }}/"
dest: "{{ kubeadm_patches.dest_dir }}"
@@ -166,7 +166,7 @@
mode: 0644
when: kubeadm_patches is defined and kubeadm_patches.enabled
- name: kubeadm | Initialize first master
- name: Kubeadm | Initialize first master
command: >-
timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
{{ bin_dir }}/kubeadm init
@@ -184,7 +184,7 @@
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
- name: set kubeadm certificate key
- name: Set kubeadm certificate key
set_fact:
kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}"
with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
@@ -229,17 +229,17 @@
- podsecuritypolicy_enabled
- inventory_hostname == first_kube_control_plane
- name: kubeadm | Join other masters
- name: Kubeadm | Join other masters
include_tasks: kubeadm-secondary.yml
- name: kubeadm | upgrade kubernetes cluster
- name: Kubeadm | upgrade kubernetes cluster
include_tasks: kubeadm-upgrade.yml
when:
- upgrade_cluster_setup
- kubeadm_already_run.stat.exists
# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: kubeadm | Remove taint for master with node role
- name: Kubeadm | Remove taint for master with node role
command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
delegate_to: "{{ first_kube_control_plane }}"
with_items:

View File

@@ -1,5 +1,5 @@
---
- name: kubeadm | Check api is up
- name: Kubeadm | Check api is up
uri:
url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
validate_certs: false
@@ -9,7 +9,7 @@
delay: 5
until: _result.status == 200
- name: kubeadm | Upgrade first master
- name: Kubeadm | Upgrade first master
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm
@@ -31,7 +31,7 @@
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
- name: kubeadm | Upgrade other masters
- name: Kubeadm | Upgrade other masters
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm
@@ -53,7 +53,7 @@
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
- name: kubeadm | clean kubectl cache to refresh api types
- name: Kubeadm | clean kubectl cache to refresh api types
file:
path: "{{ item }}"
state: absent
@@ -62,7 +62,7 @@
- /root/.kube/http-cache
# FIXME: https://github.com/kubernetes/kubeadm/issues/1318
- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
- name: Kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
command: >-
{{ kubectl }}
-n kube-system

View File

@@ -1,5 +1,6 @@
---
- import_tasks: pre-upgrade.yml
- name: Pre-upgrade control plane
import_tasks: pre-upgrade.yml
tags:
- k8s-pre-upgrade
@@ -23,7 +24,8 @@
dest: "{{ kube_config_dir }}/kubescheduler-config.yaml"
mode: 0644
- import_tasks: encrypt-at-rest.yml
- name: Apply Kubernetes encrypt at rest config
import_tasks: encrypt-at-rest.yml
when:
- kube_encrypt_secret_data

View File

@@ -65,14 +65,14 @@
mode: 0640
when: not is_kube_master
- name: kubeadm | Create directory to store kubeadm patches
- name: Kubeadm | Create directory to store kubeadm patches
file:
path: "{{ kubeadm_patches.dest_dir }}"
state: directory
mode: 0640
when: kubeadm_patches is defined and kubeadm_patches.enabled
- name: kubeadm | Copy kubeadm patches from inventory files
- name: Kubeadm | Copy kubeadm patches from inventory files
copy:
src: "{{ kubeadm_patches.source_dir }}/"
dest: "{{ kubeadm_patches.dest_dir }}"

View File

@@ -1,82 +1,82 @@
---
- name: check azure_tenant_id value
- name: Check azure_tenant_id value
fail:
msg: "azure_tenant_id is missing"
when: azure_tenant_id is not defined or not azure_tenant_id
- name: check azure_subscription_id value
- name: Check azure_subscription_id value
fail:
msg: "azure_subscription_id is missing"
when: azure_subscription_id is not defined or not azure_subscription_id
- name: check azure_aad_client_id value
- name: Check azure_aad_client_id value
fail:
msg: "azure_aad_client_id is missing"
when: azure_aad_client_id is not defined or not azure_aad_client_id
- name: check azure_aad_client_secret value
- name: Check azure_aad_client_secret value
fail:
msg: "azure_aad_client_secret is missing"
when: azure_aad_client_secret is not defined or not azure_aad_client_secret
- name: check azure_resource_group value
- name: Check azure_resource_group value
fail:
msg: "azure_resource_group is missing"
when: azure_resource_group is not defined or not azure_resource_group
- name: check azure_location value
- name: Check azure_location value
fail:
msg: "azure_location is missing"
when: azure_location is not defined or not azure_location
- name: check azure_subnet_name value
- name: Check azure_subnet_name value
fail:
msg: "azure_subnet_name is missing"
when: azure_subnet_name is not defined or not azure_subnet_name
- name: check azure_security_group_name value
- name: Check azure_security_group_name value
fail:
msg: "azure_security_group_name is missing"
when: azure_security_group_name is not defined or not azure_security_group_name
- name: check azure_vnet_name value
- name: Check azure_vnet_name value
fail:
msg: "azure_vnet_name is missing"
when: azure_vnet_name is not defined or not azure_vnet_name
- name: check azure_vnet_resource_group value
- name: Check azure_vnet_resource_group value
fail:
msg: "azure_vnet_resource_group is missing"
when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group
- name: check azure_route_table_name value
- name: Check azure_route_table_name value
fail:
msg: "azure_route_table_name is missing"
when: azure_route_table_name is not defined or not azure_route_table_name
- name: check azure_loadbalancer_sku value
- name: Check azure_loadbalancer_sku value
fail:
msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
when: azure_loadbalancer_sku not in ["basic", "standard"]
- name: "check azure_exclude_master_from_standard_lb is a bool"
- name: "Check azure_exclude_master_from_standard_lb is a bool"
assert:
that: azure_exclude_master_from_standard_lb | type_debug == 'bool'
- name: "check azure_disable_outbound_snat is a bool"
- name: "Check azure_disable_outbound_snat is a bool"
assert:
that: azure_disable_outbound_snat | type_debug == 'bool'
- name: "check azure_use_instance_metadata is a bool"
- name: "Check azure_use_instance_metadata is a bool"
assert:
that: azure_use_instance_metadata | type_debug == 'bool'
- name: check azure_vmtype value
- name: Check azure_vmtype value
fail:
msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'"
when: azure_vmtype is not defined or not azure_vmtype
- name: check azure_cloud value
- name: Check azure_cloud value
fail:
msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'."
when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"]

View File

@@ -1,32 +1,32 @@
---
- name: check openstack_auth_url value
- name: Check openstack_auth_url value
fail:
msg: "openstack_auth_url is missing"
when: openstack_auth_url is not defined or not openstack_auth_url
- name: check openstack_username value
- name: Check openstack_username value
fail:
msg: "openstack_username is missing"
when: openstack_username is not defined or not openstack_username
- name: check openstack_password value
- name: Check openstack_password value
fail:
msg: "openstack_password is missing"
when: openstack_password is not defined or not openstack_password
- name: check openstack_region value
- name: Check openstack_region value
fail:
msg: "openstack_region is missing"
when: openstack_region is not defined or not openstack_region
- name: check openstack_tenant_id value
- name: Check openstack_tenant_id value
fail:
msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
when:
- openstack_tenant_id is not defined or not openstack_tenant_id
- openstack_trust_id is not defined
- name: check openstack_trust_id value
- name: Check openstack_trust_id value
fail:
msg: "one of openstack_tenant_id or openstack_trust_id must be specified"
when:

View File

@@ -1,5 +1,5 @@
---
- name: check vsphere environment variables
- name: Check vsphere environment variables
fail:
msg: "{{ item.name }} is missing"
when: item.value is not defined or not item.value

View File

@@ -1,6 +1,7 @@
---
- block:
- name: look up docker cgroup driver
- name: Gather cgroups facts for docker
block:
- name: Look up docker cgroup driver
shell: "set -o pipefail && docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'"
args:
executable: /bin/bash
@@ -8,47 +9,48 @@
changed_when: false
check_mode: no
- name: set kubelet_cgroup_driver_detected fact for docker
- name: Set kubelet_cgroup_driver_detected fact for docker
set_fact:
kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}"
when: container_manager == 'docker'
- block:
- name: look up crio cgroup driver
- name: Gather cgroups facts for crio
block:
- name: Look up crio cgroup driver
shell: "set -o pipefail && {{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'"
args:
executable: /bin/bash
register: crio_cgroup_driver_result
changed_when: false
- name: set kubelet_cgroup_driver_detected fact for crio
- name: Set kubelet_cgroup_driver_detected fact for crio
set_fact:
kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}"
when: container_manager == 'crio'
- name: set kubelet_cgroup_driver_detected fact for containerd
- name: Set kubelet_cgroup_driver_detected fact for containerd
set_fact:
kubelet_cgroup_driver_detected: >-
{%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%}
when: container_manager == 'containerd'
- name: set kubelet_cgroup_driver
- name: Set kubelet_cgroup_driver
set_fact:
kubelet_cgroup_driver: "{{ kubelet_cgroup_driver_detected }}"
when: kubelet_cgroup_driver is undefined
- name: set kubelet_cgroups options when cgroupfs is used
- name: Set kubelet_cgroups options when cgroupfs is used
set_fact:
kubelet_runtime_cgroups: "{{ kubelet_runtime_cgroups_cgroupfs }}"
kubelet_kubelet_cgroups: "{{ kubelet_kubelet_cgroups_cgroupfs }}"
when: kubelet_cgroup_driver == 'cgroupfs'
- name: set kubelet_config_extra_args options when cgroupfs is used
- name: Set kubelet_config_extra_args options when cgroupfs is used
set_fact:
kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}"
when: kubelet_cgroup_driver == 'cgroupfs'
- name: os specific vars
- name: Os specific vars
include_vars: "{{ item }}"
with_first_found:
- files:

View File

@@ -1,5 +1,5 @@
---
- name: install | Copy kubeadm binary from download dir
- name: Install | Copy kubeadm binary from download dir
copy:
src: "{{ downloads.kubeadm.dest }}"
dest: "{{ bin_dir }}/kubeadm"
@@ -10,7 +10,7 @@
when:
- not inventory_hostname in groups['kube_control_plane']
- name: install | Copy kubelet binary from download dir
- name: Install | Copy kubelet binary from download dir
copy:
src: "{{ downloads.kubelet.dest }}"
dest: "{{ bin_dir }}/kubelet"

View File

@@ -39,7 +39,7 @@
- kubelet
- kubeadm
- name: flush_handlers and reload-systemd
- name: Flush_handlers and reload-systemd
meta: flush_handlers
- name: Enable kubelet

View File

@@ -1,17 +1,17 @@
---
- name: haproxy | Cleanup potentially deployed nginx-proxy
- name: Haproxy | Cleanup potentially deployed nginx-proxy
file:
path: "{{ kube_manifest_dir }}/nginx-proxy.yml"
state: absent
- name: haproxy | Make haproxy directory
- name: Haproxy | Make haproxy directory
file:
path: "{{ haproxy_config_dir }}"
state: directory
mode: 0755
owner: root
- name: haproxy | Write haproxy configuration
- name: Haproxy | Write haproxy configuration
template:
src: "loadbalancer/haproxy.cfg.j2"
dest: "{{ haproxy_config_dir }}/haproxy.cfg"
@@ -19,7 +19,7 @@
mode: 0755
backup: yes
- name: haproxy | Get checksum from config
- name: Haproxy | Get checksum from config
stat:
path: "{{ haproxy_config_dir }}/haproxy.cfg"
get_attributes: no
@@ -27,7 +27,7 @@
get_mime: no
register: haproxy_stat
- name: haproxy | Write static pod
- name: Haproxy | Write static pod
template:
src: manifests/haproxy.manifest.j2
dest: "{{ kube_manifest_dir }}/haproxy.yml"

View File

@@ -1,12 +1,12 @@
---
- name: kube-vip | Check cluster settings for kube-vip
- name: Kube-vip | Check cluster settings for kube-vip
fail:
msg: "kube-vip require kube_proxy_strict_arp = true, see https://github.com/kube-vip/kube-vip/blob/main/docs/kubernetes/arp/index.md"
when:
- kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp
- kube_vip_arp_enabled
- name: kube-vip | Write static pod
- name: Kube-vip | Write static pod
template:
src: manifests/kube-vip.manifest.j2
dest: "{{ kube_manifest_dir }}/kube-vip.yml"

View File

@@ -1,17 +1,17 @@
---
- name: haproxy | Cleanup potentially deployed haproxy
- name: Haproxy | Cleanup potentially deployed haproxy
file:
path: "{{ kube_manifest_dir }}/haproxy.yml"
state: absent
- name: nginx-proxy | Make nginx directory
- name: Nginx-proxy | Make nginx directory
file:
path: "{{ nginx_config_dir }}"
state: directory
mode: 0700
owner: root
- name: nginx-proxy | Write nginx-proxy configuration
- name: Nginx-proxy | Write nginx-proxy configuration
template:
src: "loadbalancer/nginx.conf.j2"
dest: "{{ nginx_config_dir }}/nginx.conf"
@@ -19,7 +19,7 @@
mode: 0755
backup: yes
- name: nginx-proxy | Get checksum from config
- name: Nginx-proxy | Get checksum from config
stat:
path: "{{ nginx_config_dir }}/nginx.conf"
get_attributes: no
@@ -27,7 +27,7 @@
get_mime: no
register: nginx_stat
- name: nginx-proxy | Write static pod
- name: Nginx-proxy | Write static pod
template:
src: manifests/nginx-proxy.manifest.j2
dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"

View File

@@ -1,9 +1,11 @@
---
- import_tasks: facts.yml
- name: Fetch facts
import_tasks: facts.yml
tags:
- facts
- import_tasks: pre_upgrade.yml
- name: Pre-upgrade kubelet
import_tasks: pre_upgrade.yml
tags:
- kubelet
@@ -13,18 +15,21 @@
state: directory
mode: 0755
- import_tasks: install.yml
- name: Install kubelet binary
import_tasks: install.yml
tags:
- kubelet
- import_tasks: loadbalancer/kube-vip.yml
- name: Install kube-vip
import_tasks: loadbalancer/kube-vip.yml
when:
- is_kube_master
- kube_vip_enabled
tags:
- kube-vip
- import_tasks: loadbalancer/nginx-proxy.yml
- name: Install nginx-proxy
import_tasks: loadbalancer/nginx-proxy.yml
when:
- not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
- loadbalancer_apiserver_localhost
@@ -32,7 +37,8 @@
tags:
- nginx
- import_tasks: loadbalancer/haproxy.yml
- name: Install haproxy
import_tasks: loadbalancer/haproxy.yml
when:
- not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
- loadbalancer_apiserver_localhost
@@ -141,7 +147,8 @@
tags:
- kube-proxy
- include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml"
- name: Check cloud provider credentials
include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml"
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
@@ -187,7 +194,8 @@
tags:
- cloud-provider
- import_tasks: kubelet.yml
- name: Install kubelet
import_tasks: kubelet.yml
tags:
- kubelet
- kubeadm

View File

@@ -9,7 +9,7 @@
- none
# kubelet fails even if ansible_swaptotal_mb = 0
- name: check swap
- name: Check swap
command: /sbin/swapon -s
register: swapon
changed_when: no

View File

@@ -21,7 +21,7 @@
tags:
- facts
- name: check if booted with ostree
- name: Check if booted with ostree
stat:
path: /run/ostree-booted
get_attributes: no
@@ -29,7 +29,7 @@
get_mime: no
register: ostree
- name: set is_fedora_coreos
- name: Set is_fedora_coreos
lineinfile:
path: /etc/os-release
line: "VARIANT_ID=coreos"
@@ -38,18 +38,18 @@
register: os_variant_coreos
changed_when: false
- name: set is_fedora_coreos
- name: Set is_fedora_coreos
set_fact:
is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
- name: check resolvconf
- name: Check resolvconf
command: which resolvconf
register: resolvconf
failed_when: false
changed_when: false
check_mode: no
- name: check existence of /etc/resolvconf/resolv.conf.d
- name: Check existence of /etc/resolvconf/resolv.conf.d
stat:
path: /etc/resolvconf/resolv.conf.d
get_attributes: no
@@ -58,7 +58,7 @@
failed_when: false
register: resolvconfd_path
- name: check status of /etc/resolv.conf
- name: Check status of /etc/resolv.conf
stat:
path: /etc/resolv.conf
follow: no
@@ -68,14 +68,15 @@
failed_when: false
register: resolvconf_stat
- block:
- name: Fetch resolconf
block:
- name: get content of /etc/resolv.conf
- name: Get content of /etc/resolv.conf
slurp:
src: /etc/resolv.conf
register: resolvconf_slurp
- name: get currently configured nameservers
- name: Get currently configured nameservers
set_fact:
configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}"
when: resolvconf_slurp.content is defined
@@ -100,7 +101,7 @@
changed_when: false
check_mode: false
- name: check systemd-resolved
- name: Check systemd-resolved
# noqa command-instead-of-module - Should we use service_facts for this?
command: systemctl is-active systemd-resolved
register: systemd_resolved_enabled
@@ -108,12 +109,12 @@
changed_when: false
check_mode: no
- name: set default dns if remove_default_searchdomains is false
- name: Set default dns if remove_default_searchdomains is false
set_fact:
default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | default() | bool and searchdomains | default([]) | length==0)
- name: set dns facts
- name: Set dns facts
set_fact:
resolvconf: >-
{%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%}
@@ -125,7 +126,7 @@
['169.254.169.253'] if cloud_provider is defined and cloud_provider == 'aws' else
[] }}"
- name: check if kubelet is configured
- name: Check if kubelet is configured
stat:
path: "{{ kube_config_dir }}/kubelet.env"
get_attributes: no
@@ -134,11 +135,11 @@
register: kubelet_configured
changed_when: false
- name: check if early DNS configuration stage
- name: Check if early DNS configuration stage
set_fact:
dns_early: "{{ not kubelet_configured.stat.exists }}"
- name: target resolv.conf files
- name: Target resolv.conf files
set_fact:
resolvconffile: /etc/resolv.conf
base: >-
@@ -147,12 +148,12 @@
{%- if resolvconf | bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%}
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
- name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS)
- name: Target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS)
set_fact:
resolvconffile: /tmp/resolveconf_cloud_init_conf
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] or is_fedora_coreos
- name: check if /etc/dhclient.conf exists
- name: Check if /etc/dhclient.conf exists
stat:
path: /etc/dhclient.conf
get_attributes: no
@@ -160,12 +161,12 @@
get_mime: no
register: dhclient_stat
- name: target dhclient conf file for /etc/dhclient.conf
- name: Target dhclient conf file for /etc/dhclient.conf
set_fact:
dhclientconffile: /etc/dhclient.conf
when: dhclient_stat.stat.exists
- name: check if /etc/dhcp/dhclient.conf exists
- name: Check if /etc/dhcp/dhclient.conf exists
stat:
path: /etc/dhcp/dhclient.conf
get_attributes: no
@@ -173,22 +174,22 @@
get_mime: no
register: dhcp_dhclient_stat
- name: target dhclient conf file for /etc/dhcp/dhclient.conf
- name: Target dhclient conf file for /etc/dhcp/dhclient.conf
set_fact:
dhclientconffile: /etc/dhcp/dhclient.conf
when: dhcp_dhclient_stat.stat.exists
- name: target dhclient hook file for Red Hat family
- name: Target dhclient hook file for Red Hat family
set_fact:
dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh
when: ansible_os_family == "RedHat"
- name: target dhclient hook file for Debian family
- name: Target dhclient hook file for Debian family
set_fact:
dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
when: ansible_os_family == "Debian"
- name: generate search domains to resolvconf
- name: Generate search domains to resolvconf
set_fact:
searchentries:
search {{ (default_searchdomains | default([]) + searchdomains | default([])) | join(' ') }}
@@ -199,7 +200,7 @@
supersede_domain:
supersede domain-name "{{ dns_domain }}";
- name: pick coredns cluster IP or default resolver
- name: Pick coredns cluster IP or default resolver
set_fact:
coredns_server: |-
{%- if dns_mode == 'coredns' and not dns_early | bool -%}
@@ -215,7 +216,7 @@
{%- endif -%}
# This task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout
- name: generate nameservers for resolvconf, including cluster DNS
- name: Generate nameservers for resolvconf, including cluster DNS
set_fact:
nameserverentries: |-
{{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server | d([]) if not enable_nodelocaldns else []) + nameservers | d([]) + cloud_resolver | d([]) + (configured_nameservers | d([]) if not disable_host_nameservers | d() | bool else [])) | unique | join(',') }}
@@ -225,7 +226,7 @@
# This task should run instead of the above task when cluster/nodelocal DNS hasn't
# been deployed yet (like scale.yml/cluster.yml) or when it's down (reset.yml)
- name: generate nameservers for resolvconf, not including cluster DNS
- name: Generate nameservers for resolvconf, not including cluster DNS
set_fact:
nameserverentries: |-
{{ (nameservers | d([]) + cloud_resolver | d([]) + configured_nameservers | d([])) | unique | join(',') }}
@@ -233,7 +234,7 @@
supersede domain-name-servers {{ (nameservers | d([]) + cloud_resolver | d([])) | unique | join(', ') }};
when: dns_early and not dns_late
- name: gather os specific variables
- name: Gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
@@ -247,7 +248,7 @@
- ../vars
skip: true
- name: set etcd vars if using kubeadm mode
- name: Set etcd vars if using kubeadm mode
set_fact:
etcd_cert_dir: "{{ kube_cert_dir }}"
kube_etcd_cacert_file: "etcd/ca.crt"
@@ -256,7 +257,7 @@
when:
- etcd_deployment_type == "kubeadm"
- name: check /usr readonly
- name: Check /usr readonly
stat:
path: "/usr"
get_attributes: no
@@ -264,7 +265,7 @@
get_mime: no
register: usr
- name: set alternate flexvolume path
- name: Set alternate flexvolume path
set_fact:
kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volumeplugins
when: not usr.stat.writeable

View File

@@ -152,7 +152,7 @@
msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
when: not ignore_assert_errors
- name: check cloud_provider value
- name: Check cloud_provider value
assert:
that: cloud_provider in ['gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external']
msg: "If set the 'cloud_provider' var must be set either to 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci' or 'external'"

View File

@@ -1,5 +1,5 @@
---
- name: create temporary resolveconf cloud init file
- name: Create temporary resolveconf cloud init file
command: cp -f /etc/resolv.conf "{{ resolvconffile }}"
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
@@ -43,12 +43,12 @@
- [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ]
notify: Preinstall | propagate resolvconf to k8s components
- name: get temporary resolveconf cloud init file content
- name: Get temporary resolveconf cloud init file content
command: cat {{ resolvconffile }}
register: cloud_config
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: persist resolvconf cloud init file
- name: Persist resolvconf cloud init file
template:
dest: "{{ resolveconf_cloud_init_conf }}"
src: resolvconf.j2

View File

@@ -9,7 +9,7 @@
backup: yes
notify: Preinstall | update resolvconf for networkmanager
- name: set default dns if remove_default_searchdomains is false
- name: Set default dns if remove_default_searchdomains is false
set_fact:
default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"]
when: not remove_default_searchdomains | default() | bool or (remove_default_searchdomains | default() | bool and searchdomains | default([]) | length==0)

View File

@@ -9,7 +9,8 @@
- ansible_pkg_mgr == 'zypper'
tags: bootstrap-os
- block:
- name: Add debian 10 required repos
block:
- name: Add Debian Backports apt repo
apt_repository:
repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main"

View File

@@ -2,7 +2,7 @@
# Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time
- name: install growpart
- name: Install growpart
package:
name: cloud-utils-growpart
state: present
@@ -20,7 +20,7 @@
partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}"
root_device: "{{ _root_device }}"
- name: check if growpart needs to be run
- name: Check if growpart needs to be run
command: growpart -N {{ device }} {{ partition }}
failed_when: False
changed_when: "'NOCHANGE:' not in growpart_needed.stdout"
@@ -28,17 +28,17 @@
environment:
LC_ALL: C
- name: check fs type
- name: Check fs type
command: file -Ls {{ root_device }}
changed_when: False
register: fs_type
- name: run growpart # noqa no-handler
- name: Run growpart # noqa no-handler
command: growpart {{ device }} {{ partition }}
when: growpart_needed.changed
environment:
LC_ALL: C
- name: run xfs_growfs # noqa no-handler
- name: Run xfs_growfs # noqa no-handler
command: xfs_growfs {{ root_device }}
when: growpart_needed.changed and 'XFS' in fs_type.stdout

View File

@@ -1,26 +1,31 @@
---
# Disable swap
- import_tasks: 0010-swapoff.yml
- name: Disable swap
import_tasks: 0010-swapoff.yml
when:
- not dns_late
- kubelet_fail_swap_on
- import_tasks: 0020-set_facts.yml
- name: Set facts
import_tasks: 0020-set_facts.yml
tags:
- resolvconf
- facts
- import_tasks: 0040-verify-settings.yml
- name: Check settings
import_tasks: 0040-verify-settings.yml
when:
- not dns_late
tags:
- asserts
- import_tasks: 0050-create_directories.yml
- name: Create directories
import_tasks: 0050-create_directories.yml
when:
- not dns_late
- import_tasks: 0060-resolvconf.yml
- name: Apply resolvconf settings
import_tasks: 0060-resolvconf.yml
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
@@ -30,7 +35,8 @@
- bootstrap-os
- resolvconf
- import_tasks: 0061-systemd-resolved.yml
- name: Apply systemd-resolved settings
import_tasks: 0061-systemd-resolved.yml
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
@@ -39,13 +45,15 @@
- bootstrap-os
- resolvconf
- import_tasks: 0062-networkmanager-unmanaged-devices.yml
- name: Apply networkmanager unmanaged devices settings
import_tasks: 0062-networkmanager-unmanaged-devices.yml
when:
- networkmanager_enabled.rc == 0
tags:
- bootstrap-os
- import_tasks: 0063-networkmanager-dns.yml
- name: Apply networkmanager DNS settings
import_tasks: 0063-networkmanager-dns.yml
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
@@ -54,31 +62,36 @@
- bootstrap-os
- resolvconf
- import_tasks: 0070-system-packages.yml
- name: Install required system packages
import_tasks: 0070-system-packages.yml
when:
- not dns_late
tags:
- bootstrap-os
- import_tasks: 0080-system-configurations.yml
- name: Apply system configurations
import_tasks: 0080-system-configurations.yml
when:
- not dns_late
tags:
- bootstrap-os
- import_tasks: 0081-ntp-configurations.yml
- name: Configure NTP
import_tasks: 0081-ntp-configurations.yml
when:
- not dns_late
- ntp_enabled
tags:
- bootstrap-os
- import_tasks: 0090-etchosts.yml
- name: Configure /etc/hosts
import_tasks: 0090-etchosts.yml
tags:
- bootstrap-os
- etchosts
- import_tasks: 0100-dhclient-hooks.yml
- name: Configure dhclient
import_tasks: 0100-dhclient-hooks.yml
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
@@ -88,7 +101,8 @@
- bootstrap-os
- resolvconf
- import_tasks: 0110-dhclient-hooks-undo.yml
- name: Configure dhclient dhclient hooks
import_tasks: 0110-dhclient-hooks-undo.yml
when:
- dns_mode != 'none'
- resolvconf_mode != 'host_resolvconf'
@@ -115,7 +129,8 @@
tags:
- bootstrap-os
- import_tasks: 0120-growpart-azure-centos-7.yml
- name: Grow partition on azure CentOS
import_tasks: 0120-growpart-azure-centos-7.yml
when:
- not dns_late
- azure_check.stat.exists

View File

@@ -1,6 +1,7 @@
---
- import_tasks: check-tokens.yml
- name: Check tokens
import_tasks: check-tokens.yml
tags:
- k8s-secrets
- k8s-gen-tokens
@@ -13,7 +14,8 @@
mode: 0644
group: "{{ kube_cert_group }}"
- import_tasks: gen_tokens.yml
- name: Generate tokens
import_tasks: gen_tokens.yml
tags:
- k8s-secrets
- k8s-gen-tokens

View File

@@ -14,7 +14,7 @@
run_once: yes
tags: always
- name: create fallback_ips_base
- name: Create fallback_ips_base
set_fact:
fallback_ips_base: |
---
@@ -28,6 +28,6 @@
become: no
run_once: yes
- name: set fallback_ips
- name: Set fallback_ips
set_fact:
fallback_ips: "{{ hostvars.localhost.fallback_ips_base | from_yaml }}"

View File

@@ -6,7 +6,7 @@
- always
# do not run gather facts when bootstrap-os in roles
- name: set fallback_ips
- name: Set fallback_ips
import_tasks: fallback_ips.yml
when:
- "'bootstrap-os' not in ansible_play_role_names"
@@ -14,7 +14,7 @@
tags:
- always
- name: set no_proxy
- name: Set no_proxy
import_tasks: no_proxy.yml
when:
- "'bootstrap-os' not in ansible_play_role_names"

View File

@@ -1,13 +1,13 @@
---
- name: reset_calico_cni
- name: Reset_calico_cni
command: /bin/true
when: calico_cni_config is defined
notify:
- delete 10-calico.conflist
- Delete 10-calico.conflist
- Calico | delete calico-node docker containers
- Calico | delete calico-node crio/containerd containers
- name: delete 10-calico.conflist
- name: Delete 10-calico.conflist
file:
path: /etc/cni/net.d/10-calico.conflist
state: absent

View File

@@ -1,7 +1,8 @@
---
# Workaround to retry a block of tasks, ansible doesn't have a direct way to do it,
# you can follow the block loop request in: https://github.com/ansible/ansible/issues/46203
- block:
- name: Calico-rr | Configure route reflector
block:
- name: Set the retry count
set_fact:
retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}"

View File

@@ -119,7 +119,8 @@
- calico_pool_cidr_ipv6 is defined
- enable_dual_stack_networks
- block:
- name: Calico | kdd specific configuration
block:
- name: Calico | Check if extra directory is needed
stat:
path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('v3.22.3', '<')) else 'crd' }}"
@@ -157,7 +158,8 @@
- inventory_hostname in groups['kube_control_plane']
- calico_datastore == "kdd"
- block:
- name: Calico | Configure Felix
block:
- name: Calico | Get existing FelixConfiguration
command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
register: _felix_cmd
@@ -201,7 +203,8 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- block:
- name: Calico | Configure Calico IP Pool
block:
- name: Calico | Get existing calico network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
register: _calico_pool_cmd
@@ -240,7 +243,8 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- block:
- name: Calico | Configure Calico IPv6 Pool
block:
- name: Calico | Get existing calico ipv6 network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
register: _calico_pool_ipv6_cmd
@@ -300,7 +304,8 @@
- inventory_hostname in groups['k8s_cluster']
run_once: yes
- block:
- name: Calico | Configure Calico BGP
block:
- name: Calico | Get existing BGP Configuration
command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
register: _bgp_config_cmd
@@ -463,10 +468,12 @@
- inventory_hostname == groups['kube_control_plane'][0]
- calico_datastore == "kdd"
- include_tasks: peer_with_calico_rr.yml
- name: Calico | Peer with Calico Route Reflector
include_tasks: peer_with_calico_rr.yml
when:
- peer_with_calico_rr | default(false)
- include_tasks: peer_with_router.yml
- name: Calico | Peer with the router
include_tasks: peer_with_router.yml
when:
- peer_with_router | default(false)

View File

@@ -1,6 +1,9 @@
---
- import_tasks: pre.yml
- name: Calico Pre tasks
import_tasks: pre.yml
- import_tasks: repos.yml
- name: Calico repos
import_tasks: repos.yml
- include_tasks: install.yml
- name: Calico install
include_tasks: install.yml

View File

@@ -5,7 +5,8 @@
register: calico_cni_config_slurp
failed_when: false
- block:
- name: Gather calico facts
block:
- name: Set fact calico_cni_config from slurped CNI config
set_fact:
calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | from_json }}"

View File

@@ -1,5 +1,5 @@
---
- name: reset | check vxlan.calico network device
- name: Reset | check vxlan.calico network device
stat:
path: /sys/class/net/vxlan.calico
get_attributes: no
@@ -7,11 +7,11 @@
get_mime: no
register: vxlan
- name: reset | remove the network vxlan.calico device created by calico
- name: Reset | remove the network vxlan.calico device created by calico
command: ip link del vxlan.calico
when: vxlan.stat.exists
- name: reset | check dummy0 network device
- name: Reset | check dummy0 network device
stat:
path: /sys/class/net/dummy0
get_attributes: no
@@ -19,11 +19,11 @@
get_mime: no
register: dummy0
- name: reset | remove the network device created by calico
- name: Reset | remove the network device created by calico
command: ip link del dummy0
when: dummy0.stat.exists
- name: reset | get and remove remaining routes set by bird
- name: Reset | get and remove remaining routes set by bird
shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird "
args:
executable: /bin/bash

View File

@@ -1,6 +1,9 @@
---
- import_tasks: check.yml
- name: Cilium check
import_tasks: check.yml
- include_tasks: install.yml
- name: Cilium install
include_tasks: install.yml
- include_tasks: apply.yml
- name: Cilium apply
include_tasks: apply.yml

View File

@@ -1,5 +1,5 @@
---
- name: reset | check and remove devices if still present
- name: Reset | check and remove devices if still present
include_tasks: reset_iface.yml
vars:
iface: "{{ item }}"

View File

@@ -1,5 +1,5 @@
---
- name: "reset | check if network device {{ iface }} is present"
- name: "Reset | check if network device {{ iface }} is present"
stat:
path: "/sys/class/net/{{ iface }}"
get_attributes: no
@@ -7,6 +7,6 @@
get_mime: no
register: device_remains
- name: "reset | remove network device {{ iface }}"
- name: "Reset | remove network device {{ iface }}"
command: "ip link del {{ iface }}"
when: device_remains.stat.exists

View File

@@ -1,5 +1,5 @@
---
- name: reset | check cni network device
- name: Reset | check cni network device
stat:
path: /sys/class/net/cni0
get_attributes: no
@@ -7,11 +7,11 @@
get_mime: no
register: cni
- name: reset | remove the network device created by the flannel
- name: Reset | remove the network device created by the flannel
command: ip link del cni0
when: cni.stat.exists
- name: reset | check flannel network device
- name: Reset | check flannel network device
stat:
path: /sys/class/net/flannel.1
get_attributes: no
@@ -19,6 +19,6 @@
get_mime: no
register: flannel
- name: reset | remove the network device created by the flannel
- name: Reset | remove the network device created by the flannel
command: ip link del flannel.1
when: flannel.stat.exists

Some files were not shown because too many files have changed in this diff Show More