Do not use ‘yes/no’ for boolean values (#11472)

Consistent boolean values in ansible playbooks
This commit is contained in:
Vlad Korolev
2024-08-28 01:30:56 -04:00
committed by GitHub
parent 5c5421e453
commit 9a7b021eb8
162 changed files with 507 additions and 508 deletions

View File

@@ -7,14 +7,14 @@ addusers:
etcd:
name: etcd
comment: "Etcd user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
kube:
name: kube
comment: "Kubernetes user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@@ -3,6 +3,6 @@ addusers:
- name: kube
comment: "Kubernetes user"
shell: /sbin/nologin
system: yes
system: true
group: "{{ kube_cert_group }}"
create_home: no
create_home: false

View File

@@ -2,14 +2,14 @@
addusers:
- name: etcd
comment: "Etcd user"
create_home: yes
create_home: true
home: "{{ etcd_data_dir }}"
system: yes
system: true
shell: /sbin/nologin
- name: kube
comment: "Kubernetes user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@@ -2,14 +2,14 @@
addusers:
- name: etcd
comment: "Etcd user"
create_home: yes
create_home: true
home: "{{ etcd_data_dir }}"
system: yes
system: true
shell: /sbin/nologin
- name: kube
comment: "Kubernetes user"
create_home: no
system: yes
create_home: false
system: true
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@@ -1,6 +1,6 @@
---
- name: Converge
hosts: all
gather_facts: no
gather_facts: false
roles:
- role: bootstrap-os

View File

@@ -8,9 +8,9 @@
file: epel
description: Extra Packages for Enterprise Linux 7 - $basearch
baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
gpgcheck: yes
gpgcheck: true
gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
skip_if_unavailable: yes
enabled: yes
repo_gpgcheck: no
skip_if_unavailable: true
enabled: true
repo_gpgcheck: false
when: epel_enabled

View File

@@ -119,9 +119,9 @@
- name: Check presence of fastestmirror.conf
stat:
path: /etc/yum/pluginconf.d/fastestmirror.conf
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: fastestmirror
# the fastestmirror plugin can actually slow down Ansible deployments

View File

@@ -28,7 +28,7 @@
raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
become: true
ignore_errors: true # noqa ignore-errors
ignore_unreachable: yes
ignore_unreachable: true
when: need_bootstrap.rc != 0
- name: Wait for the reboot to complete

View File

@@ -22,7 +22,7 @@
- "{{ os_release_dict['ID'] }}.yml"
paths:
- vars/
skip: True
skip: true
- name: Include tasks
include_tasks: "{{ included_tasks_file }}"
with_first_found:

View File

@@ -8,9 +8,9 @@
- name: Check that /etc/sysconfig/proxy file exists
stat:
path: /etc/sysconfig/proxy
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: stat_result
- name: Create the /etc/sysconfig/proxy empty file

View File

@@ -87,9 +87,9 @@
- name: Check presence of fastestmirror.conf
stat:
path: /etc/yum/pluginconf.d/fastestmirror.conf
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: fastestmirror
# the fastestmirror plugin can actually slow down Ansible deployments

View File

@@ -1,2 +1,2 @@
---
is_fedora_coreos: True
is_fedora_coreos: true

View File

@@ -2,9 +2,9 @@
- name: Containerd-common | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Containerd-common | set is_ostree

View File

@@ -3,9 +3,9 @@
systemd_service:
name: containerd
state: restarted
enabled: yes
daemon-reload: yes
masked: no
enabled: true
daemon-reload: true
masked: false
listen: Restart containerd
- name: Containerd | wait for containerd

View File

@@ -1,7 +1,7 @@
---
- name: Prepare
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true
@@ -19,7 +19,7 @@
- name: Prepare CNI
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true

View File

@@ -36,7 +36,7 @@
src: "{{ downloads.containerd.dest }}"
dest: "{{ containerd_bin_dir }}"
mode: "0755"
remote_src: yes
remote_src: true
extra_opts:
- --strip-components=1
notify: Restart containerd
@@ -138,6 +138,6 @@
- name: Containerd | Ensure containerd is started and enabled
systemd_service:
name: containerd
daemon_reload: yes
enabled: yes
daemon_reload: true
enabled: true
state: started

View File

@@ -3,7 +3,7 @@
systemd_service:
name: cri-dockerd
daemon_reload: true
masked: no
masked: false
listen: Restart and enable cri-dockerd
- name: Cri-dockerd | restart docker.service
@@ -27,5 +27,5 @@
- name: Cri-dockerd | enable cri-dockerd service
service:
name: cri-dockerd.service
enabled: yes
enabled: true
listen: Restart and enable cri-dockerd

View File

@@ -8,5 +8,5 @@
service:
name: crio
state: restarted
enabled: yes
enabled: true
listen: Restart crio

View File

@@ -1,7 +1,7 @@
---
- name: Prepare
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true
@@ -19,7 +19,7 @@
- name: Prepare CNI
hosts: all
gather_facts: False
gather_facts: false
become: true
vars:
ignore_assert_errors: true

View File

@@ -5,9 +5,9 @@
- name: Cri-o | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Cri-o | set is_ostree

View File

@@ -8,7 +8,7 @@
lineinfile:
dest: /etc/yum.repos.d/amzn2-extras.repo
line: "[amzn2extra-docker]"
check_mode: yes
check_mode: true
register: amzn2_extras_docker_repo
when:
- amzn2_extras_file_stat.stat.exists
@@ -19,7 +19,7 @@
section: amzn2extra-docker
option: enabled
value: "0"
backup: yes
backup: true
mode: "0644"
when:
- amzn2_extras_file_stat.stat.exists

View File

@@ -1,7 +1,7 @@
---
- name: Get crictl completion
command: "{{ bin_dir }}/crictl completion"
changed_when: False
changed_when: false
register: cri_completion
check_mode: false

View File

@@ -39,7 +39,7 @@
state: present
- name: Docker-storage-setup | install and run container-storage-setup
become: yes
become: true
script: |
install_container_storage_setup.sh \
{{ docker_container_storage_setup_repository }} \

View File

@@ -3,7 +3,7 @@
systemd_service:
name: docker
daemon_reload: true
masked: no
masked: false
listen: Restart docker
- name: Docker | reload docker.socket

View File

@@ -2,9 +2,9 @@
- name: Check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Set is_ostree
@@ -66,7 +66,7 @@
path: /etc/apt/sources.list
regexp: 'buster-backports'
state: absent
backup: yes
backup: true
when:
- ansible_os_family == 'Debian'
- ansible_distribution_release == "buster"
@@ -183,7 +183,7 @@
- name: Ensure docker service is started and enabled
service:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
with_items:
- docker

View File

@@ -21,9 +21,9 @@
shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
args:
executable: /bin/bash
changed_when: False
changed_when: false
register: system_nameservers
check_mode: no
check_mode: false
- name: Check system search domains
# noqa risky-shell-pipe - if resolv.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
@@ -31,9 +31,9 @@
shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
args:
executable: /bin/bash
changed_when: False
changed_when: false
register: system_search_domains
check_mode: no
check_mode: false
- name: Add system nameservers to docker options
set_fact:

View File

@@ -14,7 +14,7 @@
src: "{{ item.src }}"
dest: "{{ bin_dir }}/{{ item.dest }}"
mode: "0755"
remote_src: yes
remote_src: true
with_items:
- { src: "{{ downloads.gvisor_runsc.dest }}", dest: "runsc" }
- { src: "{{ downloads.gvisor_containerd_shim.dest }}", dest: "containerd-shim-runsc-v1" }

View File

@@ -11,7 +11,7 @@
mode: "0755"
owner: root
group: root
remote_src: yes
remote_src: true
- name: Kata-containers | Create config directory
file:

View File

@@ -1,7 +1,7 @@
---
- name: Get nerdctl completion
command: "{{ bin_dir }}/nerdctl completion bash"
changed_when: False
changed_when: false
register: nerdctl_completion
check_mode: false

View File

@@ -2,9 +2,9 @@
- name: Runc | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Runc | set is_ostree

View File

@@ -2,9 +2,9 @@
- name: Skopeo | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
- name: Skopeo | set is_ostree

View File

@@ -2,9 +2,9 @@
- name: Validate-container-engine | check if fedora coreos
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: ostree
tags:
- facts
@@ -30,8 +30,8 @@
- name: Check if containerd is installed
find:
file_type: file
recurse: yes
use_regex: yes
recurse: true
use_regex: true
patterns:
- containerd.service$
paths:
@@ -45,8 +45,8 @@
- name: Check if docker is installed
find:
file_type: file
recurse: yes
use_regex: yes
recurse: true
use_regex: true
patterns:
- docker.service$
paths:
@@ -60,8 +60,8 @@
- name: Check if crio is installed
find:
file_type: file
recurse: yes
use_regex: yes
recurse: true
use_regex: true
patterns:
- crio.service$
paths:

View File

@@ -5,7 +5,7 @@
shell: "{{ image_info_command }}"
register: docker_images
changed_when: false
check_mode: no
check_mode: false
when: not download_always_pull
- name: Check_pull_required | Set pull_required if the desired image is not yet loaded

View File

@@ -26,12 +26,12 @@
- name: Download_container | Determine if image is in cache
stat:
path: "{{ image_path_cached }}"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
delegate_to: localhost
connection: local
delegate_facts: no
delegate_facts: false
register: cache_image
changed_when: false
become: false
@@ -57,7 +57,7 @@
- name: Download_container | Download image if required
command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}"
delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}"
delegate_facts: yes
delegate_facts: true
run_once: "{{ download_run_once }}"
register: pull_task_result
until: pull_task_result is succeeded
@@ -72,7 +72,7 @@
- name: Download_container | Save and compress image
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
delegate_to: "{{ download_delegate }}"
delegate_facts: no
delegate_facts: false
register: container_save_status
failed_when: container_save_status.stderr
run_once: true
@@ -99,7 +99,7 @@
dest: "{{ image_path_final }}"
use_ssh_args: true
mode: push
delegate_facts: no
delegate_facts: false
register: upload_image
failed_when: not upload_image
until: upload_image is succeeded

View File

@@ -24,13 +24,13 @@
owner: "{{ download.owner | default(omit) }}"
mode: "0755"
state: directory
recurse: yes
recurse: true
- name: Download_file | Create local cache directory
file:
path: "{{ file_path_cached | dirname }}"
state: directory
recurse: yes
recurse: true
delegate_to: localhost
connection: local
delegate_facts: false
@@ -45,7 +45,7 @@
file:
path: "{{ file_path_cached | dirname }}"
state: directory
recurse: yes
recurse: true
delegate_to: "{{ download_delegate }}"
delegate_facts: false
run_once: true

View File

@@ -5,7 +5,7 @@
dest: "{{ download.dest | dirname }}"
owner: "{{ download.owner | default(omit) }}"
mode: "{{ download.mode | default(omit) }}"
copy: no
copy: false
extra_opts: "{{ download.unarchive_extra_opts | default(omit) }}"
when:
- download.unarchive | default(false)

View File

@@ -62,7 +62,7 @@
register: docker_images
failed_when: false
changed_when: false
check_mode: no
check_mode: false
when: download_container
- name: Prep_download | Create staging directory on remote node
@@ -81,7 +81,7 @@
mode: "0755"
delegate_to: localhost
connection: local
delegate_facts: no
delegate_facts: false
run_once: true
become: false
when:

View File

@@ -23,9 +23,9 @@
- name: Stat etcd v2 data directory
stat:
path: "{{ etcd_data_dir }}/member"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: etcd_data_dir_member
listen: Restart etcd
when: etcd_cluster_is_healthy.rc == 0

View File

@@ -26,7 +26,7 @@
- name: Wait for etcd up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
validate_certs: no
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
register: result
@@ -41,7 +41,7 @@
- name: Wait for etcd-events up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
validate_certs: no
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
register: result

View File

@@ -17,9 +17,9 @@
- name: "Check certs | Register ca and etcd admin/member certs on etcd hosts"
stat:
path: "{{ etcd_cert_dir }}/{{ item }}"
get_attributes: no
get_checksum: yes
get_mime: no
get_attributes: false
get_checksum: true
get_mime: false
register: etcd_member_certs
when: inventory_hostname in groups['etcd']
with_items:

View File

@@ -6,8 +6,8 @@
register: etcd_cluster_is_healthy
failed_when: false
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_cluster_setup
@@ -27,8 +27,8 @@
register: etcd_events_cluster_is_healthy
failed_when: false
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_events_cluster_setup
@@ -49,7 +49,7 @@
template:
src: "etcd-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd.service
backup: yes
backup: true
mode: "0644"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
# Remove once we drop support for systemd < 250
@@ -60,7 +60,7 @@
template:
src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd-events.service
backup: yes
backup: true
mode: "0644"
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-events-{{ etcd_deployment_type }}.service'"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
@@ -77,7 +77,7 @@
service:
name: etcd
state: started
enabled: yes
enabled: true
ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_cluster_setup
@@ -86,7 +86,7 @@
service:
name: etcd-events
state: started
enabled: yes
enabled: true
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_events_cluster_setup
@@ -99,8 +99,8 @@
retries: "{{ etcd_retries }}"
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_cluster_setup
@@ -122,8 +122,8 @@
retries: "{{ etcd_retries }}"
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
check_mode: no
run_once: yes
check_mode: false
run_once: true
when:
- is_etcd_master
- etcd_events_cluster_setup
@@ -141,7 +141,7 @@
register: etcd_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
check_mode: false
when: is_etcd_master and etcd_cluster_setup
tags:
- facts
@@ -157,7 +157,7 @@
register: etcd_events_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
check_mode: false
when: is_etcd_master and etcd_events_cluster_setup
tags:
- facts

View File

@@ -6,7 +6,7 @@
state: directory
owner: "{{ etcd_owner }}"
mode: "{{ etcd_cert_dir_mode }}"
recurse: yes
recurse: true
- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
file:
@@ -14,7 +14,7 @@
state: directory
owner: root
mode: "0700"
run_once: yes
run_once: true
when: inventory_hostname == groups['etcd'][0]
- name: Gen_certs | write openssl config
@@ -22,7 +22,7 @@
src: "openssl.conf.j2"
dest: "{{ etcd_config_dir }}/openssl.conf"
mode: "0640"
run_once: yes
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
when:
- gen_certs | default(false)
@@ -33,7 +33,7 @@
src: "make-ssl-etcd.sh.j2"
dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
mode: "0700"
run_once: yes
run_once: true
when:
- gen_certs | default(false)
- inventory_hostname == groups['etcd'][0]
@@ -43,7 +43,7 @@
environment:
MASTERS: "{{ groups['gen_master_certs_True'] | ansible.builtin.intersect(groups['etcd']) | join(' ') }}"
HOSTS: "{{ groups['gen_node_certs_True'] | ansible.builtin.intersect(groups['kube_control_plane']) | join(' ') }}"
run_once: yes
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
when: gen_certs | default(false)
notify: Set etcd_secret_changed
@@ -52,7 +52,7 @@
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
environment:
HOSTS: "{{ groups['gen_node_certs_True'] | ansible.builtin.intersect(groups['k8s_cluster']) | join(' ') }}"
run_once: yes
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
@@ -153,4 +153,4 @@
state: directory
owner: "{{ etcd_owner }}"
mode: "{{ etcd_cert_dir_mode }}"
recurse: yes
recurse: true

View File

@@ -21,7 +21,7 @@
executable: /bin/bash
no_log: "{{ not (unsafe_show_logs | bool) }}"
register: etcd_node_certs
check_mode: no
check_mode: false
delegate_to: "{{ groups['etcd'][0] }}"
changed_when: false

View File

@@ -29,7 +29,7 @@
dest: "{{ bin_dir }}/etcd"
owner: 'root'
mode: "0750"
backup: yes
backup: true
when: etcd_cluster_setup
- name: Install etcd-events launch script
@@ -38,5 +38,5 @@
dest: "{{ bin_dir }}/etcd-events"
owner: 'root'
mode: "0750"
backup: yes
backup: true
when: etcd_events_cluster_setup

View File

@@ -25,7 +25,7 @@
src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"
mode: "0755"
remote_src: yes
remote_src: true
with_items:
- etcd
when: etcd_cluster_setup

View File

@@ -32,7 +32,7 @@
executable: /bin/bash
register: etcd_events_member_in_cluster
changed_when: false
check_mode: no
check_mode: false
tags:
- facts
environment:
@@ -46,4 +46,4 @@
service:
name: etcd-events
state: started
enabled: yes
enabled: true

View File

@@ -33,7 +33,7 @@
executable: /bin/bash
register: etcd_member_in_cluster
changed_when: false
check_mode: no
check_mode: false
retries: "{{ etcd_retries }}"
delay: "{{ retry_stagger | random + 3 }}"
until: etcd_member_in_cluster.rc == 0
@@ -50,4 +50,4 @@
service:
name: etcd
state: started
enabled: yes
enabled: true

View File

@@ -33,7 +33,7 @@
command: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial"
register: "etcd_client_cert_serial_result"
changed_when: false
check_mode: no
check_mode: false
when:
- kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"

View File

@@ -24,7 +24,7 @@
unarchive:
src: "{{ downloads.etcd.dest }}"
dest: "{{ local_release_dir }}/"
remote_src: yes
remote_src: true
when: container_manager in ['crio', 'containerd']
- name: Copy etcdctl and etcdutl binary from download dir
@@ -32,7 +32,7 @@
src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"
mode: "0755"
remote_src: yes
remote_src: true
with_items:
- etcdctl
- etcdutl

View File

@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result

View File

@@ -8,10 +8,10 @@
ansible.posix.synchronize:
src: "{{ downloads.yq.dest }}"
dest: "{{ bin_dir }}/yq"
compress: no
perms: yes
owner: no
group: no
compress: false
perms: true
owner: false
group: false
delegate_to: "{{ inventory_hostname }}"
- name: Kubernetes Apps | Set ArgoCD template list
@@ -49,17 +49,17 @@
ansible.posix.synchronize:
src: "{{ local_release_dir }}/{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.file }}"
compress: no
perms: yes
owner: no
group: no
compress: false
perms: true
owner: false
group: false
delegate_to: "{{ inventory_hostname }}"
with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
when:
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Set ArgoCD namespace for remote manifests
become: yes
become: true
command: |
{{ bin_dir }}/yq eval-all -i '.metadata.namespace="{{ argocd_namespace }}"' {{ kube_config_dir }}/{{ item.file }}
with_items: "{{ argocd_templates | selectattr('url', 'defined') | list }}"
@@ -69,7 +69,7 @@
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Create ArgoCD manifests from templates
become: yes
become: true
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
@@ -81,7 +81,7 @@
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Install ArgoCD
become: yes
become: true
kube:
name: ArgoCD
kubectl: "{{ bin_dir }}/kubectl"
@@ -93,7 +93,7 @@
# https://github.com/argoproj/argo-cd/blob/master/docs/faq.md#i-forgot-the-admin-password-how-do-i-reset-it
- name: Kubernetes Apps | Set ArgoCD custom admin password
become: yes
become: true
shell: |
{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n {{ argocd_namespace }} patch secret argocd-secret -p \
'{

View File

@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result

View File

@@ -21,7 +21,7 @@ vsphere_csi_controller_replicas: 1
csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}'
vsphere_csi_aggressive_node_drain: False
vsphere_csi_aggressive_node_drain: false
vsphere_csi_aggressive_node_unreachable_timeout: 300
vsphere_csi_aggressive_node_not_ready_timeout: 300

View File

@@ -37,13 +37,13 @@
- name: Helm | Get helm completion
command: "{{ bin_dir }}/helm completion bash"
changed_when: False
changed_when: false
register: helm_completion
check_mode: False
check_mode: false
- name: Helm | Install helm completion
copy:
dest: /etc/bash_completion.d/helm.sh
content: "{{ helm_completion.stdout }}"
mode: "0755"
become: True
become: true

View File

@@ -2,13 +2,13 @@
- name: Get installed pip version
command: "{{ ansible_python_interpreter if ansible_python_interpreter is defined else 'python' }} -m pip --version"
register: pip_version_output
ignore_errors: yes
ignore_errors: true
changed_when: false
- name: Get installed PyYAML version
command: "{{ ansible_python_interpreter if ansible_python_interpreter is defined else 'python' }} -m pip show PyYAML"
register: pyyaml_version_output
ignore_errors: yes
ignore_errors: true
changed_when: false
- name: Install pip

View File

@@ -24,15 +24,15 @@
- name: Krew | Get krew completion
command: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} completion bash"
changed_when: False
changed_when: false
register: krew_completion
check_mode: False
ignore_errors: yes # noqa ignore-errors
check_mode: false
ignore_errors: true # noqa ignore-errors
- name: Krew | Install krew completion
copy:
dest: /etc/bash_completion.d/krew.sh
content: "{{ krew_completion.stdout }}"
mode: "0755"
become: True
become: true
when: krew_completion.rc == 0

View File

@@ -13,7 +13,7 @@
- name: Weave | Wait for Weave to become available
uri:
url: http://127.0.0.1:6784/status
return_content: yes
return_content: true
register: weave_status
retries: 180
delay: 5

View File

@@ -30,9 +30,9 @@
copy:
src: "{{ kube_config_dir }}/admin.conf"
dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
remote_src: yes
remote_src: true
mode: "0600"
backup: yes
backup: true
- name: Create kube artifacts dir
file:
@@ -41,8 +41,8 @@
state: directory
delegate_to: localhost
connection: local
become: no
run_once: yes
become: false
run_once: true
when: kubeconfig_localhost
- name: Wait for k8s apiserver
@@ -54,7 +54,7 @@
- name: Get admin kubeconfig from remote host
slurp:
src: "{{ kube_config_dir }}/admin.conf"
run_once: yes
run_once: true
register: raw_admin_kubeconfig
when: kubeconfig_localhost
@@ -83,21 +83,21 @@
mode: "0600"
delegate_to: localhost
connection: local
become: no
run_once: yes
become: false
run_once: true
when: kubeconfig_localhost
- name: Copy kubectl binary to ansible host
fetch:
src: "{{ bin_dir }}/kubectl"
dest: "{{ artifacts_dir }}/kubectl"
flat: yes
validate_checksum: no
flat: true
validate_checksum: false
register: copy_binary_result
until: copy_binary_result is not failed
retries: 20
become: no
run_once: yes
become: false
run_once: true
when: kubectl_localhost
- name: Create helper script kubectl.sh on ansible host
@@ -107,8 +107,8 @@
${BASH_SOURCE%/*}/kubectl --kubeconfig=${BASH_SOURCE%/*}/admin.conf "$@"
dest: "{{ artifacts_dir }}/kubectl.sh"
mode: "0755"
become: no
run_once: yes
become: false
run_once: true
delegate_to: localhost
connection: local
when: kubectl_localhost and kubeconfig_localhost

View File

@@ -81,7 +81,7 @@
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10259/healthz
validate_certs: no
validate_certs: false
register: scheduler_result
until: scheduler_result.status == 200
retries: 60
@@ -95,7 +95,7 @@
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10257/healthz
validate_certs: no
validate_certs: false
register: controller_manager_result
until: controller_manager_result.status == 200
retries: 60
@@ -107,7 +107,7 @@
- name: Master | wait for the apiserver to be running
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
register: result
until: result.status == 200
retries: 60

View File

@@ -3,7 +3,7 @@
- name: Check which kube-control nodes are already members of the cluster
command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
register: kube_control_planes_raw
ignore_errors: yes
ignore_errors: true
changed_when: false
- name: Set fact joined_control_planes
@@ -12,7 +12,7 @@
delegate_to: "{{ item }}"
loop: "{{ groups['kube_control_plane'] }}"
when: kube_control_planes_raw is succeeded
run_once: yes
run_once: true
- name: Set fact first_kube_control_plane
set_fact:

View File

@@ -2,9 +2,9 @@
- name: Check if secret for encrypting data at rest already exist
stat:
path: "{{ kube_cert_dir }}/secrets_encryption.yaml"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: secrets_encryption_file
- name: Slurp secrets_encryption file if it exists

View File

@@ -4,7 +4,7 @@
src: "{{ kube_cert_dir }}/{{ item }}"
dest: "{{ kube_cert_dir }}/{{ item }}.old"
mode: preserve
remote_src: yes
remote_src: true
with_items:
- apiserver.crt
- apiserver.key
@@ -19,7 +19,7 @@
src: "{{ kube_config_dir }}/{{ item }}"
dest: "{{ kube_config_dir }}/{{ item }}.old"
mode: preserve
remote_src: yes
remote_src: true
with_items:
- admin.conf
- controller-manager.conf

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/{{ item }}"
regexp: '^ server: https'
line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
backup: true
with_items:
- admin.conf
- controller-manager.conf

View File

@@ -25,7 +25,7 @@
- name: Parse certificate key if not set
set_fact:
kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
run_once: yes
run_once: true
when:
- hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
- hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped
@@ -35,7 +35,7 @@
src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
mode: "0640"
backup: yes
backup: true
when:
- inventory_hostname != first_kube_control_plane
- not kubeadm_already_run.stat.exists

View File

@@ -13,9 +13,9 @@
- name: Kubeadm | Check if kubeadm has already run
stat:
path: "/var/lib/kubelet/config.yaml"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubeadm_already_run
- name: Kubeadm | Backup kubeadm certs / kubeconfig

View File

@@ -4,7 +4,7 @@
path: "{{ kube_config_dir }}/kubelet.conf"
regexp: '^ client-certificate-data: '
line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: yes
backup: true
notify:
- "Master | reload kubelet"
@@ -13,6 +13,6 @@
path: "{{ kube_config_dir }}/kubelet.conf"
regexp: '^ client-key-data: '
line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: yes
backup: true
notify:
- "Master | reload kubelet"

View File

@@ -120,7 +120,7 @@
- name: Renew K8S control plane certificates monthly 2/2
systemd_service:
name: k8s-certs-renew.timer
enabled: yes
enabled: true
state: started
daemon_reload: "{{ k8s_certs_units is changed }}"
when: auto_renew_certificates

View File

@@ -14,17 +14,17 @@
- name: Check if kubelet.conf exists
stat:
path: "{{ kube_config_dir }}/kubelet.conf"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubelet_conf
- name: Check if kubeadm CA cert is accessible
stat:
path: "{{ kube_cert_dir }}/ca.crt"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubeadm_ca_stat
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
@@ -79,7 +79,7 @@
template:
src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: yes
backup: true
mode: "0640"
when: not is_kube_master
@@ -140,7 +140,7 @@
dest: "{{ kube_config_dir }}/kubelet.conf"
regexp: 'server:'
line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
backup: true
when:
- kubeadm_config_api_fqdn is not defined
- not is_kube_master
@@ -152,7 +152,7 @@
dest: "{{ kube_config_dir }}/kubelet.conf"
regexp: '^ server: https'
line: ' server: {{ kube_apiserver_endpoint }}'
backup: yes
backup: true
when:
- not is_kube_master
- loadbalancer_apiserver is defined

View File

@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
client_cert: "{{ kube_apiserver_client_cert }}"
client_key: "{{ kube_apiserver_client_key }}"
register: result

View File

@@ -8,7 +8,7 @@
executable: /bin/bash
register: docker_cgroup_driver_result
changed_when: false
check_mode: no
check_mode: false
- name: Set kubelet_cgroup_driver_detected fact for docker
set_fact:

View File

@@ -11,7 +11,7 @@
src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
dest: "{{ kube_config_dir }}/kubelet.env"
setype: "{{ (preinstall_selinux_state != 'disabled') | ternary('etc_t', omit) }}"
backup: yes
backup: true
mode: "0600"
notify: Node | restart kubelet
tags:
@@ -32,7 +32,7 @@
template:
src: "kubelet.service.j2"
dest: "/etc/systemd/system/kubelet.service"
backup: "yes"
backup: true
mode: "0600"
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:kubelet.service'"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
@@ -48,7 +48,7 @@
- name: Enable kubelet
service:
name: kubelet
enabled: yes
enabled: true
state: started
tags:
- kubelet

View File

@@ -17,14 +17,14 @@
dest: "{{ haproxy_config_dir }}/haproxy.cfg"
owner: root
mode: "0755"
backup: yes
backup: true
- name: Haproxy | Get checksum from config
stat:
path: "{{ haproxy_config_dir }}/haproxy.cfg"
get_attributes: no
get_checksum: yes
get_mime: no
get_attributes: false
get_checksum: true
get_mime: false
register: haproxy_stat
- name: Haproxy | Write static pod

View File

@@ -16,9 +16,9 @@
- name: Kube-vip | Check if kubeadm has already run
stat:
path: "/var/lib/kubelet/config.yaml"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubeadm_already_run
- name: Kube-vip | Set admin.conf

View File

@@ -17,14 +17,14 @@
dest: "{{ nginx_config_dir }}/nginx.conf"
owner: root
mode: "0755"
backup: yes
backup: true
- name: Nginx-proxy | Get checksum from config
stat:
path: "{{ nginx_config_dir }}/nginx.conf"
get_attributes: no
get_checksum: yes
get_mime: no
get_attributes: false
get_checksum: true
get_mime: false
register: nginx_stat
- name: Nginx-proxy | Write static pod

View File

@@ -51,10 +51,10 @@
ansible.posix.sysctl:
name: net.ipv4.ip_local_reserved_ports
value: "{{ kube_apiserver_node_port_range }}"
sysctl_set: yes
sysctl_set: true
sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: yes
reload: true
when: kube_apiserver_node_port_range is defined
tags:
- kube-proxy
@@ -66,7 +66,7 @@
register: modinfo_br_netfilter
failed_when: modinfo_br_netfilter.rc not in [0, 1]
changed_when: false
check_mode: no
check_mode: false
# TODO: Remove once upstream issue is fixed
# https://github.com/ansible-collections/community.general/issues/7717
@@ -97,7 +97,7 @@
command: "sysctl net.bridge.bridge-nf-call-iptables"
failed_when: false
changed_when: false
check_mode: no
check_mode: false
register: sysctl_bridge_nf_call_iptables
- name: Enable bridge-nf-call tables
@@ -106,7 +106,7 @@
state: present
sysctl_file: "{{ sysctl_file_path }}"
value: "1"
reload: yes
reload: true
when: sysctl_bridge_nf_call_iptables.rc == 0
with_items:
- net.bridge.bridge-nf-call-iptables

View File

@@ -11,7 +11,7 @@
executable: /bin/bash
failed_when: false
changed_when: false
check_mode: no
check_mode: false
register: kubelet_container_check
- name: "Pre-upgrade | copy /var/lib/cni from kubelet"

View File

@@ -31,9 +31,9 @@
- name: Preinstall | kube-apiserver configured
stat:
path: "{{ kube_manifest_dir }}/kube-apiserver.yaml"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kube_apiserver_set
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
listen: Preinstall | propagate resolvconf to k8s components
@@ -42,9 +42,9 @@
- name: Preinstall | kube-controller configured
stat:
path: "{{ kube_manifest_dir }}/kube-controller-manager.yaml"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kube_controller_set
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
listen: Preinstall | propagate resolvconf to k8s components
@@ -109,7 +109,7 @@
- name: Preinstall | wait for the apiserver to be running
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: no
validate_certs: false
register: result
until: result.status == 200
retries: 60

View File

@@ -2,9 +2,9 @@
- name: Check if /etc/fstab exists
stat:
path: "/etc/fstab"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: fstab_file
- name: Remove swapfile from /etc/fstab

View File

@@ -12,24 +12,24 @@
register: resolvconf
failed_when: false
changed_when: false
check_mode: no
check_mode: false
- name: Check existence of /etc/resolvconf/resolv.conf.d
stat:
path: /etc/resolvconf/resolv.conf.d
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
failed_when: false
register: resolvconfd_path
- name: Check status of /etc/resolv.conf
stat:
path: /etc/resolv.conf
follow: no
get_attributes: no
get_checksum: no
get_mime: no
follow: false
get_attributes: false
get_checksum: false
get_mime: false
failed_when: false
register: resolvconf_stat
@@ -72,7 +72,7 @@
register: systemd_resolved_enabled
failed_when: false
changed_when: false
check_mode: no
check_mode: false
- name: Set default dns if remove_default_searchdomains is false
set_fact:
@@ -94,9 +94,9 @@
- name: Check if kubelet is configured
stat:
path: "{{ kube_config_dir }}/kubelet.env"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kubelet_configured
changed_when: false
@@ -121,9 +121,9 @@
- name: Check if /etc/dhclient.conf exists
stat:
path: /etc/dhclient.conf
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: dhclient_stat
- name: Target dhclient conf file for /etc/dhclient.conf
@@ -134,9 +134,9 @@
- name: Check if /etc/dhcp/dhclient.conf exists
stat:
path: /etc/dhcp/dhclient.conf
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: dhcp_dhclient_stat
- name: Target dhclient conf file for /etc/dhcp/dhclient.conf
@@ -218,9 +218,9 @@
- name: Check /usr readonly
stat:
path: "/usr"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: usr
- name: Set alternate flexvolume path

View File

@@ -44,7 +44,7 @@
assert:
that: item.value | type_debug == 'bool'
msg: "{{ item.value }} isn't a bool"
run_once: yes
run_once: true
with_items:
- { name: download_run_once, value: "{{ download_run_once }}" }
- { name: deploy_netchecker, value: "{{ deploy_netchecker }}" }
@@ -172,21 +172,21 @@
that:
- kube_service_addresses | ansible.utils.ipaddr('net')
msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range"
run_once: yes
run_once: true
- name: "Check that kube_pods_subnet is a network range"
assert:
that:
- kube_pods_subnet | ansible.utils.ipaddr('net')
msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range"
run_once: yes
run_once: true
- name: "Check that kube_pods_subnet does not collide with kube_service_addresses"
assert:
that:
- kube_pods_subnet | ansible.utils.ipaddr(kube_service_addresses) | string == 'None'
msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses"
run_once: yes
run_once: true
- name: "Check that IP range is enough for the nodes"
assert:
@@ -194,7 +194,7 @@
- 2 ** (kube_network_node_prefix - kube_pods_subnet | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length
msg: "Not enough IPs are available for the desired node count."
when: kube_network_plugin != 'calico'
run_once: yes
run_once: true
- name: Stop if unknown dns mode
assert:
@@ -246,7 +246,7 @@
# TODO: Clean this task up when we drop backward compatibility support for `etcd_kubeadm_enabled`
- name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker and etcd_kubeadm_enabled is not defined
run_once: yes
run_once: true
when: etcd_kubeadm_enabled is defined
block:
- name: Warn the user if they are still using `etcd_kubeadm_enabled`
@@ -292,7 +292,7 @@
assert:
that: containerd_version is version(containerd_min_version_required, '>=')
msg: "containerd_version is too low. Minimum version {{ containerd_min_version_required }}"
run_once: yes
run_once: true
when:
- containerd_version not in ['latest', 'edge', 'stable']
- container_manager == 'containerd'

View File

@@ -48,9 +48,9 @@
- name: Check if kubernetes kubeadm compat cert dir exists
stat:
path: "{{ kube_cert_compat_dir }}"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: kube_cert_compat_dir_check
when:
- inventory_hostname in groups['k8s_cluster']

View File

@@ -16,7 +16,7 @@
options ndots:{{ ndots }} timeout:{{ dns_timeout | default('2') }} attempts:{{ dns_attempts | default('2') }}
state: present
insertbefore: BOF
create: yes
create: true
backup: "{{ not resolvconf_stat.stat.islnk }}"
marker: "# Ansible entries {mark}"
mode: "0644"

View File

@@ -3,7 +3,7 @@
file:
path: "/etc/NetworkManager/conf.d"
state: directory
recurse: yes
recurse: true
- name: NetworkManager | Prevent NetworkManager from managing Calico interfaces (cali*/tunl*/vxlan.calico)
copy:

View File

@@ -6,7 +6,7 @@
option: servers
value: "{{ nameserverentries }}"
mode: '0600'
backup: yes
backup: true
when:
- nameserverentries != "127.0.0.53" or systemd_resolved_enabled.rc != 0
notify: Preinstall | update resolvconf for networkmanager
@@ -23,7 +23,7 @@
option: searches
value: "{{ (default_searchdomains | default([]) + searchdomains | default([])) | join(',') }}"
mode: '0600'
backup: yes
backup: true
notify: Preinstall | update resolvconf for networkmanager
- name: NetworkManager | Add DNS options to NM configuration
@@ -33,5 +33,5 @@
option: options
value: "ndots:{{ ndots }},timeout:{{ dns_timeout | default('2') }},attempts:{{ dns_attempts | default('2') }}"
mode: '0600'
backup: yes
backup: true
notify: Preinstall | update resolvconf for networkmanager

View File

@@ -34,7 +34,7 @@
- name: Update package management cache (APT)
apt:
update_cache: yes
update_cache: true
cache_valid_time: 3600
when: ansible_os_family == "Debian"
tags:

View File

@@ -3,9 +3,9 @@
- name: Confirm selinux deployed
stat:
path: /etc/selinux/config
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
when:
- ansible_os_family == "RedHat"
- "'Amazon' not in ansible_distribution"
@@ -27,8 +27,8 @@
dest: /etc/gai.conf
line: "precedence ::ffff:0:0/96 100"
state: present
create: yes
backup: yes
create: true
backup: true
mode: "0644"
when:
- disable_ipv6_dns
@@ -47,9 +47,9 @@
- name: Stat sysctl file configuration
stat:
path: "{{ sysctl_file_path }}"
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: sysctl_file_stat
tags:
- bootstrap-os
@@ -75,7 +75,7 @@
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
reload: true
- name: Enable ipv6 forwarding
ansible.posix.sysctl:
@@ -83,15 +83,15 @@
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
reload: true
when: enable_dual_stack_networks | bool
- name: Check if we need to set fs.may_detach_mounts
stat:
path: /proc/sys/fs/may_detach_mounts
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: fs_may_detach_mounts
ignore_errors: true # noqa ignore-errors
@@ -101,7 +101,7 @@
name: fs.may_detach_mounts
value: 1
state: present
reload: yes
reload: true
when: fs_may_detach_mounts.stat.exists | d(false)
- name: Ensure kubelet expected parameters are set
@@ -110,7 +110,7 @@
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: yes
reload: true
with_items:
- { name: kernel.keys.root_maxbytes, value: 25000000 }
- { name: kernel.keys.root_maxkeys, value: 1000000 }
@@ -133,7 +133,7 @@
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: yes
reload: true
with_items: "{{ additional_sysctl }}"
- name: Disable fapolicyd service

View File

@@ -11,17 +11,17 @@
{% endfor %}
delegate_to: localhost
connection: local
delegate_facts: yes
run_once: yes
delegate_facts: true
run_once: true
- name: Hosts | populate inventory into hosts file
blockinfile:
path: /etc/hosts
block: "{{ hostvars.localhost.etc_hosts_inventory_block }}"
state: "{{ 'present' if populate_inventory_to_hosts_file else 'absent' }}"
create: yes
backup: yes
unsafe_writes: yes
create: true
backup: true
unsafe_writes: true
marker: "# Ansible inventory hosts {mark}"
mode: "0644"
@@ -31,8 +31,8 @@
regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name }}"
state: present
backup: yes
unsafe_writes: yes
backup: true
unsafe_writes: true
when:
- populate_loadbalancer_apiserver_to_hosts_file
- loadbalancer_apiserver is defined
@@ -69,8 +69,8 @@
line: "{{ item.key }} {{ item.value | join(' ') }}"
regexp: "^{{ item.key }}.*$"
state: present
backup: yes
unsafe_writes: yes
backup: true
unsafe_writes: true
loop: "{{ etc_hosts_localhosts_dict_target | default({}) | dict2items }}"
# gather facts to update ansible_fqdn

View File

@@ -6,10 +6,10 @@
{{ item }}
{% endfor %}
path: "{{ dhclientconffile }}"
create: yes
create: true
state: present
insertbefore: BOF
backup: yes
backup: true
marker: "# Ansible entries {mark}"
mode: "0644"
notify: Preinstall | propagate resolvconf to k8s components

View File

@@ -7,7 +7,7 @@
blockinfile:
path: "{{ dhclientconffile }}"
state: absent
backup: yes
backup: true
marker: "# Ansible entries {mark}"
notify: Preinstall | propagate resolvconf to k8s components

View File

@@ -22,7 +22,7 @@
- name: Check if growpart needs to be run
command: growpart -N {{ device }} {{ partition }}
failed_when: False
failed_when: false
changed_when: "'NOCHANGE:' not in growpart_needed.stdout"
register: growpart_needed
environment:
@@ -30,7 +30,7 @@
- name: Check fs type
command: file -Ls {{ root_device }}
changed_when: False
changed_when: false
register: fs_type
- name: Run growpart # noqa no-handler

View File

@@ -121,9 +121,9 @@
- name: Check if we are running inside a Azure VM
stat:
path: /var/lib/waagent/
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: azure_check
when:
- not dns_late

View File

@@ -2,9 +2,9 @@
- name: "Check_tokens | check if the tokens have already been generated on first master"
stat:
path: "{{ kube_token_dir }}/known_tokens.csv"
get_attributes: no
get_checksum: yes
get_mime: no
get_attributes: false
get_checksum: true
get_mime: false
delegate_to: "{{ groups['kube_control_plane'][0] }}"
register: known_tokens_master
run_once: true
@@ -23,9 +23,9 @@
- name: "Check tokens | check if a cert already exists"
stat:
path: "{{ kube_token_dir }}/known_tokens.csv"
get_attributes: no
get_checksum: yes
get_mime: no
get_attributes: false
get_checksum: true
get_mime: false
register: known_tokens
- name: "Check_tokens | Set 'sync_tokens' to true"

View File

@@ -4,7 +4,7 @@
src: "kube-gen-token.sh"
dest: "{{ kube_script_dir }}/kube-gen-token.sh"
mode: "0700"
run_once: yes
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
@@ -17,7 +17,7 @@
- "{{ groups['kube_control_plane'] }}"
register: gentoken_master
changed_when: "'Added' in gentoken_master.stdout"
run_once: yes
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
@@ -30,14 +30,14 @@
- "{{ groups['kube_node'] }}"
register: gentoken_node
changed_when: "'Added' in gentoken_node.stdout"
run_once: yes
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
- name: Gen_tokens | Get list of tokens from first master
command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
register: tokens_list
check_mode: no
check_mode: false
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when: sync_tokens | default(false)
@@ -47,7 +47,7 @@
args:
executable: /bin/bash
register: tokens_data
check_mode: no
check_mode: false
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when: sync_tokens | default(false)

View File

@@ -8,10 +8,10 @@
gather_subset: '!all,network'
filter: "ansible_default_ipv4"
delegate_to: "{{ item }}"
delegate_facts: yes
delegate_facts: true
when: hostvars[item].ansible_default_ipv4 is not defined
loop: "{{ (ansible_play_hosts_all + [groups['kube_control_plane'][0]]) | unique if ansible_limit is defined else (groups['k8s_cluster'] | default([]) + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique }}"
run_once: yes
run_once: true
ignore_unreachable: true
tags: always
@@ -26,9 +26,9 @@
{% endfor %}
delegate_to: localhost
connection: local
delegate_facts: yes
become: no
run_once: yes
delegate_facts: true
become: false
run_once: true
- name: Set fallback_ips
set_fact:

View File

@@ -26,9 +26,9 @@
127.0.0.1,localhost,{{ kube_service_addresses }},{{ kube_pods_subnet }},svc,svc.{{ dns_domain }}
delegate_to: localhost
connection: local
delegate_facts: yes
become: no
run_once: yes
delegate_facts: true
become: false
run_once: true
- name: Populates no_proxy to all hosts
set_fact:

View File

@@ -3,7 +3,7 @@
service:
name: calico-rr
state: stopped
enabled: no
enabled: false
failed_when: false
- name: Calico-rr | Delete obsolete files

View File

@@ -4,7 +4,7 @@
that:
- ipip is not defined
msg: "'ipip' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Stop if legacy encapsulation variables are detected (ipip_mode)
@@ -12,7 +12,7 @@
that:
- ipip_mode is not defined
msg: "'ipip_mode' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks)
@@ -20,7 +20,7 @@
that:
- calcio_ipam_autoallocateblocks is not defined
msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated, it's a typo, please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -32,7 +32,7 @@
msg: "When using cloud_provider azure and network_plugin calico calico_ipip_mode must be 'Never' and calico_vxlan_mode 'Always' or 'CrossSubnet'"
when:
- cloud_provider is defined and cloud_provider == 'azure'
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Stop if supported Calico versions
@@ -40,21 +40,21 @@
that:
- "calico_version in calico_crds_archive_checksums.keys()"
msg: "Calico version not supported {{ calico_version }} not in {{ calico_crds_archive_checksums.keys() }}"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Check if calicoctl.sh exists
stat:
path: "{{ bin_dir }}/calicoctl.sh"
register: calicoctl_sh_exists
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Check if calico ready
command: "{{ bin_dir }}/calicoctl.sh get ClusterInformation default"
register: calico_ready
run_once: True
ignore_errors: True
run_once: true
ignore_errors: true
retries: 5
delay: 10
until: calico_ready.rc == 0
@@ -62,7 +62,7 @@
when: calicoctl_sh_exists.stat.exists
- name: Check that current calico version is enough for upgrade
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0
block:
@@ -91,7 +91,7 @@
when:
- peer_with_calico_rr
- inventory_hostname == groups['kube_control_plane'][0]
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check that calico_rr nodes are in k8s_cluster group"
@@ -101,7 +101,7 @@
msg: "calico_rr must be a child group of k8s_cluster group"
when:
- '"calico_rr" in group_names'
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check vars defined correctly"
@@ -110,7 +110,7 @@
- "calico_pool_name is defined"
- "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
msg: "calico_pool_name contains invalid characters"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check calico network backend defined correctly"
@@ -118,11 +118,11 @@
that:
- "calico_network_backend in ['bird', 'vxlan', 'none']"
msg: "calico network backend is not 'bird', 'vxlan' or 'none'"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip and vxlan mode defined correctly"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
assert:
that:
@@ -137,7 +137,7 @@
msg: "IP in IP and VXLAN mode is mutualy exclusive modes"
when:
- "calico_ipip_mode in ['Always', 'CrossSubnet']"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip and vxlan mode if simultaneously enabled"
@@ -147,23 +147,23 @@
msg: "IP in IP and VXLAN mode is mutualy exclusive modes"
when:
- "calico_vxlan_mode in ['Always', 'CrossSubnet']"
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Get Calico {{ calico_pool_name }} configuration"
command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json"
failed_when: False
changed_when: False
check_mode: no
failed_when: false
changed_when: false
check_mode: false
register: calico
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Set calico_pool_conf"
set_fact:
calico_pool_conf: '{{ calico.stdout | from_json }}'
when: calico.rc == 0 and calico.stdout
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check if inventory match current cluster configuration"
@@ -176,7 +176,7 @@
msg: "Your inventory doesn't match the current cluster configuration"
when:
- calico_pool_conf is defined
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check kdd calico_datastore if calico_apiserver_enabled"
@@ -185,7 +185,7 @@
msg: "When using calico apiserver you need to use the kubernetes datastore"
when:
- calico_apiserver_enabled
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check kdd calico_datastore if typha_enabled"
@@ -194,7 +194,7 @@
msg: "When using typha you need to use the kubernetes datastore"
when:
- typha_enabled
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Check ipip mode is Never for calico ipv6"
@@ -204,5 +204,5 @@
msg: "Calico doesn't support ipip tunneling for the IPv6"
when:
- enable_dual_stack_networks
run_once: True
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"

View File

@@ -14,7 +14,7 @@
src: "{{ downloads.calicoctl.dest }}"
dest: "{{ bin_dir }}/calicoctl"
mode: "0755"
remote_src: yes
remote_src: true
- name: Calico | Create calico certs directory
file:
@@ -31,7 +31,7 @@
dest: "{{ calico_cert_dir }}/{{ item.d }}"
state: hard
mode: "0640"
force: yes
force: true
with_items:
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
@@ -61,7 +61,7 @@
- name: Calico | wait for etcd
uri:
url: "{{ etcd_access_addresses.split(',') | first }}/health"
validate_certs: no
validate_certs: false
client_cert: "{{ calico_cert_dir }}/cert.crt"
client_key: "{{ calico_cert_dir }}/key.pem"
register: result
@@ -165,8 +165,8 @@
- name: Calico | Get existing FelixConfiguration
command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json"
register: _felix_cmd
ignore_errors: True
changed_when: False
ignore_errors: true
changed_when: false
- name: Calico | Set kubespray FelixConfiguration
set_fact:
@@ -201,7 +201,7 @@
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}"
changed_when: False
changed_when: false
- name: Calico | Configure Calico IP Pool
when:
@@ -210,8 +210,8 @@
- name: Calico | Get existing calico network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json"
register: _calico_pool_cmd
ignore_errors: True
changed_when: False
ignore_errors: true
changed_when: false
- name: Calico | Set kubespray calico network pool
set_fact:
@@ -251,7 +251,7 @@
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}"
changed_when: False
changed_when: false
- name: Calico | Configure Calico IPv6 Pool
when:
@@ -261,8 +261,8 @@
- name: Calico | Get existing calico ipv6 network pool
command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json"
register: _calico_pool_ipv6_cmd
ignore_errors: True
changed_when: False
ignore_errors: true
changed_when: false
- name: Calico | Set kubespray calico network pool
set_fact:
@@ -302,19 +302,19 @@
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}"
changed_when: False
changed_when: false
- name: Populate Service External IPs
set_fact:
_service_external_ips: "{{ _service_external_ips | default([]) + [{'cidr': item}] }}"
with_items: "{{ calico_advertise_service_external_ips }}"
run_once: yes
run_once: true
- name: Populate Service LoadBalancer IPs
set_fact:
_service_loadbalancer_ips: "{{ _service_loadbalancer_ips | default([]) + [{'cidr': item}] }}"
with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
run_once: yes
run_once: true
- name: "Determine nodeToNodeMesh needed state"
set_fact:
@@ -322,7 +322,7 @@
when:
- peer_with_router | default(false) or peer_with_calico_rr | default(false)
- inventory_hostname in groups['k8s_cluster']
run_once: yes
run_once: true
- name: Calico | Configure Calico BGP
when:
@@ -331,8 +331,8 @@
- name: Calico | Get existing BGP Configuration
command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
register: _bgp_config_cmd
ignore_errors: True
changed_when: False
ignore_errors: true
changed_when: false
- name: Calico | Set kubespray BGP Configuration
set_fact:
@@ -366,7 +366,7 @@
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}"
changed_when: False
changed_when: false
- name: Calico | Create calico manifests
template:

View File

@@ -10,11 +10,11 @@
file: _copr:copr.fedorainfracloud.org:jdoss:wireguard
description: Copr repo for wireguard owned by jdoss
baseurl: "{{ calico_wireguard_repo }}"
gpgcheck: yes
gpgcheck: true
gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg
skip_if_unavailable: yes
enabled: yes
repo_gpgcheck: no
skip_if_unavailable: true
enabled: true
repo_gpgcheck: false
when:
- ansible_os_family in ['RedHat']
- ansible_distribution not in ['Fedora']

View File

@@ -2,9 +2,9 @@
- name: Reset | check vxlan.calico network device
stat:
path: /sys/class/net/vxlan.calico
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: vxlan
- name: Reset | remove the network vxlan.calico device created by calico
@@ -14,9 +14,9 @@
- name: Reset | check dummy0 network device
stat:
path: /sys/class/net/dummy0
get_attributes: no
get_checksum: no
get_mime: no
get_attributes: false
get_checksum: false
get_mime: false
register: dummy0
- name: Reset | remove the network device created by calico

Some files were not shown because too many files have changed in this diff Show More