Move to Ansible 3.4.0 (#7672)

* Ansible: move to Ansible 3.4.0 which uses ansible-base 2.10.10

* Docs: add a note about upgrading Ansible beyond 2.9.x

* CI: ensure Ansible is removed before Ansible 3.x is installed, to avoid pip failures (a minimal sketch of the intended ordering follows below)

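For context: pip cannot cleanly upgrade the monolithic ansible 2.9.x package to the ansible 3.x community package in place, so the old package has to be removed before the new one is installed. A minimal sketch of that ordering, assuming a GitLab-CI-style job (the job name is invented; the repository's actual CI configuration is not part of this excerpt):

# Hypothetical illustration only, not the repository's actual CI job.
prepare-ansible:
  script:
    # Remove any pre-installed ansible/ansible-base first: pip fails when asked
    # to upgrade the monolithic 2.9.x package to the 3.x community package in place.
    - pip uninstall -y ansible ansible-base || true
    - pip install ansible==3.4.0 ansible-base==2.10.11
    - ansible --version
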
* Ansible: use newer ansible-lint

* Fix issues found by ansible-lint 5.0.11 (the recurring fix patterns are sketched after this list):

* syntax issues
* risky-file-permissions
* var-naming
* role-name
* molecule tests

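Most of the diff below is a handful of these patterns applied over and over. A condensed sketch of the recurring before/after shapes (the task names and paths here are invented for illustration; the real changes are in the hunks that follow):

# risky-file-permissions: templated/copied files now set an explicit mode
- name: Example | write a config file   # hypothetical task, for illustration
  template:
    src: example.conf.j2
    dest: /etc/example/example.conf
    mode: 0644   # added so ansible-lint no longer flags the task

# ignore-errors: boolean style normalised ("yes" -> "true") and the rule
# silenced explicitly with an inline noqa tag
- name: Example | best-effort cleanup   # hypothetical task, for illustration
  command: /usr/bin/true
  changed_when: false
  ignore_errors: true  # noqa ignore-errors

# role-name: roles are now referenced by their full path under the parent
# directory, e.g. "- role: containerd" becomes "- role: container-engine/containerd"
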
* Mitogen: use 0.3.0rc1 which adds support for ansible 2.10+

* Pin ansible-base to 2.10.11 to get a package fix on RHEL 8

Author: Cristian Calin
Date: 2021-07-12 10:00:47 +03:00
Committed by: GitHub
Parent commit: b0e4c375a7
Commit: 7516fe142f
103 changed files with 298 additions and 129 deletions

View File

@@ -19,3 +19,4 @@
template:
src: ssh-bastion.conf
dest: "{{ playbook_dir }}/ssh-bastion.conf"
mode: 0640

View File

@@ -12,6 +12,7 @@
value: "{{ http_proxy | default(omit) }}"
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
no_extra_spaces: true
mode: 0644
become: true
when: not skip_http_proxy_on_os_packages
@@ -32,6 +33,7 @@
section: "{{ item }}"
option: enabled
value: "1"
mode: 0644
with_items:
- ol7_latest
- ol7_addons
@@ -56,6 +58,7 @@
section: "ol{{ ansible_distribution_major_version }}_addons"
option: "{{ item.option }}"
value: "{{ item.value }}"
mode: 0644
with_items:
- { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" }
- { option: "enabled", value: "1" }

View File

@@ -11,7 +11,7 @@
- name: Remove podman network cni
raw: "podman network rm podman"
become: true
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when: need_bootstrap.rc != 0
- name: Clean up possible pending packages on fedora coreos
@@ -43,7 +43,7 @@
- name: Reboot immediately for updated ostree, please run playbook again if failed first time.
raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
become: true
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
ignore_unreachable: yes
when: need_bootstrap.rc != 0

View File

@@ -17,6 +17,7 @@
value: "{{ http_proxy | default(omit) }}"
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
no_extra_spaces: true
mode: 0644
become: true
when: not skip_http_proxy_on_os_packages

View File

@@ -10,7 +10,7 @@
register: stat_result
- name: Create the /etc/sysconfig/proxy empty file
file:
file: # noqa risky-file-permissions
path: /etc/sysconfig/proxy
state: touch
when:

View File

@@ -12,6 +12,7 @@
value: "{{ http_proxy | default(omit) }}"
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
no_extra_spaces: true
mode: 0644
become: true
when: not skip_http_proxy_on_os_packages
@@ -19,7 +20,7 @@
command: /sbin/subscription-manager status
register: rh_subscription_status
changed_when: "rh_subscription_status != 0"
ignore_errors: true
ignore_errors: true # noqa ignore-errors
become: true
- name: RHEL subscription Organization ID/Activation Key registration
@@ -35,12 +36,13 @@
service_level_agreement: "{{ rh_subscription_sla }}"
sync: true
notify: RHEL auto-attach subscription
ignore_errors: true
ignore_errors: true # noqa ignore-errors
become: true
when:
- rh_subscription_org_id is defined
- rh_subscription_status.changed
# this task has no_log set to prevent logging security sensitive information such as subscription passwords
- name: RHEL subscription Username/Password registration
redhat_subscription:
state: present
@@ -54,8 +56,9 @@
service_level_agreement: "{{ rh_subscription_sla }}"
sync: true
notify: RHEL auto-attach subscription
ignore_errors: true
ignore_errors: true # noqa ignore-errors
become: true
no_log: true
when:
- rh_subscription_username is defined
- rh_subscription_status.changed

View File

@@ -4,4 +4,4 @@
become: true
roles:
- role: kubespray-defaults
- role: containerd
- role: container-engine/containerd

View File

@@ -23,12 +23,14 @@
template:
src: "fedora_containerd.repo.j2"
dest: "{{ yum_repo_dir }}/containerd.repo"
mode: 0644
when: ansible_distribution == "Fedora"
- name: Configure containerd repository on RedHat/OracleLinux/CentOS/AlmaLinux
template:
src: "rh_containerd.repo.j2"
dest: "{{ yum_repo_dir }}/containerd.repo"
mode: 0644
when:
- ansible_os_family == "RedHat"
- ansible_distribution not in ["Fedora", "Amazon"]

View File

@@ -58,11 +58,13 @@
file:
path: /etc/systemd/system/containerd.service.d
state: directory
mode: 0755
- name: Write containerd proxy drop-in
template:
src: http-proxy.conf.j2
dest: /etc/systemd/system/containerd.service.d/http-proxy.conf
mode: 0644
notify: restart containerd
when: http_proxy is defined or https_proxy is defined
@@ -116,7 +118,7 @@
- not is_ostree
- containerd_package_info.pkgs|length > 0
- include_role:
- include_role: # noqa unnamed-task
name: container-engine/crictl
# you can sometimes end up in a state where everything is installed

View File

@@ -4,4 +4,4 @@
become: true
roles:
- role: kubespray-defaults
- role: cri-o
- role: container-engine/cri-o

View File

@@ -53,6 +53,7 @@
option: enabled
value: "0"
backup: yes
mode: 0644
when:
- ansible_distribution in ["Amazon"]
- amzn2_extras_file_stat.stat.exists
@@ -119,6 +120,7 @@
section: "{{ item.section }}"
option: enabled
value: 1
mode: 0644
become: true
when: is_ostree
loop:

View File

@@ -46,7 +46,7 @@
import_tasks: "crio_repo.yml"
when: crio_add_repos
- include_role:
- include_role: # noqa unnamed-task
name: container-engine/crictl
- name: Build a list of crio runtimes with Katacontainers runtimes
@@ -69,11 +69,13 @@
file:
path: "{{ item }}"
state: directory
mode: 0755
- name: Install cri-o config
template:
src: crio.conf.j2
dest: /etc/crio/crio.conf
mode: 0644
register: config_install
- name: Add skopeo pkg to install
@@ -129,6 +131,7 @@
copy:
src: mounts.conf
dest: /etc/containers/mounts.conf
mode: 0644
when:
- ansible_os_family == 'RedHat'
notify: restart crio
@@ -147,6 +150,7 @@
section: storage.options.overlay
option: mountopt
value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
mode: 0644
- name: Create directory registries configs
file:
@@ -159,6 +163,7 @@
template:
src: registry-mirror.conf.j2
dest: "/etc/containers/registries.conf.d/{{ item.prefix }}.conf"
mode: 0644
loop: "{{ crio_registries_mirrors }}"
notify: restart crio
@@ -166,6 +171,7 @@
template:
src: http-proxy.conf.j2
dest: /etc/systemd/system/crio.service.d/http-proxy.conf
mode: 0644
notify: restart crio
when: http_proxy is defined or https_proxy is defined

View File

@@ -4,4 +4,4 @@
become: true
roles:
- role: kubespray-defaults
- role: docker
- role: container-engine/docker

View File

@@ -80,12 +80,14 @@
template:
src: "fedora_docker.repo.j2"
dest: "{{ yum_repo_dir }}/docker.repo"
mode: 0644
when: ansible_distribution == "Fedora" and not is_ostree
- name: Configure docker repository on RedHat/CentOS/Oracle/AlmaLinux Linux
template:
src: "rh_docker.repo.j2"
dest: "{{ yum_repo_dir }}/docker-ce.repo"
mode: 0644
when:
- ansible_os_family == "RedHat"
- ansible_distribution != "Fedora"
@@ -145,7 +147,7 @@
state: started
when: docker_task_result is not changed
rescue:
- debug:
- debug: # noqa unnamed-task
msg: "Docker start failed. Try to remove our config"
- name: remove kubespray generated config
file:

View File

@@ -3,11 +3,13 @@
file:
path: /etc/systemd/system/docker.service.d
state: directory
mode: 0755
- name: Write docker proxy drop-in
template:
src: http-proxy.conf.j2
dest: /etc/systemd/system/docker.service.d/http-proxy.conf
mode: 0644
notify: restart docker
when: http_proxy is defined or https_proxy is defined
@@ -25,6 +27,7 @@
template:
src: docker.service.j2
dest: /etc/systemd/system/docker.service
mode: 0644
register: docker_service_file
notify: restart docker
when:
@@ -35,12 +38,14 @@
template:
src: docker-options.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
mode: 0644
notify: restart docker
- name: Write docker dns systemd drop-in
template:
src: docker-dns.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
mode: 0644
notify: restart docker
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
@@ -55,7 +60,9 @@
template:
src: docker-orphan-cleanup.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
mode: 0644
notify: restart docker
when: docker_orphan_clean_up | bool
- meta: flush_handlers
- name: Flush handlers
meta: flush_handlers

View File

@@ -7,5 +7,5 @@
container_manager: containerd
roles:
- role: kubespray-defaults
- role: containerd
- role: gvisor
- role: container-engine/containerd
- role: container-engine/gvisor

View File

@@ -5,7 +5,7 @@
roles:
- role: kubespray-defaults
- role: bootstrap-os
- role: ../adduser
- role: adduser
user: "{{ addusers.kube }}"
tasks:
- include_tasks: "../../../../download/tasks/download_file.yml"
@@ -20,8 +20,8 @@
kube_network_plugin: cni
roles:
- role: kubespray-defaults
- role: ../network_plugin/cni
- role: crictl
- role: network_plugin/cni
- role: container-engine/crictl
tasks:
- name: Copy test container files
copy:

View File

@@ -6,5 +6,5 @@
kata_containers_enabled: true
roles:
- role: kubespray-defaults
- role: containerd
- role: kata-containers
- role: container-engine/containerd
- role: container-engine/kata-containers

View File

@@ -15,11 +15,13 @@
file:
path: "{{ kata_containers_config_dir }}"
state: directory
mode: 0755
- name: kata-containers | Set configuration
template:
src: "{{ item }}.j2"
dest: "{{ kata_containers_config_dir }}/{{ item }}"
mode: 0644
with_items:
- configuration-qemu.toml

View File

@@ -1,3 +1,4 @@
# noqa role-name - this is a meta role that doesn't need a name
---
dependencies:
- role: container-engine/kata-containers

View File

@@ -18,7 +18,7 @@
when:
- not download_always_pull
- debug:
- debug: # noqa unnamed-task
msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
- name: download_container | Determine if image is in cache

View File

@@ -48,6 +48,7 @@
- not download_localhost
# This must always be called, to check if the checksum matches. On no-match the file is re-downloaded.
# This task avoids logging its parameters so environment passwords are not leaked into the log
- name: download_file | Download item
get_url:
url: "{{ download.url }}"
@@ -67,6 +68,7 @@
retries: 4
delay: "{{ retry_stagger | default(5) }}"
environment: "{{ proxy_env }}"
no_log: true
- name: download_file | Copy file back to ansible host file cache
synchronize:

View File

@@ -38,7 +38,7 @@
run_once: true
register: test_become
changed_when: false
ignore_errors: true
ignore_errors: true # noqa ignore-errors
become: true
when:
- download_localhost
@@ -53,7 +53,7 @@
run_once: true
register: test_docker
changed_when: false
ignore_errors: true
ignore_errors: true # noqa ignore-errors
become: false
when:
- download_localhost

View File

@@ -18,6 +18,7 @@
template:
src: "kubeadm-images.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
mode: 0644
when:
- not skip_kubeadm_images|default(false)

View File

@@ -45,6 +45,7 @@
src: "etcd-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd.service
backup: yes
mode: 0644
when: is_etcd_master and etcd_cluster_setup
- name: Configure | Copy etcd-events.service systemd file
@@ -52,6 +53,7 @@
src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd-events.service
backup: yes
mode: 0644
when: is_etcd_master and etcd_events_cluster_setup
- name: Configure | reload systemd
@@ -65,7 +67,7 @@
name: etcd
state: started
enabled: yes
ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}"
ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_cluster_setup
# when scaling new etcd will fail to start
@@ -74,7 +76,7 @@
name: etcd-events
state: started
enabled: yes
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_events_cluster_setup
- name: Configure | Wait for etcd cluster to be healthy
@@ -126,7 +128,7 @@
- name: Configure | Check if member is in etcd cluster
shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
register: etcd_member_in_cluster
ignore_errors: true
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
when: is_etcd_master and etcd_cluster_setup
@@ -142,7 +144,7 @@
- name: Configure | Check if member is in etcd-events cluster
shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
register: etcd_events_member_in_cluster
ignore_errors: true
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
when: is_etcd_master and etcd_events_cluster_setup

View File

@@ -21,6 +21,7 @@
template:
src: "openssl.conf.j2"
dest: "{{ etcd_config_dir }}/openssl.conf"
mode: 0640
run_once: yes
delegate_to: "{{ groups['etcd'][0] }}"
when:

View File

@@ -3,6 +3,7 @@
template:
src: etcd.env.j2
dest: /etc/etcd.env
mode: 0640
notify: restart etcd
when: is_etcd_master and etcd_cluster_setup
@@ -10,5 +11,6 @@
template:
src: etcd-events.env.j2
dest: /etc/etcd-events.env
mode: 0640
notify: restart etcd-events
when: is_etcd_master and etcd_events_cluster_setup

View File

@@ -21,6 +21,7 @@
src: "{{ etcd_cert_dir }}/ca.pem"
dest: "{{ ca_cert_path }}"
remote_src: true
mode: 0640
register: etcd_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503

View File

@@ -3,7 +3,7 @@
shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
register: createdby_annotation
changed_when: false
ignore_errors: true
ignore_errors: true # noqa ignore-errors
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -30,6 +30,7 @@
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0640
register: psp_manifests
with_items:
- {file: psp.yml, type: psp, name: psp}
@@ -61,6 +62,7 @@
template:
src: "node-crb.yml.j2"
dest: "{{ kube_config_dir }}/node-crb.yml"
mode: 0640
register: node_crb_manifest
when:
- rbac_enabled
@@ -86,6 +88,7 @@
template:
src: "node-webhook-cr.yml.j2"
dest: "{{ kube_config_dir }}/node-webhook-cr.yml"
mode: 0640
register: node_webhook_cr_manifest
when:
- rbac_enabled
@@ -111,6 +114,7 @@
template:
src: "node-webhook-crb.yml.j2"
dest: "{{ kube_config_dir }}/node-webhook-crb.yml"
mode: 0640
register: node_webhook_crb_manifest
when:
- rbac_enabled
@@ -139,7 +143,7 @@
- cloud_provider == 'oci'
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml mode=0640
when: inventory_hostname == groups['kube_control_plane']|last
- name: PriorityClass | Create k8s-cluster-critical

View File

@@ -3,6 +3,7 @@
copy:
src: "oci-rbac.yml"
dest: "{{ kube_config_dir }}/oci-rbac.yml"
mode: 0640
when:
- cloud_provider is defined
- cloud_provider == 'oci'

View File

@@ -12,7 +12,7 @@
- name: CephFS Provisioner | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
@@ -21,7 +21,7 @@
- name: CephFS Provisioner | Remove legacy storageclass
shell: |
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:

View File

@@ -12,7 +12,7 @@
- name: RBD Provisioner | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
@@ -21,7 +21,7 @@
- name: RBD Provisioner | Remove legacy storageclass
shell: |
{{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
@@ -63,6 +63,7 @@
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
mode: 0644
with_items: "{{ rbd_provisioner_templates }}"
register: rbd_provisioner_manifests
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -12,7 +12,7 @@
- name: Cert Manager | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:

View File

@@ -55,7 +55,7 @@
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
register: metallb_secret
become: true
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -12,12 +12,12 @@
run_once: true
- name: kube-router | Wait for kube-router pods to be ready
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
register: pods_not_ready
until: pods_not_ready.stdout.find("kube-router")==-1
retries: 30
delay: 10
ignore_errors: yes
ignore_errors: true
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true
changed_when: false

View File

@@ -12,7 +12,7 @@
- apiserver-kubelet-client.key
- front-proxy-client.crt
- front-proxy-client.key
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
- name: Backup old confs
copy:
@@ -25,4 +25,4 @@
- controller-manager.conf
- kubelet.conf
- scheduler.conf
ignore_errors: yes
ignore_errors: true # noqa ignore-errors

View File

@@ -50,18 +50,21 @@
file:
path: "{{ audit_policy_file | dirname }}"
state: directory
mode: 0640
when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
- name: Write api audit policy yaml
template:
src: apiserver-audit-policy.yaml.j2
dest: "{{ audit_policy_file }}"
mode: 0640
when: kubernetes_audit|default(false) or kubernetes_audit_webhook|default(false)
- name: Write api audit webhook config yaml
template:
src: apiserver-audit-webhook-config.yaml.j2
dest: "{{ audit_webhook_config_file }}"
mode: 0640
when: kubernetes_audit_webhook|default(false)
# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.

View File

@@ -7,12 +7,14 @@
template:
src: webhook-token-auth-config.yaml.j2
dest: "{{ kube_config_dir }}/webhook-token-auth-config.yaml"
mode: 0640
when: kube_webhook_token_auth|default(false)
- name: Create webhook authorization config
template:
src: webhook-authorization-config.yaml.j2
dest: "{{ kube_config_dir }}/webhook-authorization-config.yaml"
mode: 0640
when: kube_webhook_authorization|default(false)
- name: Create kube-scheduler config
@@ -40,7 +42,7 @@
when: ansible_os_family in ["Debian","RedHat"]
tags:
- kubectl
ignore_errors: True
ignore_errors: true # noqa ignore-errors
- name: Set kubectl bash completion file permissions
file:
@@ -52,7 +54,7 @@
tags:
- kubectl
- upgrade
ignore_errors: True
ignore_errors: true # noqa ignore-errors
- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
set_fact:
@@ -77,12 +79,13 @@
template:
src: k8s-certs-renew.sh.j2
dest: "{{ bin_dir }}/k8s-certs-renew.sh"
mode: '755'
mode: 0755
- name: Renew K8S control plane certificates monthly 1/2
template:
src: "{{ item }}.j2"
dest: "/etc/systemd/system/{{ item }}"
mode: 0644
with_items:
- k8s-certs-renew.service
- k8s-certs-renew.timer

View File

@@ -61,6 +61,7 @@
src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: yes
mode: 0640
when: not is_kube_master
- name: Join to cluster if needed

View File

@@ -35,8 +35,10 @@
- node_labels is defined
- node_labels is mapping
- debug: var=role_node_labels
- debug: var=inventory_node_labels
- debug: # noqa unnamed-task
var: role_node_labels
- debug: # noqa unnamed-task
var: inventory_node_labels
- name: Set label to node
command: >-

View File

@@ -18,6 +18,7 @@
src: "kubelet.env.{{ kubeletConfig_api_version }}.j2"
dest: "{{ kube_config_dir }}/kubelet.env"
backup: yes
mode: 0640
notify: Node | restart kubelet
tags:
- kubelet
@@ -27,6 +28,7 @@
template:
src: "kubelet-config.{{ kubeletConfig_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubelet-config.yaml"
mode: 0640
notify: Kubelet | restart kubelet
tags:
- kubelet
@@ -37,6 +39,7 @@
src: "kubelet.service.j2"
dest: "/etc/systemd/system/kubelet.service"
backup: "yes"
mode: 0644
notify: Node | restart kubelet
tags:
- kubelet

View File

@@ -31,3 +31,4 @@
template:
src: manifests/haproxy.manifest.j2
dest: "{{ kube_manifest_dir }}/haproxy.yml"
mode: 0640

View File

@@ -31,3 +31,4 @@
template:
src: manifests/nginx-proxy.manifest.j2
dest: "{{ kube_manifest_dir }}/nginx-proxy.yml"
mode: 0640

View File

@@ -57,6 +57,7 @@
file:
path: /etc/modules-load.d
state: directory
mode: 0755
- name: Enable br_netfilter module
modprobe:
@@ -68,6 +69,7 @@
copy:
dest: /etc/modules-load.d/kubespray-br_netfilter.conf
content: br_netfilter
mode: 0644
when: modinfo_br_netfilter.rc == 0
# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module
@@ -108,7 +110,7 @@
name: nf_conntrack_ipv4
state: present
register: modprobe_nf_conntrack_ipv4
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- kube_proxy_mode == 'ipvs'
tags:
@@ -117,6 +119,7 @@
- name: Persist ip_vs modules
copy:
dest: /etc/modules-load.d/kube_proxy-ipvs.conf
mode: 0644
content: |
ip_vs
ip_vs_rr

View File

@@ -16,4 +16,4 @@
- name: Disable swap
command: /sbin/swapoff -a
when: swapon.stdout
ignore_errors: "{{ ansible_check_mode }}"
ignore_errors: "{{ ansible_check_mode }}" # noqa ignore-errors

View File

@@ -4,6 +4,7 @@
path: "{{ item }}"
state: directory
owner: kube
mode: 0755
when: inventory_hostname in groups['k8s_cluster']
become: true
tags:
@@ -28,6 +29,7 @@
path: "{{ item }}"
state: directory
owner: root
mode: 0755
when: inventory_hostname in groups['k8s_cluster']
become: true
tags:
@@ -59,6 +61,7 @@
src: "{{ kube_cert_dir }}"
dest: "{{ kube_cert_compat_dir }}"
state: link
mode: 0755
when:
- inventory_hostname in groups['k8s_cluster']
- kube_cert_dir != kube_cert_compat_dir
@@ -69,6 +72,7 @@
path: "{{ item }}"
state: directory
owner: kube
mode: 0755
with_items:
- "/etc/cni/net.d"
- "/opt/cni/bin"

View File

@@ -18,6 +18,7 @@
create: yes
backup: yes
marker: "# Ansible entries {mark}"
mode: 0644
notify: Preinstall | propagate resolvconf to k8s components
- name: Remove search/domain/nameserver options before block

View File

@@ -19,6 +19,7 @@
[keyfile]
unmanaged-devices+=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
dest: /etc/NetworkManager/conf.d/calico.conf
mode: 0644
when:
- nm_check.rc == 0
- kube_network_plugin == "calico"
@@ -32,5 +33,6 @@
[keyfile]
unmanaged-devices+=interface-name:kube-ipvs0;interface-name:nodelocaldns
dest: /etc/NetworkManager/conf.d/k8s.conf
mode: 0644
when: nm_check.rc == 0
notify: Preinstall | reload NetworkManager

View File

@@ -30,6 +30,7 @@
state: present
create: yes
backup: yes
mode: 0644
when:
- disable_ipv6_dns
- not ansible_os_family in ["Flatcar Container Linux by Kinvolk"]
@@ -59,6 +60,7 @@
file:
name: "{{ sysctl_file_path | dirname }}"
state: directory
mode: 0755
- name: Enable ip forwarding
sysctl:

View File

@@ -22,6 +22,7 @@
backup: yes
unsafe_writes: yes
marker: "# Ansible inventory hosts {mark}"
mode: 0644
when: populate_inventory_to_hosts_file
- name: Hosts | populate kubernetes loadbalancer address into hosts file

View File

@@ -11,6 +11,7 @@
insertbefore: BOF
backup: yes
marker: "# Ansible entries {mark}"
mode: 0644
notify: Preinstall | propagate resolvconf to k8s components
when: dhclientconffile is defined

View File

@@ -91,7 +91,8 @@
# We need to make sure the network is restarted early enough so that docker can later pick up the correct system
# nameservers and search domains
- meta: flush_handlers
- name: Flush handlers
meta: flush_handlers
- name: Check if we are running inside a Azure VM
stat:

View File

@@ -16,7 +16,7 @@
until: pods_not_ready.stdout.find("cilium")==-1
retries: 30
delay: 10
ignore_errors: yes
failed_when: false
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cilium | Hubble install

View File

@@ -23,7 +23,7 @@
slurp:
src: /etc/cni/net.d/10-kuberouter.conflist
register: cni_config_slurp
ignore_errors: true
ignore_errors: true # noqa ignore-errors
- name: kube-router | Set cni_config variable
set_fact:

View File

@@ -2,7 +2,7 @@
- name: Get etcd endpoint health
command: "{{ bin_dir }}/etcdctl endpoint health"
register: etcd_endpoint_health
ignore_errors: true
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
environment:
@@ -38,13 +38,13 @@
state: absent
delegate_to: "{{ item }}"
with_items: "{{ groups['broken_etcd'] }}"
ignore_errors: true
ignore_errors: true # noqa ignore-errors
when:
- groups['broken_etcd']
- has_quorum
- name: Delete old certificates
# noqa 302 - rm is ok here for now
# noqa 302 ignore-errors - rm is ok here for now
shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
with_items: "{{ groups['broken_etcd'] }}"
register: delete_old_cerificates

View File

@@ -13,6 +13,7 @@
copy:
src: "{{ etcd_snapshot }}"
dest: /tmp/snapshot.db
mode: 0640
when: etcd_snapshot is defined
- name: Stop etcd

View File

@@ -1,5 +1,5 @@
---
- name: Delete node # noqa 301
- name: Delete node # noqa 301 ignore-errors
command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
ignore_errors: yes
ignore_errors: true

View File

@@ -27,7 +27,7 @@
- name: Lookup etcd member id
shell: "{{ bin_dir }}/etcdctl member list | grep {{ node_ip }} | cut -d, -f1"
register: etcd_member_id
ignore_errors: true
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: no
tags:

View File

@@ -86,7 +86,7 @@
when:
- crictl.stat.exists
- container_manager in ["crio", "containerd"]
ignore_errors: true
ignore_errors: true # noqa ignore-errors
- name: reset | force remove all cri containers
command: "{{ bin_dir }}/crictl rm -a -f"
@@ -129,7 +129,7 @@
when:
- crictl.stat.exists
- container_manager == "containerd"
ignore_errors: true
ignore_errors: true # noqa ignore-errors
- block:
- name: reset | force remove all cri pods
@@ -206,7 +206,7 @@
- name: Clear IPVS virtual server table
command: "ipvsadm -C"
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
when:
- kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
@@ -306,7 +306,7 @@
- /etc/modules-load.d/kube_proxy-ipvs.conf
- /etc/modules-load.d/kubespray-br_netfilter.conf
- /usr/libexec/kubernetes
ignore_errors: yes
ignore_errors: true # noqa ignore-errors
tags:
- files
@@ -333,7 +333,7 @@
- dns
- name: reset | include file with reset tasks specific to the network_plugin if exists
include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
include_tasks: "{{ (role_path,'../network_plugin',kube_network_plugin,'tasks/reset.yml') | path_join | realpath }}"
when:
- kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico']
tags:

View File

@@ -29,10 +29,12 @@
register: patch_kube_proxy_state
when: current_kube_proxy_state.stdout | trim | lower != "linux"
- debug: msg={{ patch_kube_proxy_state.stdout_lines }}
- debug: # noqa unnamed-task
msg: "{{ patch_kube_proxy_state.stdout_lines }}"
when: patch_kube_proxy_state is not skipped
- debug: msg={{ patch_kube_proxy_state.stderr_lines }}
- debug: # noqa unnamed-task
msg: "{{ patch_kube_proxy_state.stderr_lines }}"
when: patch_kube_proxy_state is not skipped
tags: init
when: