Fix outdated tags and experimental ansible-lint rules (#10254)

* project: fix outdated tags and experimental ansible-lint rules
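
  Recent ansible-lint releases only accept named rules, so the deprecated
  numeric IDs are replaced with their named equivalents. The numeric
  renames in the hunks below all follow this mapping:

      # noqa 503 -> # noqa no-handler
      # noqa 303 -> # noqa command-instead-of-module
      # noqa 305 -> # noqa command-instead-of-shell
      # noqa 306 -> # noqa risky-shell-pipe
      # noqa 601 -> # noqa literal-compare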

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: remove no longer useful noqa 301
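
  301 was the numeric ID for no-changed-when; numeric tags are ignored by
  current ansible-lint, so these stale hints suppress nothing and can
  simply be dropped, e.g. the etcd join task below:

      - name: Join Member | Add member to etcd cluster
        command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"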

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: replace unnamed-task by name[missing]
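
  unnamed-task is the old name of the rule that current ansible-lint calls
  name[missing], so the noqa hints are updated in place, e.g.:

      - debug:  # noqa name[missing]
          msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"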

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix daemon-reload -> daemon_reload
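
  daemon_reload (underscore) is the actual module argument; the dashed
  spelling is not a valid parameter. A sketch of the corrected task from
  the k8s-certs-renew hunk below (the systemd module is assumed here,
  since it is the one that takes daemon_reload):

      systemd:  # module name assumed from the daemon_reload argument
        name: k8s-certs-renew.timer
        enabled: yes
        state: started
        daemon_reload: "{{ k8s_certs_units is changed }}"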

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
Author: Arthur Outhenin-Chalandre
Date: 2023-06-30 11:51:57 +02:00
Committed by: GitHub
Parent: 4f85b75087
Commit: f8f197e26b
56 changed files with 101 additions and 122 deletions

@@ -22,7 +22,6 @@
name: containerd
daemon_reload: true
enabled: false
masked: true
state: stopped
tags:
- reset_containerd

@@ -196,7 +196,7 @@
register: service_start
- name: cri-o | trigger service restart only when needed
service: # noqa 503
service:
name: crio
state: restarted
when:

@@ -63,7 +63,6 @@
name: crio
daemon_reload: true
enabled: false
masked: true
state: stopped
tags:
- reset_crio

@@ -143,7 +143,7 @@
state: started
when: docker_task_result is not changed
rescue:
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "Docker start failed. Try to remove our config"
- name: remove kubespray generated config
file:

@@ -101,6 +101,6 @@
- /etc/docker
ignore_errors: true # noqa ignore-errors
- name: Docker | systemctl daemon-reload # noqa 503
- name: Docker | systemctl daemon-reload # noqa no-handler
systemd:
daemon_reload: true

@@ -26,7 +26,7 @@
check_mode: no
- name: check system search domains
# noqa 306 - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
# noqa risky-shell-pipe - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
# Therefore -o pipefail is not applicable in this specific instance
shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
args:

@@ -14,7 +14,7 @@
when: http_proxy is defined or https_proxy is defined
- name: get systemd version
# noqa 303 - systemctl is called intentionally here
# noqa command-instead-of-module - systemctl is called intentionally here
shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
args:
executable: /bin/bash

@@ -1,7 +1,7 @@
---
# The image_info_command depends on the Container Runtime and will output something like the following:
# nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell
- name: check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
shell: "{{ image_info_command }}"
register: docker_images
changed_when: false

@@ -18,7 +18,7 @@
when:
- not download_always_pull
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"
- name: download_container | Determine if image is in cache
@@ -68,7 +68,7 @@
- not image_is_cached
- name: download_container | Save and compress image
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
delegate_to: "{{ download_delegate }}"
delegate_facts: no
register: container_save_status
@@ -108,7 +108,7 @@
- download_force_cache
- name: download_container | Load image into the local container registry
shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell
shell: "{{ image_load_command }}" # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
register: container_load_status
failed_when: container_load_status is failed
when:

@@ -21,7 +21,7 @@
- asserts
- name: prep_download | On localhost, check if user has access to the container runtime without using sudo
shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
shell: "{{ image_info_command_on_localhost }}" # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
delegate_to: localhost
connection: local
run_once: true
@@ -57,7 +57,7 @@
- asserts
- name: prep_download | Register docker images info
shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell
shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
no_log: "{{ not (unsafe_show_logs|bool) }}"
register: docker_images
failed_when: false

@@ -1,5 +1,5 @@
---
- name: Join Member | Add member to etcd-events cluster # noqa 301 305
- name: Join Member | Add member to etcd-events cluster
command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0

@@ -1,5 +1,5 @@
---
- name: Join Member | Add member to etcd cluster # noqa 301 305
- name: Join Member | Add member to etcd cluster
command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr

@@ -24,14 +24,14 @@
mode: 0640
register: etcd_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa no-handler
command: update-ca-certificates
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat) # noqa 503
- name: Gen_certs | update ca-certificates (RedHat) # noqa no-handler
command: update-ca-trust extract
when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503
- name: Gen_certs | update ca-certificates (ClearLinux) # noqa no-handler
command: clrtrust add "{{ ca_cert_path }}"
when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"

@@ -1,6 +1,6 @@
---
- name: Add Helm repositories
kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}"
kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}" # noqa args[module]
loop: "{{ repositories }}"
- name: Update Helm repositories
@@ -15,5 +15,5 @@
- helm_update
- name: Install Helm Applications
kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}"
kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}" # noqa args[module]
loop: "{{ releases }}"

@@ -16,7 +16,7 @@
dest: "{{ local_release_dir }}/krew.yml"
mode: 0644
- name: Krew | Install krew # noqa 301 305
- name: Krew | Install krew # noqa command-instead-of-shell
shell: "{{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }} install --archive={{ local_release_dir }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz --manifest={{ local_release_dir }}/krew.yml"
environment:
KREW_ROOT: "{{ krew_root_dir }}"

@@ -12,7 +12,7 @@
run_once: true
- name: kube-router | Wait for kube-router pods to be ready
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors
register: pods_not_ready
until: pods_not_ready.stdout.find("kube-router")==-1
retries: 30

@@ -100,5 +100,5 @@
name: k8s-certs-renew.timer
enabled: yes
state: started
daemon-reload: "{{ k8s_certs_units is changed }}"
daemon_reload: "{{ k8s_certs_units is changed }}"
when: auto_renew_certificates

@@ -8,7 +8,7 @@
register: kube_apiserver_manifest_replaced
when: etcd_secret_changed|default(false)
- name: "Pre-upgrade | Delete master containers forcefully" # noqa 503
- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler
shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
args:
executable: /bin/bash

@@ -35,9 +35,9 @@
- node_labels is defined
- node_labels is mapping
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: role_node_labels
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
var: inventory_node_labels
- name: Set label to node

@@ -93,7 +93,7 @@
- not (disable_host_nameservers | default(false))
- name: NetworkManager | Check if host has NetworkManager
# noqa 303 Should we use service_facts for this?
# noqa command-instead-of-module - Should we use service_facts for this?
command: systemctl is-active --quiet NetworkManager.service
register: networkmanager_enabled
failed_when: false
@@ -101,7 +101,7 @@
check_mode: false
- name: check systemd-resolved
# noqa 303 Should we use service_facts for this?
# noqa command-instead-of-module - Should we use service_facts for this?
command: systemctl is-active systemd-resolved
register: systemd_resolved_enabled
failed_when: false

@@ -33,12 +33,12 @@
changed_when: False
register: fs_type
- name: run growpart # noqa 503
- name: run growpart # noqa no-handler
command: growpart {{ device }} {{ partition }}
when: growpart_needed.changed
environment:
LC_ALL: C
- name: run xfs_growfs # noqa 503
- name: run xfs_growfs # noqa no-handler
command: xfs_growfs {{ root_device }}
when: growpart_needed.changed and 'XFS' in fs_type.stdout

@@ -5,7 +5,7 @@
- name: Calico-rr | Configuring node tasks
include_tasks: update-node.yml
- name: Calico-rr | Set label for route reflector # noqa 301
- name: Calico-rr | Set label for route reflector
command: >-
{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
'i-am-a-route-reflector=true' --overwrite

@@ -6,7 +6,7 @@
set_fact:
retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}"
- name: Calico | Set label for route reflector # noqa 301 305
- name: Calico | Set label for route reflector # noqa command-instead-of-shell
shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite"
changed_when: false
register: calico_rr_id_label
@@ -29,7 +29,7 @@
{{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
{ 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}
- name: Calico-rr | Configure route reflector # noqa 301 305
- name: Calico-rr | Configure route reflector # noqa command-instead-of-shell
shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
args:
stdin: "{{ calico_rr_node_patched | to_json }}"

@@ -72,7 +72,7 @@
when: calico_datastore == "etcd"
- name: Calico | Check if calico network pool has already been configured
# noqa 306 - grep will exit 1 if no match found
# noqa risky-shell-pipe - grep will exit 1 if no match found
shell: >
{{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
args:
@@ -95,7 +95,7 @@
- calico_pool_cidr is defined
- name: Calico | Check if calico IPv6 network pool has already been configured
# noqa 306 - grep will exit 1 if no match found
# noqa risky-shell-pipe - grep will exit 1 if no match found
shell: >
{{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l
args:

@@ -1,6 +1,6 @@
---
- name: Calico | Set label for groups nodes # noqa 301 305
shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
- name: Calico | Set label for groups nodes
command: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite"
changed_when: false
register: calico_group_id_label
until: calico_group_id_label is succeeded

@@ -11,7 +11,7 @@
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Cilium | Wait for pods to run
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa literal-compare
register: pods_not_ready
until: pods_not_ready.stdout.find("cilium")==-1
retries: "{{ cilium_rolling_restart_wait_retries_count | int }}"

@@ -43,7 +43,6 @@
- has_quorum
- name: Delete old certificates
# noqa 302 ignore-error - rm is ok here for now
shell: "rm {{ etcd_cert_dir }}/*{{ item }}*"
with_items: "{{ groups['broken_etcd'] }}"
register: delete_old_cerificates

@@ -26,7 +26,7 @@
path: "{{ etcd_data_dir }}"
state: absent
- name: Restore etcd snapshot # noqa 301 305
- name: Restore etcd snapshot # noqa command-instead-of-shell
shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
environment:
ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"

@@ -9,7 +9,7 @@
changed_when: false
run_once: true
- name: remove-node | Drain node except daemonsets resource # noqa 301
- name: remove-node | Drain node except daemonsets resource
command: >-
{{ kubectl }} drain
--force

@@ -38,7 +38,7 @@
tags:
- docker
- name: reset | systemctl daemon-reload # noqa 503
- name: reset | systemctl daemon-reload # noqa no-handler
systemd:
daemon_reload: true
when: services_removed.changed
@@ -174,7 +174,7 @@
tags:
- services
- name: reset | gather mounted kubelet dirs # noqa 301
- name: reset | gather mounted kubelet dirs
shell: set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
args:
executable: /bin/bash
@@ -185,7 +185,7 @@
tags:
- mounts
- name: reset | unmount kubelet dirs # noqa 301
- name: reset | unmount kubelet dirs
command: umount -f {{ item }}
with_items: "{{ mounted_dirs.stdout_lines }}"
register: umount_dir

@@ -29,11 +29,11 @@
register: patch_kube_proxy_state
when: current_kube_proxy_state.stdout | trim | lower != "linux"
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ patch_kube_proxy_state.stdout_lines }}"
when: patch_kube_proxy_state is not skipped
- debug: # noqa unnamed-task
- debug: # noqa name[missing]
msg: "{{ patch_kube_proxy_state.stderr_lines }}"
when: patch_kube_proxy_state is not skipped
tags: init