Add noqa and disable .ansible-lint global exclusions (#6410)

Author: Maxime Guyot
Date: 2020-07-27 15:24:17 +02:00
Committed by: GitHub
Parent: b680cdd0e4
Commit: e70f27dd79
74 changed files with 163 additions and 170 deletions
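
The pattern throughout the commit is the same: rules that were previously silenced for the whole repository via the global exclusions in .ansible-lint are re-enabled, and each individual offending task is annotated with a "# noqa <rule>" comment on its name: line instead. The rule IDs come from the ansible-lint 4.x rule set: 302 (a command such as mkdir used where a module argument would do), 303 (a command used where a dedicated module exists), 305 (shell used where command suffices), 306 (shell pipelines without pipefail), and 503 (tasks that react to a changed result and should likely be handlers). A minimal sketch of the config side of the change, assuming the rules sat in a skip_list (the actual .ansible-lint contents are not part of this extract):

# .ansible-lint -- sketch, not the verbatim file from this commit
skip_list:
  # Before: '302', '303', '305', '306' and '503' were listed here and so
  # ignored everywhere. After: they are removed, and only the tasks tagged
  # with "# noqa <rule>" in the hunks below remain exempt.
  - '301'  # hypothetical exclusion left in place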

View File

@@ -48,7 +48,7 @@
timeout: 180
# NOTE(mattymo): Please forgive this workaround
-- name: Generate admin kubeconfig with external api endpoint
+- name: Generate admin kubeconfig with external api endpoint # noqa 302
shell: >-
mkdir -p {{ kube_config_dir }}/external_kubeconfig &&
{{ bin_dir }}/kubeadm
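
Rule 302 fires because mkdir has a module equivalent. The noqa is the lighter-touch fix here, given the in-line NOTE asking forgiveness for the workaround; a lint-clean alternative (a sketch, not code from the commit) would split the directory creation out into the file module and keep shell only for the kubeadm invocation:

# Sketch only: avoid the 302 noqa by letting the file module create the
# directory before the kubeadm call.
- name: Ensure external kubeconfig directory exists
  file:
    path: "{{ kube_config_dir }}/external_kubeconfig"
    state: directory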

View File

@@ -22,7 +22,7 @@
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
-- name: Calculate kubeadm CA cert hash
+- name: Calculate kubeadm CA cert hash # noqa 306
shell: openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
register: kubeadm_ca_hash
when:
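
Rule 306 flags shell pipelines that lack set -o pipefail: by default a pipeline's exit status is that of its last command only, so failures earlier in the pipe go unnoticed. A sketch of the pipefail-clean form, assuming bash is available on the target host:

# Sketch: rule-306-clean variant of the CA hash task (assumes /bin/bash).
- name: Calculate kubeadm CA cert hash
  shell: |
    set -o pipefail
    openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  args:
    executable: /bin/bash
  register: kubeadm_ca_hash
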
@@ -107,7 +107,7 @@
# FIXME(mattymo): Need to point to localhost, otherwise masters will all point
# incorrectly to first master, creating SPoF.
-- name: Update server field in kube-proxy kubeconfig
+- name: Update server field in kube-proxy kubeconfig # noqa 306
shell: >-
{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
| sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
@@ -131,7 +131,7 @@
group: root
mode: "0644"
-- name: Restart all kube-proxy pods to ensure that they load the new configmap
+- name: Restart all kube-proxy pods to ensure that they load the new configmap # noqa 305
shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
run_once: true
delegate_to: "{{ groups['kube-master']|first }}"
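
Rule 305 fires when shell runs a plain command with no pipes, redirects, or globbing; the lint-clean form is a straight swap to the command module, sketched here with the surrounding keywords kept as in the original task:

# Sketch: the same kubectl call via the command module, which rule 305 prefers.
- name: Restart all kube-proxy pods to ensure that they load the new configmap
  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
  run_once: true
  delegate_to: "{{ groups['kube-master']|first }}"
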
@@ -157,7 +157,7 @@
# FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
# is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services # noqa 305
shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
run_once: true
delegate_to: "{{ groups['kube-master']|first }}"

View File

@@ -47,7 +47,7 @@
when:
- old_apiserver_cert.stat.exists
-- name: kubeadm | Forcefully delete old static pods
+- name: kubeadm | Forcefully delete old static pods # noqa 306
shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
when:

View File

@@ -8,7 +8,7 @@
register: kube_apiserver_manifest_replaced
when: etcd_secret_changed|default(false)
- name: "Pre-upgrade | Delete master containers forcefully"
- name: "Pre-upgrade | Delete master containers forcefully" # noqa 306 503
shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]

View File

@@ -45,7 +45,7 @@
tags:
- kube-proxy
-- name: Verify if br_netfilter module exists
+- name: Verify if br_netfilter module exists # noqa 305
shell: "modinfo br_netfilter"
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management

View File

@@ -1,5 +1,5 @@
---
- name: "Pre-upgrade | check if kubelet container exists"
- name: "Pre-upgrade | check if kubelet container exists" # noqa 306
shell: >-
{% if container_manager in ['crio', 'docker'] %}
docker ps -af name=kubelet | grep kubelet

View File

@@ -29,7 +29,7 @@
- Preinstall | reload kubelet
when: is_fedora_coreos
-- name: Preinstall | reload NetworkManager
+- name: Preinstall | reload NetworkManager # noqa 303
command: systemctl restart NetworkManager.service
when: is_fedora_coreos
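
Rule 303 prefers the dedicated module over shelling out to the CLI, here the systemd module over systemctl. A sketch of the module-based equivalent:

# Sketch: rule-303-clean variant using the systemd module instead of systemctl.
- name: Preinstall | reload NetworkManager
  systemd:
    name: NetworkManager.service
    state: restarted
  when: is_fedora_coreos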

View File

@@ -158,7 +158,7 @@
when:
- kube_network_plugin == 'calico'
- name: "Get current version of calico cluster version"
- name: "Get current version of calico cluster version" # noqa 306
shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
register: calico_version_on_server
run_once: yes

View File

@@ -24,14 +24,14 @@
set_fact:
is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
-- name: check resolvconf
+- name: check resolvconf # noqa 305
shell: which resolvconf
register: resolvconf
failed_when: false
changed_when: false
check_mode: no
-- name: check systemd-resolved
+- name: check systemd-resolved # noqa 303
command: systemctl is-active systemd-resolved
register: systemd_resolved_enabled
failed_when: false

View File

@@ -1,5 +1,5 @@
---
-- name: Update package management cache (zypper) - SUSE
+- name: Update package management cache (zypper) - SUSE # noqa 305
shell: zypper -n --gpg-auto-import-keys ref
register: make_cache_output
until: make_cache_output is succeeded

View File

@@ -20,12 +20,12 @@
changed_when: False
register: fs_type
-- name: run growpart
+- name: run growpart # noqa 503
command: growpart /dev/sda 1
when: growpart_needed.changed
environment:
LC_ALL: C
-- name: run xfs_growfs
+- name: run xfs_growfs # noqa 503
command: xfs_growfs /dev/sda1
when: growpart_needed.changed and 'XFS' in fs_type.stdout
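
Rule 503 flags tasks gated on another task's changed result (when: growpart_needed.changed), on the theory that such follow-up work belongs in a handler; it also appears combined with 306 in the pre-upgrade hunk above, since one noqa comment can carry several rule IDs. A handler-based sketch, with names and dry-run detection that are illustrative assumptions rather than the repository's code:

# Sketch: the handler form that rule 503 suggests. The --dry-run probe and
# exit-code handling are assumptions for illustration.
- name: Check whether /dev/sda1 can grow
  command: growpart --dry-run /dev/sda 1
  register: growpart_needed
  failed_when: growpart_needed.rc not in [0, 1]  # 1 is growpart's NOCHANGE
  changed_when: growpart_needed.rc == 0
  notify: Run growpart
  environment:
    LC_ALL: C

# in handlers:
- name: Run growpart
  command: growpart /dev/sda 1
  environment:
    LC_ALL: C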

View File

@@ -34,7 +34,7 @@
delegate_to: "{{ groups['kube-master'][0] }}"
when: gen_tokens|default(false)
-- name: Gen_tokens | Get list of tokens from first master
+- name: Gen_tokens | Get list of tokens from first master # noqa 305
shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
register: tokens_list
check_mode: no
@@ -42,7 +42,7 @@
run_once: true
when: sync_tokens|default(false)
-- name: Gen_tokens | Gather tokens
+- name: Gen_tokens | Gather tokens # noqa 306
shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
args:
warn: false
@@ -52,7 +52,7 @@
run_once: true
when: sync_tokens|default(false)
-- name: Gen_tokens | Copy tokens on masters
+- name: Gen_tokens | Copy tokens on masters # noqa 306
shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
when:
- inventory_hostname in groups['kube-master']