mirror of https://github.com/kubernetes-sigs/kubespray.git

Add noqa and disable .ansible-lint global exclusions (#6410)
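This commit replaces repository-wide ansible-lint exclusions with per-task `# noqa <rule>` annotations: a rule that used to be silenced everywhere via the skip_list in `.ansible-lint` is now suppressed only on the specific tasks that intentionally violate it, so new violations elsewhere are caught again. The numeric IDs in the hunks below are ansible-lint 4.x rules: 301 "Commands should not change things if nothing needs doing", 302 "Using command rather than an argument to e.g. file", 303 "Using command rather than module", 305 "Use shell only when shell functionality is required", 306 "Shells that use pipes should set the pipefail option", 404 "Doesn't need a relative path in role", and 503 "Tasks that run when changed should likely be handlers". The `.ansible-lint` file itself is not shown in this diff; as a hypothetical sketch, the shape of the change is:

# .ansible-lint (sketch only; the file's actual before/after contents are
# not part of the hunks below)
#
# Before: rules disabled for the whole repository, hiding every violation:
#   skip_list:
#     - '301'  # Commands should not change things if nothing needs doing
#     - '305'  # Use shell only when shell functionality is required
#     - '306'  # Shells that use pipes should set the pipefail option
#
# After: the global exclusions are removed and each deliberate violation is
# annotated inline at the offending task instead:
#   - name: Get crictl completion # noqa 305
#     shell: "{{ bin_dir }}/crictl completion"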
@@ -4,7 +4,7 @@
   vars:
     download: "{{ download_defaults | combine(downloads.crictl) }}"

-- name: Install crictl config
+- name: Install crictl config # noqa 404
   template:
     src: ../templates/crictl.yaml.j2
     dest: /etc/crictl.yaml
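Rule 404 fires here because the template source steps outside the role with a relative path; kubespray deliberately shares one crictl template between container-engine roles, hence the noqa. For contrast, a rule-clean layout keeps the template inside the role, where the template module resolves bare names against the role's own templates/ directory. A hypothetical sketch, assuming the template were moved into this role:

# Sketch only: assumes <role>/templates/crictl.yaml.j2 exists.
- name: Install crictl config
  template:
    src: crictl.yaml.j2   # bare name resolves inside the role's templates/ dir
    dest: /etc/crictl.yaml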
@@ -34,7 +34,7 @@
   tags:
     - facts

-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
   shell:
     cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:
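Rule 305 flags shell: tasks that use no shell features (pipes, redirection, globbing). The grubby invocation above needs none, so the lint-clean alternative is simply the command module; this commit keeps shell: and documents the exception with noqa instead. A sketch of the compliant form, under the assumption nothing else in the task changes:

# Sketch of the rule-305-compliant variant (not the change this commit makes):
- name: disable unified_cgroup_hierarchy in Fedora 31+
  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
  # the original when: condition (cut off in the hunk above) stays unchanged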
@@ -4,7 +4,7 @@
   vars:
     download: "{{ download_defaults | combine(downloads.crictl) }}"

-- name: Install crictl config
+- name: Install crictl config # noqa 404
   template:
     src: ../templates/crictl.yaml.j2
     dest: /etc/crictl.yaml

@@ -21,7 +21,7 @@
     group: no
   delegate_to: "{{ inventory_hostname }}"

-- name: Get crictl completion
+- name: Get crictl completion # noqa 305
   shell: "{{ bin_dir }}/crictl completion"
   changed_when: False
   register: cri_completion

@@ -59,7 +59,7 @@
     - ansible_distribution == "CentOS"
     - ansible_distribution_major_version == "8"

-- name: Ensure latest version of libseccom installed
+- name: Ensure latest version of libseccom installed # noqa 303
   command: "yum update -y libseccomp"
   when:
     - ansible_distribution == "CentOS"
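Rule 303 flags commands that have a dedicated Ansible module. The module-based form of the libseccomp update would use yum, though that form trips rule 403 ("Package installs should not use latest") instead, which is perhaps why the raw command plus noqa was kept. A sketch of the module variant:

# Sketch of the rule-303-compliant variant using the yum module:
- name: Ensure latest version of libseccomp installed
  yum:
    name: libseccomp
    state: latest   # note: state=latest itself triggers lint rule 403
  when:
    - ansible_distribution == "CentOS"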
@@ -47,7 +47,7 @@
   tags:
     - facts

-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
   shell:
     cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
   when:

@@ -28,13 +28,13 @@
   set_fact:
     docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"

-- name: check system nameservers
+- name: check system nameservers # noqa 306
   shell: grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
   changed_when: False
   register: system_nameservers
   check_mode: no

-- name: check system search domains
+- name: check system search domains # noqa 306
   shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
   changed_when: False
   register: system_search_domains
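Rule 306 targets pipelines: with `grep ... | sed ...` under the default /bin/sh, the task's return code is sed's, so a failing grep goes unnoticed. The canonical fix is bash with pipefail rather than a noqa. A sketch of the compliant form of the nameserver check above:

# Sketch of the rule-306-compliant variant with pipefail:
- name: check system nameservers
  shell: |
    set -o pipefail
    grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
  args:
    executable: /bin/bash
  changed_when: False
  register: system_nameservers
  check_mode: no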
@@ -11,7 +11,7 @@
   notify: restart docker
   when: http_proxy is defined or https_proxy is defined

-- name: get systemd version
+- name: get systemd version # noqa 306
   # noqa 303 - systemctl is called intentionally here
   shell: systemctl --version | head -n 1 | cut -d " " -f 2
   register: systemd_version

@@ -4,7 +4,7 @@
 # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}.
 # It will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node
+- name: check_pull_required | Generate a list of information about the images on a node # noqa 305
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images

@@ -63,7 +63,7 @@
     - pull_required or download_run_once
     - not image_is_cached

-- name: download_container | Save and compress image
+- name: download_container | Save and compress image # noqa 305
   shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
   delegate_to: "{{ download_delegate }}"
   delegate_facts: no

@@ -103,7 +103,7 @@
     - pull_required
     - download_force_cache

-- name: download_container | Load image into docker
+- name: download_container | Load image into docker # noqa 305
   shell: "{{ image_load_command }}"
   register: container_load_status
   failed_when: container_load_status is failed

@@ -32,7 +32,7 @@
     - localhost
     - asserts

-- name: prep_download | On localhost, check if user has access to docker without using sudo
+- name: prep_download | On localhost, check if user has access to docker without using sudo # noqa 305
   shell: "{{ image_info_command_on_localhost }}"
   delegate_to: localhost
   connection: local

@@ -68,7 +68,7 @@
     - localhost
     - asserts

-- name: prep_download | Register docker images info
+- name: prep_download | Register docker images info # noqa 305
   shell: "{{ image_info_command }}"
   no_log: true
   register: docker_images

@@ -30,7 +30,7 @@
     mode: "0755"
     state: file

-- name: prep_kubeadm_images | Generate list of required images
+- name: prep_kubeadm_images | Generate list of required images # noqa 306
   shell: "{{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -v coredns"
   register: kubeadm_images_raw
   run_once: true

@@ -1,5 +1,5 @@
 ---
-- name: Configure | Check if etcd cluster is healthy
+- name: Configure | Check if etcd cluster is healthy # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_cluster_is_healthy
   failed_when: false

@@ -16,7 +16,7 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

-- name: Configure | Check if etcd-events cluster is healthy
+- name: Configure | Check if etcd-events cluster is healthy # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_events_cluster_is_healthy
   failed_when: false

@@ -73,7 +73,7 @@
   ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
   when: is_etcd_master and etcd_events_cluster_setup

-- name: Configure | Wait for etcd cluster to be healthy
+- name: Configure | Wait for etcd cluster to be healthy # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_cluster_is_healthy
   until: etcd_cluster_is_healthy.rc == 0

@@ -94,7 +94,7 @@
     ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
     ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

-- name: Configure | Wait for etcd-events cluster to be healthy
+- name: Configure | Wait for etcd-events cluster to be healthy # noqa 306
   shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
   register: etcd_events_cluster_is_healthy
   until: etcd_events_cluster_is_healthy.rc == 0

@@ -139,7 +139,7 @@
         inventory_hostname in groups['k8s-cluster']) and
         sync_certs|default(false) and inventory_hostname not in groups['etcd']

-- name: Gen_certs | Copy certs on nodes
+- name: Gen_certs | Copy certs on nodes # noqa 306
   shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
   args:
     executable: /bin/bash

@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd-events cluster
+- name: Join Member | Add member to etcd-events cluster # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0
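Rule 301 complains that command/shell tasks report "changed" on every run. The standard remedy is changed_when (or creates:/removes: for file-producing commands) keyed on registered output; here the member-add task is also retried with until:, so the authors suppress the rule instead. A hypothetical sketch of the compliant pattern (the stdout test is an assumption, not taken from the commit):

# Sketch: mark the task changed only when etcdctl actually added the member.
- name: Join Member | Add member to etcd-events cluster
  command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
  register: member_add_result
  changed_when: "'added' in member_add_result.stdout"  # hypothetical condition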
@@ -24,7 +24,7 @@
       {%- endif -%}
       {%- endfor -%}

-- name: Join Member | Ensure member is in etcd-events cluster
+- name: Join Member | Ensure member is in etcd-events cluster # noqa 306
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_events_access_address }}"
   register: etcd_events_member_in_cluster
   changed_when: false

@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd cluster
+- name: Join Member | Add member to etcd cluster # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
   register: member_add_result
   until: member_add_result.rc == 0

@@ -24,7 +24,7 @@
       {%- endif -%}
       {%- endfor -%}

-- name: Join Member | Ensure member is in etcd cluster
+- name: Join Member | Ensure member is in etcd cluster # noqa 306
   shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
   register: etcd_member_in_cluster
   changed_when: false

@@ -23,14 +23,14 @@
     remote_src: true
   register: etcd_ca_cert

-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) # noqa 503
   command: update-ca-certificates
   when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]

-- name: Gen_certs | update ca-certificates (RedHat)
+- name: Gen_certs | update ca-certificates (RedHat) # noqa 503
   command: update-ca-trust extract
   when: etcd_ca_cert.changed and ansible_os_family == "RedHat"

-- name: Gen_certs | update ca-certificates (ClearLinux)
+- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503
   command: clrtrust add "{{ ca_cert_path }}"
   when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"
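Rule 503 suggests that a task gated on a registered `.changed` result is really a handler. The handler form would move each update-ca-certificates command under handlers/ and have the cert-installing task notify it. The layout below is a hypothetical sketch, not the structure this commit adopts:

# tasks: copy the CA cert and notify a handler instead of testing .changed
- name: Gen_certs | install etcd CA certificate          # hypothetical task
  copy:
    src: "{{ etcd_cert_dir }}/ca.pem"
    dest: /usr/local/share/ca-certificates/etcd-ca.crt   # hypothetical path
    remote_src: true
  notify: update ca-certificates

# handlers/main.yml
- name: update ca-certificates
  command: update-ca-certificates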
@@ -32,7 +32,7 @@
   register: helmcert_master
   run_once: true

-- name: Gen_helm_tiller_certs | run cert generation script
+- name: Gen_helm_tiller_certs | run cert generation script # noqa 301
   run_once: yes
   delegate_to: "{{ groups['kube-master'][0] }}"
   command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"

@@ -57,7 +57,7 @@
   with_items:
     - "{{ helm_client_certs }}"

-- name: Gen_helm_tiller_certs | Gather helm client certs
+- name: Gen_helm_tiller_certs | Gather helm client certs # noqa 306
   # noqa 303 - tar is called intentionally here, but maybe this should be done with the slurp module
   shell: "tar cfz - -C {{ helm_home_dir }} {{ helm_client_certs|join(' ') }} | base64 --wrap=0"
   args:

@@ -85,7 +85,7 @@
     mode: "0600"
   when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]

-- name: Gen_helm_tiller_certs | Unpack helm certs on masters
+- name: Gen_helm_tiller_certs | Unpack helm certs on masters # noqa 306
   shell: "base64 -d < {{ helm_cert_tempfile.path }} | tar xz -C {{ helm_home_dir }}"
   no_log: true
   changed_when: false

@@ -52,7 +52,7 @@
     - helm_version is version('v3.0.0', '<')

 # FIXME: https://github.com/helm/helm/issues/6374
-- name: Helm | Install/upgrade helm
+- name: Helm | Install/upgrade helm # noqa 306
   shell: >
     {{ bin_dir }}/helm init --tiller-namespace={{ tiller_namespace }}
     {% if helm_skip_refresh %} --skip-refresh{% endif %}

@@ -78,7 +78,7 @@
   environment: "{{ proxy_env }}"

 # FIXME: https://github.com/helm/helm/issues/4063
-- name: Helm | Force apply tiller overrides if necessary
+- name: Helm | Force apply tiller overrides if necessary # noqa 306
   shell: >
     {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ tiller_namespace }}
     {% if helm_skip_refresh %} --skip-refresh{% endif %}

@@ -108,7 +108,7 @@
     - helm_version is version('v3.0.0', '>=')
     - helm_stable_repo_url is defined

-- name: Make sure bash_completion.d folder exists
+- name: Make sure bash_completion.d folder exists # noqa 503
   file:
     name: "/etc/bash_completion.d/"
     state: directory

@@ -116,7 +116,7 @@
     - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed))
     - ansible_os_family in ["ClearLinux"]

-- name: Helm | Set up bash completion
+- name: Helm | Set up bash completion # noqa 503
   shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
   when:
     - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed))

@@ -1,5 +1,5 @@
 ---
-- name: "calico upgrade complete"
+- name: "calico upgrade complete" # noqa 305
   shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   when:
     - inventory_hostname == groups['kube-master'][0]

@@ -1,5 +1,5 @@
 ---
-- name: Rotate Tokens | Get default token name
+- name: Rotate Tokens | Get default token name # noqa 306
   shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
   register: default_token
   changed_when: false

@@ -29,7 +29,7 @@

 # FIXME(mattymo): Exclude built in secrets that were automatically rotated,
 # instead of filtering manually
-- name: Rotate Tokens | Get all serviceaccount tokens to expire
+- name: Rotate Tokens | Get all serviceaccount tokens to expire # noqa 306
   shell: >-
     {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
     -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'

@@ -48,7 +48,7 @@
     timeout: 180

 # NOTE(mattymo): Please forgive this workaround
-- name: Generate admin kubeconfig with external api endpoint
+- name: Generate admin kubeconfig with external api endpoint # noqa 302
   shell: >-
     mkdir -p {{ kube_config_dir }}/external_kubeconfig &&
     {{ bin_dir }}/kubeadm
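Rule 302 fires on the hunk above because the shell chains `mkdir -p` before kubeadm, and mkdir has a module equivalent (file with state=directory). A rule-clean split would create the directory with the file module and leave only the kubeadm call in the command task. A sketch:

# Sketch of the rule-302-compliant split:
- name: Ensure the external kubeconfig directory exists   # hypothetical task
  file:
    path: "{{ kube_config_dir }}/external_kubeconfig"
    state: directory

# ...followed by the kubeadm invocation on its own, without the mkdir.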
@@ -22,7 +22,7 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

-- name: Calculate kubeadm CA cert hash
+- name: Calculate kubeadm CA cert hash # noqa 306
   shell: openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
   register: kubeadm_ca_hash
   when:

@@ -107,7 +107,7 @@

 # FIXME(mattymo): Need to point to localhost, otherwise masters will all point
 # incorrectly to first master, creating SPoF.
-- name: Update server field in kube-proxy kubeconfig
+- name: Update server field in kube-proxy kubeconfig # noqa 306
   shell: >-
     {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
     | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'

@@ -131,7 +131,7 @@
     group: root
     mode: "0644"

-- name: Restart all kube-proxy pods to ensure that they load the new configmap
+- name: Restart all kube-proxy pods to ensure that they load the new configmap # noqa 305
   shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"

@@ -157,7 +157,7 @@

 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services # noqa 305
   shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   run_once: true
   delegate_to: "{{ groups['kube-master']|first }}"

@@ -47,7 +47,7 @@
   when:
     - old_apiserver_cert.stat.exists

-- name: kubeadm | Forcefully delete old static pods
+- name: kubeadm | Forcefully delete old static pods # noqa 306
   shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when:

@@ -8,7 +8,7 @@
   register: kube_apiserver_manifest_replaced
   when: etcd_secret_changed|default(false)

-- name: "Pre-upgrade | Delete master containers forcefully"
+- name: "Pre-upgrade | Delete master containers forcefully" # noqa 306 503
   shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]

@@ -45,7 +45,7 @@
   tags:
     - kube-proxy

-- name: Verify if br_netfilter module exists
+- name: Verify if br_netfilter module exists # noqa 305
   shell: "modinfo br_netfilter"
   environment:
     PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management

@@ -1,5 +1,5 @@
 ---
-- name: "Pre-upgrade | check if kubelet container exists"
+- name: "Pre-upgrade | check if kubelet container exists" # noqa 306
   shell: >-
     {% if container_manager in ['crio', 'docker'] %}
     docker ps -af name=kubelet | grep kubelet

@@ -29,7 +29,7 @@
     - Preinstall | reload kubelet
   when: is_fedora_coreos

-- name: Preinstall | reload NetworkManager
+- name: Preinstall | reload NetworkManager # noqa 303
   command: systemctl restart NetworkManager.service
   when: is_fedora_coreos
@@ -158,7 +158,7 @@
   when:
     - kube_network_plugin == 'calico'

-- name: "Get current version of calico cluster version"
+- name: "Get current version of calico cluster version" # noqa 306
   shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
   register: calico_version_on_server
   run_once: yes

@@ -24,14 +24,14 @@
   set_fact:
     is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"

-- name: check resolvconf
+- name: check resolvconf # noqa 305
   shell: which resolvconf
   register: resolvconf
   failed_when: false
   changed_when: false
   check_mode: no

-- name: check systemd-resolved
+- name: check systemd-resolved # noqa 303
   command: systemctl is-active systemd-resolved
   register: systemd_resolved_enabled
   failed_when: false

@@ -1,5 +1,5 @@
 ---
-- name: Update package management cache (zypper) - SUSE
+- name: Update package management cache (zypper) - SUSE # noqa 305
   shell: zypper -n --gpg-auto-import-keys ref
   register: make_cache_output
   until: make_cache_output is succeeded

@@ -20,12 +20,12 @@
   changed_when: False
   register: fs_type

-- name: run growpart
+- name: run growpart # noqa 503
   command: growpart /dev/sda 1
   when: growpart_needed.changed
   environment:
     LC_ALL: C

-- name: run xfs_growfs
+- name: run xfs_growfs # noqa 503
   command: xfs_growfs /dev/sda1
   when: growpart_needed.changed and 'XFS' in fs_type.stdout

@@ -34,7 +34,7 @@
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: gen_tokens|default(false)

-- name: Gen_tokens | Get list of tokens from first master
+- name: Gen_tokens | Get list of tokens from first master # noqa 305
   shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
   register: tokens_list
   check_mode: no

@@ -42,7 +42,7 @@
   run_once: true
   when: sync_tokens|default(false)

-- name: Gen_tokens | Gather tokens
+- name: Gen_tokens | Gather tokens # noqa 306
   shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
   args:
     warn: false

@@ -52,7 +52,7 @@
   run_once: true
   when: sync_tokens|default(false)

-- name: Gen_tokens | Copy tokens on masters
+- name: Gen_tokens | Copy tokens on masters # noqa 306
   shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
   when:
     - inventory_hostname in groups['kube-master']
@@ -2,7 +2,7 @@
 - name: Calico-rr | Pre-upgrade tasks
   include_tasks: pre.yml

-- name: Calico-rr | Fetch current node object
+- name: Calico-rr | Fetch current node object # noqa 301
   command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
   register: calico_rr_node
   until: calico_rr_node is succeeded

@@ -15,12 +15,12 @@
       {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
       { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}

-- name: Calico-rr | Configure route reflector
+- name: Calico-rr | Configure route reflector # noqa 301 305
   shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
   args:
     stdin: "{{ calico_rr_node_patched | to_json }}"

-- name: Calico-rr | Set label for route reflector
+- name: Calico-rr | Set label for route reflector # noqa 301
   command: >-
     {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
     'i-am-a-route-reflector=true' --overwrite

@@ -37,7 +37,7 @@
   when:
     - "calico_vxlan_mode in ['Always', 'CrossSubnet']"

-- name: "Get current version of calico cluster version"
+- name: "Get current version of calico cluster version" # noqa 306
   shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
   register: calico_version_on_server
   run_once: yes

@@ -6,7 +6,7 @@
     mode: 0755
     remote_src: yes

-- name: Calico | Check if host has NetworkManager
+- name: Calico | Check if host has NetworkManager # noqa 303
   command: systemctl show NetworkManager
   register: nm_check
   failed_when: false

@@ -84,7 +84,7 @@
   run_once: true
   when: calico_datastore == "etcd"

-- name: Calico | Check if calico network pool has already been configured
+- name: Calico | Check if calico network pool has already been configured # noqa 306
   shell: >
     {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
   register: calico_conf

@@ -131,7 +131,7 @@
   loop_control:
     label: "{{ item.item.file }}"

-- name: Calico | Configure calico network pool (version < v3.3.0)
+- name: Calico | Configure calico network pool (version < v3.3.0) # noqa 306
   shell: >
     echo "
     { "kind": "IPPool",

@@ -149,7 +149,7 @@
     - 'calico_conf.stdout == "0"'
     - calico_version is version("v3.3.0", "<")

-- name: Calico | Configure calico network pool (version >= v3.3.0)
+- name: Calico | Configure calico network pool (version >= v3.3.0) # noqa 306
   shell: >
     echo "
     { "kind": "IPPool",

@@ -176,7 +176,7 @@
     - inventory_hostname in groups['k8s-cluster']
   run_once: yes

-- name: Calico | Set global as_num
+- name: Calico | Set global as_num # noqa 306
   shell: >
     echo '
     { "kind": "BGPConfiguration",

@@ -192,7 +192,7 @@
   when:
     - inventory_hostname == groups['kube-master'][0]

-- name: Calico | Configure peering with router(s) at global scope
+- name: Calico | Configure peering with router(s) at global scope # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",

@@ -214,7 +214,7 @@
     - inventory_hostname == groups['kube-master'][0]
     - peer_with_router|default(false)

-- name: Calico | Configure peering with route reflectors at global scope
+- name: Calico | Configure peering with route reflectors at global scope # noqa 306
   shell: |
     echo '{
     "apiVersion": "projectcalico.org/v3",

@@ -236,7 +236,7 @@
     - inventory_hostname == groups['kube-master'][0]
     - peer_with_calico_rr|default(false)

-- name: Calico | Configure route reflectors to peer with each other
+- name: Calico | Configure route reflectors to peer with each other # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",

@@ -309,7 +309,7 @@
     - inventory_hostname not in groups['kube-master']
     - calico_datastore == "kdd"

-- name: Calico | Configure node asNumber for per node peering
+- name: Calico | Configure node asNumber for per node peering # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",

@@ -333,7 +333,7 @@
     - local_as is defined
     - groups['calico-rr'] | default([]) | length == 0

-- name: Calico | Configure peering with router(s) at node scope
+- name: Calico | Configure peering with router(s) at node scope # noqa 306
   shell: >
     echo '{
     "apiVersion": "projectcalico.org/v3",
@@ -1,5 +1,5 @@
 ---
-- name: Calico | Get kubelet hostname
+- name: Calico | Get kubelet hostname # noqa 306
   shell: >-
     {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
     | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1

@@ -8,11 +8,11 @@
   command: ip link del dummy0
   when: dummy0.stat.exists

-- name: reset | get remaining routes set by bird
+- name: reset | get remaining routes set by bird # noqa 301
   command: ip route show proto bird
   register: bird_routes

-- name: reset | remove remaining routes set by bird
+- name: reset | remove remaining routes set by bird # noqa 301
   command: "ip route del {{ bird_route }} proto bird"
   with_items: "{{ bird_routes.stdout_lines }}"
   loop_control:

@@ -16,11 +16,11 @@
     - "etcdv2"
     - "etcdv3"

-- name: "Tests data migration (dry-run)"
+- name: "Tests data migration (dry-run)" # noqa 301 305
   shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   register: calico_upgrade_test_data
   failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'

-- name: "If test migration is success continue with calico data real migration"
+- name: "If test migration is success continue with calico data real migration" # noqa 301 305
   shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade"
   register: calico_upgrade_migration_data

@@ -21,7 +21,7 @@
     - contiv_kubectl.stat.exists
     - inventory_hostname == groups['kube-master'][0]

-- name: reset | Copy contiv temporary cleanup script
+- name: reset | Copy contiv temporary cleanup script # noqa 404
   copy:
     src: ../files/contiv-cleanup.sh # Not in role_path so we must trick...
     dest: /opt/cni/bin/cleanup

@@ -31,7 +31,7 @@
   when:
     - contiv_kubectl.stat.exists

-- name: reset | Lay down contiv cleanup template
+- name: reset | Lay down contiv cleanup template # noqa 404
   template:
     src: ../templates/contiv-cleanup.yml.j2 # Not in role_path so we must trick...
     dest: "{{ kube_config_dir }}/contiv-cleanup.yml" # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset

@@ -1,5 +1,5 @@
 ---
-- name: Kube-OVN | Label ovn-db node
+- name: Kube-OVN | Label ovn-db node # noqa 305
   shell: >-
     {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
   when:

@@ -1,5 +1,5 @@
 ---
-- name: Macvlan | Retrieve Pod Cidr
+- name: Macvlan | Retrieve Pod Cidr # noqa 301
   command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
   register: node_pod_cidr_cmd
   delegate_to: "{{ groups['kube-master'][0] }}"

@@ -8,7 +8,7 @@
   set_fact:
     node_pod_cidr={{ node_pod_cidr_cmd.stdout }}

-- name: Macvlan | Retrieve default gateway network interface
+- name: Macvlan | Retrieve default gateway network interface # noqa 301
   become: false
   raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/'
   register: node_default_gateway_interface_cmd
@@ -1,5 +1,5 @@
 ---
-- name: Get etcd endpoint health
+- name: Get etcd endpoint health # noqa 305
   shell: "{{ bin_dir }}/etcdctl endpoint health"
   register: etcd_endpoint_health
   ignore_errors: true

@@ -57,7 +57,7 @@
     - groups['broken_etcd']
     - "item.rc != 0 and not 'No such file or directory' in item.stderr"

-- name: Get etcd cluster members
+- name: Get etcd cluster members # noqa 305
   shell: "{{ bin_dir }}/etcdctl member list"
   register: member_list
   changed_when: false

@@ -73,7 +73,7 @@
     - not healthy
     - has_quorum

-- name: Remove broken cluster members
+- name: Remove broken cluster members # noqa 305
   shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
   environment:
     ETCDCTL_API: 3

@@ -1,5 +1,5 @@
 ---
-- name: Save etcd snapshot
+- name: Save etcd snapshot # noqa 305
   shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"

@@ -25,7 +25,7 @@
     path: "{{ etcd_data_dir }}"
     state: absent

-- name: Restore etcd snapshot
+- name: Restore etcd snapshot # noqa 301 305
   shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
   environment:
     - ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"

@@ -1,5 +1,5 @@
 ---
-- name: Wait for apiserver
+- name: Wait for apiserver # noqa 305
   shell: "{{ bin_dir }}/kubectl get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"

@@ -10,7 +10,7 @@
   changed_when: false
   when: groups['broken_kube-master']

-- name: Delete broken kube-master nodes from cluster
+- name: Delete broken kube-master nodes from cluster # noqa 305
   shell: "{{ bin_dir }}/kubectl delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"

@@ -1,5 +1,5 @@
 ---
-- name: Delete node
+- name: Delete node # noqa 301
   command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube-master']|first }}"
   ignore_errors: yes

@@ -1,5 +1,5 @@
 ---
-- name: cordon-node | Mark all nodes as unschedulable before drain
+- name: cordon-node | Mark all nodes as unschedulable before drain # noqa 301
   command: >-
     {{ bin_dir }}/kubectl cordon {{ hostvars[item]['kube_override_hostname']|default(item) }}
   with_items:

@@ -9,7 +9,7 @@
   run_once: true
   ignore_errors: yes

-- name: remove-node | Drain node except daemonsets resource
+- name: remove-node | Drain node except daemonsets resource # noqa 301
   command: >-
     {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain
     --force

@@ -34,7 +34,7 @@
   delegate_to: "{{ groups['etcd']|first }}"
   when: inventory_hostname in groups['etcd']

-- name: Remove etcd member from cluster
+- name: Remove etcd member from cluster # noqa 305
   shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
   register: etcd_member_in_cluster
   changed_when: false
@@ -41,12 +41,12 @@
   tags:
     - docker

-- name: reset | systemctl daemon-reload
+- name: reset | systemctl daemon-reload # noqa 503
   systemd:
     daemon_reload: true
   when: services_removed.changed or docker_dropins_removed.changed

-- name: reset | remove all containers
+- name: reset | remove all containers # noqa 306
   shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
   register: remove_all_containers
   retries: 4

@@ -56,7 +56,7 @@
   tags:
     - docker

-- name: reset | restart docker if needed
+- name: reset | restart docker if needed # noqa 503
   service:
     name: docker
     state: restarted

@@ -64,7 +64,7 @@
   tags:
     - docker

-- name: reset | stop all cri containers
+- name: reset | stop all cri containers # noqa 306
   shell: "crictl ps -aq | xargs -r crictl -t 60s stop"
   register: remove_all_cri_containers
   retries: 5

@@ -75,7 +75,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"]

-- name: reset | remove all cri containers
+- name: reset | remove all cri containers # noqa 306
   shell: "crictl ps -aq | xargs -r crictl -t 60s rm"
   register: remove_all_cri_containers
   retries: 5

@@ -86,7 +86,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"] and deploy_container_engine|default(true)

-- name: reset | stop all cri pods
+- name: reset | stop all cri pods # noqa 306
   shell: "crictl pods -q | xargs -r crictl -t 60s stopp"
   register: remove_all_cri_containers
   retries: 5

@@ -97,7 +97,7 @@
     - containerd
   when: container_manager in ["crio", "containerd"]

-- name: reset | remove all cri pods
+- name: reset | remove all cri pods # noqa 306
   shell: "crictl pods -q | xargs -r crictl -t 60s rmp"
   register: remove_all_cri_containers
   retries: 5

@@ -130,7 +130,7 @@
   tags:
     - services

-- name: reset | gather mounted kubelet dirs
+- name: reset | gather mounted kubelet dirs # noqa 306 301
   shell: mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
   args:
     warn: false

@@ -139,7 +139,7 @@
   tags:
     - mounts

-- name: reset | unmount kubelet dirs
+- name: reset | unmount kubelet dirs # noqa 301
   command: umount -f {{ item }}
   with_items: "{{ mounted_dirs.stdout_lines }}"
   register: umount_dir

@@ -161,7 +161,7 @@
   tags:
     - iptables

-- name: Clear IPVS virtual server table
+- name: Clear IPVS virtual server table # noqa 305
   shell: "ipvsadm -C"
   when:
     - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']