Add noqa and disable .ansible-lint global exclusions (#6410)

Maxime Guyot, committed by GitHub, 2020-07-27 15:24:17 +02:00
parent b680cdd0e4 · commit e70f27dd79
74 changed files with 163 additions and 170 deletions

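For context: ansible-lint (v4.x at the time of this commit) offers two ways to silence a rule. A skip_list entry in .ansible-lint suppresses it repository-wide; a trailing "# noqa <rule-id>" comment suppresses it for one task only. This commit removes the global exclusions and annotates each offending task instead, so every suppression is visible exactly where it applies and new violations are no longer masked. A minimal sketch of the two styles; the literal .ansible-lint hunk is not shown in this excerpt, so the skip_list contents below are an assumption inferred from the noqa IDs used in the tasks (301, 303, 305, 306, 404):

    # .ansible-lint -- the repository-wide style this commit retires (sketch)
    skip_list:
      - '301'   # Commands should not change things if nothing needs doing
      - '305'   # Use shell only when shell functionality is required

    # the per-task style this commit adopts instead
    - name: Example | Read-only query  # noqa 301
      command: "{{ bin_dir }}/kubectl get nodes"
      register: nodes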
@@ -2,7 +2,7 @@
 - name: Calico-rr | Pre-upgrade tasks
   include_tasks: pre.yml

-- name: Calico-rr | Fetch current node object
+- name: Calico-rr | Fetch current node object # noqa 301
   command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
   register: calico_rr_node
   until: calico_rr_node is succeeded
@@ -15,12 +15,12 @@
       {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
       { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}

-- name: Calico-rr | Configure route reflector
+- name: Calico-rr | Configure route reflector # noqa 301 305
   shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
   args:
     stdin: "{{ calico_rr_node_patched | to_json }}"

-- name: Calico-rr | Set label for route reflector
+- name: Calico-rr | Set label for route reflector # noqa 301
   command: >-
     {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
     'i-am-a-route-reflector=true' --overwrite
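The rule IDs above recur throughout the commit: in ansible-lint v4, 301 is "Commands should not change things if nothing needs doing" (a command/shell task that declares no changed_when, creates, or removes) and 305 is "Use shell only when shell functionality is required". For tasks that are genuinely read-only, an alternative to noqa 301 is to say so explicitly; a sketch under that assumption, not part of this diff:

    - name: Calico-rr | Fetch current node object
      command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
      register: calico_rr_node
      changed_when: false   # a read-only query; declaring it also satisfies rule 301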

@@ -37,7 +37,7 @@
   when:
     - "calico_vxlan_mode in ['Always', 'CrossSubnet']"

-- name: "Get current version of calico cluster version"
+- name: "Get current version of calico cluster version" # noqa 306
   shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
   register: calico_version_on_server
   run_once: yes
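Rule 306 is "Shells that use pipes should set the pipefail option": without it, only the exit code of the last command in the pipeline (awk here) is checked, so a failing calicoctl would go unnoticed. The lint-clean alternative to noqa, assuming bash is available on the target hosts, looks roughly like this (a sketch, not part of this diff):

    - name: "Get current version of calico cluster version"
      shell: |
        set -o pipefail
        {{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'
      args:
        executable: /bin/bash   # pipefail is a bash option, not guaranteed in POSIX sh
      register: calico_version_on_server
      run_once: yes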

@@ -6,7 +6,7 @@
     mode: 0755
     remote_src: yes

-- name: Calico | Check if host has NetworkManager
+- name: Calico | Check if host has NetworkManager # noqa 303
   command: systemctl show NetworkManager
   register: nm_check
   failed_when: false
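Rule 303 is "Using command rather than module": Ansible ships modules covering most systemctl operations, and the linter prefers them. A module-based probe would look roughly like the sketch below (service_facts has existed since Ansible 2.5); the noqa keeps the single lightweight query instead:

    - name: Calico | Gather service facts
      service_facts:           # populates ansible_facts.services

    - name: Calico | Record whether NetworkManager is present
      set_fact:
        nm_present: "{{ 'NetworkManager.service' in ansible_facts.services }}"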
@@ -84,7 +84,7 @@
   run_once: true
   when: calico_datastore == "etcd"

-- name: Calico | Check if calico network pool has already been configured
+- name: Calico | Check if calico network pool has already been configured # noqa 306
   shell: >
     {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
   register: calico_conf
@@ -131,7 +131,7 @@
   loop_control:
     label: "{{ item.item.file }}"

-- name: Calico | Configure calico network pool (version < v3.3.0)
+- name: Calico | Configure calico network pool (version < v3.3.0) # noqa 306
   shell: >
     echo "
       { "kind": "IPPool",
@@ -149,7 +149,7 @@
     - 'calico_conf.stdout == "0"'
     - calico_version is version("v3.3.0", "<")

-- name: Calico | Configure calico network pool (version >= v3.3.0)
+- name: Calico | Configure calico network pool (version >= v3.3.0) # noqa 306
   shell: >
     echo "
       { "kind": "IPPool",
@@ -176,7 +176,7 @@
     - inventory_hostname in groups['k8s-cluster']
   run_once: yes

-- name: Calico | Set global as_num
+- name: Calico | Set global as_num # noqa 306
   shell: >
     echo '
       { "kind": "BGPConfiguration",
@@ -192,7 +192,7 @@
   when:
     - inventory_hostname == groups['kube-master'][0]

-- name: Calico | Configure peering with router(s) at global scope
+- name: Calico | Configure peering with router(s) at global scope # noqa 306
   shell: >
     echo '{
       "apiVersion": "projectcalico.org/v3",
@@ -214,7 +214,7 @@
     - inventory_hostname == groups['kube-master'][0]
     - peer_with_router|default(false)

-- name: Calico | Configure peering with route reflectors at global scope
+- name: Calico | Configure peering with route reflectors at global scope # noqa 306
   shell: |
     echo '{
       "apiVersion": "projectcalico.org/v3",
@@ -236,7 +236,7 @@
     - inventory_hostname == groups['kube-master'][0]
     - peer_with_calico_rr|default(false)

-- name: Calico | Configure route reflectors to peer with each other
+- name: Calico | Configure route reflectors to peer with each other # noqa 306
   shell: >
     echo '{
       "apiVersion": "projectcalico.org/v3",
@@ -309,7 +309,7 @@
     - inventory_hostname not in groups['kube-master']
     - calico_datastore == "kdd"

-- name: Calico | Configure node asNumber for per node peering
+- name: Calico | Configure node asNumber for per node peering # noqa 306
   shell: >
     echo '{
       "apiVersion": "projectcalico.org/v3",
@@ -333,7 +333,7 @@
     - local_as is defined
     - groups['calico-rr'] | default([]) | length == 0

-- name: Calico | Configure peering with router(s) at node scope
+- name: Calico | Configure peering with router(s) at node scope # noqa 306
   shell: >
     echo '{
       "apiVersion": "projectcalico.org/v3",

@@ -1,5 +1,5 @@
 ---
-- name: Calico | Get kubelet hostname
+- name: Calico | Get kubelet hostname # noqa 306
   shell: >-
     {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
     | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1

@@ -8,11 +8,11 @@
   command: ip link del dummy0
   when: dummy0.stat.exists

-- name: reset | get remaining routes set by bird
+- name: reset | get remaining routes set by bird # noqa 301
   command: ip route show proto bird
   register: bird_routes

-- name: reset | remove remaining routes set by bird
+- name: reset | remove remaining routes set by bird # noqa 301
   command: "ip route del {{ bird_route }} proto bird"
   with_items: "{{ bird_routes.stdout_lines }}"
   loop_control:
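These two tasks show both faces of rule 301: "ip route show" is read-only, while "ip route del" always changes state. For the latter, declaring the change explicitly is an alternative to noqa; a sketch only (the loop_var name is inferred from the bird_route reference, and that an explicit changed_when pacifies the v4 rule is my reading, not something this diff shows):

    - name: reset | remove remaining routes set by bird
      command: "ip route del {{ bird_route }} proto bird"
      changed_when: true   # deleting a route always mutates state
      with_items: "{{ bird_routes.stdout_lines }}"
      loop_control:
        loop_var: bird_route   # inferred: the command references {{ bird_route }}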

@@ -16,11 +16,11 @@
     - "etcdv2"
     - "etcdv3"

-- name: "Tests data migration (dry-run)"
+- name: "Tests data migration (dry-run)" # noqa 301 305
   shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
   register: calico_upgrade_test_data
   failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'

-- name: "If test migration is success continue with calico data real migration"
+- name: "If test migration is success continue with calico data real migration" # noqa 301 305
   shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade"
   register: calico_upgrade_migration_data
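Both migration tasks carry noqa 301 305 even though neither command line uses pipes, redirection, or globbing, so the command module would satisfy rule 305 directly; 301 still applies because calico-upgrade mutates the datastore. A command-module sketch of the dry run (an alternative reading, not what the commit does):

    - name: "Tests data migration (dry-run)"
      command: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
      register: calico_upgrade_test_data
      changed_when: false   # the dry run only reports; it migrates nothing
      failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'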

@@ -21,7 +21,7 @@
     - contiv_kubectl.stat.exists
     - inventory_hostname == groups['kube-master'][0]

-- name: reset | Copy contiv temporary cleanup script
+- name: reset | Copy contiv temporary cleanup script # noqa 404
   copy:
     src: ../files/contiv-cleanup.sh # Not in role_path so we must trick...
     dest: /opt/cni/bin/cleanup
@@ -31,7 +31,7 @@
   when:
     - contiv_kubectl.stat.exists

-- name: reset | Lay down contiv cleanup template
+- name: reset | Lay down contiv cleanup template # noqa 404
   template:
     src: ../templates/contiv-cleanup.yml.j2 # Not in role_path so we must trick...
     dest: "{{ kube_config_dir }}/contiv-cleanup.yml" # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
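Rule 404 is "Doesn't need a relative path in role": src paths for copy and template are normally bare filenames resolved against the role's own files/ and templates/ directories. The inline comments explain why that cannot work here (the assets live outside the executing role), which makes noqa the honest fix. For contrast, the layout rule 404 expects, illustrative only:

    - name: reset | Copy a cleanup script shipped inside this role
      copy:
        src: contiv-cleanup.sh     # bare name: resolved against <role>/files/
        dest: /opt/cni/bin/cleanup
        mode: 0755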

@@ -1,5 +1,5 @@
 ---
-- name: Kube-OVN | Label ovn-db node
+- name: Kube-OVN | Label ovn-db node # noqa 305
   shell: >-
     {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
   when:

@@ -1,5 +1,5 @@
 ---
-- name: Macvlan | Retrieve Pod Cidr
+- name: Macvlan | Retrieve Pod Cidr # noqa 301
   command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
   register: node_pod_cidr_cmd
   delegate_to: "{{ groups['kube-master'][0] }}"
@@ -8,7 +8,7 @@
   set_fact:
     node_pod_cidr={{ node_pod_cidr_cmd.stdout }}

-- name: Macvlan | Retrieve default gateway network interface
+- name: Macvlan | Retrieve default gateway network interface # noqa 301
   become: false
   raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/'
   register: node_default_gateway_interface_cmd