Add noqa and disable .ansible-lint global exclusions (#6410)

Maxime Guyot
2020-07-27 15:24:17 +02:00
committed by GitHub
parent b680cdd0e4
commit e70f27dd79
74 changed files with 163 additions and 170 deletions
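
The change follows one pattern throughout: rather than silencing ansible-lint rules for the whole repository via global exclusions in .ansible-lint, each task that legitimately trips a rule now carries an inline "# noqa <rule-id>" waiver (rules 301, 305, 306 and 404 in the hunks below). The .ansible-lint side of the change is not shown in this excerpt, so the skip_list here is an assumed sketch of the kind of global exclusion being removed, paired with the inline equivalent the commit adds:

# .ansible-lint (assumed sketch of the removed global exclusions)
skip_list:
  - '301'  # commands should not change things if nothing needs doing
  - '305'  # use shell only when shell functionality is required
  - '306'  # shells that use pipes should set the pipefail option
  - '404'  # doesn't need a relative path in role

# Inline equivalent applied per task in this commit, e.g.:
- name: Template the inventory  # noqa 404
  template:
    src: ../templates/inventory-aws.j2
    dest: "{{ inventory_path }}"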

View File

@@ -18,7 +18,7 @@
instance_tags: "{{ aws.tags }}"
register: ec2
- - name: Template the inventory
+ - name: Template the inventory # noqa 404
template:
src: ../templates/inventory-aws.j2
dest: "{{ inventory_path }}"

View File

@@ -86,7 +86,7 @@
msg: "{{ droplets }}, {{ inventory_path }}"
when: state == 'present'
- - name: Template the inventory
+ - name: Template the inventory # noqa 404
template:
src: ../templates/inventory-do.j2
dest: "{{ inventory_path }}"

View File

@@ -49,7 +49,7 @@
add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
with_items: '{{ gce.instance_data }}'
- - name: Template the inventory
+ - name: Template the inventory # noqa 404
template:
src: ../templates/inventory-gce.j2
dest: "{{ inventory_path }}"
@@ -60,7 +60,7 @@
state: directory
when: mode in ['scale', 'separate-scale', 'ha-scale']
- - name: Template fake hosts group vars
+ - name: Template fake hosts group vars # noqa 404
template:
src: ../templates/fake_hosts.yml.j2
dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"

View File

@@ -29,7 +29,7 @@
loop_control:
index_var: vm_id
- - name: Wait for vms to have ipaddress assigned
+ - name: Wait for vms to have ipaddress assigned # noqa 301 306
shell: "kubectl get vmis -n {{ test_name }} instance-{{ vm_id }} -o json | jq '.status.interfaces[].ipAddress' | tr -d '\"'"
register: vm_ips
loop: "{{ range(1, vm_count|int + 1, 1) | list }}"

View File

@@ -16,7 +16,7 @@
state: absent
name: "{{ test_name }}"
- - name: Wait for namespace {{ test_name }} to be fully deleted
+ - name: Wait for namespace {{ test_name }} to be fully deleted # noqa 305
shell: kubectl get ns {{ test_name }}
register: delete_namespace
failed_when:

View File

@@ -7,7 +7,7 @@
expire_days: 2
tasks:
- - name: Generate uniq bucket name prefix
+ - name: Generate uniq bucket name prefix # noqa 301
raw: date +%Y%m%d
register: out
@@ -52,7 +52,7 @@
no_log: True
failed_when: false
- - name: Apply the lifecycle rules
+ - name: Apply the lifecycle rules # noqa 301
command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
environment:
BOTO_CONFIG: "{{ dir }}/.boto"

View File

@@ -15,7 +15,7 @@
- import_role:
name: cluster-dump
- - name: Check kubectl output
+ - name: Check kubectl output # noqa 301 305
shell: "{{ bin_dir }}/kubectl get nodes"
register: get_nodes
no_log: true
@@ -23,7 +23,7 @@
- debug:
msg: "{{ get_nodes.stdout.split('\n') }}"
- - name: Check that all nodes are running and ready
+ - name: Check that all nodes are running and ready # noqa 301 305
shell: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
register: get_nodes_yaml
until:

View File

@@ -15,7 +15,7 @@
- import_role:
name: cluster-dump
- - name: Check kubectl output
+ - name: Check kubectl output # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
register: get_pods
no_log: true
@@ -23,7 +23,7 @@
- debug:
msg: "{{ get_pods.stdout.split('\n') }}"
- - name: Check that all pods are running and ready
+ - name: Check that all pods are running and ready # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
register: run_pods_log
until:
@@ -36,7 +36,7 @@
failed_when: false
no_log: true
- - name: Check kubectl output
+ - name: Check kubectl output # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
register: get_pods
no_log: true

View File

@@ -15,10 +15,10 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"]
- - name: Create test namespace
+ - name: Create test namespace # noqa 301 305
shell: "{{ bin_dir }}/kubectl create namespace test"
- - name: Run 2 busybox pods in test ns
+ - name: Run 2 busybox pods in test ns # noqa 301 305
shell: "{{ bin_dir }}/kubectl run {{ item }} --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --command -- tail -f /dev/null"
loop:
- busybox1
@@ -27,7 +27,7 @@
- import_role:
name: cluster-dump
- - name: Check that all pods are running and ready
+ - name: Check that all pods are running and ready # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
register: run_pods_log
until:
@@ -40,7 +40,7 @@
failed_when: false
no_log: true
- - name: Get pod names
+ - name: Get pod names # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
register: pods
no_log: true
@@ -49,19 +49,19 @@
msg: "{{ pods.stdout.split('\n') }}"
failed_when: not run_pods_log is success
- - name: Get hostnet pods
+ - name: Get hostnet pods # noqa 301
command: "{{ bin_dir }}/kubectl get pods -n test -o
jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: hostnet_pods
no_log: true
- - name: Get running pods
+ - name: Get running pods # noqa 301
command: "{{ bin_dir }}/kubectl get pods -n test -o
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: running_pods
no_log: true
- - name: Check kubectl output
+ - name: Check kubectl output # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
register: get_pods
no_log: true
@@ -89,7 +89,7 @@
- item in pods_running
with_items: "{{ pod_ips }}"
- - name: Ping between pods is working
+ - name: Ping between pods is working # noqa 305
shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
when:
- not item[0] in pods_hostnet
@@ -98,7 +98,7 @@
- "{{ pod_names }}"
- "{{ pod_ips }}"
- - name: Ping between hostnet pods is working
+ - name: Ping between hostnet pods is working # noqa 305
shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
when:
- item[0] in pods_hostnet

View File

@@ -1,7 +1,7 @@
---
- hosts: kube-node
tasks:
- - name: Test tunl0 routes
+ - name: Test tunl0 routes # noqa 306
shell: "! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
when:
- (ipip|default(true) or cloud_provider is defined)
@@ -14,7 +14,7 @@
netchecker_port: 31081
tasks:
- - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
+ - name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282) # noqa 305
shell: "ethtool --offload flannel.1 rx off tx off"
ignore_errors: true
when:
@@ -33,7 +33,7 @@
- import_role:
name: cluster-dump
- - name: Wait for netchecker server
+ - name: Wait for netchecker server # noqa 306
shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
register: ncs_pod
until: ncs_pod.stdout.find('Running') != -1
@@ -41,7 +41,7 @@
delay: 10
when: inventory_hostname == groups['kube-master'][0]
- - name: Wait for netchecker agents
+ - name: Wait for netchecker agents # noqa 306
shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
register: nca_pod
until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
@@ -214,7 +214,7 @@
- inventory_hostname == groups['kube-master'][0]
- kube_network_plugin_multus|default(false)|bool
- - name: Check secondary macvlan interface
+ - name: Check secondary macvlan interface # noqa 305
shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
register: output
until: output.rc == 0

View File

@@ -1,5 +1,5 @@
---
- - name: Generate dump folder
+ - name: Generate dump folder # noqa 305
shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
no_log: true
when: inventory_hostname in groups['kube-master']