Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2026-02-18 03:30:07 -03:30)
Yamllint fixes (#4410)
* Lint everything in the repository with yamllint
* yamllint fixes: syntax fixes only
* yamllint fixes: move comments to play names
* yamllint fixes: indent comments in .gitlab-ci.yml file
Committed by: Kubernetes Prow Robot
Parent: 483f1d2ca0
Commit: 9ffc65f8f3
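
The hunks below fall into a few recurring yamllint rule families: a missing document-start marker (`---`), comments without a space after `#`, inconsistent indentation, and trailing blank lines. A minimal sketch of a yamllint configuration that enforces these rules follows; the rule names are yamllint's real ones, but the exact options kubespray's own .yamllint uses are an assumption, since that file is not shown in this diff.

---
# Illustrative yamllint config covering the rule families fixed below.
extends: default
rules:
  document-start:
    present: true                  # every file must begin with "---"
  comments:
    require-starting-space: true   # "# groups: sudo", not "#groups: sudo"
  indentation:
    spaces: 2                      # consistent two-space indentation
  empty-lines:
    max-end: 0                     # no blank lines at end of file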
@@ -1,3 +1,4 @@
+---
 apiVersion: "2015-06-15"
 
 virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
@@ -34,4 +35,3 @@ imageReferenceJson: "{{imageReference|to_json}}"
 
 storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
 storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
-
@@ -1,3 +1,4 @@
+---
 - set_fact:
     base_dir: "{{playbook_dir}}/.generated/"
 
@@ -1,2 +1,3 @@
+---
 # See distro.yaml for supported node_distro images
 node_distro: debian
@@ -1,3 +1,4 @@
+---
 distro_settings:
   debian: &DEBIAN
     image: "debian:9.5"
@@ -1,3 +1,4 @@
+---
 # kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
 # See contrib/dind/README.md
 kube_api_anonymous_auth: true
@@ -1,3 +1,4 @@
+---
 - name: set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
@@ -33,7 +34,7 @@
       # Delete docs
       path-exclude=/usr/share/doc/*
      path-include=/usr/share/doc/*/copyright
-   dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
+    dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
   when:
     - ansible_os_family == 'Debian'
 
@@ -55,7 +56,7 @@
   user:
     name: "{{ distro_user }}"
     uid: 1000
-    #groups: sudo
+    # groups: sudo
     append: yes
 
 - name: Allow password-less sudo to "{{ distro_user }}"
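
The hunk above is a pure comment-spacing fix: yamllint's comments rule rejects a comment that lacks a space after the hash. A two-line sketch of the failing and passing forms (annotations are illustrative, not yamllint output):

#groups: sudo    # rejected: "missing starting space in comment" (comments)
# groups: sudo   # accepted: one space after the "#"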
@@ -1,3 +1,4 @@
+---
 - name: set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
@@ -18,7 +19,7 @@
     state: started
     hostname: "{{ item }}"
     command: "{{ distro_init }}"
-    #recreate: yes
+    # recreate: yes
     privileged: true
     tmpfs:
       - /sys/module/nf_conntrack/parameters
 
@@ -2,8 +2,8 @@
 
 - name: Upgrade all packages to the latest version (yum)
   yum:
-   name: '*'
-   state: latest
+    name: '*'
+    state: latest
   when: ansible_os_family == "RedHat"
 
 - name: Install required packages
@@ -4,7 +4,7 @@
   vars:
     ansible_ssh_pipelining: false
   roles:
-  - { role: bootstrap-os, tags: bootstrap-os}
+    - { role: bootstrap-os, tags: bootstrap-os}
 
 - hosts: all
   gather_facts: true
@@ -22,4 +22,3 @@
 - hosts: kube-master[0]
   roles:
     - { role: kubernetes-pv }
-
@@ -22,9 +22,9 @@ galaxy_info:
         - wheezy
         - jessie
   galaxy_tags:
-  - system
-  - networking
-  - cloud
-  - clustering
-  - files
-  - sharing
+    - system
+    - networking
+    - cloud
+    - clustering
+    - files
+    - sharing
@@ -12,5 +12,5 @@
 - name: Ensure Gluster mount directories exist.
   file: "path={{ item }} state=directory mode=0775"
   with_items:
-  - "{{ gluster_mount_dir }}"
+    - "{{ gluster_mount_dir }}"
   when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -22,9 +22,9 @@ galaxy_info:
         - wheezy
         - jessie
   galaxy_tags:
-  - system
-  - networking
-  - cloud
-  - clustering
-  - files
-  - sharing
+    - system
+    - networking
+    - cloud
+    - clustering
+    - files
+    - sharing
@@ -33,24 +33,24 @@
 - name: Ensure Gluster brick and mount directories exist.
   file: "path={{ item }} state=directory mode=0775"
   with_items:
-  - "{{ gluster_brick_dir }}"
-  - "{{ gluster_mount_dir }}"
+    - "{{ gluster_brick_dir }}"
+    - "{{ gluster_mount_dir }}"
 
 - name: Configure Gluster volume.
   gluster_volume:
-   state: present
-   name: "{{ gluster_brick_name }}"
-   brick: "{{ gluster_brick_dir }}"
-   replicas: "{{ groups['gfs-cluster'] | length }}"
-   cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
-   host: "{{ inventory_hostname }}"
-   force: yes
+    state: present
+    name: "{{ gluster_brick_name }}"
+    brick: "{{ gluster_brick_dir }}"
+    replicas: "{{ groups['gfs-cluster'] | length }}"
+    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
+    host: "{{ inventory_hostname }}"
+    force: yes
   run_once: true
 
 - name: Mount glusterfs to retrieve disk size
   mount:
     name: "{{ gluster_mount_dir }}"
-   src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
+    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     fstype: glusterfs
     opts: "defaults,_netdev"
     state: mounted
@@ -63,13 +63,13 @@
 
 - name: Set Gluster disk size to variable
   set_fact:
-   gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
+    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Create file on GlusterFS
   template:
-   dest: "{{ gluster_mount_dir }}/.test-file.txt"
-   src: test-file.txt
+    dest: "{{ gluster_mount_dir }}/.test-file.txt"
+    src: test-file.txt
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Unmount glusterfs
@@ -79,4 +79,3 @@
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     state: unmounted
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
-
@@ -2,9 +2,9 @@
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
   template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
   with_items:
-  - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
-  - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
-  - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
+    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
+    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
+    - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
   register: gluster_pv
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
 
@@ -1,2 +1,3 @@
+---
 dependencies:
   - {role: kubernetes-pv/ansible, tags: apps}
@@ -2,23 +2,23 @@
 - name: "Load lvm kernel modules"
   become: true
   with_items:
-  - "dm_snapshot"
-  - "dm_mirror"
-  - "dm_thin_pool"
+    - "dm_snapshot"
+    - "dm_mirror"
+    - "dm_thin_pool"
   modprobe:
-   name: "{{ item }}"
-   state: "present"
+    name: "{{ item }}"
+    state: "present"
 
 - name: "Install glusterfs mount utils (RedHat)"
   become: true
   yum:
-   name: "glusterfs-fuse"
-   state: "present"
+    name: "glusterfs-fuse"
+    state: "present"
   when: "ansible_os_family == 'RedHat'"
 
 - name: "Install glusterfs mount utils (Debian)"
   become: true
   apt:
-   name: "glusterfs-client"
-   state: "present"
+    name: "glusterfs-client"
+    state: "present"
   when: "ansible_os_family == 'Debian'"
@@ -1,3 +1,4 @@
+---
 # Bootstrap heketi
 - name: "Get state of heketi service, deployment and pods."
   register: "initial_heketi_state"
@@ -18,7 +18,7 @@
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
   until:
-  - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
-  - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+    - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+    - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
@@ -38,4 +38,4 @@
   vars: { volume: "{{ volume_information.stdout|from_json }}" }
   when: "volume.name == 'heketidbstorage'"
 - name: "Ensure heketi database volume exists."
-  assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." }
+  assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
@@ -18,8 +18,8 @@
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
   until:
-  - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
-  - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+    - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+    - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
 - set_fact:
@@ -8,7 +8,7 @@
   register: "heketi_service"
   changed_when: false
 - name: "Ensure heketi service is available."
-   assert: { that: "heketi_service.stdout != \"\"" }
+  assert: { that: "heketi_service.stdout != \"\"" }
 - name: "Render storage class configuration."
   become: true
   vars:
@@ -2,15 +2,15 @@
 - name: "Install lvm utils (RedHat)"
   become: true
   yum:
-   name: "lvm2"
-   state: "present"
+    name: "lvm2"
+    state: "present"
   when: "ansible_os_family == 'RedHat'"
 
 - name: "Install lvm utils (Debian)"
   become: true
   apt:
-   name: "lvm2"
-   state: "present"
+    name: "lvm2"
+    state: "present"
   when: "ansible_os_family == 'Debian'"
 
 - name: "Get volume group information."
@@ -34,13 +34,13 @@
 - name: "Remove lvm utils (RedHat)"
   become: true
   yum:
-   name: "lvm2"
-   state: "absent"
+    name: "lvm2"
+    state: "absent"
   when: "ansible_os_family == 'RedHat'"
 
 - name: "Remove lvm utils (Debian)"
   become: true
   apt:
-   name: "lvm2"
-   state: "absent"
+    name: "lvm2"
+    state: "absent"
   when: "ansible_os_family == 'Debian'"
@@ -1,3 +1,4 @@
+---
 vault_deployment_type: docker
 vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
 vault_version: 0.10.1
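
As a usage note, these checks can be reproduced locally by running yamllint over the whole tree; the commit message's mention of .gitlab-ci.yml suggests CI wiring along the lines of the following sketch. The job name and stage are assumptions, not the repository's actual definition; only the yamllint invocation itself (with its standard --strict flag, which makes warnings fail the run) is standard.

---
# Hypothetical GitLab CI job running the lint shown in this commit.
yamllint:
  stage: unit-tests
  script:
    - yamllint --strict .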