Replace kube-master with kube_control_plane (#7256)

This replaces kube-master with kube_control_plane because of [1]:

  The Kubernetes project is moving away from wording that is
  considered offensive. A new working group WG Naming was created
  to track this work, and the word "master" was declared as offensive.
  A proposal was formalized for replacing the word "master" with
  "control plane". This means it should be removed from source code,
  documentation, and user-facing configuration from Kubernetes and
  its sub-projects.

NOTE: This uses kube_control_plane rather than kube-control-plane
      because hyphens are not valid in Ansible group names.

[1]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#motivation
This commit is contained in:
Kenichi Omichi
2021-03-23 17:26:05 -07:00
committed by GitHub
parent d53fd29e34
commit 486b223e01
159 changed files with 564 additions and 485 deletions

View File

@@ -12,7 +12,7 @@ platforms:
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
provisioner:
name: ansible
env:

View File

@@ -12,25 +12,25 @@ platforms:
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
- name: centos7
box: centos/7
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
- name: centos8
box: centos/8
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
- name: fedora
box: fedora/33-cloud-base
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
provisioner:
name: ansible
env:

View File

@@ -15,14 +15,14 @@ platforms:
memory: 1024
nested: true
groups:
- kube-master
- kube_control_plane
- name: ubuntu20
box: generic/ubuntu2004
cpus: 1
memory: 1024
nested: true
groups:
- kube-master
- kube_control_plane
provisioner:
name: ansible
env:

View File

@@ -30,7 +30,7 @@ download_container: true
# if this is set to true, uses the localhost for download_run_once mode
# (requires docker and sudo to access docker). You may want this option for
# local caching of docker images or for Flatcar Container Linux by Kinvolk cluster nodes.
# Otherwise, uses the first node in the kube-master group to store images
# Otherwise, uses the first node in the kube_control_plane group to store images
# in the download_run_once mode.
download_localhost: false
@@ -42,8 +42,8 @@ download_always_pull: false
# SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
download_validate_certs: true
# Use the first kube-master if download_localhost is not set
download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"
# Use the first kube_control_plane if download_localhost is not set
download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube_control_plane'][0] }}{% endif %}"
# Arch of Docker images and needed packages
image_arch: "{{host_architecture | default('amd64')}}"
@@ -733,7 +733,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube-master
- kube_control_plane
crictl:
file: true
@@ -883,7 +883,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube-master
- kube_control_plane
weave_kube:
enabled: "{{ kube_network_plugin == 'weave' }}"
@@ -973,7 +973,7 @@ downloads:
tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
nodelocaldns:
enabled: "{{ enable_nodelocaldns }}"
@@ -991,7 +991,7 @@ downloads:
tag: "{{ dnsautoscaler_image_tag }}"
sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
testbox:
enabled: false
@@ -1011,7 +1011,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube-master
- kube_control_plane
registry:
enabled: "{{ registry_enabled }}"
@@ -1038,7 +1038,7 @@ downloads:
tag: "{{ metrics_server_image_tag }}"
sha256: "{{ metrics_server_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
addon_resizer:
# Currently addon_resizer is only used by metrics server
@@ -1048,7 +1048,7 @@ downloads:
tag: "{{ addon_resizer_image_tag }}"
sha256: "{{ addon_resizer_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
local_volume_provisioner:
enabled: "{{ local_volume_provisioner_enabled }}"
@@ -1219,7 +1219,7 @@ downloads:
tag: "{{ dashboard_image_tag }}"
sha256: "{{ dashboard_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
dashboard_metrics_scrapper:
enabled: "{{ dashboard_enabled }}"
@@ -1228,7 +1228,7 @@ downloads:
tag: "{{ dashboard_metrics_scraper_tag }}"
sha256: "{{ dashboard_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
download_defaults:
container: false

View File

@@ -18,7 +18,7 @@
include_tasks: prep_kubeadm_images.yml
when:
- not skip_downloads|default(false)
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
tags:
- download
- upload

View File

@@ -6,7 +6,7 @@
ignore_errors: true
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Delete kubeadm CoreDNS
kube:
@@ -17,7 +17,7 @@
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- createdby_annotation.stdout != 'kubespray'
- name: Kubernetes Apps | Delete kubeadm Kube-DNS service
@@ -29,4 +29,4 @@
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -20,7 +20,7 @@
clusterIP: "{{ skydns_server }}"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
@@ -38,6 +38,6 @@
coredns_ordinal_suffix: "-secondary"
when:
- dns_mode == 'coredns_dual'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns

View File

@@ -6,7 +6,7 @@
with_items:
- { file: dashboard.yml, type: deploy, name: kubernetes-dashboard }
register: manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start dashboard
kube:
@@ -17,4 +17,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -9,12 +9,12 @@
until: result.status == 200
retries: 20
delay: 1
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Cleanup DNS
import_tasks: cleanup_dns.yml
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- coredns
@@ -24,7 +24,7 @@
import_tasks: "coredns.yml"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
@@ -32,7 +32,7 @@
import_tasks: "nodelocaldns.yml"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube-master'] | first
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
@@ -50,7 +50,7 @@
- "{{ nodelocaldns_manifests.results | default({}) }}"
when:
- dns_mode != 'none'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
register: resource_result
until: resource_result is succeeded

View File

@@ -28,7 +28,7 @@
with_items: "{{ netchecker_templates }}"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start Netchecker Resources
kube:
@@ -39,4 +39,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

View File

@@ -10,7 +10,7 @@
secondaryclusterIP: "{{ skydns_server_secondary }}"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube-master'] | first
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns
@@ -39,7 +39,7 @@
{%- endif -%}
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube-master'] | first
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns

View File

@@ -7,7 +7,7 @@
template:
src: controller-manager-config.yml.j2
dest: "{{ kube_config_dir }}/controller-manager-config.yml"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci
- name: "OCI Cloud Controller | Slurp Configuration"
@@ -18,14 +18,14 @@
- name: "OCI Cloud Controller | Encode Configuration"
set_fact:
controller_manager_config_base64: "{{ controller_manager_config.content }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci
- name: "OCI Cloud Controller | Generate Manifests"
template:
src: oci-cloud-provider.yml.j2
dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci
- name: "OCI Cloud Controller | Apply Manifests"
@@ -33,5 +33,5 @@
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
state: latest
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci

View File

@@ -9,14 +9,14 @@
until: result.status == 200
retries: 10
delay: 6
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Check AppArmor status
command: which apparmor_parser
register: apparmor_status
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
@@ -24,7 +24,7 @@
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Render templates for PodSecurityPolicy
template:
@@ -37,7 +37,7 @@
- {file: psp-crb.yml, type: rolebinding, name: psp-crb}
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
kube:
@@ -52,7 +52,7 @@
delay: 6
with_items: "{{ psp_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"
@@ -64,7 +64,7 @@
register: node_crb_manifest
when:
- rbac_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
kube:
@@ -80,7 +80,7 @@
when:
- rbac_enabled
- node_crb_manifest.changed
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
template:
@@ -90,7 +90,7 @@
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Apply webhook ClusterRole
@@ -104,7 +104,7 @@
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_cr_manifest.changed
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole
@@ -115,7 +115,7 @@
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Grant system:nodes the webhook ClusterRole
@@ -129,7 +129,7 @@
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_crb_manifest.changed
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- include_tasks: oci.yml
@@ -140,7 +140,7 @@
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
when: inventory_hostname == groups['kube-master']|last
when: inventory_hostname == groups['kube_control_plane']|last
- name: PriorityClass | Create k8s-cluster-critical
kube:
@@ -149,4 +149,4 @@
resource: "PriorityClass"
filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
state: latest
when: inventory_hostname == groups['kube-master']|last
when: inventory_hostname == groups['kube_control_plane']|last

View File

@@ -6,7 +6,7 @@
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Apply OCI RBAC
kube:
@@ -15,4 +15,4 @@
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -38,7 +38,7 @@
- { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset }
register: container_engine_accelerator_manifests
when:
- inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container
- inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container
- name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators
kube:
@@ -51,4 +51,4 @@
with_items:
- "{{ container_engine_accelerator_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
- inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported

View File

@@ -6,7 +6,7 @@
dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
mode: "0664"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: crun | Apply manifests
kube:
@@ -16,4 +16,4 @@
filename: "{{ kube_config_dir }}/runtimeclass-crun.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -20,7 +20,7 @@
with_items: "{{ kata_containers_templates }}"
register: kata_containers_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kata Containers | Apply manifests
kube:
@@ -31,4 +31,4 @@
state: "latest"
with_items: "{{ kata_containers_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -9,7 +9,7 @@
- {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml}
- {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml}
register: aws_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: aws-ebs-csi-driver
- name: AWS CSI Driver | Apply Manifests
@@ -20,7 +20,7 @@
with_items:
- "{{ aws_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -8,14 +8,14 @@
dest: "{{ kube_config_dir }}/azure_csi_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: azure-csi-driver
- name: Azure CSI Driver | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/azure_csi_cloud_config"
register: cloud_config_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: azure-csi-driver
- name: Azure CSI Driver | Generate Manifests
@@ -30,7 +30,7 @@
- {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml}
- {name: azure-csi-node-info-crd.yml.j2, file: azure-csi-node-info-crd.yml}
register: azure_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: azure-csi-driver
- name: Azure CSI Driver | Apply Manifests
@@ -41,7 +41,7 @@
with_items:
- "{{ azure_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -20,14 +20,14 @@
dest: "{{ kube_config_dir }}/cinder_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: cinder-csi-driver
- name: Cinder CSI Driver | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/cinder_cloud_config"
register: cloud_config_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: cinder-csi-driver
- name: Cinder CSI Driver | Generate Manifests
@@ -43,7 +43,7 @@
- {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml}
- {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml}
register: cinder_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: cinder-csi-driver
- name: Cinder CSI Driver | Apply Manifests
@@ -54,7 +54,7 @@
with_items:
- "{{ cinder_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -8,7 +8,7 @@
- {name: volumesnapshotcontents, file: volumesnapshotcontents.yml}
- {name: volumesnapshots, file: volumesnapshots.yml}
register: csi_crd_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: csi-driver
- name: CSI CRD | Apply Manifests
@@ -20,7 +20,7 @@
with_items:
- "{{ csi_crd_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -11,14 +11,14 @@
dest: "{{ kube_config_dir }}/cloud-sa.json"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: gcp-pd-csi-driver
- name: GCP PD CSI Driver | Get base64 cloud-sa.json
slurp:
src: "{{ kube_config_dir }}/cloud-sa.json"
register: gcp_cred_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: gcp-pd-csi-driver
- name: GCP PD CSI Driver | Generate Manifests
@@ -31,7 +31,7 @@
- {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml}
- {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml}
register: gcp_pd_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: gcp-pd-csi-driver
- name: GCP PD CSI Driver | Apply Manifests
@@ -42,7 +42,7 @@
with_items:
- "{{ gcp_pd_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -9,7 +9,7 @@
mode: 0640
with_items:
- vsphere-csi-cloud-config
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: vsphere-csi-driver
- name: vSphere CSI Driver | Generate Manifests
@@ -21,13 +21,13 @@
- vsphere-csi-controller-ss.yml
- vsphere-csi-node.yml
register: vsphere_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: vsphere-csi-driver
- name: vSphere CSI Driver | Generate a CSI secret manifest
command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: true
tags: vsphere-csi-driver
@@ -35,7 +35,7 @@
command:
cmd: "{{ bin_dir }}/kubectl apply -f -"
stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: true
tags: vsphere-csi-driver
@@ -47,7 +47,7 @@
with_items:
- "{{ vsphere_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item }}"

View File

@@ -6,7 +6,7 @@ dependencies:
- cloud_provider == "external"
- external_cloud_provider is defined
- external_cloud_provider == "openstack"
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- external-cloud-controller
- external-openstack
@@ -16,7 +16,7 @@ dependencies:
- cloud_provider == "external"
- external_cloud_provider is defined
- external_cloud_provider == "vsphere"
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- external-cloud-controller
- external-vsphere

View File

@@ -20,14 +20,14 @@
dest: "{{ kube_config_dir }}/external_openstack_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-openstack
- name: External OpenStack Cloud Controller | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/external_openstack_cloud_config"
register: external_openstack_cloud_config_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-openstack
- name: External OpenStack Cloud Controller | Generate Manifests
@@ -42,7 +42,7 @@
- {name: external-openstack-cloud-controller-manager-role-bindings, file: external-openstack-cloud-controller-manager-role-bindings.yml}
- {name: external-openstack-cloud-controller-manager-ds, file: external-openstack-cloud-controller-manager-ds.yml}
register: external_openstack_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-openstack
- name: External OpenStack Cloud Controller | Apply Manifests
@@ -53,7 +53,7 @@
with_items:
- "{{ external_openstack_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -9,7 +9,7 @@
mode: 0640
with_items:
- external-vsphere-cpi-cloud-config
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Controller | Generate Manifests
@@ -22,20 +22,20 @@
- external-vsphere-cloud-controller-manager-role-bindings.yml
- external-vsphere-cloud-controller-manager-ds.yml
register: external_vsphere_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Provider Interface | Create a CPI configMap manifest
command: "{{ bin_dir }}/kubectl create configmap cloud-config --from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system --dry-run --save-config -o yaml"
register: external_vsphere_configmap_manifest
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Provider Interface | Apply a CPI configMap manifest
command:
cmd: "{{ bin_dir }}/kubectl apply -f -"
stdin: "{{ external_vsphere_configmap_manifest.stdout }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Controller | Apply Manifests
@@ -46,7 +46,7 @@
with_items:
- "{{ external_vsphere_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item }}"

View File

@@ -5,7 +5,7 @@
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -14,7 +14,7 @@
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -23,7 +23,7 @@
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -35,7 +35,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: CephFS Provisioner | Templates list
set_fact:
@@ -65,7 +65,7 @@
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
with_items: "{{ cephfs_provisioner_templates }}"
register: cephfs_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: CephFS Provisioner | Apply manifests
kube:
@@ -76,4 +76,4 @@
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ cephfs_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -7,7 +7,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Local Path Provisioner | Create claim root dir
file:
@@ -42,7 +42,7 @@
dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}"
with_items: "{{ local_path_provisioner_templates }}"
register: local_path_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Local Path Provisioner | Apply manifests
kube:
@@ -53,4 +53,4 @@
filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ local_path_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -42,7 +42,7 @@
dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
with_items: "{{ local_volume_provisioner_templates }}"
register: local_volume_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Local Volume Provisioner | Apply manifests
kube:
@@ -53,6 +53,6 @@
filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ local_volume_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
loop_control:
label: "{{ item.item.file }}"

View File

@@ -3,7 +3,7 @@ dependencies:
- role: kubernetes-apps/external_provisioner/local_volume_provisioner
when:
- local_volume_provisioner_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- apps
- local-volume-provisioner

View File

@@ -5,7 +5,7 @@
path: "{{ kube_config_dir }}/addons/rbd_provisioner"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -14,7 +14,7 @@
{{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -23,7 +23,7 @@
{{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -35,7 +35,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: RBD Provisioner | Templates list
set_fact:
@@ -65,7 +65,7 @@
dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
with_items: "{{ rbd_provisioner_templates }}"
register: rbd_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: RBD Provisioner | Apply manifests
kube:
@@ -76,4 +76,4 @@
filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ rbd_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -20,7 +20,7 @@
- { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy }
register: alb_ingress_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: ALB Ingress Controller | Apply manifests
kube:
@@ -32,4 +32,4 @@
state: "latest"
with_items: "{{ alb_ingress_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -8,7 +8,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Ambassador | Templates list
set_fact:
@@ -29,7 +29,7 @@
loop: "{{ ingress_ambassador_templates }}"
register: ingress_ambassador_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Ambassador | Apply manifests
kube:
@@ -41,7 +41,7 @@
state: "latest"
loop: "{{ ingress_ambassador_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
# load the AmbassadorInstallation _after_ the CustomResourceDefinition has been loaded
@@ -57,7 +57,7 @@
loop: "{{ ingress_ambassador_cr_templates }}"
register: ingress_ambassador_cr_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Ambassador | Apply AmbassadorInstallation
kube:
@@ -69,4 +69,4 @@
state: "latest"
loop: "{{ ingress_ambassador_cr_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -5,7 +5,7 @@
path: "{{ kube_config_dir }}/addons/cert_manager"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -14,7 +14,7 @@
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -26,7 +26,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Templates list
set_fact:
@@ -54,7 +54,7 @@
with_items: "{{ cert_manager_templates }}"
register: cert_manager_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Apply manifests
kube:
@@ -65,12 +65,12 @@
state: "latest"
with_items: "{{ cert_manager_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Wait for Webhook pods become ready
command: "{{ bin_dir }}/kubectl wait po --namespace={{ cert_manager_namespace }} --selector app=webhook --for=condition=Ready --timeout=600s"
register: cert_manager_webhook_pods_ready
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Create ClusterIssuer manifest
template:
@@ -78,7 +78,7 @@
dest: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
register: cert_manager_clusterissuer_manifest
when:
- inventory_hostname == groups['kube-master'][0] and cert_manager_webhook_pods_ready is succeeded
- inventory_hostname == groups['kube_control_plane'][0] and cert_manager_webhook_pods_ready is succeeded
- name: Cert Manager | Apply ClusterIssuer manifest
kube:
@@ -86,4 +86,4 @@
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
state: "latest"
when: inventory_hostname == groups['kube-master'][0] and cert_manager_clusterissuer_manifest is succeeded
when: inventory_hostname == groups['kube_control_plane'][0] and cert_manager_clusterissuer_manifest is succeeded

View File

@@ -8,7 +8,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: NGINX Ingress Controller | Templates list
set_fact:
@@ -38,7 +38,7 @@
with_items: "{{ ingress_nginx_templates }}"
register: ingress_nginx_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: NGINX Ingress Controller | Apply manifests
kube:
@@ -50,4 +50,4 @@
state: "latest"
with_items: "{{ ingress_nginx_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -2,7 +2,7 @@
dependencies:
- role: kubernetes-apps/ansible
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- role: kubernetes-apps/helm
when:
@@ -13,21 +13,21 @@ dependencies:
- role: kubernetes-apps/registry
when:
- registry_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- registry
- role: kubernetes-apps/metrics_server
when:
- metrics_server_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- metrics_server
- role: kubernetes-apps/csi_driver/csi_crd
when:
- cinder_csi_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- csi-driver
@@ -69,19 +69,19 @@ dependencies:
- role: kubernetes-apps/persistent_volumes
when:
- persistent_volumes_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- persistent_volumes
- role: kubernetes-apps/snapshots
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags:
- snapshots
- csi-driver
- role: kubernetes-apps/container_runtimes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- container-runtimes
@@ -94,13 +94,13 @@ dependencies:
when:
- cloud_provider is defined
- cloud_provider == "oci"
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- oci
- role: kubernetes-apps/metallb
when:
- metallb_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- metallb

View File

@@ -22,7 +22,7 @@
register: apparmor_status
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
@@ -30,7 +30,7 @@
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: "Kubernetes Apps | Lay Down MetalLB"
become: true
@@ -38,7 +38,7 @@
with_items: ["metallb.yml", "metallb-config.yml"]
register: "rendering"
when:
- "inventory_hostname == groups['kube-master'][0]"
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: "Kubernetes Apps | Install and configure MetalLB"
kube:
@@ -49,7 +49,7 @@
become: true
with_items: "{{ rendering.results }}"
when:
- "inventory_hostname == groups['kube-master'][0]"
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Check existing secret of MetalLB
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
@@ -57,18 +57,18 @@
become: true
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Create random bytes for MetalLB
command: "openssl rand -base64 32"
register: metallb_rand
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_secret.rc != 0
- name: Kubernetes Apps | Install secret of MetalLB if not existing
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system create secret generic memberlist --from-literal=secretkey={{ metallb_rand.stdout }}"
become: true
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_secret.rc != 0

View File

@@ -2,14 +2,14 @@
# If all masters have node role, there are no tainted master and toleration should not be specified.
- name: Check all masters are node or not
set_fact:
masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube-master']) == groups['kube-master'] }}"
masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
- name: Metrics Server | Delete addon dir
file:
path: "{{ kube_config_dir }}/addons/metrics_server"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@@ -21,7 +21,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Metrics Server | Templates list
set_fact:
@@ -43,7 +43,7 @@
with_items: "{{ metrics_server_templates }}"
register: metrics_server_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Metrics Server | Apply manifests
kube:
@@ -54,4 +54,4 @@
state: "latest"
with_items: "{{ metrics_server_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -8,4 +8,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ canal_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

View File

@@ -8,7 +8,7 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ cilium_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Cilium | Wait for pods to run
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
@@ -17,4 +17,4 @@
retries: 30
delay: 10
ignore_errors: yes
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -8,7 +8,7 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ flannel_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Flannel | Wait for flannel subnet.env file presence
wait_for:

View File

@@ -6,4 +6,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ kube_ovn_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

View File

@@ -8,7 +8,7 @@
resource: "ds"
namespace: "kube-system"
state: "latest"
delegate_to: "{{ groups['kube-master'] | first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true
- name: kube-router | Wait for kube-router pods to be ready
@@ -18,6 +18,6 @@
retries: 30
delay: 10
ignore_errors: yes
delegate_to: "{{ groups['kube-master'] | first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true
changed_when: false

View File

@@ -8,4 +8,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

View File

@@ -6,4 +6,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ ovn4nfv_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

View File

@@ -8,7 +8,7 @@
resource: "ds"
namespace: "kube-system"
state: "latest"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Weave | Wait for Weave to become available
uri:
@@ -18,4 +18,4 @@
retries: 180
delay: 5
until: "weave_status.status == 200 and 'Status: ready' in weave_status.content"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Persistent Volumes | Add AWS EBS CSI Storage Class
kube:
@@ -15,5 +15,5 @@
filename: "{{ kube_config_dir }}/aws-ebs-csi-storage-class.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- manifests.changed

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/azure-csi-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Persistent Volumes | Add Azure CSI Storage Class
kube:
@@ -15,5 +15,5 @@
filename: "{{ kube_config_dir }}/azure-csi-storage-class.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- manifests.changed

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/cinder-csi-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Persistent Volumes | Add Cinder CSI Storage Class
kube:
@@ -15,5 +15,5 @@
filename: "{{ kube_config_dir }}/cinder-csi-storage-class.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- manifests.changed

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Persistent Volumes | Add GCP PD CSI Storage Class
kube:
@@ -15,5 +15,5 @@
filename: "{{ kube_config_dir }}/gcp-pd-csi-storage-class.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- manifests.changed

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/openstack-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
kube:
@@ -15,5 +15,5 @@
filename: "{{ kube_config_dir }}/openstack-storage-class.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- manifests.changed

View File

@@ -19,7 +19,7 @@
- {name: calico-kube-controllers, file: calico-kube-crb.yml, type: clusterrolebinding}
register: calico_kube_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- rbac_enabled or item.type not in rbac_resources
- name: Start of Calico kube controllers
@@ -33,7 +33,7 @@
with_items:
- "{{ calico_kube_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -38,7 +38,7 @@
dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}"
with_items: "{{ registry_templates }}"
register: registry_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Registry | Apply manifests
kube:
@@ -49,7 +49,7 @@
filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}"
state: "latest"
with_items: "{{ registry_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Registry | Create PVC manifests
template:
@@ -61,7 +61,7 @@
when:
- registry_storage_class != none and registry_storage_class
- registry_disk_size != none and registry_disk_size
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Registry | Apply PVC manifests
kube:
@@ -75,4 +75,4 @@
when:
- registry_storage_class != none and registry_storage_class
- registry_disk_size != none and registry_disk_size
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Snapshots | Add Cinder CSI Snapshot Class
kube:
@@ -13,5 +13,5 @@
filename: "{{ kube_config_dir }}/cinder-csi-snapshot-class.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- manifests.changed

View File

@@ -7,7 +7,7 @@
- {name: rbac-snapshot-controller, file: rbac-snapshot-controller.yml}
- {name: snapshot-controller, file: snapshot-controller.yml}
register: snapshot_controller_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: snapshot-controller
- name: Snapshot Controller | Apply Manifests
@@ -18,7 +18,7 @@
with_items:
- "{{ snapshot_controller_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"

View File

@@ -28,7 +28,7 @@
kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
delegate_to: "{{ item }}"
delegate_facts: true
with_inventory_hostnames: kube-master
with_inventory_hostnames: kube_control_plane
when: kube_encrypt_token_extracted is defined
- name: Write secrets for encrypting secret data at rest

View File

@@ -18,15 +18,15 @@
--upload-certs
register: kubeadm_upload_cert
when:
- inventory_hostname == groups['kube-master']|first
- inventory_hostname == groups['kube_control_plane']|first
- name: Parse certificate key if not set
set_fact:
kubeadm_certificate_key: "{{ hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
run_once: yes
when:
- hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'] is defined
- hostvars[groups['kube-master'][0]]['kubeadm_upload_cert'] is not skipped
- hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is defined
- hostvars[groups['kube_control_plane'][0]]['kubeadm_upload_cert'] is not skipped
- name: Create kubeadm ControlPlane config
template:
@@ -35,7 +35,7 @@
mode: 0640
backup: yes
when:
- inventory_hostname != groups['kube-master']|first
- inventory_hostname != groups['kube_control_plane']|first
- not kubeadm_already_run.stat.exists
- name: Wait for k8s apiserver
@@ -64,5 +64,5 @@
throttle: 1
until: kubeadm_join_control_plane is succeeded
when:
- inventory_hostname != groups['kube-master']|first
- inventory_hostname != groups['kube_control_plane']|first
- kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists

View File

@@ -25,7 +25,7 @@
- name: kubeadm | aggregate all SANs
set_fact:
apiserver_sans: "{{ (sans_base + groups['kube-master'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn) | unique }}"
apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_address + sans_override + sans_hostname + sans_fqdn) | unique }}"
vars:
sans_base:
- "kubernetes"
@@ -38,12 +38,12 @@
sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
sans_access_ip: "{{ groups['kube-master'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
sans_ip: "{{ groups['kube-master'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
sans_address: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
sans_hostname: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
sans_fqdn: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
tags: facts
- name: Create audit-policy directory
@@ -86,7 +86,7 @@
register: apiserver_sans_check
changed_when: "'does match certificate' not in apiserver_sans_check.stdout"
when:
- inventory_hostname == groups['kube-master']|first
- inventory_hostname == groups['kube_control_plane']|first
- kubeadm_already_run.stat.exists
- name: kubeadm | regenerate apiserver cert 1/2
@@ -97,7 +97,7 @@
- apiserver.crt
- apiserver.key
when:
- inventory_hostname == groups['kube-master']|first
- inventory_hostname == groups['kube_control_plane']|first
- kubeadm_already_run.stat.exists
- apiserver_sans_check.changed
@@ -107,7 +107,7 @@
init phase certs apiserver
--config={{ kube_config_dir }}/kubeadm-config.yaml
when:
- inventory_hostname == groups['kube-master']|first
- inventory_hostname == groups['kube_control_plane']|first
- kubeadm_already_run.stat.exists
- apiserver_sans_check.changed
@@ -123,7 +123,7 @@
# Retry is because upload config sometimes fails
retries: 3
until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
when: inventory_hostname == groups['kube-master']|first and not kubeadm_already_run.stat.exists
when: inventory_hostname == groups['kube_control_plane']|first and not kubeadm_already_run.stat.exists
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
@@ -132,7 +132,7 @@
- name: set kubeadm certificate key
set_fact:
kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
with_items: "{{ hostvars[groups['kube-master'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}"
when:
- kubeadm_certificate_key is not defined
- (item | trim) is match('.*--certificate-key.*')
@@ -143,7 +143,7 @@
{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }}
changed_when: false
when:
- inventory_hostname == groups['kube-master']|first
- inventory_hostname == groups['kube_control_plane']|first
- kubeadm_token is defined
- kubeadm_refresh_token
tags:
@@ -156,7 +156,7 @@
retries: 5
delay: 5
until: temp_token is succeeded
delegate_to: "{{ groups['kube-master'] | first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
when: kubeadm_token is not defined
tags:
- kubeadm_token
@@ -180,7 +180,7 @@
# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: kubeadm | Remove taint for master with node role
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}"
delegate_to: "{{ groups['kube-master'] | first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
with_items:
- "node-role.kubernetes.io/master:NoSchedule-"
- "node-role.kubernetes.io/control-plane:NoSchedule-"

View File

@@ -3,7 +3,7 @@
uri:
url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
validate_certs: false
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']
register: _result
retries: 60
delay: 5
@@ -23,7 +23,7 @@
# Retry is because upload config sometimes fails
retries: 3
until: kubeadm_upgrade.rc == 0
when: inventory_hostname == groups['kube-master']|first
when: inventory_hostname == groups['kube_control_plane']|first
failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
@@ -40,7 +40,7 @@
--etcd-upgrade={{ etcd_kubeadm_enabled | bool | lower }}
--force
register: kubeadm_upgrade
when: inventory_hostname != groups['kube-master']|first
when: inventory_hostname != groups['kube_control_plane']|first
failed_when:
- kubeadm_upgrade.rc != 0
- '"field is immutable" not in kubeadm_upgrade.stderr'

View File

@@ -3,7 +3,7 @@ Description=Timer to renew K8S control plane certificates
[Timer]
# First Monday of each month
OnCalendar=Mon *-*-1..7 03:{{ groups['kube-master'].index(inventory_hostname) }}0:00
OnCalendar=Mon *-*-1..7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00
[Install]
WantedBy=multi-user.target

View File

@@ -16,7 +16,7 @@ nodeRegistration:
{% if kube_override_hostname|default('') %}
name: {{ kube_override_hostname }}
{% endif %}
{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube-node'] %}
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master

View File

@@ -1,7 +1,7 @@
---
- name: Parse certificate key if not set
set_fact:
kubeadm_certificate_key: "{{ hostvars[groups['kube-master'][0]]['kubeadm_certificate_key'] }}"
kubeadm_certificate_key: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_certificate_key'] }}"
when: kubeadm_certificate_key is undefined
- name: Pull control plane certs down

View File

@@ -25,7 +25,7 @@
get_checksum: no
get_mime: no
register: kubeadm_ca_stat
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
- name: Calculate kubeadm CA cert hash
@@ -36,14 +36,14 @@
when:
- kubeadm_ca_stat.stat is defined
- kubeadm_ca_stat.stat.exists
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
changed_when: false
- name: Create kubeadm token for joining nodes with 24h expiration (default)
command: "{{ bin_dir }}/kubeadm token create"
register: temp_token
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kubeadm_token is not defined
changed_when: false
@@ -118,7 +118,7 @@
args:
executable: /bin/bash
run_once: true
delegate_to: "{{ groups['kube-master']|first }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
delegate_facts: false
when:
- kubeadm_config_api_fqdn is not defined
@@ -138,7 +138,7 @@
- name: Restart all kube-proxy pods to ensure that they load the new configmap
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
run_once: true
delegate_to: "{{ groups['kube-master']|first }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
delegate_facts: false
when:
- kubeadm_config_api_fqdn is not defined
@@ -151,6 +151,6 @@
include_tasks: kubeadm_etcd_node.yml
when:
- etcd_kubeadm_enabled
- inventory_hostname not in groups['kube-master']
- inventory_hostname not in groups['kube_control_plane']
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"

View File

@@ -9,7 +9,7 @@
until: result.status == 200
retries: 10
delay: 6
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Set role node label to empty list
set_fact:
@@ -42,6 +42,6 @@
command: >-
{{ bin_dir }}/kubectl label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
loop: "{{ role_node_labels + inventory_node_labels }}"
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
changed_when: false
...

View File

@@ -8,7 +8,7 @@
tags:
- kubeadm
when:
- not inventory_hostname in groups['kube-master']
- not inventory_hostname in groups['kube_control_plane']
- name: install | Copy kubelet binary from download dir
copy:

View File

@@ -38,6 +38,6 @@ backend kube_api_backend
default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
option httpchk GET /healthz
http-check expect status 200
{% for host in groups['kube-master'] -%}
{% for host in groups['kube_control_plane'] -%}
server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }} check check-ssl verify none
{% endfor -%}

View File

@@ -13,7 +13,7 @@ events {
stream {
upstream kube_apiserver {
least_conn;
{% for host in groups['kube-master'] -%}
{% for host in groups['kube_control_plane'] -%}
server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(fallback_ips[host])) }}:{{ kube_apiserver_port }};
{% endfor -%}
}

View File

@@ -55,7 +55,7 @@
get_checksum: no
get_mime: no
register: kube_apiserver_set
when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
# FIXME(mattymo): Also restart for kubeadm mode
- name: Preinstall | kube-controller configured
@@ -65,13 +65,13 @@
get_checksum: no
get_mime: no
register: kube_controller_set
when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
- name: Preinstall | restart kube-controller-manager docker
shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
when:
- container_manager == "docker"
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_controller_set.stat.exists
@@ -80,7 +80,7 @@
shell: "{{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
when:
- container_manager in ['crio', 'containerd']
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_controller_set.stat.exists
@@ -89,7 +89,7 @@
shell: "{{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
when:
- container_manager == "docker"
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
@@ -97,7 +97,7 @@
shell: "{{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
when:
- container_manager in ['crio', 'containerd']
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'

View File

@@ -1,9 +1,9 @@
---
- name: Stop if either kube-master or kube-node group is empty
- name: Stop if either kube_control_plane or kube-node group is empty
assert:
that: "groups.get('{{ item }}')"
with_items:
- kube-master
- kube_control_plane
- kube-node
run_once: true
when: not ignore_assert_errors
@@ -79,7 +79,7 @@
that: ansible_memtotal_mb >= minimal_master_memory_mb
when:
- not ignore_assert_errors
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- name: Stop if memory is too small for nodes
assert:
@@ -136,7 +136,7 @@
assert:
that: rbac_enabled and kube_api_anonymous_auth
when:
- kube_apiserver_insecure_port == 0 and inventory_hostname in groups['kube-master']
- kube_apiserver_insecure_port == 0 and inventory_hostname in groups['kube_control_plane']
- not ignore_assert_errors
- name: Stop if kernel version is too low
@@ -193,7 +193,7 @@
- kube_network_plugin == 'calico'
- 'calico_version_on_server.stdout is defined'
- calico_version_on_server.stdout
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
run_once: yes
- name: "Check that cluster_id is set if calico_rr enabled"
@@ -204,7 +204,7 @@
when:
- kube_network_plugin == 'calico'
- peer_with_calico_rr
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
run_once: yes
- name: "Check that calico_rr nodes are in k8s-cluster group"

View File

@@ -5,7 +5,7 @@
get_attributes: no
get_checksum: yes
get_mime: no
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
register: known_tokens_master
run_once: true
@@ -32,7 +32,7 @@
set_fact:
sync_tokens: >-
{%- set tokens = {'sync': False} -%}
{%- for server in groups['kube-master'] | intersect(ansible_play_batch)
{%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch)
if (not hostvars[server].known_tokens.stat.exists) or
(hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%}
{%- set _ = tokens.update({'sync': True}) -%}

View File

@@ -5,7 +5,7 @@
dest: "{{ kube_script_dir }}/kube-gen-token.sh"
mode: 0700
run_once: yes
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens|default(false)
- name: Gen_tokens | generate tokens for master components
@@ -14,11 +14,11 @@
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:kubectl" ]
- "{{ groups['kube-master'] }}"
- "{{ groups['kube_control_plane'] }}"
register: gentoken_master
changed_when: "'Added' in gentoken_master.stdout"
run_once: yes
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens|default(false)
- name: Gen_tokens | generate tokens for node components
@@ -31,14 +31,14 @@
register: gentoken_node
changed_when: "'Added' in gentoken_node.stdout"
run_once: yes
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens|default(false)
- name: Gen_tokens | Get list of tokens from first master
command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
register: tokens_list
check_mode: no
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when: sync_tokens|default(false)
@@ -49,7 +49,7 @@
executable: /bin/bash
register: tokens_data
check_mode: no
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when: sync_tokens|default(false)
@@ -58,7 +58,7 @@
args:
executable: /bin/bash
when:
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- sync_tokens|default(false)
- inventory_hostname != groups['kube-master'][0]
- inventory_hostname != groups['kube_control_plane'][0]
- tokens_data.stdout

View File

@@ -447,11 +447,11 @@ ssl_ca_dirs: |-
]
# Vars for pointing to kubernetes api endpoints
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
kube_apiserver_count: "{{ groups['kube-master'] | length }}"
is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}"
kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(fallback_ips[groups['kube-master'][0]])) }}"
first_kube_master: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(fallback_ips[groups['kube_control_plane'][0]])) }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
loadbalancer_apiserver_type: "nginx"
# applied if only external loadbalancer_apiserver is defined, otherwise ignored
@@ -483,7 +483,7 @@ kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
etcd_events_cluster_enabled: false
# etcd group can be empty when kubeadm manages etcd
etcd_hosts: "{{ groups['etcd'] | default(groups['kube-master']) }}"
etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"

View File

@@ -7,7 +7,7 @@
{{ loadbalancer_apiserver.address | default('') }},
{%- endif -%}
{%- if no_proxy_exclude_workers | default(false) -%}
{% set cluster_or_master = 'kube-master' %}
{% set cluster_or_master = 'kube_control_plane' %}
{%- else -%}
{% set cluster_or_master = 'k8s-cluster' %}
{%- endif -%}

View File

@@ -43,7 +43,7 @@
changed_when: False
register: calico
run_once: True
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: "Set calico_pool_conf"
set_fact:

View File

@@ -39,7 +39,7 @@
include_tasks: typha_certs.yml
when:
- typha_secure
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Install calicoctl wrapper script
template:
@@ -74,14 +74,14 @@
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined
assert:
that: "[calico_pool_cidr] | ipaddr(kube_pods_subnet) | length == 1"
msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- 'calico_conf.stdout == "0"'
- calico_pool_cidr is defined
@@ -97,7 +97,7 @@
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- enable_dual_stack_networks
- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined
@@ -105,7 +105,7 @@
that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1"
msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
- calico_pool_cidr_ipv6 is defined
- enable_dual_stack_networks
@@ -134,9 +134,9 @@
filename: "{{ kube_config_dir }}/kdd-crds.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
when:
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- calico_datastore == "kdd"
- name: Calico | Configure calico network pool
@@ -157,7 +157,7 @@
"vxlanMode": "{{ calico_vxlan_mode }}",
"natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }}
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- 'calico_conf.stdout == "0"'
- name: Calico | Configure calico ipv6 network pool (version >= v3.3.0)
@@ -176,7 +176,7 @@
"vxlanMode": "{{ calico_vxlan_mode_ipv6 }}",
"natOutgoing": {{ nat_outgoing_ipv6|default(false) and not peer_with_router_ipv6|default(false) }} }}
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0"
- calico_version is version("v3.3.0", ">=")
- enable_dual_stack_networks | bool
@@ -214,7 +214,7 @@
"serviceExternalIPs": {{ _service_external_ips|default([]) }} }}
changed_when: false
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Configure peering with router(s) at global scope
command:
@@ -238,7 +238,7 @@
with_items:
- "{{ peers|selectattr('scope','defined')|selectattr('scope','equalto', 'global')|list|default([]) }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- peer_with_router|default(false)
- name: Calico | Configure peering with route reflectors at global scope
@@ -264,7 +264,7 @@
with_items:
- "{{ groups['calico-rr'] | default([]) }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- peer_with_calico_rr|default(false)
- name: Calico | Configure route reflectors to peer with each other
@@ -290,7 +290,7 @@
with_items:
- "{{ groups['calico-rr'] | default([]) }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- peer_with_calico_rr|default(false)
- name: Calico | Create calico manifests
@@ -305,7 +305,7 @@
- {name: calico, file: calico-crb.yml, type: clusterrolebinding}
register: calico_node_manifests
when:
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- rbac_enabled or item.type not in rbac_resources
- name: Calico | Create calico manifests for typha
@@ -316,7 +316,7 @@
- {name: calico, file: calico-typha.yml, type: typha}
register: calico_node_typha_manifest
when:
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- typha_enabled and calico_datastore == "kdd"
- name: Start Calico resources
@@ -331,7 +331,7 @@
- "{{ calico_node_manifests.results }}"
- "{{ calico_node_typha_manifest.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"
@@ -340,7 +340,7 @@
wait_for:
path: /etc/cni/net.d/calico-kubeconfig
when:
- inventory_hostname not in groups['kube-master']
- inventory_hostname not in groups['kube_control_plane']
- calico_datastore == "kdd"
- name: Calico | Configure node asNumber for per node peering

View File

@@ -22,6 +22,6 @@
args:
executable: /bin/bash
register: calico_kubelet_name
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- "cloud_provider is defined"

View File

@@ -1,6 +1,6 @@
#!/bin/bash
DATASTORE_TYPE=kubernetes \
{% if inventory_hostname in groups['kube-master'] %}
{% if inventory_hostname in groups['kube_control_plane'] %}
KUBECONFIG=/etc/kubernetes/admin.conf \
{% else %}
KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \

View File

@@ -59,7 +59,7 @@
- {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding}
register: canal_manifests
when:
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- name: Canal | Install calicoctl wrapper script
template:

View File

@@ -39,7 +39,7 @@
- {name: cilium, file: cilium-sa.yml, type: sa}
register: cilium_node_manifests
when:
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
- name: Cilium | Enable portmap addon
template:

View File

@@ -1,4 +1,4 @@
---
- import_tasks: check.yml
- include_tasks: install.yml
- include_tasks: install.yml

View File

@@ -8,4 +8,4 @@
- {name: kube-flannel, file: cni-flannel.yml, type: ds}
register: flannel_node_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -1,9 +1,9 @@
---
- name: Kube-OVN | Label ovn-db node
command: >-
{{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
{{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kube-OVN | Create Kube-OVN manifests
template:

View File

@@ -1,21 +1,21 @@
---
- name: kube-router | Add annotations on kube-master
- name: kube-router | Add annotations on kube_control_plane
command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_master }}"
delegate_to: "{{ groups['kube-master'][0] }}"
when: kube_router_annotations_master is defined and inventory_hostname in groups['kube-master']
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
- name: kube-router | Add annotations on kube-node
command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_node }}"
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node']
- name: kube-router | Add common annotations on all servers
command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_all }}"
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s-cluster']

View File

@@ -55,5 +55,5 @@
template:
src: kube-router.yml.j2
dest: "{{ kube_config_dir }}/kube-router.yml"
delegate_to: "{{ groups['kube-master'] | first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true

View File

@@ -3,7 +3,7 @@
command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
changed_when: false
register: node_pod_cidr_cmd
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Macvlan | set node_pod_cidr
set_fact:

View File

@@ -1,9 +1,9 @@
---
- name: ovn4nfv | Label control-plane node
command: >-
{{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
{{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: ovn4nfv | Create ovn4nfv-k8s manifests
template:

View File

@@ -8,22 +8,22 @@
retries: 6
delay: 10
changed_when: false
when: groups['broken_kube-master']
when: groups['broken_kube_control_plane']
- name: Delete broken kube-master nodes from cluster
- name: Delete broken kube_control_plane nodes from cluster
command: "{{ bin_dir }}/kubectl delete node {{ item }}"
environment:
- KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
with_items: "{{ groups['broken_kube-master'] }}"
with_items: "{{ groups['broken_kube_control_plane'] }}"
register: delete_broken_kube_masters
failed_when: false
when: groups['broken_kube-master']
when: groups['broken_kube_control_plane']
- name: Fail if unable to delete broken kube-master nodes from cluster
- name: Fail if unable to delete broken kube_control_plane nodes from cluster
fail:
msg: "Unable to delete broken kube-master node: {{ item.item }}"
msg: "Unable to delete broken kube_control_plane node: {{ item.item }}"
loop: "{{ delete_broken_kube_masters.results }}"
changed_when: false
when:
- groups['broken_kube-master']
- groups['broken_kube_control_plane']
- "item.rc != 0 and not 'NotFound' in item.stderr"

View File

@@ -1,5 +1,5 @@
---
- name: Delete node # noqa 301
command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube-master']|first }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
ignore_errors: yes

View File

@@ -11,7 +11,7 @@
| jq "select(. | test(\"^{{ hostvars[item]['kube_override_hostname']|default(item) }}$\"))"
loop: "{{ node.split(',') | default(groups['kube-node']) }}"
register: nodes
delegate_to: "{{ groups['kube-master']|first }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
changed_when: false
run_once: true
@@ -33,7 +33,7 @@
loop: "{{ nodes_to_drain }}"
register: result
failed_when: result.rc != 0 and not allow_ungraceful_removal
delegate_to: "{{ groups['kube-master']|first }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
run_once: true
until: result.rc == 0 or allow_ungraceful_removal
retries: "{{ drain_retries }}"

View File

@@ -1,6 +1,6 @@
---
- name: Uncordon node
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- needs_cordoning|default(false)

View File

@@ -21,7 +21,7 @@
{{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
register: kubectl_node_ready
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
failed_when: false
changed_when: false
@@ -32,7 +32,7 @@
{{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-o jsonpath='{ .spec.unschedulable }'
register: kubectl_node_schedulable
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
failed_when: false
changed_when: false
@@ -49,12 +49,12 @@
block:
- name: Cordon node
command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Check kubectl version
command: "{{ bin_dir }}/kubectl version --client --short"
register: kubectl_version
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: yes
changed_when: false
when:
@@ -90,6 +90,6 @@
fail:
msg: "Failed to drain node {{ inventory_hostname }}"
when: upgrade_node_fail_if_drain_fails
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- needs_cordoning