Replace kube-master with kube_control_plane (#7256)

This replaces kube-master with kube_control_plane because of [1]:

  The Kubernetes project is moving away from wording that is
  considered offensive. A new working group WG Naming was created
  to track this work, and the word "master" was declared as offensive.
  A proposal was formalized for replacing the word "master" with
  "control plane". This means it should be removed from source code,
  documentation, and user-facing configuration from Kubernetes and
  its sub-projects.

NOTE: The reason this changes it to kube_control_plane rather than
      kube-control-plane is that hyphens are not valid in Ansible group names.

[1]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#motivation
This commit is contained in:
Kenichi Omichi
2021-03-23 17:26:05 -07:00
committed by GitHub
parent d53fd29e34
commit 486b223e01
159 changed files with 564 additions and 485 deletions

View File

@@ -4,6 +4,10 @@ instance-{{ loop.index }} ansible_ssh_host={{instance.stdout}}
{% endfor %}
{% if mode is defined and mode in ["separate", "separate-scale"] %}
[kube_control_plane]
instance-1
# TODO(oomichi): Remove all kube-master groups from this file after releasing v2.16.
[kube-master]
instance-1
@@ -13,6 +17,10 @@ instance-2
[etcd]
instance-3
{% elif mode is defined and mode in ["ha", "ha-scale"] %}
[kube_control_plane]
instance-1
instance-2
[kube-master]
instance-1
instance-2
@@ -25,6 +33,9 @@ instance-1
instance-2
instance-3
{% elif mode == "default" %}
[kube_control_plane]
instance-1
[kube-master]
instance-1
@@ -34,6 +45,9 @@ instance-2
[etcd]
instance-1
{% elif mode == "aio" %}
[kube_control_plane]
instance-1
[kube-master]
instance-1
@@ -46,6 +60,10 @@ instance-1
[vault]
instance-1
{% elif mode == "ha-recover" %}
[kube_control_plane]
instance-1
instance-2
[kube-master]
instance-1
instance-2
@@ -64,6 +82,11 @@ instance-2
[broken_etcd]
instance-2 etcd_member_name=etcd3
{% elif mode == "ha-recover-noquorum" %}
[kube_control_plane]
instance-3
instance-1
instance-2
[kube-master]
instance-3
instance-1

View File

@@ -65,7 +65,7 @@ fi
# Test control plane recovery
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube-master:!fake_hosts recover-control-plane.yml
ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml
fi
# Tests Cases

View File

@@ -2,7 +2,7 @@ node1 ansible_ssh_host={{ec2.instances[0].public_ip}} ansible_ssh_user={{ssh_use
node2 ansible_ssh_host={{ec2.instances[1].public_ip}} ansible_ssh_user={{ssh_user}}
node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user={{ssh_user}}
[kube-master]
[kube_control_plane]
node1
node2
@@ -21,12 +21,12 @@ node2
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane
calico-rr
[calico-rr]
[broken_kube-master]
[broken_kube_control_plane]
node2
[broken_etcd]

View File

@@ -3,7 +3,7 @@
{% endfor %}
{% if mode is defined and mode == "separate" %}
[kube-master]
[kube_control_plane]
{{droplets.results[0].droplet.name}}
[kube-node]
@@ -15,7 +15,7 @@
[vault]
{{droplets.results[2].droplet.name}}
{% elif mode is defined and mode == "ha" %}
[kube-master]
[kube_control_plane]
{{droplets.results[0].droplet.name}}
{{droplets.results[1].droplet.name}}
@@ -30,13 +30,13 @@
{{droplets.results[1].droplet.name}}
{{droplets.results[2].droplet.name}}
[broken_kube-master]
[broken_kube_control_plane]
{{droplets.results[1].droplet.name}}
[broken_etcd]
{{droplets.results[2].droplet.name}}
{% else %}
[kube-master]
[kube_control_plane]
{{droplets.results[0].droplet.name}}
[kube-node]
@@ -53,5 +53,5 @@
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane
calico-rr

View File

@@ -9,7 +9,7 @@
{{node3}} ansible_ssh_host={{gce.instance_data[2].public_ip}}
{% endif %}
{% if mode is defined and mode in ["separate", "separate-scale"] %}
[kube-master]
[kube_control_plane]
{{node1}}
[kube-node]
@@ -21,7 +21,7 @@
[vault]
{{node3}}
{% elif mode is defined and mode in ["ha", "ha-scale"] %}
[kube-master]
[kube_control_plane]
{{node1}}
{{node2}}
@@ -38,14 +38,14 @@
{{node2}}
{{node3}}
[broken_kube-master]
[broken_kube_control_plane]
{{node2}}
[etcd]
{{node2}}
{{node3}}
{% elif mode == "default" %}
[kube-master]
[kube_control_plane]
{{node1}}
[kube-node]
@@ -57,7 +57,7 @@
[vault]
{{node1}}
{% elif mode == "aio" %}
[kube-master]
[kube_control_plane]
{{node1}}
[kube-node]
@@ -72,7 +72,7 @@
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane
calico-rr
[calico-rr]

View File

@@ -1,5 +1,5 @@
---
- hosts: kube-master
- hosts: kube_control_plane
tasks:
- name: Check the API servers are responding

View File

@@ -1,5 +1,5 @@
---
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
tasks:
- name: Force binaries directory for Flatcar Container Linux by Kinvolk

View File

@@ -1,5 +1,5 @@
---
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
tasks:
- name: Force binaries directory for Flatcar Container Linux by Kinvolk

View File

@@ -1,5 +1,5 @@
---
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
vars:
test_image_repo: busybox
test_image_tag: latest

View File

@@ -37,7 +37,7 @@
until: ncs_pod.stdout.find('Running') != -1
retries: 3
delay: 10
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Wait for netchecker agents
shell: "set -o pipefail && {{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
@@ -48,12 +48,12 @@
retries: 3
delay: 10
failed_when: false
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Get netchecker pods
command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
no_log: false
with_items:
- netchecker-agent
@@ -63,14 +63,14 @@
- debug:
var: nca_pod.stdout_lines
failed_when: not nca_pod is success
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Get netchecker agents
uri:
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
return_content: yes
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
register: agents
retries: 18
delay: "{{ agent_report_interval }}"
@@ -94,7 +94,7 @@
url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
status_code: 200
return_content: yes
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
register: result
retries: 3
@@ -115,13 +115,13 @@
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
no_log: false
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not result is success
- name: Get logs from other apps
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not result is success
no_log: false
with_items:
@@ -184,7 +184,7 @@
}'
EOF
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- kube_network_plugin_multus|default(false)|bool
- name: Annotate pod with macvlan network
@@ -208,7 +208,7 @@
image: dougbtv/centos-network
EOF
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- kube_network_plugin_multus|default(false)|bool
- name: Check secondary macvlan interface
@@ -218,5 +218,5 @@
retries: 90
changed_when: false
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- kube_network_plugin_multus|default(false)|bool

View File

@@ -1,5 +1,5 @@
---
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
vars:
sonobuoy_version: 0.20.0
sonobuoy_arch: amd64

View File

@@ -2,17 +2,17 @@
- name: Generate dump folder
command: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
no_log: true
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']
- name: Compress directory cluster-dump
archive:
path: /tmp/cluster-dump
dest: /tmp/cluster-dump.tgz
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']
- name: Fetch dump file
fetch:
src: /tmp/cluster-dump.tgz
dest: "{{ lookup('env', 'CI_PROJECT_DIR') }}/cluster-dump/{{ inventory_hostname }}.tgz"
flat: true
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']