mirror of https://github.com/kubernetes-sigs/kubespray.git
synced 2026-02-02 10:08:13 -03:30
Remove non-kubeadm deployment (#3811)
* Remove non-kubeadm deployment
* More cleanup
* More cleanup
* More cleanup
* More cleanup
* Fix gitlab
* Try stop gce first before absent to make the delete process work
* More cleanup
* Fix bug with checking if kubeadm has already run
* Fix bug with checking if kubeadm has already run
* More fixes
* Fix test
* fix
* Fix gitlab checkout until kubespray 2.8 is on quay
* Fixed
* Add upgrade path from non-kubeadm to kubeadm. Revert ssl path
* Re-add secret checking
* Do gitlab checks from v2.7.0 test upgrade path to 2.8.0
* fix typo
* Fix CI jobs to kubeadm again. Fix broken hyperkube path
* Fix gitlab
* Fix rotate tokens
* More fixes
* More fixes
* Fix tokens
committed by Kubernetes Prow Robot
parent 0d1be39a97
commit ddffdb63bf
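For operators following along: the upgrade path mentioned in the commit message hinges on the kubeadm_enabled flag that the tasks below check. A minimal, hypothetical inventory sketch (kubeadm_enabled is the real variable from this diff; the file placement and value are illustrative):

# group_vars/all.yml (hypothetical placement)
# Run the 2.8 playbooks against an existing non-kubeadm cluster with the
# flag on; the remaining kubeadm_enabled checks below carry the migration.
kubeadm_enabled: true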
@@ -1,7 +1,4 @@
 ---
-# Valid options: docker (default), rkt, or host
-kubelet_deployment_type: host
-
 # change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
 kube_apiserver_insecure_bind_address: 127.0.0.1
 
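Since host is now the only kubelet deployment mode, an inventory override like the following (hypothetical example; docker and rkt were the alternatives per the removed comment) becomes dead configuration and can simply be deleted:

# Obsolete after this change; 'host' is the sole remaining mode.
kubelet_deployment_type: docker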
@@ -90,12 +87,6 @@ kubelet_custom_flags: []
 ## Support custom flags to be passed to kubelet only on nodes, not masters
 kubelet_node_custom_flags: []
 
-# This setting is used for rkt based kubelet for deploying hyperkube
-# from a docker based registry ( controls --insecure and docker:// )
-## Empty value for quay.io containers
-## docker for docker registry containers
-kube_hyperkube_image_repo: ""
-
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
@@ -1,6 +0,0 @@
----
-dependencies:
-  - role: kubernetes/secrets
-    when: not kubeadm_enabled
-    tags:
-      - k8s-secrets
@@ -1,11 +1,4 @@
 ---
-- name: Set kubelet deployment to host if kubeadm is enabled
-  set_fact:
-    kubelet_deployment_type: host
-  when: kubeadm_enabled
-  tags:
-    - kubeadm
-
 - name: install | Copy kubeadm binary from download dir
   synchronize:
     src: "{{ local_release_dir }}/kubeadm"
@@ -15,7 +8,6 @@
     owner: no
     group: no
   delegate_to: "{{ inventory_hostname }}"
-  when: kubeadm_enabled
   tags:
     - kubeadm
 
@@ -24,15 +16,41 @@
     path: "{{ bin_dir }}/kubeadm"
     mode: "0755"
     state: file
-  when: kubeadm_enabled
   tags:
     - kubeadm
 
-- include_tasks: "install_{{ kubelet_deployment_type }}.yml"
+- name: install | Copy kubelet binary from download dir
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubelet"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
+  delegate_to: "{{ inventory_hostname }}"
+  tags:
+    - hyperkube
+    - upgrade
+  notify: restart kubelet
+
+- name: install | Set kubelet binary permissions
+  file:
+    path: "{{ bin_dir }}/kubelet"
+    mode: "0755"
+    state: file
+  tags:
+    - hyperkube
+    - upgrade
+
+- name: install | Copy socat wrapper for Container Linux
+  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
+  args:
+    creates: "{{ bin_dir }}/socat"
+  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
 
 - name: install | Write kubelet systemd init file
   template:
-    src: "kubelet.{{ kubelet_deployment_type }}.service.j2"
+    src: "kubelet.host.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
   notify: restart kubelet
@@ -1,9 +0,0 @@
----
-- name: install | Install kubelet launch script
-  template:
-    src: kubelet-container.j2
-    dest: "{{ bin_dir }}/kubelet"
-    owner: kube
-    mode: 0755
-    backup: yes
-  notify: restart kubelet
@@ -1,30 +0,0 @@
----
-
-- name: install | Copy kubelet binary from download dir
-  synchronize:
-    src: "{{ local_release_dir }}/hyperkube"
-    dest: "{{ bin_dir }}/kubelet"
-    compress: no
-    perms: yes
-    owner: no
-    group: no
-  delegate_to: "{{ inventory_hostname }}"
-  tags:
-    - hyperkube
-    - upgrade
-  notify: restart kubelet
-
-- name: install | Set kubelet binary permissions
-  file:
-    path: "{{ bin_dir }}/kubelet"
-    mode: "0755"
-    state: file
-  tags:
-    - hyperkube
-    - upgrade
-
-- name: install | Copy socat wrapper for Container Linux
-  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
-  args:
-    creates: "{{ bin_dir }}/socat"
-  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
@@ -1,32 +0,0 @@
----
-- name: Trust kubelet container
-  command: >-
-    /usr/bin/rkt trust
-    --skip-fingerprint-review
-    --root
-    {{ item }}
-  register: kubelet_rkt_trust_result
-  until: kubelet_rkt_trust_result.rc == 0
-  with_items:
-    - "https://quay.io/aci-signing-key"
-    - "https://coreos.com/dist/pubkeys/aci-pubkeys.gpg"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  changed_when: false
-
-- name: create kubelet working directory
-  file:
-    state: directory
-    path: /var/lib/kubelet
-
-- name: Create kubelet service systemd directory
-  file:
-    path: /etc/systemd/system/kubelet.service.d
-    state: directory
-
-- name: Write kubelet proxy drop-in
-  template:
-    src: http-proxy.conf.j2
-    dest: /etc/systemd/system/kubelet.service.d/http-proxy.conf
-  when: http_proxy is defined or https_proxy is defined
-  notify: restart kubelet
@@ -22,16 +22,6 @@
   tags:
     - nginx
 
-- name: Write kubelet config file (non-kubeadm)
-  template:
-    src: kubelet.standard.env.j2
-    dest: "{{ kube_config_dir }}/kubelet.env"
-    backup: yes
-  when: not kubeadm_enabled
-  notify: restart kubelet
-  tags:
-    - kubelet
-
 - name: Make sure dynamic kubelet configuration directory is writeable
   file:
     path: "{{ dynamic_kubelet_configuration_dir }}"
@@ -44,25 +34,11 @@
     src: kubelet.kubeadm.env.j2
     dest: "{{ kube_config_dir }}/kubelet.env"
     backup: yes
-  when: kubeadm_enabled
   notify: restart kubelet
   tags:
     - kubelet
     - kubeadm
 
-- name: write the kubecfg (auth) file for kubelet
-  template:
-    src: "{{ item }}-kubeconfig.yaml.j2"
-    dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml"
-    backup: yes
-  with_items:
-    - node
-    - kube-proxy
-  when: not kubeadm_enabled
-  notify: restart kubelet
-  tags:
-    - kubelet
-
 - name: Ensure nodePort range is reserved
   sysctl:
     name: net.ipv4.ip_local_reserved_ports
@@ -142,26 +118,17 @@
   tags:
     - kube-proxy
 
-- name: Write proxy manifest
-  template:
-    src: manifests/kube-proxy.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
-  when:
-    - not (kubeadm_enabled or kube_proxy_remove)
-  tags:
-    - kube-proxy
-
 - name: Purge proxy manifest for kubeadm or if proxy services being provided by other means, e.g. network_plugin
   file:
     path: "{{ kube_manifest_dir }}/kube-proxy.manifest"
     state: absent
   when:
-    - kubeadm_enabled or kube_proxy_remove
+    - kube_proxy_remove
   tags:
     - kube-proxy
 
 - name: Cleanup kube-proxy leftovers from node
-  command: "{{ docker_bin_dir }}/docker run --rm --privileged -v /lib/modules:/lib/modules --net=host {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} kube-proxy --cleanup"
+  command: "{{ local_release_dir }}/hyperkube kube-proxy --cleanup"
   when:
     - kube_proxy_remove
   # `kube-proxy --cleanup`, being Ok as per shown WARNING, still returns 255 from above run (?)
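The trailing comment above notes that `kube-proxy --cleanup` can exit 255 even when it succeeds. A hedged sketch of how a task could tolerate that exit code; register and failed_when are standard Ansible, but treating 255 as benign is an assumption drawn from the comment, not something this commit adds:

- name: Cleanup kube-proxy leftovers from node
  command: "{{ local_release_dir }}/hyperkube kube-proxy --cleanup"
  register: proxy_cleanup                         # hypothetical variable name
  failed_when: proxy_cleanup.rc not in [0, 255]   # 255 treated as the benign WARNING case
  when:
    - kube_proxy_remove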
@@ -16,7 +16,7 @@
   service:
     name: kubelet
     state: stopped
-  when: kubelet_deployment_type == 'host' and kubelet_container_check.rc == 0
+  when: kubelet_container_check.rc == 0
 
 - name: "Pre-upgrade | ensure kubelet container is removed if using host deployment"
   command: docker rm -fv kubelet
@@ -26,4 +26,4 @@
   retries: 4
   until: remove_kubelet_container.rc == 0
   delay: 5
-  when: kubelet_deployment_type == 'host' and kubelet_container_check.rc == 0
+  when: kubelet_container_check.rc == 0
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Config
-clusters:
-- name: local
-  cluster:
-    certificate-authority: {{ kube_cert_dir }}/ca.pem
-    server: {{ kube_apiserver_endpoint }}
-users:
-- name: kube-proxy
-  user:
-    client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem
-    client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem
-contexts:
-- context:
-    cluster: local
-    user: kube-proxy
-  name: kube-proxy-{{ cluster_name }}
-current-context: kube-proxy-{{ cluster_name }}
@@ -1,43 +0,0 @@
-#!/bin/bash
-{{ docker_bin_dir }}/docker run \
-  --net=host \
-  --pid=host \
-  --privileged \
-  --name=kubelet \
-  --restart=on-failure:5 \
-  --memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \
-  --cpu-shares={{ kube_cpu_reserved|regex_replace('m', '') }} \
-  -v /dev:/dev:rw \
-  -v /etc/cni:/etc/cni:ro \
-  -v /opt/cni:/opt/cni:ro \
-  -v /etc/ssl:/etc/ssl:ro \
-  -v /etc/resolv.conf:/etc/resolv.conf \
-  {% for dir in ssl_ca_dirs -%}
-  -v {{ dir }}:{{ dir }}:ro \
-  {% endfor -%}
-  {% if kubelet_load_modules -%}
-  -v /lib/modules:/lib/modules:ro \
-  {% endif -%}
-  -v /sys:/sys:ro \
-  -v {{ docker_daemon_graph }}:{{ docker_daemon_graph }}:rw \
-  -v /var/log:/var/log:rw \
-  -v /var/lib/kubelet:/var/lib/kubelet:shared \
-  -v /var/lib/calico:/var/lib/calico:shared \
-  -v /var/lib/cni:/var/lib/cni:shared \
-  -v /var/run:/var/run:rw \
-  {# we can run into issues with double mounting /var/lib/kubelet #}
-  {# surely there's a better way to do this #}
-  {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-  -v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \
-  {% endif -%}
-  {% if local_volume_provisioner_enabled -%}
-  {% for class in local_volume_provisioner_storage_classes -%}
-  -v {{ class.host_dir }}:{{ class.host_dir }}:rw \
-  -v {{ class.mount_dir }}:{{ class.mount_dir }}:rw \
-  {% endfor -%}
-  {% endif %}
-  -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-  -v /etc/os-release:/etc/os-release:ro \
-  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
-  ./hyperkube kubelet \
-  "$@"
@@ -1,31 +0,0 @@
-[Unit]
-Description=Kubernetes Kubelet Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-After=docker.service
-Wants=docker.socket
-
-[Service]
-User=root
-EnvironmentFile={{kube_config_dir}}/kubelet.env
-ExecStart={{ bin_dir }}/kubelet \
-  $KUBE_LOGTOSTDERR \
-  $KUBE_LOG_LEVEL \
-  $KUBELET_API_SERVER \
-  $KUBELET_ADDRESS \
-  $KUBELET_PORT \
-  $KUBELET_HOSTNAME \
-  $KUBE_ALLOW_PRIV \
-  $KUBELET_ARGS \
-  $DOCKER_SOCKET \
-  $KUBELET_NETWORK_PLUGIN \
-  $KUBELET_VOLUME_PLUGIN \
-  $KUBELET_CLOUDPROVIDER
-Restart=always
-RestartSec=10s
-ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
-ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-ExecReload={{ docker_bin_dir }}/docker restart kubelet
-
-
-[Install]
-WantedBy=multi-user.target
@@ -1,120 +0,0 @@
-[Unit]
-Description=Kubernetes Kubelet Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-Wants=network.target
-
-[Service]
-User=root
-Restart=on-failure
-RestartSec=10s
-TimeoutStartSec=0
-LimitNOFILE=40000
-
-ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
-ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
-ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
-
-EnvironmentFile={{kube_config_dir}}/kubelet.env
-# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
-ExecStart=/usr/bin/rkt run \
-  {% if kubelet_load_modules == true %}
-  --volume lib-modules,kind=host,source=/lib/modules \
-  {% endif %}
-  --volume os-release,kind=host,source=/etc/os-release,readOnly=true \
-  --volume hosts,kind=host,source=/etc/hosts,readOnly=true \
-  --volume dns,kind=host,source=/etc/resolv.conf \
-  --volume etc-kubernetes,kind=host,source={{ kube_config_dir }},readOnly=false \
-  --volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
-  --volume etcd-ssl,kind=host,source={{ etcd_config_dir }},readOnly=true \
-  --volume run,kind=host,source=/run,readOnly=false \
-  {% for dir in ssl_ca_dirs -%}
-  --volume {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},kind=host,source={{ dir }},readOnly=true \
-  {% endfor -%}
-  --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
-  --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
-  --volume var-log,kind=host,source=/var/log \
-  {% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium", "kube-router"] %}
-  --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
-  --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
-  --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
-  {% endif %}
-  {% if kube_network_plugin in ["calico", "canal"] %}
-  --volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=false \
-  {% endif %}
-  {# we can run into issues with double mounting /var/lib/kubelet #}
-  {# surely there's a better way to do this #}
-  {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-  --volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \
-  {% endif -%}
-  {% if local_volume_provisioner_enabled %}
-  {% for class in local_volume_provisioner_storage_classes %}
-  --volume local-volume-provisioner-base-dir,kind=host,source={{ class.host_dir }},readOnly=false \
-  {# Not pretty, but needed to avoid double mount #}
-  {% if class.host_dir not in class.mount_dir and class.mount_dir not in class.host_dir %}
-  --volume local-volume-provisioner-mount-dir,kind=host,source={{ class.mount_dir }},readOnly=false \
-  {% endif %}
-  {% endfor %}
-  {% endif %}
-  {% if kubelet_load_modules == true %}
-  --mount volume=lib-modules,target=/lib/modules \
-  {% endif %}
-  --mount volume=etc-cni,target=/etc/cni \
-  --mount volume=opt-cni,target=/opt/cni \
-  --mount volume=var-lib-cni,target=/var/lib/cni \
-  {% if kube_network_plugin in ["calico", "canal"] %}
-  --mount volume=var-lib-calico,target=/var/lib/calico \
-  {% endif %}
-  --mount volume=os-release,target=/etc/os-release \
-  --mount volume=dns,target=/etc/resolv.conf \
-  --mount volume=etc-kubernetes,target={{ kube_config_dir }} \
-  --mount volume=etc-ssl-certs,target=/etc/ssl/certs \
-  --mount volume=etcd-ssl,target={{ etcd_config_dir }} \
-  --mount volume=run,target=/run \
-  {% for dir in ssl_ca_dirs -%}
-  --mount volume={{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},target={{ dir }} \
-  {% endfor -%}
-  --mount volume=var-lib-docker,target=/var/lib/docker \
-  --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
-  --mount volume=var-log,target=/var/log \
-  --mount volume=hosts,target=/etc/hosts \
-  {# we can run into issues with double mounting /var/lib/kubelet #}
-  {# surely there's a better way to do this #}
-  {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-  --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
-  {% endif -%}
-  {% if local_volume_provisioner_enabled %}
-  {% for class in local_volume_provisioner_storage_classes %}
-  --mount volume=local-volume-provisioner-base-dir,target={{ class.host_dir }} \
-  {# Not pretty, but needed to avoid double mount #}
-  {% if class.host_dir not in class.mount_dir and class.mount_dir not in class.host_dir %}
-  --mount volume=local-volume-provisioner-mount-dir,target={{ class.mount_dir }} \
-  {% endif %}
-  {% endfor %}
-  {% endif %}
-  --stage1-from-dir=stage1-fly.aci \
-  {% if kube_hyperkube_image_repo == "docker" %}
-  --insecure-options=image \
-  docker://{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
-  {% else %}
-  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
-  {% endif %}
-  --uuid-file-save=/var/run/kubelet.uuid \
-  --debug --exec=/kubelet -- \
-  $KUBE_LOGTOSTDERR \
-  $KUBE_LOG_LEVEL \
-  $KUBELET_API_SERVER \
-  $KUBELET_ADDRESS \
-  $KUBELET_PORT \
-  $KUBELET_HOSTNAME \
-  $KUBE_ALLOW_PRIV \
-  $KUBELET_ARGS \
-  $DOCKER_SOCKET \
-  $KUBELET_REGISTER_NODE \
-  $KUBELET_NETWORK_PLUGIN \
-  $KUBELET_VOLUME_PLUGIN \
-  $KUBELET_CLOUDPROVIDER
-
-ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet.uuid
-
-[Install]
-WantedBy=multi-user.target
@@ -1,151 +0,0 @@
-# logging to stderr means we get it in the systemd journal
-KUBE_LOGTOSTDERR="--logtostderr=true"
-KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
-# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
-KUBELET_ADDRESS="--address={{ kubelet_bind_address }} --node-ip={{ kubelet_address }}"
-# The port for the info server to serve on
-# KUBELET_PORT="--port=10250"
-{% if kube_override_hostname|default('') %}
-# You may leave this blank to use the actual hostname
-KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
-{% endif %}
-{# Base kubelet args #}
-{% set kubelet_args_base %}
---pod-manifest-path={{ kube_manifest_dir }} \
-{% if kube_version is version('v1.12.0', '<') %}
---cadvisor-port={{ kube_cadvisor_port }} \
-{% endif %}
---pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
---node-status-update-frequency={{ kubelet_status_update_frequency }} \
-{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
---docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
-{% endif %}
---client-ca-file={{ kube_cert_dir }}/ca.pem \
---tls-cert-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem \
---tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
---anonymous-auth=false \
---read-only-port={{ kube_read_only_port }} \
-{% if kube_version is version('v1.6', '>=') %}
-{# flag got removed with 1.7.0 #}
-{% if kube_version is version('v1.7', '<') %}
---enable-cri={{ kubelet_enable_cri }} \
-{% endif %}
-{% if container_manager == 'crio' %}
---container-runtime=remote \
---container-runtime-endpoint=/var/run/crio/crio.sock \
-{% endif %}
---cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
---cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
---max-pods={{ kubelet_max_pods }} \
-{% if kube_version is version('v1.8', '<') %}
---experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
-{% else %}
---fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
-{% endif %}
-{% if kubelet_authentication_token_webhook %}
---authentication-token-webhook \
-{% endif %}
-{% if kubelet_authorization_mode_webhook %}
---authorization-mode=Webhook \
-{% endif %}
-{% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
---cgroup-driver=systemd \
-{% endif %}
---enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
-
-{# DNS settings for kubelet #}
-{% if dns_mode in ['kubedns', 'coredns'] %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
-{% elif dns_mode == 'coredns_dual' %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
-{% elif dns_mode == 'dnsmasq_kubedns' %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
-{% elif dns_mode == 'manual' %}
-{% set kubelet_args_cluster_dns %}--cluster-dns={{ manual_dns_server }}{% endset %}
-{% else %}
-{% set kubelet_args_cluster_dns %}{% endset %}
-{% endif %}
-{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
-
-{# Location of the apiserver #}
-{% if kube_version is version('v1.8', '<') %}
-{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
-{% else %}
-{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml{% endset %}
-{% endif %}
-
-{% set role_node_taints = [] %}
-{% if standalone_kubelet|bool %}
-{# We are on a master-only host. Make the master unschedulable in this case. #}
-{% if kube_version is version('v1.6', '>=') %}
-{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
-{% set dummy = role_node_taints.append('node-role.kubernetes.io/master=:NoSchedule') %}
-{% else %}
-{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #}
-{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %}
-{% endif %}
-{% endif %}
-{% set all_node_taints = node_taints|default([]) + role_node_taints %}
-
-{# Node reserved CPU/memory #}
-{% if is_kube_master|bool %}
-{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
-{% else %}
-{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
-{% endif %}
-
-{# Kubelet node labels #}
-{% set role_node_labels = [] %}
-{% if inventory_hostname in groups['kube-master'] %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
-{% if not standalone_kubelet|bool %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{% endif %}
-{% else %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{% endif %}
-{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
-{% if inventory_hostname in nvidia_gpu_nodes %}
-{% set dummy = role_node_labels.append('nvidia.com/gpu=true') %}
-{% endif %}
-{% endif %}
-{% set inventory_node_labels = [] %}
-{% if node_labels is defined and node_labels is mapping %}
-{% for labelname, labelvalue in node_labels.items() %}
-{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
-{% endfor %}
-{% endif %}
-{% set all_node_labels = role_node_labels + inventory_node_labels %}
-
-{# Kubelet node taints for gpu #}
-{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
-{% if inventory_hostname in nvidia_gpu_nodes %}
-{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=nvidia.com/gpu=:NoSchedule{% endset %}
-{% endif %}
-{% endif %}
-
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {% if all_node_taints %}--register-with-taints={{ all_node_taints | join(',') }} {% endif %}--node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
-
-{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium", "kube-router"] %}
-KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
-{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
-DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
-{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
-KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
-{% endif %}
-
-KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
-
-# Should this cluster be allowed to run privileged docker containers
-KUBE_ALLOW_PRIV="--allow-privileged=true"
-{% if cloud_provider is defined and cloud_provider in ["openstack", "vsphere", "aws"] %}
-KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider in ["azure"] %}
-KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config --azure-container-registry-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider in ["oci", "external"] %}
-KUBELET_CLOUDPROVIDER="--cloud-provider=external"
-{% else %}
-KUBELET_CLOUDPROVIDER=""
-{% endif %}
-
-PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
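That deletes the entire hand-built kubelet.standard.env.j2; the kubeadm flow keeps kubelet.kubeadm.env.j2 instead. For orientation only, a few of the removed flags map onto the upstream KubeletConfiguration API roughly as follows (field names are from kubelet.config.k8s.io/v1beta1; the Jinja variables are the ones used in the template above; the mapping is illustrative and not part of this commit):

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
clusterDomain: "{{ dns_domain }}"                        # was --cluster-domain
clusterDNS:
  - "{{ skydns_server }}"                                # was --cluster-dns
maxPods: {{ kubelet_max_pods }}                          # was --max-pods
failSwapOn: {{ kubelet_fail_swap_on | default(true) }}   # was --fail-swap-on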
@@ -1,110 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-proxy
-  namespace: kube-system
-  labels:
-    k8s-app: kube-proxy
-  annotations:
-    kubespray.kube-proxy-cert/serial: "{{ kube_proxy_cert_serial }}"
-spec:
-  hostNetwork: true
-{% if kube_version is version('v1.6', '>=') %}
-  dnsPolicy: ClusterFirst
-{% endif %}
-  nodeSelector:
-    beta.kubernetes.io/os: linux
-{% if kube_version is version('v1.11.1', '>=') %}
-  priorityClassName: system-node-critical
-{% endif %}
-  containers:
-  - name: kube-proxy
-    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
-    imagePullPolicy: {{ k8s_image_pull_policy }}
-    resources:
-      limits:
-        cpu: {{ kube_proxy_cpu_limit }}
-        memory: {{ kube_proxy_memory_limit }}
-      requests:
-        cpu: {{ kube_proxy_cpu_requests }}
-        memory: {{ kube_proxy_memory_requests }}
-    livenessProbe:
-      httpGet:
-        host: 127.0.0.1
-        path: /healthz
-        port: 10256
-      failureThreshold: 8
-      initialDelaySeconds: 15
-      periodSeconds: 10
-      successThreshold: 1
-      timeoutSeconds: 15
-    command:
-    - /hyperkube
-    - proxy
-    - --v={{ kube_log_level }}
-    - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml
-    - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
-    - --cluster-cidr={{ kube_pods_subnet }}
-    - --proxy-mode={{ kube_proxy_mode }}
-    - --oom-score-adj=-998
-    - --healthz-bind-address={{ kube_proxy_healthz_bind_address }}
-    - --resource-container=""
-{% if kube_proxy_nodeport_addresses %}
-    - --nodeport-addresses={{ kube_proxy_nodeport_addresses_cidr }}
-{% endif %}
-{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
-    - --masquerade-all
-{% elif kube_proxy_mode == 'ipvs' %}
-    - --masquerade-all
-{% if kube_version is version('v1.10', '<') %}
-    - --feature-gates=SupportIPVSProxyMode=true
-{% endif %}
-    - --ipvs-min-sync-period=5s
-    - --ipvs-sync-period=5s
-    - --ipvs-scheduler=rr
-{% endif %}
-    securityContext:
-      privileged: true
-    volumeMounts:
-    - mountPath: /etc/ssl/certs
-      name: ssl-certs-host
-      readOnly: true
-    - mountPath: "{{ kube_config_dir }}/ssl"
-      name: etc-kube-ssl
-      readOnly: true
-    - mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
-      name: kubeconfig
-      readOnly: true
-    - mountPath: /var/run/dbus
-      name: var-run-dbus
-      readOnly: false
-    - mountPath: /lib/modules
-      name: lib-modules
-      readOnly: true
-    - mountPath: /run/xtables.lock
-      name: xtables-lock
-      readOnly: false
-  volumes:
-  - name: ssl-certs-host
-    hostPath:
-{% if ansible_os_family == 'RedHat' %}
-      path: /etc/pki/tls
-{% else %}
-      path: /usr/share/ca-certificates
-{% endif %}
-  - name: etc-kube-ssl
-    hostPath:
-      path: "{{ kube_config_dir }}/ssl"
-  - name: kubeconfig
-    hostPath:
-      path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
-  - name: var-run-dbus
-    hostPath:
-      path: /var/run/dbus
-  - hostPath:
-      path: /lib/modules
-    name: lib-modules
-  - hostPath:
-      path: /run/xtables.lock
-      type: FileOrCreate
-    name: xtables-lock