mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2026-03-17 17:07:36 -02:30
Initial commit
This commit is contained in:
41
roles/kubernetes/common/defaults/main.yml
Normal file
41
roles/kubernetes/common/defaults/main.yml
Normal file
@@ -0,0 +1,41 @@
|
||||
# This directory is where all the additional scripts go
|
||||
# that Kubernetes normally puts in /srv/kubernetes.
|
||||
# This puts them in a sane location
|
||||
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
|
||||
|
||||
# This directory is where all the additional config stuff goes
|
||||
# that Kubernetes normally puts in /srv/kubernetes.
|
||||
# This puts them in a sane location.
|
||||
# Editing this value will almost surely break something. Don't
|
||||
# change it. Things like the systemd scripts are hard coded to
|
||||
# look in here. Don't do it.
|
||||
kube_config_dir: /etc/kubernetes
|
||||
|
||||
# The port the API Server will be listening on.
|
||||
kube_master_port: 443
|
||||
|
||||
# This is where all the cert scripts and certs will be located
|
||||
kube_cert_dir: "{{ kube_config_dir }}/certs"
|
||||
|
||||
# This is where all of the bearer tokens will be stored
|
||||
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||
|
||||
# This is where to save basic auth file
|
||||
kube_users_dir: "{{ kube_config_dir }}/users"
|
||||
|
||||
# This is where you can drop yaml/json files and the kubelet will run those
|
||||
# pods on startup
|
||||
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
|
||||
|
||||
# This is the group that the cert creation scripts chgrp the
|
||||
# cert files to. Not really changeable...
|
||||
kube_cert_group: kube-cert
|
||||
|
||||
dns_domain: "{{ cluster_name }}"
|
||||
|
||||
# IP address of the DNS server.
|
||||
# Kubernetes will create a pod with several containers, serving as the DNS
|
||||
# server and expose it under this IP address. The IP address must be from
|
||||
# the range specified as kube_service_addresses. This magic will actually
|
||||
# pick the 10th ip address in the kube_service_addresses range and use that.
|
||||
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
|
||||
31
roles/kubernetes/common/files/kube-gen-token.sh
Normal file
31
roles/kubernetes/common/files/kube-gen-token.sh
Normal file
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Generate a bearer token for each account name given on the command line.
# Each new token is appended to ${TOKEN_DIR}/known_tokens.csv as a
# "<token>,<account>,<account>" line and also written to a per-account
# ${TOKEN_DIR}/<account>.token file. Accounts that already appear in the
# CSV are skipped.
#
# Environment:
#   TOKEN_DIR - directory holding the token files (default: /var/srv/kubernetes)
#
# Output: a "Added <account>" line is printed for every token created;
# the Ansible task driving this script keys on that marker ('Added' in
# stdout) to detect changes, so stdout must stay clean otherwise.

token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
token_file="${token_dir}/known_tokens.csv"

# Quote "$@" so account names are kept as whole words (the original
# unquoted $@ would word-split and glob-expand them).
create_accounts=("$@")

touch "${token_file}"
for account in "${create_accounts[@]}"; do
  # -q: only the exit status matters. The original grep (without -q)
  # echoed the matching CSV line -- secret token included -- to stdout,
  # leaking it into the caller's logs.
  if grep -q ",${account}," "${token_file}"; then
    continue
  fi
  token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  echo "${token},${account},${account}" >> "${token_file}"
  echo "${token}" > "${token_dir}/${account}.token"
  echo "Added ${account}"
done
|
||||
115
roles/kubernetes/common/files/make-ca-cert.sh
Executable file
115
roles/kubernetes/common/files/make-ca-cert.sh
Executable file
@@ -0,0 +1,115 @@
|
||||
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Generate a cluster CA plus server/kubelet/kubecfg certificate pairs
# using a patched easyrsa fetched from GCS.
#
# Caller should set in the env:
# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
# DNS_DOMAIN - which will be passed to minions in --cluster_domain
# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
# MASTER_NAME - CN used for the server certificate

# Also the following will be respected
# CERT_DIR - where to place the finished certs
# CERT_GROUP - who the group owner of the cert files should be

# Fall back to $1 when MASTER_IP is unset; with nounset active this line
# deliberately aborts if neither the env var nor an argument is given.
cert_ip="${MASTER_IP:="${1}"}"
master_name="${MASTER_NAME:="kubernetes"}"
service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
dns_domain="${DNS_DOMAIN:="cluster.local"}"
cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
cert_group="${CERT_GROUP:="kube-cert"}"

# The following certificate pairs are created:
#
#  - ca (the cluster's certificate authority)
#  - server
#  - kubelet
#  - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.

# Resolve cloud-provider placeholder values to a concrete address.
# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
  cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi

if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
  cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi

if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
  cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
fi

# Work inside a throwaway directory that is removed on every exit path.
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"

# TODO: For now, this is a patched tool that makes subject-alt-name work, when
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
# but is originally taken from:
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
#
# To update, do the following:
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
#
# Due to GCS caching of public objects, it may take time for this to be widely
# distributed.

# Calculate the first ip address in the service range (network address + 1).
octets=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')

# Determine appropriate subject alt names
sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"

# -sS keeps the progress meter quiet but still reports failures on stderr
# (the original '> /dev/null 2>&1' hid curl's error messages entirely).
curl -sSL -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
tar xzf easy-rsa.tar.gz > /dev/null
cd easy-rsa-master/easyrsa3

(./easyrsa init-pki > /dev/null 2>&1
 ./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
 ./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
 ./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
 ./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
    # If there was an error in the subshell, just die.
    # TODO(roberthbailey): add better error handling here
    echo "=== Failed to generate certificates: Aborting ===" >&2
    exit 2
}

mkdir -p "$cert_dir"

cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"

# Restrict the generated material to the owner plus the cert group.
CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
for cert in "${CERTS[@]}"; do
  chgrp "${cert_group}" "${cert_dir}/${cert}"
  chmod 660 "${cert_dir}/${cert}"
done
|
||||
3
roles/kubernetes/common/meta/main.yml
Normal file
3
roles/kubernetes/common/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: etcd }
|
||||
42
roles/kubernetes/common/tasks/gen_certs.yml
Normal file
42
roles/kubernetes/common/tasks/gen_certs.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
#- name: Get create ca cert script from Kubernetes
|
||||
# get_url:
|
||||
# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
|
||||
# dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
|
||||
# force=yes
|
||||
|
||||
- name: certs | install cert generation script
|
||||
copy:
|
||||
src=make-ca-cert.sh
|
||||
dest={{ kube_script_dir }}
|
||||
mode=0500
|
||||
changed_when: false
|
||||
|
||||
# FIXME This only generates a cert for one master...
|
||||
- name: certs | run cert generation script
|
||||
command:
|
||||
"{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
|
||||
args:
|
||||
creates: "{{ kube_cert_dir }}/server.crt"
|
||||
environment:
|
||||
MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
|
||||
MASTER_NAME: "{{ inventory_hostname }}"
|
||||
DNS_DOMAIN: "{{ dns_domain }}"
|
||||
SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
|
||||
CERT_DIR: "{{ kube_cert_dir }}"
|
||||
CERT_GROUP: "{{ kube_cert_group }}"
|
||||
|
||||
- name: certs | check certificate permissions
|
||||
file:
|
||||
path={{ item }}
|
||||
group={{ kube_cert_group }}
|
||||
owner=kube
|
||||
mode=0440
|
||||
with_items:
|
||||
- "{{ kube_cert_dir }}/ca.crt"
|
||||
- "{{ kube_cert_dir }}/server.crt"
|
||||
- "{{ kube_cert_dir }}/server.key"
|
||||
- "{{ kube_cert_dir }}/kubecfg.crt"
|
||||
- "{{ kube_cert_dir }}/kubecfg.key"
|
||||
- "{{ kube_cert_dir }}/kubelet.crt"
|
||||
- "{{ kube_cert_dir }}/kubelet.key"
|
||||
30
roles/kubernetes/common/tasks/gen_tokens.yml
Normal file
30
roles/kubernetes/common/tasks/gen_tokens.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
---
|
||||
- name: tokens | copy the token gen script
|
||||
copy:
|
||||
src=kube-gen-token.sh
|
||||
dest={{ kube_script_dir }}
|
||||
mode=u+x
|
||||
|
||||
- name: tokens | generate tokens for master components
|
||||
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
||||
environment:
|
||||
TOKEN_DIR: "{{ kube_token_dir }}"
|
||||
with_nested:
|
||||
- [ "system:controller_manager", "system:scheduler", "system:kubectl", 'system:proxy' ]
|
||||
- "{{ groups['kube-master'][0] }}"
|
||||
register: gentoken
|
||||
changed_when: "'Added' in gentoken.stdout"
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: tokens | generate tokens for node components
|
||||
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
||||
environment:
|
||||
TOKEN_DIR: "{{ kube_token_dir }}"
|
||||
with_nested:
|
||||
- [ 'system:kubelet', 'system:proxy' ]
|
||||
- "{{ groups['kube-node'] }}"
|
||||
register: gentoken
|
||||
changed_when: "'Added' in gentoken.stdout"
|
||||
notify:
|
||||
- restart daemons
|
||||
29
roles/kubernetes/common/tasks/main.yml
Normal file
29
roles/kubernetes/common/tasks/main.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
---
|
||||
- name: define alias command for kubectl all
|
||||
lineinfile:
|
||||
dest=/etc/bash.bashrc
|
||||
line="alias kball='{{ bin_dir }}/kubectl --all-namespaces -o wide'"
|
||||
regexp='^alias kball=.*$'
|
||||
state=present
|
||||
insertafter=EOF
|
||||
create=True
|
||||
|
||||
- name: create kubernetes config directory
|
||||
file: path={{ kube_config_dir }} state=directory
|
||||
|
||||
- name: create kubernetes script directory
|
||||
file: path={{ kube_script_dir }} state=directory
|
||||
|
||||
- name: Make sure manifest directory exists
|
||||
file: path={{ kube_manifest_dir }} state=directory
|
||||
|
||||
- name: write the global config file
|
||||
template:
|
||||
src: config.j2
|
||||
dest: "{{ kube_config_dir }}/config"
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- include: secrets.yml
|
||||
tags:
|
||||
- secrets
|
||||
50
roles/kubernetes/common/tasks/secrets.yml
Normal file
50
roles/kubernetes/common/tasks/secrets.yml
Normal file
@@ -0,0 +1,50 @@
|
||||
---
|
||||
- name: certs | create system kube-cert groups
|
||||
group: name={{ kube_cert_group }} state=present system=yes
|
||||
|
||||
- name: create system kube user
|
||||
user:
|
||||
name=kube
|
||||
comment="Kubernetes user"
|
||||
shell=/sbin/nologin
|
||||
state=present
|
||||
system=yes
|
||||
groups={{ kube_cert_group }}
|
||||
|
||||
- name: certs | make sure the certificate directory exists
|
||||
file:
|
||||
path={{ kube_cert_dir }}
|
||||
state=directory
|
||||
mode=o-rwx
|
||||
group={{ kube_cert_group }}
|
||||
|
||||
- name: tokens | make sure the tokens directory exists
|
||||
file:
|
||||
path={{ kube_token_dir }}
|
||||
state=directory
|
||||
mode=o-rwx
|
||||
group={{ kube_cert_group }}
|
||||
|
||||
- include: gen_certs.yml
|
||||
run_once: true
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
- name: Read back the CA certificate
|
||||
slurp:
|
||||
src: "{{ kube_cert_dir }}/ca.crt"
|
||||
register: ca_cert
|
||||
run_once: true
|
||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||
|
||||
- name: certs | register the CA certificate as a fact for later use
|
||||
set_fact:
|
||||
kube_ca_cert: "{{ ca_cert.content|b64decode }}"
|
||||
|
||||
- name: certs | write CA certificate everywhere
|
||||
copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- include: gen_tokens.yml
|
||||
run_once: true
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
26
roles/kubernetes/common/templates/config.j2
Normal file
26
roles/kubernetes/common/templates/config.j2
Normal file
@@ -0,0 +1,26 @@
|
||||
###
|
||||
# kubernetes system config
|
||||
#
|
||||
# The following values are used to configure various aspects of all
|
||||
# kubernetes services, including
|
||||
#
|
||||
# kube-apiserver.service
|
||||
# kube-controller-manager.service
|
||||
# kube-scheduler.service
|
||||
# kubelet.service
|
||||
# kube-proxy.service
|
||||
|
||||
# Comma separated list of nodes in the etcd cluster
|
||||
# KUBE_ETCD_SERVERS="--etcd_servers="
|
||||
|
||||
# logging to stderr means we get it in the systemd journal
|
||||
KUBE_LOGTOSTDERR="--logtostderr=true"
|
||||
|
||||
# journal message level, 0 is debug
|
||||
KUBE_LOG_LEVEL="--v=5"
|
||||
|
||||
# Should this cluster be allowed to run privileged docker containers
|
||||
KUBE_ALLOW_PRIV="--allow_privileged=true"
|
||||
|
||||
# How the replication controller, scheduler, and proxy find the apiserver
|
||||
KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
|
||||
32
roles/kubernetes/master/handlers/main.yml
Normal file
32
roles/kubernetes/master/handlers/main.yml
Normal file
@@ -0,0 +1,32 @@
|
||||
---
|
||||
- name: restart daemons
|
||||
command: /bin/true
|
||||
notify:
|
||||
- reload systemd
|
||||
- restart apiserver
|
||||
- restart controller-manager
|
||||
- restart scheduler
|
||||
- restart proxy
|
||||
|
||||
- name: reload systemd
|
||||
command: systemctl daemon-reload
|
||||
|
||||
- name: restart apiserver
|
||||
service:
|
||||
name: kube-apiserver
|
||||
state: restarted
|
||||
|
||||
- name: restart controller-manager
|
||||
service:
|
||||
name: kube-controller-manager
|
||||
state: restarted
|
||||
|
||||
- name: restart scheduler
|
||||
service:
|
||||
name: kube-scheduler
|
||||
state: restarted
|
||||
|
||||
- name: restart proxy
|
||||
service:
|
||||
name: kube-proxy
|
||||
state: restarted
|
||||
3
roles/kubernetes/master/meta/main.yml
Normal file
3
roles/kubernetes/master/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: kubernetes/common }
|
||||
87
roles/kubernetes/master/tasks/config.yml
Normal file
87
roles/kubernetes/master/tasks/config.yml
Normal file
@@ -0,0 +1,87 @@
|
||||
---
|
||||
- name: get the node token values from token files
|
||||
slurp:
|
||||
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
|
||||
with_items:
|
||||
- "system:controller_manager"
|
||||
- "system:scheduler"
|
||||
- "system:kubectl"
|
||||
- "system:proxy"
|
||||
register: tokens
|
||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||
|
||||
- name: Set token facts
|
||||
set_fact:
|
||||
controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
|
||||
scheduler_token: "{{ tokens.results[1].content|b64decode }}"
|
||||
kubectl_token: "{{ tokens.results[2].content|b64decode }}"
|
||||
proxy_token: "{{ tokens.results[3].content|b64decode }}"
|
||||
|
||||
- name: write the config files for api server
|
||||
template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: write config file for controller-manager
|
||||
template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
|
||||
notify:
|
||||
- restart controller-manager
|
||||
|
||||
- name: write the kubecfg (auth) file for controller-manager
|
||||
template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
|
||||
notify:
|
||||
- restart controller-manager
|
||||
|
||||
- name: write the config file for scheduler
|
||||
template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
|
||||
notify:
|
||||
- restart scheduler
|
||||
|
||||
- name: write the kubecfg (auth) file for scheduler
|
||||
template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
|
||||
notify:
|
||||
- restart scheduler
|
||||
|
||||
- name: write the kubecfg (auth) file for kubectl
|
||||
template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
|
||||
|
||||
- name: write the config files for proxy
|
||||
template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: write the kubecfg (auth) file for proxy
|
||||
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
|
||||
|
||||
- name: populate users for basic auth in API
|
||||
lineinfile:
|
||||
dest: "{{ kube_users_dir }}/known_users.csv"
|
||||
create: yes
|
||||
line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
|
||||
with_dict: "{{ kube_users }}"
|
||||
notify:
|
||||
- restart apiserver
|
||||
|
||||
- name: Enable apiserver
|
||||
service:
|
||||
name: kube-apiserver
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Enable controller-manager
|
||||
service:
|
||||
name: kube-controller-manager
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Enable scheduler
|
||||
service:
|
||||
name: kube-scheduler
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Enable kube-proxy
|
||||
service:
|
||||
name: kube-proxy
|
||||
enabled: yes
|
||||
state: started
|
||||
34
roles/kubernetes/master/tasks/install.yml
Normal file
34
roles/kubernetes/master/tasks/install.yml
Normal file
@@ -0,0 +1,34 @@
|
||||
---
|
||||
- name: Write kube-apiserver systemd init file
|
||||
template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service
|
||||
notify: restart daemons
|
||||
|
||||
- name: Write kube-controller-manager systemd init file
|
||||
template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service
|
||||
notify: restart daemons
|
||||
|
||||
- name: Write kube-scheduler systemd init file
|
||||
template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service
|
||||
notify: restart daemons
|
||||
|
||||
- name: Write kube-proxy systemd init file
|
||||
template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
|
||||
notify: restart daemons
|
||||
|
||||
- name: Install kubernetes binaries
|
||||
copy:
|
||||
src={{ local_release_dir }}/kubernetes/bin/{{ item }}
|
||||
dest={{ bin_dir }}
|
||||
owner=kube
|
||||
mode=u+x
|
||||
with_items:
|
||||
- kube-apiserver
|
||||
- kube-controller-manager
|
||||
- kube-scheduler
|
||||
- kube-proxy
|
||||
- kubectl
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: Allow apiserver to bind on both secure and insecure ports
|
||||
shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
|
||||
3
roles/kubernetes/master/tasks/main.yml
Normal file
3
roles/kubernetes/master/tasks/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
- include: install.yml
|
||||
- include: config.yml
|
||||
25
roles/kubernetes/master/templates/apiserver.j2
Normal file
25
roles/kubernetes/master/templates/apiserver.j2
Normal file
@@ -0,0 +1,25 @@
|
||||
###
|
||||
# kubernetes system config
|
||||
#
|
||||
# The following values are used to configure the kube-apiserver
|
||||
#
|
||||
|
||||
# The address on the local server to listen to.
|
||||
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
|
||||
|
||||
# The port on the local server to listen on.
|
||||
KUBE_API_PORT="--insecure-port=8080 --secure-port={{ kube_master_port }}"
|
||||
|
||||
# KUBELET_PORT="--kubelet_port=10250"
|
||||
|
||||
# Address range to use for services
|
||||
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
|
||||
|
||||
# Location of the etcd cluster
|
||||
KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
|
||||
|
||||
# default admission control policies
|
||||
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
|
||||
|
||||
# Add your own!
|
||||
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"
|
||||
6
roles/kubernetes/master/templates/controller-manager.j2
Normal file
6
roles/kubernetes/master/templates/controller-manager.j2
Normal file
@@ -0,0 +1,6 @@
|
||||
###
|
||||
# The following values are used to configure the kubernetes controller-manager
|
||||
|
||||
# defaults from config and apiserver should be adequate
|
||||
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"
|
||||
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: controller-manager-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: controller-manager
|
||||
name: controller-manager-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: controller-manager
|
||||
user:
|
||||
token: {{ controller_manager_token }}
|
||||
18
roles/kubernetes/master/templates/kubectl.kubeconfig.j2
Normal file
18
roles/kubernetes/master/templates/kubectl.kubeconfig.j2
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: kubectl-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: {{ kube_ca_cert|b64encode }}
|
||||
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: kubectl
|
||||
name: kubectl-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: kubectl
|
||||
user:
|
||||
token: {{ kubectl_token }}
|
||||
7
roles/kubernetes/master/templates/proxy.j2
Normal file
7
roles/kubernetes/master/templates/proxy.j2
Normal file
@@ -0,0 +1,7 @@
|
||||
###
|
||||
# kubernetes proxy config
|
||||
|
||||
# default config should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
|
||||
18
roles/kubernetes/master/templates/proxy.kubeconfig.j2
Normal file
18
roles/kubernetes/master/templates/proxy.kubeconfig.j2
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: proxy-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: proxy
|
||||
name: proxy-to-{{ cluster_name }}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: http://{{ groups['kube-master'][0] }}:8080
|
||||
name: {{ cluster_name }}
|
||||
users:
|
||||
- name: proxy
|
||||
user:
|
||||
token: {{ proxy_token }}
|
||||
7
roles/kubernetes/master/templates/scheduler.j2
Normal file
7
roles/kubernetes/master/templates/scheduler.j2
Normal file
@@ -0,0 +1,7 @@
|
||||
###
|
||||
# kubernetes scheduler config
|
||||
|
||||
# default config should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
|
||||
18
roles/kubernetes/master/templates/scheduler.kubeconfig.j2
Normal file
18
roles/kubernetes/master/templates/scheduler.kubeconfig.j2
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: scheduler-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: scheduler
|
||||
name: scheduler-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: scheduler
|
||||
user:
|
||||
token: {{ scheduler_token }}
|
||||
@@ -0,0 +1,28 @@
|
||||
[Unit]
|
||||
Description=Kubernetes API Server
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Requires=etcd2.service
|
||||
After=etcd2.service
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=/etc/network-environment
|
||||
EnvironmentFile=-/etc/kubernetes/config
|
||||
EnvironmentFile=-/etc/kubernetes/apiserver
|
||||
User=kube
|
||||
ExecStart={{ bin_dir }}/kube-apiserver \
|
||||
$KUBE_LOGTOSTDERR \
|
||||
$KUBE_LOG_LEVEL \
|
||||
$KUBE_ETCD_SERVERS \
|
||||
$KUBE_API_ADDRESS \
|
||||
$KUBE_API_PORT \
|
||||
$KUBELET_PORT \
|
||||
$KUBE_ALLOW_PRIV \
|
||||
$KUBE_SERVICE_ADDRESSES \
|
||||
$KUBE_ADMISSION_CONTROL \
|
||||
$KUBE_API_ARGS
|
||||
Restart=on-failure
|
||||
Type=notify
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -0,0 +1,20 @@
|
||||
[Unit]
|
||||
Description=Kubernetes Controller Manager
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Requires=etcd2.service
|
||||
After=etcd2.service
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=-/etc/kubernetes/config
|
||||
EnvironmentFile=-/etc/kubernetes/controller-manager
|
||||
User=kube
|
||||
ExecStart={{ bin_dir }}/kube-controller-manager \
|
||||
$KUBE_LOGTOSTDERR \
|
||||
$KUBE_LOG_LEVEL \
|
||||
$KUBE_MASTER \
|
||||
$KUBE_CONTROLLER_MANAGER_ARGS
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -0,0 +1,21 @@
|
||||
[Unit]
|
||||
Description=Kubernetes Kube-Proxy Server
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
{% if overlay_network_plugin|default('') %}
|
||||
After=docker.service calico-node.service
|
||||
{% else %}
|
||||
After=docker.service
|
||||
{% endif %}
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=/etc/network-environment
|
||||
ExecStart={{ bin_dir }}/kube-proxy \
|
||||
$KUBE_LOGTOSTDERR \
|
||||
$KUBE_LOG_LEVEL \
|
||||
$KUBE_MASTER \
|
||||
$KUBE_PROXY_ARGS
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -0,0 +1,20 @@
|
||||
[Unit]
|
||||
Description=Kubernetes Scheduler Plugin
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Requires=etcd2.service
|
||||
After=etcd2.service
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=-/etc/kubernetes/config
|
||||
EnvironmentFile=-/etc/kubernetes/scheduler
|
||||
User=kube
|
||||
ExecStart={{ bin_dir }}/kube-scheduler \
|
||||
$KUBE_LOGTOSTDERR \
|
||||
$KUBE_LOG_LEVEL \
|
||||
$KUBE_MASTER \
|
||||
$KUBE_SCHEDULER_ARGS
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
29
roles/kubernetes/node/files/fluentd-es.yaml
Normal file
29
roles/kubernetes/node/files/fluentd-es.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: fluentd-elasticsearch
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- name: fluentd-elasticsearch
|
||||
image: gcr.io/google_containers/fluentd-elasticsearch:1.11
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
args:
|
||||
- -qq
|
||||
volumeMounts:
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
- name: varlibdockercontainers
|
||||
mountPath: /var/lib/docker/containers
|
||||
readOnly: true
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: varlog
|
||||
hostPath:
|
||||
path: /var/log
|
||||
- name: varlibdockercontainers
|
||||
hostPath:
|
||||
path: /var/lib/docker/containers
|
||||
|
||||
19
roles/kubernetes/node/handlers/main.yml
Normal file
19
roles/kubernetes/node/handlers/main.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
- name: restart daemons
|
||||
command: /bin/true
|
||||
notify:
|
||||
- restart kubelet
|
||||
- restart proxy
|
||||
|
||||
- name: restart kubelet
|
||||
service:
|
||||
name: kubelet
|
||||
state: restarted
|
||||
|
||||
- name: restart proxy
|
||||
service:
|
||||
name: kube-proxy
|
||||
state: restarted
|
||||
|
||||
- name: reload systemd
|
||||
command: systemctl daemon-reload
|
||||
3
roles/kubernetes/node/meta/main.yml
Normal file
3
roles/kubernetes/node/meta/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: kubernetes/common }
|
||||
61
roles/kubernetes/node/tasks/config.yml
Normal file
61
roles/kubernetes/node/tasks/config.yml
Normal file
@@ -0,0 +1,61 @@
|
||||
---
|
||||
- name: Get the node token values
|
||||
slurp:
|
||||
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
|
||||
with_items:
|
||||
- "system:kubelet"
|
||||
- "system:proxy"
|
||||
register: tokens
|
||||
run_once: true
|
||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||
|
||||
- name: Set token facts
|
||||
set_fact:
|
||||
kubelet_token: "{{ tokens.results[0].content|b64decode }}"
|
||||
proxy_token: "{{ tokens.results[1].content|b64decode }}"
|
||||
|
||||
- name: Create kubelet environment vars dir
|
||||
file: path=/etc/systemd/system/kubelet.service.d state=directory
|
||||
|
||||
- name: Write kubelet config file
|
||||
template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf
|
||||
notify:
|
||||
- reload systemd
|
||||
- restart kubelet
|
||||
|
||||
- name: write the kubecfg (auth) file for kubelet
|
||||
template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
|
||||
notify:
|
||||
- restart kubelet
|
||||
|
||||
- name: Create proxy environment vars dir
|
||||
file: path=/etc/systemd/system/kube-proxy.service.d state=directory
|
||||
|
||||
- name: Write proxy config file
|
||||
template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
|
||||
notify:
|
||||
- reload systemd
|
||||
- restart proxy
|
||||
|
||||
- name: write the kubecfg (auth) file for kube-proxy
|
||||
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
|
||||
notify:
|
||||
- restart proxy
|
||||
|
||||
- name: Enable kubelet
|
||||
service:
|
||||
name: kubelet
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Enable proxy
|
||||
service:
|
||||
name: kube-proxy
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: addons | Logging | Create Fluentd pod
|
||||
copy:
|
||||
src: fluentd-es.yaml
|
||||
dest: "{{ kube_manifest_dir }}/fluentd-es.yaml"
|
||||
when: enable_logging
|
||||
20
roles/kubernetes/node/tasks/install.yml
Normal file
20
roles/kubernetes/node/tasks/install.yml
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
- name: Write kube-proxy systemd init file
|
||||
template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
|
||||
notify: restart daemons
|
||||
|
||||
- name: Write kubelet systemd init file
|
||||
template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
|
||||
notify: restart daemons
|
||||
|
||||
- name: Install kubernetes binaries
|
||||
copy:
|
||||
src={{ local_release_dir }}/kubernetes/bin/{{ item }}
|
||||
dest={{ bin_dir }}
|
||||
owner=kube
|
||||
mode=u+x
|
||||
with_items:
|
||||
- kube-proxy
|
||||
- kubelet
|
||||
notify:
|
||||
- restart daemons
|
||||
4
roles/kubernetes/node/tasks/main.yml
Normal file
4
roles/kubernetes/node/tasks/main.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
---
|
||||
- include: install.yml
|
||||
- include: config.yml
|
||||
- include: temp_workaround.yml
|
||||
5
roles/kubernetes/node/tasks/temp_workaround.yml
Normal file
5
roles/kubernetes/node/tasks/temp_workaround.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
- name: Warning Temporary workaround !!! Disable kubelet and kube-proxy on node startup
|
||||
service: name={{ item }} enabled=no
|
||||
with_items:
|
||||
- kubelet
|
||||
- kube-proxy
|
||||
21
roles/kubernetes/node/templates/kubelet.j2
Normal file
21
roles/kubernetes/node/templates/kubelet.j2
Normal file
@@ -0,0 +1,21 @@
|
||||
[Service]
|
||||
Environment="KUBE_LOGTOSTDERR=--logtostderr=true"
|
||||
Environment="KUBE_LOG_LEVEL=--v=0"
|
||||
Environment="KUBE_ALLOW_PRIV=--allow_privileged=true"
|
||||
Environment="KUBE_MASTER=--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
|
||||
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
|
||||
Environment="KUBELET_ADDRESS=--address=0.0.0.0"
|
||||
# The port for the info server to serve on
|
||||
# Environment="KUBELET_PORT=--port=10250"
|
||||
# You may leave this blank to use the actual hostname
|
||||
Environment="KUBELET_HOSTNAME=--hostname_override={{ inventory_hostname }}"
|
||||
# location of the api-server
|
||||
Environment="KUBELET_API_SERVER=--api_servers=https://{{ groups['kube-master'][0]}}:{{ kube_master_port }}"
|
||||
{% if dns_setup %}
|
||||
Environment="KUBELET_ARGS=--cluster_dns={{ kube_dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
|
||||
{% else %}
|
||||
Environment="KUBELET_ARGS=--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
|
||||
{% endif %}
|
||||
{% if overlay_network_plugin|default('') %}
|
||||
Environment="KUBELET_NETWORK_PLUGIN=--network_plugin={{ overlay_network_plugin }}"
|
||||
{% endif %}
|
||||
18
roles/kubernetes/node/templates/kubelet.kubeconfig.j2
Normal file
18
roles/kubernetes/node/templates/kubelet.kubeconfig.j2
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: kubelet-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['kube-master'][0] }}:443
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: kubelet
|
||||
name: kubelet-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: kubelet
|
||||
user:
|
||||
token: {{ kubelet_token }}
|
||||
6
roles/kubernetes/node/templates/proxy.j2
Normal file
6
roles/kubernetes/node/templates/proxy.j2
Normal file
@@ -0,0 +1,6 @@
|
||||
###
|
||||
# kubernetes proxy config
|
||||
|
||||
# default config should be adequate
|
||||
[Service]
|
||||
Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
|
||||
18
roles/kubernetes/node/templates/proxy.kubeconfig.j2
Normal file
18
roles/kubernetes/node/templates/proxy.kubeconfig.j2
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: proxy-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: proxy
|
||||
name: proxy-to-{{ cluster_name }}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
|
||||
name: {{ cluster_name }}
|
||||
users:
|
||||
- name: proxy
|
||||
user:
|
||||
token: {{ proxy_token }}
|
||||
@@ -0,0 +1,21 @@
|
||||
[Unit]
|
||||
Description=Kubernetes Kube-Proxy Server
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
{% if overlay_network_plugin|default('') %}
|
||||
After=docker.service calico-node.service
|
||||
{% else %}
|
||||
After=docker.service
|
||||
{% endif %}
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=/etc/network-environment
|
||||
ExecStart={{ bin_dir }}/kube-proxy \
|
||||
$KUBE_LOGTOSTDERR \
|
||||
$KUBE_LOG_LEVEL \
|
||||
$KUBE_MASTER \
|
||||
$KUBE_PROXY_ARGS
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -0,0 +1,26 @@
|
||||
[Unit]
|
||||
Description=Kubernetes Kubelet Server
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
{% if overlay_network_plugin|default('') %}
|
||||
After=docker.service calico-node.service
|
||||
{% else %}
|
||||
After=docker.service
|
||||
{% endif %}
|
||||
|
||||
[Service]
|
||||
#WorkingDirectory=/var/lib/kubelet
|
||||
EnvironmentFile=/etc/network-environment
|
||||
ExecStart={{ bin_dir }}/kubelet \
|
||||
$KUBE_LOGTOSTDERR \
|
||||
$KUBE_LOG_LEVEL \
|
||||
$KUBELET_API_SERVER \
|
||||
$KUBELET_ADDRESS \
|
||||
$KUBELET_PORT \
|
||||
$KUBELET_HOSTNAME \
|
||||
$KUBE_ALLOW_PRIV \
|
||||
$KUBELET_ARGS \
|
||||
$KUBELET_NETWORK_PLUGIN
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
Reference in New Issue
Block a user