Merge branch 'master' into master

zoues committed 2017-05-23 09:32:28 +08:00 (via GitHub)
68 changed files with 507 additions and 332 deletions

View File

@@ -13,3 +13,6 @@
line: "enabled=0"
state: present
when: fastestmirror.stat.exists
- name: Install package requirements for bootstrap
raw: yum -y install libselinux-python

View File

@@ -41,7 +41,7 @@ spec:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=dnsmasq-autoscaler
- --target=ReplicationController/dnsmasq
- --target=Deployment/dnsmasq
# When the cluster uses large nodes (with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
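For context on the --target switch above: cluster-proportional-autoscaler's linear mode sizes the target from cluster size, roughly replicas = max(ceil(cores / coresPerReplica), ceil(nodes / nodesPerReplica)), and preventSinglePointFailure keeps at least two replicas once the cluster has more than one node. A minimal sketch of the rendered flag, assuming a hypothetical dnsmasq_nodes_per_replica of 10:

# rendered with dnsmasq_nodes_per_replica: 10 (illustrative value, not from this commit)
- --default-params={"linear":{"nodesPerReplica":10,"preventSinglePointFailure":true}}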

View File

@@ -22,8 +22,8 @@ kube_version: v1.6.4
etcd_version: v3.0.17
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v1.1.0-rc8"
calico_cni_version: "v1.5.6"
calico_version: "v1.1.3"
calico_cni_version: "v1.7.0"
calico_policy_version: "v0.5.4"
weave_version: 1.8.2
flannel_version: v0.6.2
@@ -50,10 +50,8 @@ calico_cni_image_repo: "calico/cni"
calico_cni_image_tag: "{{ calico_cni_version }}"
calico_policy_image_repo: "calico/kube-policy-controller"
calico_policy_image_tag: "{{ calico_policy_version }}"
# TODO(adidenko): switch to "calico/routereflector" when
# https://github.com/projectcalico/calico-bird/pull/27 is merged
calico_rr_image_repo: "quay.io/l23network/routereflector"
calico_rr_image_tag: "v0.1"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "v0.3.0"
exechealthz_version: 1.1
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"
@@ -61,9 +59,11 @@ hyperkube_image_repo: "quay.io/coreos/hyperkube"
hyperkube_image_tag: "{{ kube_version }}_coreos.0"
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
pod_infra_image_tag: "{{ pod_infra_version }}"
netcheck_tag: "v1.0"
netcheck_version: "v1.0"
netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
netcheck_agent_tag: "{{ netcheck_version }}"
netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server"
netcheck_server_tag: "{{ netcheck_version }}"
weave_kube_image_repo: "weaveworks/weave-kube"
weave_kube_image_tag: "{{ weave_version }}"
weave_npc_image_repo: "weaveworks/weave-npc"
@@ -103,13 +103,13 @@ downloads:
netcheck_server:
container: true
repo: "{{ netcheck_server_img_repo }}"
tag: "{{ netcheck_tag }}"
tag: "{{ netcheck_server_tag }}"
sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
enabled: "{{ deploy_netchecker|bool }}"
netcheck_agent:
container: true
repo: "{{ netcheck_agent_img_repo }}"
tag: "{{ netcheck_tag }}"
tag: "{{ netcheck_agent_tag }}"
sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
enabled: "{{ deploy_netchecker|bool }}"
etcd:

View File

@@ -2,14 +2,18 @@
- name: downloading...
debug:
msg: "{{ download.url }}"
when: "{{ download.enabled|bool and not download.container|bool }}"
when:
- download.enabled|bool
- not download.container|bool
- name: Create dest directories
file:
path: "{{local_release_dir}}/{{download.dest|dirname}}"
state: directory
recurse: yes
when: "{{ download.enabled|bool and not download.container|bool }}"
when:
- download.enabled|bool
- not download.container|bool
tags: bootstrap-os
- name: Download items
@@ -23,7 +27,9 @@
until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg"
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: "{{ download.enabled|bool and not download.container|bool }}"
when:
- download.enabled|bool
- not download.container|bool
- name: Extract archives
unarchive:
@@ -32,7 +38,11 @@
owner: "{{ download.owner|default(omit) }}"
mode: "{{ download.mode|default(omit) }}"
copy: no
when: "{{ download.enabled|bool and not download.container|bool and download.unarchive is defined and download.unarchive == True }}"
when:
- download.enabled|bool
- not download.container|bool
- download.unarchive is defined
- download.unarchive == True
- name: Fix permissions
file:
@@ -40,7 +50,10 @@
path: "{{local_release_dir}}/{{download.dest}}"
owner: "{{ download.owner|default(omit) }}"
mode: "{{ download.mode|default(omit) }}"
when: "{{ download.enabled|bool and not download.container|bool and (download.unarchive is not defined or download.unarchive == False) }}"
when:
- download.enabled|bool
- not download.container|bool
- (download.unarchive is not defined or download.unarchive == False)
- set_fact:
download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
@@ -53,13 +66,15 @@
recurse: yes
mode: 0755
owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
when: "{{ download.enabled|bool and download.container|bool }}"
when:
- download.enabled|bool
- download.container|bool
tags: bootstrap-os
# This is required for the download_localhost delegate to work smoothly with Container Linux by CoreOS cluster nodes
- name: Hack python binary path for localhost
raw: sh -c "mkdir -p /opt/bin; ln -sf /usr/bin/python /opt/bin/python"
when: "{{ download_delegate == 'localhost' }}"
when: download_delegate == 'localhost'
delegate_to: localhost
failed_when: false
run_once: true
@@ -73,12 +88,18 @@
delegate_to: localhost
become: false
run_once: true
when: "{{ download_run_once|bool and download.enabled|bool and download.container|bool and download_delegate == 'localhost' }}"
when:
- download_run_once|bool
- download.enabled|bool
- download.container|bool
- download_delegate == 'localhost'
tags: localhost
- name: Make download decision if pull is required by tag or sha256
include: set_docker_image_facts.yml
when: "{{ download.enabled|bool and download.container|bool }}"
when:
- download.enabled|bool
- download.container|bool
delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
run_once: "{{ download_run_once|bool }}"
tags: facts
@@ -86,7 +107,9 @@
- name: pulling...
debug:
msg: "{{ pull_args }}"
when: "{{ download.enabled|bool and download.container|bool }}"
when:
- download.enabled|bool
- download.container|bool
#NOTE(bogdando) this brings no docker-py deps for nodes
- name: Download containers if pull is required or told to always pull
@@ -95,7 +118,10 @@
until: pull_task_result|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: "{{ download.enabled|bool and download.container|bool and pull_required|bool|default(download_always_pull) }}"
when:
- download.enabled|bool
- download.container|bool
- pull_required|bool|default(download_always_pull)
delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
run_once: "{{ download_run_once|bool }}"
@@ -110,7 +136,10 @@
- name: "Update the 'container_changed' fact"
set_fact:
container_changed: "{{ pull_required|bool|default(false) or not 'up to date' in pull_task_result.stdout }}"
when: "{{ download.enabled|bool and download.container|bool and pull_required|bool|default(download_always_pull) }}"
when:
- download.enabled|bool
- download.container|bool
- pull_required|bool|default(download_always_pull)
delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
run_once: "{{ download_run_once|bool }}"
tags: facts
@@ -120,7 +149,10 @@
path: "{{fname}}"
register: img
changed_when: false
when: "{{ download.enabled|bool and download.container|bool and download_run_once|bool }}"
when:
- download.enabled|bool
- download.container|bool
- download_run_once|bool
delegate_to: "{{ download_delegate }}"
become: false
run_once: true
@@ -131,7 +163,12 @@
delegate_to: "{{ download_delegate }}"
register: saved
run_once: true
when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool and (container_changed|bool or not img.stat.exists)
when:
- (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
- download_run_once|bool
- download.enabled|bool
- download.container|bool
- (container_changed|bool or not img.stat.exists)
- name: Download | copy container images to ansible host
synchronize:
@@ -140,7 +177,14 @@
mode: pull
delegate_to: localhost
become: false
when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname == groups['kube-master'][0] and download_delegate != "localhost" and download_run_once|bool and download.enabled|bool and download.container|bool and saved.changed
when:
- not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
- inventory_hostname == groups['kube-master'][0]
- download_delegate != "localhost"
- download_run_once|bool
- download.enabled|bool
- download.container|bool
- saved.changed
- name: Download | upload container images to nodes
synchronize:
@@ -153,10 +197,21 @@
until: get_task|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool
when:
- (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and
inventory_hostname != groups['kube-master'][0] or
download_delegate == "localhost")
- download_run_once|bool
- download.enabled|bool
- download.container|bool
tags: [upload, upgrade]
- name: Download | load container images
shell: "{{ docker_bin_dir }}/docker load < {{ fname }}"
when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool
when:
- (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and
inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost")
- download_run_once|bool
- download.enabled|bool
- download.container|bool
tags: [upload, upgrade]
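All the rewrites in this file follow one pattern: Ansible already evaluates when: in a Jinja2 context, so the quoted "{{ ... }}" wrapping was redundant (newer Ansible warns about it), and a YAML list of bare conditions is implicitly ANDed. A minimal sketch of the two equivalent forms, using a hypothetical task:

# before: one quoted Jinja2 expression
- name: pull example (hypothetical)
  debug:
    msg: "{{ pull_args }}"
  when: "{{ download.enabled|bool and download.container|bool }}"

# after: bare expressions in a list, ANDed together
- name: pull example (hypothetical)
  debug:
    msg: "{{ pull_args }}"
  when:
    - download.enabled|bool
    - download.container|bool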

View File

@@ -1,4 +1,12 @@
---
- include: sync_etcd_master_certs.yml
when: inventory_hostname in groups.etcd
tags: etcd-secrets
- include: sync_etcd_node_certs.yml
when: inventory_hostname in etcd_node_cert_hosts
tags: etcd-secrets
- name: gen_certs_vault | Read in the local credentials
command: cat /etc/vault/roles/etcd/userpass
@@ -15,7 +23,7 @@
url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ etcd_vault_creds.username }}"
headers:
Accept: application/json
Content-Type: application/json
method: POST
body_format: json
body:
@@ -37,7 +45,7 @@
issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}"
issue_cert_file_group: "{{ etcd_cert_group }}"
issue_cert_file_owner: kube
issue_cert_headers: "{{ etcd_vault_headers }}"
issue_cert_hosts: "{{ groups.etcd }}"
issue_cert_ip_sans: >-
[
@@ -60,7 +68,7 @@
issue_cert_copy_ca: "{{ item == etcd_node_certs_needed|first }}"
issue_cert_file_group: "{{ etcd_cert_group }}"
issue_cert_file_owner: kube
issue_cert_headers: "{{ etcd_vault_headers }}"
issue_cert_hosts: "{{ etcd_node_cert_hosts }}"
issue_cert_ip_sans: >-
[
@@ -75,3 +83,5 @@
with_items: "{{ etcd_node_certs_needed|d([]) }}"
when: inventory_hostname in etcd_node_cert_hosts
notify: set etcd_secret_changed

View File

@@ -7,20 +7,7 @@
when: cert_management == "script"
tags: [etcd-secrets, facts]
- include: gen_certs_script.yml
when: cert_management == "script"
tags: etcd-secrets
- include: sync_etcd_master_certs.yml
when: cert_management == "vault" and inventory_hostname in groups.etcd
tags: etcd-secrets
- include: sync_etcd_node_certs.yml
when: cert_management == "vault" and inventory_hostname in etcd_node_cert_hosts
tags: etcd-secrets
- include: gen_certs_vault.yml
when: cert_management == "vault" and (etcd_master_certs_needed|d() or etcd_node_certs_needed|d())
- include: "gen_certs_{{ cert_management }}.yml"
tags: etcd-secrets
- include: "install_{{ etcd_deployment_type }}.yml"

View File

@@ -42,9 +42,6 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

View File

@@ -17,7 +17,7 @@
- set_fact:
wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}"
when: "{{ 'bastion' in groups['all'] }}"
when: "'bastion' in groups['all']"
- name: wait for bastion to come back
wait_for:
@@ -27,7 +27,7 @@
timeout: 300
become: false
delegate_to: localhost
when: "is_bastion"
when: is_bastion
- name: waiting for server to come back (using bastion if necessary)
wait_for:
@@ -37,4 +37,4 @@
timeout: 300
become: false
delegate_to: "{{ wait_for_delegate }}"
when: "not is_bastion"
when: not is_bastion

View File

@@ -24,8 +24,8 @@ deploy_netchecker: false
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
agent_img: "{{ netcheck_agent_img_repo }}:{{ netcheck_tag }}"
server_img: "{{ netcheck_server_img_repo }}:{{ netcheck_tag }}"
agent_img: "{{ netcheck_agent_img_repo }}:{{ netcheck_agent_tag }}"
server_img: "{{ netcheck_server_img_repo }}:{{ netcheck_server_tag }}"
# Limits for netchecker apps
netchecker_agent_cpu_limit: 30m

View File

@@ -5,7 +5,7 @@
with_items:
- {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
- {file: netchecker-server-pod.yml.j2, type: po, name: netchecker-server}
- {file: netchecker-server-deployment.yml.j2, type: po, name: netchecker-server}
- {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
register: manifests
when: inventory_hostname == groups['kube-master'][0]

View File

@@ -42,7 +42,7 @@ spec:
- --namespace=kube-system
- --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=replicationcontroller/kubedns
- --target=Deployment/kubedns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2

View File

@@ -83,6 +83,7 @@ spec:
{% if kube_log_level == '4' %}
- --log-queries
{% endif %}
- --local=/{{ bogus_domains }}
ports:
- containerPort: 53
name: dns

View File

@@ -20,6 +20,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"

View File

@@ -24,6 +24,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"

View File

@@ -24,6 +24,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
args:
- "-v=5"
- "-alsologtostderr=true"

View File

@@ -0,0 +1,33 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: netchecker-server
spec:
replicas: 1
template:
metadata:
name: netchecker-server
labels:
app: netchecker-server
namespace: {{ netcheck_namespace }}
spec:
containers:
- name: netchecker-server
image: "{{ server_img }}"
env:
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ netchecker_server_cpu_limit }}
memory: {{ netchecker_server_memory_limit }}
requests:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
ports:
- containerPort: 8081
hostPort: 8081
args:
- "-v=5"
- "-logtostderr"
- "-kubeproxyinit"
- "-endpoint=0.0.0.0:8081"

View File

@@ -1,28 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: netchecker-server
labels:
app: netchecker-server
namespace: {{ netcheck_namespace }}
spec:
containers:
- name: netchecker-server
image: "{{ server_img }}"
env:
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ netchecker_server_cpu_limit }}
memory: {{ netchecker_server_memory_limit }}
requests:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
ports:
- containerPort: 8081
hostPort: 8081
args:
- "-v=5"
- "-logtostderr"
- "-kubeproxyinit"
- "-endpoint=0.0.0.0:8081"

View File

@@ -1 +1,4 @@
helm_enabled: false
# Directory to mount into the helm container and use as HELM_HOME.
helm_home_dir: "/root/.helm"

View File

@@ -1,4 +1,7 @@
---
- name: Helm | Make sure HELM_HOME directory exists
file: path={{ helm_home_dir }} state=directory
- name: Helm | Set up helm launcher
template:
src: helm-container.j2
@@ -8,7 +11,7 @@
register: helm_container
- name: Helm | Install/upgrade helm
command: "helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
when: helm_container.changed
- name: Helm | Set up bash completion
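Using the absolute {{ bin_dir }}/helm path avoids depending on the remote PATH. A hedged follow-up check, assuming Helm 2's client/server split (the task below is illustrative, not part of the commit):

- name: Helm | Verify tiller responds (hypothetical)
  command: "{{ bin_dir }}/helm version --server"
  register: helm_server_version
  changed_when: false
  until: helm_server_version.rc == 0
  retries: 3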

View File

@@ -3,6 +3,7 @@
--net=host \
--name=helm \
-v /etc/ssl:/etc/ssl:ro \
-v {{ helm_home_dir }}:{{ helm_home_dir }}:rw \
{% for dir in ssl_ca_dirs -%}
-v {{ dir }}:{{ dir }}:ro \
{% endfor -%}

View File

@@ -36,6 +36,13 @@ kube_apiserver_cpu_limit: 800m
kube_apiserver_memory_requests: 256M
kube_apiserver_cpu_requests: 100m
# Admission control plug-ins
kube_apiserver_admission_control:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- ResourceQuota
## Enable/Disable Kube API Server Authentication Methods
kube_basic_auth: true
@@ -51,3 +58,10 @@ kube_oidc_auth: false
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_groups_claim: groups
##Variables for custom flags
apiserver_custom_flags: []
controller_mgr_custom_flags: []
scheduler_custom_flags: []
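Turning the admission controller list and the extra flags into variables lets deployments override them from inventory instead of patching manifest templates. A hypothetical group_vars override (values are illustrative):

# group_vars/k8s-cluster.yml (hypothetical)
kube_apiserver_admission_control:
  - NamespaceLifecycle
  - LimitRanger
  - ServiceAccount
  - DefaultStorageClass
  - ResourceQuota
  - PodSecurityPolicy
apiserver_custom_flags:
  - "--runtime-config=batch/v2alpha1"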

View File

@@ -34,9 +34,9 @@
- meta: flush_handlers
- name: copy kube system namespace manifest
copy:
src: namespace.yml
- name: Write kube system namespace manifest
template:
src: namespace.j2
dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
run_once: yes
when: inventory_hostname == groups['kube-master'][0]

View File

@@ -9,7 +9,7 @@ metadata:
spec:
hostNetwork: true
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
dnsPolicy: ClusterFirst
{% endif %}
containers:
- name: kube-apiserver
@@ -33,7 +33,7 @@ spec:
- --etcd-keyfile={{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
- --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
- --apiserver-count={{ kube_apiserver_count }}
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
- --admission-control={{ kube_apiserver_admission_control | join(',') }}
- --service-cluster-ip-range={{ kube_service_addresses }}
- --service-node-port-range={{ kube_apiserver_node_port_range }}
- --client-ca-file={{ kube_cert_dir }}/ca.pem
@@ -80,6 +80,13 @@ spec:
{% endif %}
{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
- --anonymous-auth={{ kube_api_anonymous_auth }}
{% endif %}
{% if apiserver_custom_flags is string %}
- {{ apiserver_custom_flags }}
{% else %}
{% for flag in apiserver_custom_flags %}
- {{ flag }}
{% endfor %}
{% endif %}
livenessProbe:
httpGet:
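The `is string` branch lets each custom-flags variable be either a single string or a list. A sketch of how the block renders, with illustrative flag values:

# apiserver_custom_flags: "--v=4"
#   renders:  - --v=4
# apiserver_custom_flags: ["--v=4", "--audit-log-path=/var/log/kube-audit.log"]
#   renders:  - --v=4
#             - --audit-log-path=/var/log/kube-audit.log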

View File

@@ -8,7 +8,7 @@ metadata:
spec:
hostNetwork: true
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
dnsPolicy: ClusterFirst
{% endif %}
containers:
- name: kube-controller-manager
@@ -45,6 +45,13 @@ spec:
- --allocate-node-cidrs=true
- --configure-cloud-routes=true
- --cluster-cidr={{ kube_pods_subnet }}
{% endif %}
{% if controller_mgr_custom_flags is string %}
- {{ controller_mgr_custom_flags }}
{% else %}
{% for flag in controller_mgr_custom_flags %}
- {{ flag }}
{% endfor %}
{% endif %}
livenessProbe:
httpGet:

View File

@@ -8,7 +8,7 @@ metadata:
spec:
hostNetwork: true
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
dnsPolicy: ClusterFirst
{% endif %}
containers:
- name: kube-scheduler
@@ -27,6 +27,13 @@ spec:
- --leader-elect=true
- --master={{ kube_apiserver_endpoint }}
- --v={{ kube_log_level }}
{% if scheduler_custom_flags is string %}
- {{ scheduler_custom_flags }}
{% else %}
{% for flag in scheduler_custom_flags %}
- {{ flag }}
{% endfor %}
{% endif %}
livenessProbe:
httpGet:
host: 127.0.0.1

View File

@@ -1,3 +1,6 @@
# Valid options: docker (default), rkt, or host
kubelet_deployment_type: docker
# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
kube_apiserver_insecure_bind_address: 127.0.0.1
@@ -45,3 +48,6 @@ etcd_config_dir: /etc/ssl/etcd
kube_apiserver_node_port_range: "30000-32767"
kubelet_load_modules: false
##Support custom flags to be passed to kubelet
kubelet_custom_flags: []
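kubelet_custom_flags follows the same string-or-list convention as the control-plane flag variables. A hypothetical override:

# group_vars/k8s-cluster.yml (hypothetical)
kubelet_custom_flags:
  - "--eviction-hard=memory.available<256Mi"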

View File

@@ -0,0 +1,10 @@
---
- name: install | Copy kubelet from hyperkube container
command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -f /hyperkube /systembindir/kubelet"
register: kubelet_task_result
until: kubelet_task_result.rc == 0
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
tags: [hyperkube, upgrade]
notify: restart kubelet

View File

@@ -7,6 +7,12 @@
- include: pre_upgrade.yml
tags: kubelet
- name: Ensure /var/lib/cni exists
file:
path: /var/lib/cni
state: directory
mode: 0755
- include: install.yml
tags: kubelet

View File

@@ -25,6 +25,7 @@
-v /var/lib/cni:/var/lib/cni:shared \
-v /var/run:/var/run:rw \
-v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-v /etc/os-release:/etc/os-release:ro \
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
./hyperkube kubelet \
"$@"

View File

@@ -23,10 +23,11 @@ ExecStart={{ bin_dir }}/kubelet \
$DOCKER_SOCKET \
$KUBELET_NETWORK_PLUGIN \
$KUBELET_CLOUDPROVIDER
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
ExecReload={{ docker_bin_dir }}/docker restart kubelet
Restart=always
RestartSec=10s
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
ExecReload={{ docker_bin_dir }}/docker restart kubelet
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,30 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
After=docker.service docker.socket calico-node.service
Wants=docker.socket calico-node.service
{% else %}
After=docker.service
Wants=docker.socket
{% endif %}
[Service]
EnvironmentFile={{kube_config_dir}}/kubelet.env
ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBELET_API_SERVER \
$KUBELET_ADDRESS \
$KUBELET_PORT \
$KUBELET_HOSTNAME \
$KUBE_ALLOW_PRIV \
$KUBELET_ARGS \
$DOCKER_SOCKET \
$KUBELET_NETWORK_PLUGIN \
$KUBELET_CLOUDPROVIDER
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target

View File

@@ -19,13 +19,13 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
{# DNS settings for kubelet #}
{% if dns_mode == 'kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster_dns={{ skydns_server }}{% endset %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
{% elif dns_mode == 'dnsmasq_kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster_dns={{ dns_server }}{% endset %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ dns_server }}{% endset %}
{% else %}
{% set kubelet_args_cluster_dns %}{% endset %}
{% endif %}
{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
{# Location of the apiserver #}
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
@@ -44,7 +44,7 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
{% endif %}
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }}"
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
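The substitutions above switch to kubelet's canonical hyphenated flag names; the underscore spellings relied on kubelet's flag-name normalization. Assuming illustrative values (skydns_server: 10.233.0.3, dns_domain: cluster.local, kube_resolv_conf: /etc/resolv.conf), the DNS portion of KUBELET_ARGS would render roughly as:

--cluster-dns=10.233.0.3 --cluster-domain=cluster.local --resolv-conf=/etc/resolv.conf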

View File

@@ -20,6 +20,7 @@ ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
ExecStart=/usr/bin/rkt run \
--volume os-release,kind=host,source=/etc/os-release,readOnly=true \
--volume dns,kind=host,source=/etc/resolv.conf \
--volume etc-kubernetes,kind=host,source={{ kube_config_dir }},readOnly=false \
--volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
@@ -39,6 +40,7 @@ ExecStart=/usr/bin/rkt run \
--mount volume=opt-cni,target=/opt/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
{% endif %}
--mount volume=os-release,target=/etc/os-release \
--mount volume=dns,target=/etc/resolv.conf \
--mount volume=etc-kubernetes,target={{ kube_config_dir }} \
--mount volume=etc-ssl-certs,target=/etc/ssl/certs \

View File

@@ -8,7 +8,7 @@ metadata:
spec:
hostNetwork: true
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
dnsPolicy: ClusterFirst
{% endif %}
containers:
- name: kube-proxy

View File

@@ -45,5 +45,5 @@
when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
- name: Preinstall | restart kube-controller-manager
shell: "docker ps -f name=k8s-controller-manager* -q | xargs --no-run-if-empty docker rm -f"
shell: "docker ps -f name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f"
when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' and kube_controller_set.stat.exists

View File

@@ -17,7 +17,10 @@
line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name| default('lb-apiserver.kubernetes.local') }}"
state: present
backup: yes
when: loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and apiserver_loadbalancer_domain_name is defined
when:
- loadbalancer_apiserver is defined
- loadbalancer_apiserver.address is defined
- apiserver_loadbalancer_domain_name is defined
- name: Hosts | localhost ipv4 in hosts file
lineinfile:

View File

@@ -43,7 +43,7 @@
path: "{{ kube_config_dir }}"
state: directory
owner: kube
when: "{{ inventory_hostname in groups['k8s-cluster'] }}"
when: inventory_hostname in groups['k8s-cluster']
tags: [kubelet, k8s-secrets, kube-controller-manager, kube-apiserver, bootstrap-os, apps, network, master, node]
- name: Create kubernetes script directory
@@ -51,7 +51,7 @@
path: "{{ kube_script_dir }}"
state: directory
owner: kube
when: "{{ inventory_hostname in groups['k8s-cluster'] }}"
when: "inventory_hostname in groups['k8s-cluster']"
tags: [k8s-secrets, bootstrap-os]
- name: Create kubernetes manifests directory
@@ -59,17 +59,21 @@
path: "{{ kube_manifest_dir }}"
state: directory
owner: kube
when: "{{ inventory_hostname in groups['k8s-cluster'] }}"
when: "inventory_hostname in groups['k8s-cluster']"
tags: [kubelet, bootstrap-os, master, node]
- name: check cloud_provider value
fail:
msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack' or 'vsphere'"
when: cloud_provider is defined and cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere']
when:
- cloud_provider is defined
- cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere']
tags: [cloud-provider, facts]
- include: "{{ cloud_provider }}-credential-check.yml"
when: cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags: [cloud-provider, facts]
- name: Create cni directories
@@ -80,7 +84,9 @@
with_items:
- "/etc/cni/net.d"
- "/opt/cni/bin"
when: kube_network_plugin in ["calico", "weave", "canal"] and "{{ inventory_hostname in groups['k8s-cluster'] }}"
when:
- kube_network_plugin in ["calico", "weave", "canal"]
- inventory_hostname in groups['k8s-cluster']
tags: [network, calico, weave, canal, bootstrap-os]
- name: Update package management cache (YUM)
@@ -91,7 +97,9 @@
until: yum_task_result|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_pkg_mgr == 'yum' and not is_atomic
when:
- ansible_pkg_mgr == 'yum'
- not is_atomic
tags: bootstrap-os
- name: Install latest version of python-apt for Debian distribs
@@ -109,14 +117,17 @@
until: dnf_task_result|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_distribution == "Fedora" and
ansible_distribution_major_version > 21
when:
- ansible_distribution == "Fedora"
- ansible_distribution_major_version > 21
changed_when: False
tags: bootstrap-os
- name: Install epel-release on RedHat/CentOS
shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
when:
- ansible_distribution in ["CentOS","RedHat"]
- not is_atomic
register: epel_task_result
until: epel_task_result|succeeded
retries: 4
@@ -149,7 +160,9 @@
selinux:
policy: targeted
state: permissive
when: ansible_os_family == "RedHat" and slc.stat.exists == True
when:
- ansible_os_family == "RedHat"
- slc.stat.exists == True
changed_when: False
tags: bootstrap-os
@@ -159,7 +172,9 @@
line: "precedence ::ffff:0:0/96 100"
state: present
backup: yes
when: disable_ipv6_dns and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
when:
- disable_ipv6_dns
- not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
tags: bootstrap-os
- name: set default sysctl file path
@@ -176,7 +191,9 @@
- name: Change sysctl file path to link source if linked
set_fact:
sysctl_file_path: "{{sysctl_file_stat.stat.lnk_source}}"
when: sysctl_file_stat.stat.islnk is defined and sysctl_file_stat.stat.islnk
when:
- sysctl_file_stat.stat.islnk is defined
- sysctl_file_stat.stat.islnk
tags: bootstrap-os
- name: Enable ip forwarding
@@ -193,22 +210,33 @@
dest: "{{ kube_config_dir }}/cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
when:
- inventory_hostname in groups['k8s-cluster']
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags: [cloud-provider]
- include: etchosts.yml
tags: [bootstrap-os, etchosts]
- include: resolvconf.yml
when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
tags: [bootstrap-os, resolvconf]
- include: dhclient-hooks.yml
when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
tags: [bootstrap-os, resolvconf]
- include: dhclient-hooks-undo.yml
when: dns_mode != 'none' and resolvconf_mode != 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
when:
- dns_mode != 'none'
- resolvconf_mode != 'host_resolvconf'
- not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
tags: [bootstrap-os, resolvconf]
- name: Check if we are running inside a Azure VM
@@ -218,7 +246,7 @@
tags: bootstrap-os
- include: growpart-azure-centos-7.yml
when: azure_check.stat.exists and
ansible_distribution in ["CentOS","RedHat"]
when:
- azure_check.stat.exists
- ansible_distribution in ["CentOS","RedHat"]
tags: bootstrap-os

View File

@@ -16,7 +16,13 @@
{{dns_domain}}.{{d}}./{{d}}.{{d}}./com.{{d}}./
{%- endfor %}
default_resolver: >-
{%- if cloud_provider is defined and cloud_provider == 'gce' -%}169.254.169.254{%- else -%}8.8.8.8{%- endif -%}
{%- if cloud_provider is defined and cloud_provider == 'gce' -%}
169.254.169.254
{%- elif cloud_provider is defined and cloud_provider == 'aws' -%}
169.254.169.253
{%- else -%}
8.8.8.8
{%- endif -%}
- name: check if kubelet is configured
stat:
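The refactored default_resolver adds an AWS branch: 169.254.169.253 is the VPC-provided resolver, as 169.254.169.254 is GCE's metadata resolver. A sketch of the rendered fact per provider:

# cloud_provider: gce -> default_resolver: 169.254.169.254
# cloud_provider: aws -> default_resolver: 169.254.169.253
# otherwise           -> default_resolver: 8.8.8.8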

View File

@@ -85,7 +85,7 @@ if [ -n "$MASTERS" ]; then
cn="${host%%.*}"
# admin key
openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}" > /dev/null 2>&1
openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}/O=system:masters" > /dev/null 2>&1
openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 > /dev/null 2>&1
done
fi
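Adding /O=system:masters to the admin CSR subject places the certificate holder in the system:masters group, which Kubernetes' default RBAC bindings map to cluster-admin. A hypothetical one-off check of a generated cert (path and host name are illustrative):

- name: verify admin cert subject (hypothetical)
  command: "openssl x509 -in {{ kube_cert_dir }}/admin-node1.pem -noout -subject"
  register: admin_subject
  changed_when: false
# expected output resembles: subject= /CN=kube-admin-node1/O=system:masters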

View File

@@ -1,4 +1,11 @@
---
- include: sync_kube_master_certs.yml
when: inventory_hostname in groups['kube-master']
tags: k8s-secrets
- include: sync_kube_node_certs.yml
when: inventory_hostname in groups['k8s-cluster']
tags: k8s-secrets
- name: gen_certs_vault | Read in the local credentials
command: cat /etc/vault/roles/kube/userpass
@@ -15,7 +22,7 @@
url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ kube_vault_creds.username }}"
headers:
Accept: application/json
Content-Type: application/json
method: POST
body_format: json
body:
@@ -54,7 +61,7 @@
}}
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_headers: "{{ kube_vault_headers }}"
issue_cert_hosts: "{{ groups['kube-master'] }}"
issue_cert_ip_sans: >-
[
@@ -75,7 +82,7 @@
issue_cert_copy_ca: "{{ item == kube_node_certs_needed|first }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_headers: "{{ kube_vault_headers }}"
issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
issue_cert_path: "{{ item }}"
issue_cert_role: kube

View File

@@ -74,13 +74,5 @@
- include: "gen_certs_{{ cert_management }}.yml"
tags: k8s-secrets
- include: sync_kube_master_certs.yml
when: cert_management == "vault" and inventory_hostname in groups['kube-master']
tags: k8s-secrets
- include: sync_kube_node_certs.yml
when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
tags: k8s-secrets
- include: gen_tokens.yml
tags: k8s-secrets

View File

@@ -6,7 +6,7 @@
with_items: "{{ groups['kube-master'] }}"
- include: ../../../vault/tasks/shared/sync_file.yml
vars:
sync_file: "{{ item }}"
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"
@@ -38,7 +38,7 @@
set_fact:
kube_api_certs_needed: "{{ item.path }}"
with_items: "{{ sync_file_results|d([]) }}"
when: "{{ item.no_srcs }}"
when: item.no_srcs
- name: sync_kube_master_certs | Unset sync_file_results after apiserver cert
set_fact:
@@ -46,7 +46,7 @@
- include: ../../../vault/tasks/shared/sync_file.yml
vars:
sync_file: ca.pem
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"

View File

@@ -56,7 +56,7 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when: "{{ overwrite_hyperkube_cni|bool }}"
when: overwrite_hyperkube_cni|bool
tags: [hyperkube, upgrade]
- name: Calico | Set cni directory permissions

View File

@@ -0,0 +1,2 @@
---
flush_iptables: true
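flush_iptables gates the new "flush iptables" task in reset.yml further below; a deployment that wants to keep its firewall rules during a reset could override it, e.g.:

# hypothetical override
# ansible-playbook reset.yml -e flush_iptables=false
flush_iptables: false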

View File

@@ -8,6 +8,7 @@
- kubelet
- etcd
failed_when: false
tags: ['services']
- name: reset | remove services
file:
@@ -17,6 +18,7 @@
- kubelet
- etcd
register: services_removed
tags: ['services']
- name: reset | remove docker dropins
file:
@@ -26,6 +28,7 @@
- docker-dns.conf
- docker-options.conf
register: docker_dropins_removed
tags: ['docker']
- name: reset | systemctl daemon-reload
command: systemctl daemon-reload
@@ -33,25 +36,31 @@
- name: reset | remove all containers
shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
tags: ['docker']
- name: reset | restart docker if needed
service:
name: docker
state: restarted
when: docker_dropins_removed.changed
tags: ['docker']
- name: reset | gather mounted kubelet dirs
shell: mount | grep /var/lib/kubelet | awk '{print $3}' | tac
check_mode: no
register: mounted_dirs
tags: ['mounts']
- name: reset | unmount kubelet dirs
command: umount {{item}}
with_items: '{{ mounted_dirs.stdout_lines }}'
tags: ['mounts']
- name: flush iptables
iptables:
flush: yes
when: flush_iptables|bool
tags: ['iptables']
- name: reset | delete some files and directories
file:
@@ -74,6 +83,8 @@
- /etc/dhcp/dhclient.d/zdnsupdate.sh
- /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
- "{{ bin_dir }}/kubelet"
tags: ['files']
- name: reset | remove dns settings from dhclient.conf
blockinfile:
@@ -85,6 +96,7 @@
with_items:
- /etc/dhclient.conf
- /etc/dhcp/dhclient.conf
tags: ['files', 'dns']
- name: reset | remove host entries from /etc/hosts
blockinfile:
@@ -92,6 +104,7 @@
state: absent
follow: yes
marker: "# Ansible inventory hosts {mark}"
tags: ['files', 'dns']
- name: reset | Restart network
service:
@@ -103,3 +116,4 @@
{%- endif %}
state: restarted
when: ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"]
tags: ['services', 'network']

View File

@@ -3,7 +3,7 @@
- name: bootstrap/start_vault_temp | Ensure vault-temp isn't already running
shell: if docker rm -f {{ vault_temp_container_name }} 2>&1 1>/dev/null;then echo true;else echo false;fi
register: vault_temp_stop_check
changed_when: "{{ 'true' in vault_temp_stop_check.stdout }}"
changed_when: "'true' in vault_temp_stop_check.stdout"
- name: bootstrap/start_vault_temp | Start single node Vault with file backend
command: >
@@ -13,6 +13,10 @@
-v /etc/vault:/etc/vault
{{ vault_image_repo }}:{{ vault_version }} server
#FIXME(mattymo): Crashes on first start with aufs docker storage. See hashicorp/docker-vault#19
- name: bootstrap/start_vault_temp | Start again single node Vault with file backend
command: docker start {{ vault_temp_container_name }}
- name: bootstrap/start_vault_temp | Initialize vault-temp
uri:
url: "http://localhost:{{ vault_port }}/v1/sys/init"