diff --git a/README.md b/README.md
index 0846f1f3e..baa1a0d45 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ To deploy the cluster you can use :
For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed:
- python -v && pip -v
+ python -V && pip -V
If this returns the version of the software, you're good to go. If not, download and install Python from here
Install the necessary requirements
@@ -103,6 +103,7 @@ Supported Components
- [weave](https://github.com/weaveworks/weave) v2.3.0
- Application
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.15.0
+ - [cert-manager](https://github.com/jetstack/cert-manager/releases) v0.3.0
Note: kubernetes doesn't support newer docker versions. Among other things, kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
diff --git a/cluster.yml b/cluster.yml
index 9bfd2ff42..c77e9e1b5 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -51,7 +51,7 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: true }
+ - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
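
Note: with this change the etcd role only sets up the events cluster when the new flag is enabled. A minimal group-vars sketch for turning it on (the file path below is just an assumption; any inventory group_vars file works):

```yaml
# inventory/mycluster/group_vars/all.yml (assumed location)
# Store Kubernetes events in a dedicated etcd cluster
etcd_events_cluster_enabled: true
```
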
diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 1deb88776..92689eee5 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -52,13 +52,13 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
## dns_mode
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
-#### dnsmasq_kubedns (default)
+#### dnsmasq_kubedns
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``.
-#### kubedns
+#### kubedns (default)
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.
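
Note: as a quick illustration of the new default, a group-vars override switching back to the dnsmasq-fronted mode could look like the sketch below (the upstream servers are only example values):

```yaml
# k8s-cluster group vars (sketch)
dns_mode: dnsmasq_kubedns
upstream_dns_servers:
  - 8.8.8.8
  - 1.1.1.1
```
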
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index c523dd5ec..68ed6c1bc 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -140,6 +140,9 @@ dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
+## Uncomment to set the docker daemon --iptables option to true
+#docker_iptables_enabled: "true"
+
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 1013523c9..4a3b24f80 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -17,7 +17,7 @@ dockerproject_repo_key_info:
dockerproject_repo_info:
repos:
-docker_dns_servers_strict: yes
+docker_dns_servers_strict: true
docker_container_storage_setup: false
@@ -40,3 +40,6 @@ dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
+
+# Used to set the docker daemon --iptables option
+docker_iptables_enabled: "false"
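
Note: both knobs touched in this file can be overridden from inventory vars. A minimal sketch, assuming the usual all.yml layout:

```yaml
# group_vars/all.yml (assumed location)
# Let the docker daemon manage iptables rules itself
docker_iptables_enabled: "true"
# Accept more than 3 nameservers; only the first 3 will be used
docker_dns_servers_strict: false
```
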
diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml
index 6fe516c2d..57e12a8ed 100644
--- a/roles/docker/tasks/set_facts_dns.yml
+++ b/roles/docker/tasks/set_facts_dns.yml
@@ -56,7 +56,7 @@
- name: check number of nameservers
fail:
- msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
+ msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in all.yml and we will only use the first 3."
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
- name: rtrim number of nameservers to 3
diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2
index f1587ec4d..296f5a8a1 100644
--- a/roles/docker/templates/docker-options.conf.j2
+++ b/roles/docker/templates/docker-options.conf.j2
@@ -1,5 +1,5 @@
[Service]
-Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables=false"
+Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
{% if docker_mount_flags is defined and docker_mount_flags != "" %}
MountFlags={{ docker_mount_flags }}
{% endif %}
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index f07d40483..8eee9fd2f 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -132,14 +132,14 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
-elasticsearch_version: "v2.4.1"
-elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
+elasticsearch_version: "v5.6.4"
+elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
elasticsearch_image_tag: "{{ elasticsearch_version }}"
-fluentd_version: "1.22"
-fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
+fluentd_version: "v2.0.4"
+fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
fluentd_image_tag: "{{ fluentd_version }}"
-kibana_version: "v4.6.1"
-kibana_image_repo: "gcr.io/google_containers/kibana"
+kibana_version: "5.6.4"
+kibana_image_repo: "docker.elastic.co/kibana/kibana"
kibana_image_tag: "{{ kibana_version }}"
helm_version: "v2.9.1"
helm_image_repo: "lachlanevenson/k8s-helm"
@@ -160,11 +160,9 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin
ingress_nginx_controller_image_tag: "0.15.0"
ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.2.4"
+cert_manager_version: "v0.3.0"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
-cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
-cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
downloads:
netcheck_server:
@@ -583,14 +581,6 @@ downloads:
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
groups:
- kube-node
- cert_manager_ingress_shim:
- enabled: "{{ cert_manager_enabled }}"
- container: true
- repo: "{{ cert_manager_ingress_shim_image_repo }}"
- tag: "{{ cert_manager_ingress_shim_image_tag }}"
- sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
- groups:
- - kube-node
download_defaults:
container: false
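
Note: if the bumped EFK images need to be pinned differently (e.g. pulled from an internal mirror), the usual download-role overrides still apply. A sketch with hypothetical registry values:

```yaml
# group_vars/k8s-cluster.yml (sketch, registry values are examples only)
elasticsearch_image_repo: "registry.example.com/elasticsearch"
elasticsearch_version: "v5.6.4"
fluentd_image_repo: "registry.example.com/fluentd-elasticsearch"
fluentd_version: "v2.0.4"
kibana_image_repo: "registry.example.com/kibana/kibana"
kibana_version: "5.6.4"
```
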
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 041214903..750b710f2 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -3,6 +3,9 @@
etcd_cluster_setup: true
etcd_events_cluster_setup: false
+# Set to true to store k8s events in a separate etcd cluster
+etcd_events_cluster_enabled: false
+
etcd_backup_prefix: "/var/backups"
etcd_data_dir: "/var/lib/etcd"
etcd_events_data_dir: "/var/lib/etcd-events"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
index dd5b9b630..4b9ab0067 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
@@ -1,9 +1,12 @@
---
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: efk
namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: efk
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
index 75d75f650..01e774e96 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
@@ -6,3 +6,4 @@ metadata:
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index 4cdcf33ad..51666c1f2 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -1,15 +1,17 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: extensions/v1beta1
-kind: Deployment
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
metadata:
- name: elasticsearch-logging-v1
+ name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
spec:
+ serviceName: elasticsearch-logging
replicas: 2
selector:
matchLabels:
@@ -53,4 +55,10 @@ spec:
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
+ initContainers:
+ - image: alpine:3.6
+ command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
+ name: elasticsearch-logging-init
+ securityContext:
+ privileged: true
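
Note: the privileged init container exists only to raise vm.max_map_count, which Elasticsearch 5.x requires. Where privileged containers are not allowed, an equivalent host-level Ansible task could be used instead — a sketch, not part of this change:

```yaml
- name: Raise vm.max_map_count for Elasticsearch
  sysctl:
    name: vm.max_map_count
    value: 262144
    state: present
    reload: yes
```
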
diff --git a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
index e8d93732c..0305a5f7a 100644
--- a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
@@ -1,7 +1,7 @@
---
fluentd_cpu_limit: 0m
-fluentd_mem_limit: 200Mi
+fluentd_mem_limit: 500Mi
fluentd_cpu_requests: 100m
fluentd_mem_requests: 200Mi
-fluentd_config_dir: /etc/kubernetes/fluentd
-fluentd_config_file: fluentd.conf
+fluentd_config_dir: /etc/fluent/config.d
+# fluentd_config_file: fluentd.conf
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
index b7de44dc0..0b0229f69 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
@@ -1,10 +1,19 @@
+---
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluentd-config
namespace: "kube-system"
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
data:
- {{ fluentd_config_file }}: |
+ system.conf: |-
+
+ root_dir /tmp/fluentd-buffers/
+
+
+ containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
@@ -18,7 +27,6 @@ data:
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
- # Maintainer: Jimmi Dyson
#
# Example
# =======
@@ -99,63 +107,87 @@ data:
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
- #
- # TODO: Propagate the labels associated with a container along with its logs
- # so users can query logs using labels as well as or instead of the pod name
- # and container name. This is simply done via configuration of the Kubernetes
- # fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
- # problem yet to be solved as secrets are not usable in static pods which the fluentd
- # pod must be until a per-node controller is available in Kubernetes.
- # Prevent fluentd from handling records containing its own logs. Otherwise
- # it can lead to an infinite loop, when error in sending one message generates
- # another message which also fails to be sent and so on.
-
- type null
-
- # Example:
+
+ # Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+ # CRI Log Example:
+ # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
- type tail
+ @id fluentd-containers.log
+ @type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
- tag kubernetes.*
- format json
+ tag raw.kubernetes.*
read_from_head true
+
+ @type multi_format
+
+ format json
+ time_key time
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+
+
+ format /^(?
+
+
+ # Detect exceptions in the log output and forward them as one log entry.
+
+ @id raw.kubernetes
+ @type detect_exceptions
+ remove_tag_prefix raw
+ message log
+ stream stream
+ multiline_flush_interval 5
+ max_bytes 500000
+ max_lines 1000
+
+
+ system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
- type tail
+ @id minion
+ @type tail
format /^(?
+
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
- type tail
+ @id startupscript.log
+ @type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
+
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+ # TODO(random-liu): Remove this after cri container runtime rolls out.
- type tail
+ @id docker.log
+ @type tail
format /^time="(?
+
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
- type tail
+ @id etcd.log
+ @type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
@@ -163,13 +195,16 @@ data:
pos_file /var/log/es-etcd.log.pos
tag etcd
+
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
+
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
- type tail
+ @id kubelet.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -179,10 +214,12 @@ data:
pos_file /var/log/es-kubelet.log.pos
tag kubelet
+
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
- type tail
+ @id kube-proxy.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -192,10 +229,12 @@ data:
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
+
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
- type tail
+ @id kube-apiserver.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -205,10 +244,12 @@ data:
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
+
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
- type tail
+ @id kube-controller-manager.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -218,10 +259,12 @@ data:
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
+
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
- type tail
+ @id kube-scheduler.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -231,10 +274,12 @@ data:
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
+
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
- type tail
+ @id rescheduler.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -244,10 +289,12 @@ data:
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
+
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
- type tail
+ @id glbc.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -257,10 +304,12 @@ data:
pos_file /var/log/es-glbc.log.pos
tag glbc
+
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
- type tail
+ @id cluster-autoscaler.log
+ @type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -270,59 +319,123 @@ data:
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
+
+ # Logs from systemd-journal for interesting services.
+ # TODO(random-liu): Remove this after cri container runtime rolls out.
+
+ @id journald-docker
+ @type systemd
+ filters [{ "_SYSTEMD_UNIT": "docker.service" }]
+
+ @type local
+ persistent true
+
+ read_from_head true
+ tag docker
+
+
+ #
+ # @id journald-container-runtime
+ # @type systemd
+ # filters [{ "_SYSTEMD_UNIT": "{% raw %}{{ container_runtime }} {% endraw %}.service" }]
+ #
+ # @type local
+ # persistent true
+ #
+ # read_from_head true
+ # tag container-runtime
+ #
+
+
+ @id journald-kubelet
+ @type systemd
+ filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+
+ @type local
+ persistent true
+
+ read_from_head true
+ tag kubelet
+
+
+
+ @id journald-node-problem-detector
+ @type systemd
+ filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
+
+ @type local
+ persistent true
+
+ read_from_head true
+ tag node-problem-detector
+
+
+ forward.input.conf: |-
+ # Takes the messages sent over TCP
+
+ @type forward
+
+
+ monitoring.conf: |-
+ # Prometheus Exporter Plugin
+ # input plugin that exports metrics
+
+ @type prometheus
+
+
+
+ @type monitor_agent
+
+
+ # input plugin that collects metrics from MonitorAgent
+
+ @type prometheus_monitor
+
+ host ${hostname}
+
+
+
+ # input plugin that collects metrics for output plugin
+
+ @type prometheus_output_monitor
+
+ host ${hostname}
+
+
+
+ # input plugin that collects metrics for in_tail plugin
+
+ @type prometheus_tail_monitor
+
+ host ${hostname}
+
+
+
+ output.conf: |-
+ # Enriches records with Kubernetes metadata
- type kubernetes_metadata
+ @type kubernetes_metadata
- ## Prometheus Exporter Plugin
- ## input plugin that exports metrics
- #
- # type prometheus
- #
- #
- # type monitor_agent
- #
- #
- # type forward
- #
- ## input plugin that collects metrics from MonitorAgent
- #
- # @type prometheus_monitor
- #
- # host ${hostname}
- #
- #
- ## input plugin that collects metrics for output plugin
- #
- # @type prometheus_output_monitor
- #
- # host ${hostname}
- #
- #
- ## input plugin that collects metrics for in_tail plugin
- #
- # @type prometheus_tail_monitor
- #
- # host ${hostname}
- #
- #
+
- type elasticsearch
- user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
- password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
- log_level info
- include_tag_key true
- host elasticsearch-logging
- port 9200
- logstash_format true
- # Set the chunk limit the same as for fluentd-gcp.
- buffer_chunk_limit 2M
- # Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
- buffer_queue_limit 32
- flush_interval 5s
- # Never wait longer than 5 minutes between retries.
- max_retry_wait 30
- # Disable the limit on the number of retries (retry forever).
- disable_retry_limit
- # Use multiple threads for processing.
- num_threads 8
-
+ @id elasticsearch
+ @type elasticsearch
+ @log_level info
+ include_tag_key true
+ host elasticsearch-logging
+ port 9200
+ logstash_format true
+
+ @type file
+ path /var/log/fluentd-buffers/kubernetes.system.buffer
+ flush_mode interval
+ retry_type exponential_backoff
+ flush_thread_count 2
+ flush_interval 5s
+ retry_forever
+ retry_max_interval 30
+ chunk_limit_size 2M
+ queue_limit_length 8
+ overflow_action block
+
+
\ No newline at end of file
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
index f23a8851c..3a911cf38 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
@@ -1,32 +1,42 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: extensions/v1beta1
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
+apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: "fluentd-es-v{{ fluentd_version }}"
+ name: "fluentd-es-{{ fluentd_version }}"
namespace: "kube-system"
labels:
k8s-app: fluentd-es
+ version: "{{ fluentd_version }}"
kubernetes.io/cluster-service: "true"
- version: "v{{ fluentd_version }}"
+ addonmanager.kubernetes.io/mode: Reconcile
spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-es
+ version: "{{ fluentd_version }}"
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
- version: "v{{ fluentd_version }}"
+ version: "{{ fluentd_version }}"
+ # This annotation ensures that fluentd does not get evicted if the node
+ # supports critical pod annotation based priority scheme.
+ # Note that this does not guarantee admission on the nodes (#40573).
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
- tolerations:
- - effect: NoSchedule
- operator: Exists
+ priorityClassName: system-node-critical
+{% if rbac_enabled %}
+ serviceAccountName: efk
+{% endif %}
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
- command:
- - '/bin/sh'
- - '-c'
- - '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
+ env:
+ - name: FLUENTD_ARGS
+ value: "--no-supervisor -q"
resources:
limits:
{% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
@@ -34,27 +44,26 @@ spec:
{% endif %}
memory: {{ fluentd_mem_limit }}
requests:
- cpu: {{ fluentd_cpu_requests }}
+ cpu: {{ fluentd_cpu_requests }}
memory: {{ fluentd_mem_requests }}
volumeMounts:
- name: varlog
mountPath: /var/log
- - name: dockercontainers
+ - name: varlibdockercontainers
mountPath: "{{ docker_daemon_graph }}/containers"
readOnly: true
- - name: config
+ - name: config-volume
mountPath: "{{ fluentd_config_dir }}"
+ nodeSelector:
+ beta.kubernetes.io/fluentd-ds-ready: "true"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- - name: dockercontainers
+ - name: varlibdockercontainers
hostPath:
path: {{ docker_daemon_graph }}/containers
- - name: config
- configMap:
- name: fluentd-config
-{% if rbac_enabled %}
- serviceAccountName: efk
-{% endif %}
+ - name: config-volume
+ configMap:
+ name: fluentd-config
\ No newline at end of file
diff --git a/roles/kubernetes-apps/efk/kibana/defaults/main.yml b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
index 0651a032d..c76e3e710 100644
--- a/roles/kubernetes-apps/efk/kibana/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
@@ -4,3 +4,4 @@ kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M
kibana_service_port: 5601
+kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"
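
Note: the deployment now always passes this value as SERVER_BASEPATH, so deployments exposing Kibana some other way should override it. A hypothetical ingress-based setup:

```yaml
# Kibana served behind an ingress at /kibana (example path only)
kibana_base_url: "/kibana"
```
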
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
index c5603d389..880482d4d 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
@@ -1,6 +1,6 @@
---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-kibana/kibana-controller.yaml
-apiVersion: extensions/v1beta1
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
+apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
@@ -36,10 +36,12 @@ spec:
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
-{% if kibana_base_url is defined and kibana_base_url != "" %}
- - name: "KIBANA_BASE_URL"
+ - name: "SERVER_BASEPATH"
value: "{{ kibana_base_url }}"
-{% endif %}
+ - name: XPACK_MONITORING_ENABLED
+ value: "false"
+ - name: XPACK_SECURITY_ENABLED
+ value: "false"
ports:
- containerPort: 5601
name: ui
diff --git a/roles/kubernetes-apps/helm/defaults/main.yml b/roles/kubernetes-apps/helm/defaults/main.yml
index 0bc22739c..2e8174521 100644
--- a/roles/kubernetes-apps/helm/defaults/main.yml
+++ b/roles/kubernetes-apps/helm/defaults/main.yml
@@ -18,3 +18,6 @@ helm_skip_refresh: false
# Override values for the Tiller Deployment manifest.
# tiller_override: "key1=val1,key2=val2"
+
+# Limit the maximum number of revisions saved per release. Use 0 for no limit.
+# tiller_max_history: 0
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index e7b387944..7e400d3fe 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -34,6 +34,7 @@
{% if rbac_enabled %} --service-account=tiller{% endif %}
{% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
{% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
+ {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)
- name: Helm | Set up bash completion
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
index 0d27800b3..3b154656f 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: certificates.certmanager.k8s.io
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
index 8ac64e35f..38f68cb2f 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: clusterissuers.certmanager.k8s.io
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
index ce6aa48bf..e7f7aa47b 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
rules:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
index d1e26e462..6cf3c2a31 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
roleRef:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
index 7fe98407b..1760ed4b8 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2
@@ -6,11 +6,15 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:
replicas: 1
+ selector:
+ matchLabels:
+ k8s-app: cert-manager
+ release: cert-manager
template:
metadata:
labels:
@@ -25,6 +29,7 @@ spec:
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- --cluster-resource-namespace=$(POD_NAMESPACE)
+ - --leader-election-namespace=$(POD_NAMESPACE)
env:
- name: POD_NAMESPACE
valueFrom:
@@ -37,15 +42,3 @@ spec:
limits:
cpu: {{ cert_manager_cpu_limits }}
memory: {{ cert_manager_memory_limits }}
-
- - name: ingress-shim
- image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
- imagePullPolicy: {{ k8s_image_pull_policy }}
- resources:
- requests:
- cpu: {{ cert_manager_cpu_requests }}
- memory: {{ cert_manager_memory_requests }}
- limits:
- cpu: {{ cert_manager_cpu_limits }}
- memory: {{ cert_manager_memory_limits }}
-
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
index a11386d10..041b82559 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2
@@ -5,7 +5,7 @@ metadata:
name: issuers.certmanager.k8s.io
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
index 1a67bf6a4..b96c97a2a 100644
--- a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2
@@ -6,6 +6,6 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
- chart: cert-manager-0.2.8
+ chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
index 8553ec5e2..05a3d944e 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml
@@ -1,2 +1,7 @@
---
persistent_volumes_enabled: false
+storage_classes:
+ - name: standard
+ is_default: true
+ parameters:
+ availability: nova
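
Note: the new storage_classes list can carry multiple Cinder classes. A hypothetical override adding a second, non-default class:

```yaml
storage_classes:
  - name: standard
    is_default: true
    parameters:
      availability: nova
  - name: ssd                 # hypothetical extra class
    is_default: false
    parameters:
      availability: nova
      type: ssd               # assumes a matching Cinder volume type exists
```
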
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
index e4d1b138c..80d5fdd29 100644
--- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml
@@ -1,21 +1,19 @@
---
- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
template:
- src: "{{item.file}}"
- dest: "{{kube_config_dir}}/{{item.file}}"
- with_items:
- - {file: openstack-storage-class.yml, type: StorageClass, name: storage-class }
+ src: "openstack-storage-class.yml.j2"
+ dest: "{{kube_config_dir}}/openstack-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
kube:
- name: "{{item.item.name}}"
+ name: storage-class
kubectl: "{{bin_dir}}/kubectl"
- resource: "{{item.item.type}}"
- filename: "{{kube_config_dir}}/{{item.item.file}}"
+ resource: StorageClass
+ filename: "{{kube_config_dir}}/openstack-storage-class.yml"
state: "latest"
- with_items: "{{ manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
+ - manifests.changed
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml
deleted file mode 100644
index 02d39dd97..000000000
--- a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: standard
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
-provisioner: kubernetes.io/cinder
-parameters:
- availability: nova
diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2 b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2
new file mode 100644
index 000000000..629c1f0a3
--- /dev/null
+++ b/roles/kubernetes-apps/persistent_volumes/openstack/templates/openstack-storage-class.yml.j2
@@ -0,0 +1,14 @@
+{% for class in storage_classes %}
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: "{{ class.name }}"
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
+provisioner: kubernetes.io/cinder
+parameters:
+{% for key, value in (class.parameters | default({})).items() %}
+ "{{ key }}": "{{ value }}"
+{% endfor %}
+{% endfor %}
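
Note: with the default storage_classes defined above, this template renders to roughly the same manifest the deleted static file contained:

```yaml
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: "standard"
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
  "availability": "nova"
```
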
diff --git a/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
index 6616adc6f..b87ec971b 100644
--- a/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
+++ b/roles/kubernetes/master/templates/kube-scheduler-policy.yaml.j2
@@ -2,17 +2,27 @@
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
- {"name" : "PodFitsHostPorts"},
- {"name" : "PodFitsResources"},
+ {"name" : "MaxEBSVolumeCount"},
+ {"name" : "MaxGCEPDVolumeCount"},
+ {"name" : "MaxAzureDiskVolumeCount"},
+ {"name" : "MatchInterPodAffinity"},
{"name" : "NoDiskConflict"},
- {"name" : "MatchNodeSelector"},
- {"name" : "HostName"}
+ {"name" : "GeneralPredicates"},
+ {"name" : "CheckNodeMemoryPressure"},
+ {"name" : "CheckNodeDiskPressure"},
+ {"name" : "CheckNodePIDPressure"},
+ {"name" : "CheckNodeCondition"},
+ {"name" : "PodToleratesNodeTaints"},
+ {"name" : "CheckVolumeBinding"}
],
"priorities" : [
+ {"name" : "SelectorSpreadPriority", "weight" : 1},
+ {"name" : "InterPodAffinityPriority", "weight" : 1},
{"name" : "LeastRequestedPriority", "weight" : 1},
{"name" : "BalancedResourceAllocation", "weight" : 1},
- {"name" : "ServiceSpreadingPriority", "weight" : 1},
- {"name" : "EqualPriority", "weight" : 1}
+ {"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
+ {"name" : "NodeAffinityPriority", "weight" : 1},
+ {"name" : "TaintTolerationPriority", "weight" : 1}
],
"hardPodAffinitySymmetricWeight" : 10
}
diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
index 475d2d0ae..32e6071b6 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
@@ -39,7 +39,7 @@ apiServerExtraArgs:
{% if kube_version | version_compare('v1.9', '>=') %}
endpoint-reconciler-type: lease
{% endif %}
-{% if etcd_events_cluster_setup %}
+{% if etcd_events_cluster_enabled %}
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index b638ff457..c688e1285 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -30,7 +30,7 @@ spec:
- apiserver
- --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
- --etcd-servers={{ etcd_access_addresses }}
-{% if etcd_events_cluster_setup %}
+{% if etcd_events_cluster_enabled %}
- --etcd-servers-overrides=/events#{{ etcd_events_access_addresses }}
{% endif %}
{% if kube_version | version_compare('v1.9', '<') %}
diff --git a/roles/kubernetes/node/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
index d82d72bf8..1383f78bb 100644
--- a/roles/kubernetes/node/templates/vsphere-cloud-config.j2
+++ b/roles/kubernetes/node/templates/vsphere-cloud-config.j2
@@ -14,6 +14,9 @@ server = "{{ vsphere_vcenter_ip }}"
{% if vsphere_vm_uuid is defined and vsphere_vm_uuid != "" %}
vm-uuid = "{{ vsphere_vm_uuid }}"
{% endif %}
+{% if vsphere_vm_name is defined and vsphere_vm_name != "" %}
+vm-name = "{{ vsphere_vm_name }}"
+{% endif %}
{% endif %}
{% if kube_version | version_compare('v1.9.2', '>=') %}
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index fcbea6404..75fafaf56 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -1,4 +1,8 @@
---
+# Disable swap
+- import_tasks: swapoff.yml
+ when: disable_swap
+
- import_tasks: verify-settings.yml
tags:
- asserts
diff --git a/roles/kubernetes/preinstall/tasks/swapoff.yml b/roles/kubernetes/preinstall/tasks/swapoff.yml
new file mode 100644
index 000000000..345e75825
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/swapoff.yml
@@ -0,0 +1,10 @@
+---
+- name: Remove swapfile from /etc/fstab
+ mount:
+ name: swap
+ fstype: swap
+ state: absent
+
+- name: Disable swap
+ command: swapoff -a
+ when: ansible_swaptotal_mb > 0
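
Note: swap handling stays opt-in; turning it on is a one-line override (file path assumed):

```yaml
# group_vars/all.yml (assumed location)
disable_swap: true
```
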
diff --git a/roles/kubernetes/preinstall/tasks/verify-settings.yml b/roles/kubernetes/preinstall/tasks/verify-settings.yml
index 2e50ff2cb..18cec48d8 100644
--- a/roles/kubernetes/preinstall/tasks/verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/verify-settings.yml
@@ -17,13 +17,13 @@
- name: Stop if unknown network plugin
assert:
- that: network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud']
- when: network_plugin is defined
+ that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'contiv']
+ when: kube_network_plugin is defined
ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if incompatible network plugin and cloudprovider
assert:
- that: network_plugin != 'calico'
+ that: kube_network_plugin != 'calico'
msg: "Azure and Calico are not compatible. See https://github.com/projectcalico/calicoctl/issues/949 for details."
when: cloud_provider is defined and cloud_provider == 'azure'
ignore_errors: "{{ ignore_assert_errors }}"
diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index e8d3e9e94..63b7e7db2 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -33,14 +33,14 @@
'{{ kube_cert_dir }}/front-proxy-client-key.pem',
'{{ kube_cert_dir }}/service-account-key.pem',
{% for host in groups['kube-master'] %}
- '{{ kube_cert_dir }}/admin-{{ host }}.pem'
+ '{{ kube_cert_dir }}/admin-{{ host }}.pem',
'{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
- {% endfor %}]
+ {% endfor %},
{% for host in groups['k8s-cluster'] %}
- '{{ kube_cert_dir }}/node-{{ host }}.pem'
- '{{ kube_cert_dir }}/node-{{ host }}-key.pem'
- '{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem'
+ '{{ kube_cert_dir }}/node-{{ host }}.pem',
+ '{{ kube_cert_dir }}/node-{{ host }}-key.pem',
+ '{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem',
'{{ kube_cert_dir }}/kube-proxy-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 61fb62a09..2394ec4b7 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -12,6 +12,8 @@ kube_api_anonymous_auth: false
# Default value, but will be set to true automatically if detected
is_atomic: false
+# Optionally disable swap
+disable_swap: false
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.10.4
@@ -210,7 +212,7 @@ authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet’s HTTPS endpoint
-kubelet_authentication_token_webhook: false
+kubelet_authentication_token_webhook: true
# When enabled, access to the kubelet API requires authorization by delegation to the API server
kubelet_authorization_mode_webhook: false
@@ -314,7 +316,7 @@ kube_apiserver_client_key: |-
{%- endif %}
# Set to true to deploy etcd-events cluster
-etcd_events_cluster_setup: false
+etcd_events_cluster_enabled: false
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index 857ebd11a..553eb6753 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -51,3 +51,5 @@ rbac_resources:
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
# calico_ip_auto_method: "interface=eth.*"
+
+calico_baremetal_nodename: "{{ inventory_hostname }}"
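
Note: the new variable lets bare-metal deployments pin the Calico node name to something other than the inventory hostname, for example the machine's FQDN (just an illustration):

```yaml
calico_baremetal_nodename: "{{ ansible_fqdn }}"
```
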
diff --git a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
index 6dd51e912..443e3b43b 100644
--- a/roles/network_plugin/calico/templates/cni-calico.conflist.j2
+++ b/roles/network_plugin/calico/templates/cni-calico.conflist.j2
@@ -6,7 +6,7 @@
{% if cloud_provider is defined %}
"nodename": "{{ calico_kubelet_name.stdout }}",
{% else %}
- "nodename": "{{ inventory_hostname }}",
+ "nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
"type": "calico",
"etcd_endpoints": "{{ etcd_access_addresses }}",
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index 395f9986b..30c75d1b4 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -4,5 +4,6 @@
command: kubectl delete node {{ item }}
with_items:
- "{{ groups['kube-node'] }}"
- delegate_to: "{{ groups['kube-master'][0] }}"
+ delegate_to: "{{ groups['kube-master']|first }}"
+ run_once: true
ignore_errors: yes
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 12091917a..836309bbf 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -11,5 +11,6 @@
with_items:
- "{{ groups['kube-node'] }}"
failed_when: false
- delegate_to: "{{ groups['kube-master'][0] }}"
+ delegate_to: "{{ groups['kube-master']|first }}"
+ run_once: true
ignore_errors: yes
diff --git a/roles/rkt/tasks/main.yml b/roles/rkt/tasks/main.yml
index ab9571b13..d84a2165d 100644
--- a/roles/rkt/tasks/main.yml
+++ b/roles/rkt/tasks/main.yml
@@ -2,3 +2,4 @@
- name: Install rkt
import_tasks: install.yml
+ when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
diff --git a/roles/vault/templates/rkt.service.j2 b/roles/vault/templates/rkt.service.j2
index 6a4c3d77a..e92221161 100644
--- a/roles/vault/templates/rkt.service.j2
+++ b/roles/vault/templates/rkt.service.j2
@@ -12,26 +12,34 @@ LimitNOFILE=40000
# Container has the following internal mount points:
# /vault/file/ # File backend storage location
# /vault/logs/ # Log files
+ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/vault.uuid
+
ExecStart=/usr/bin/rkt run \
---insecure-options=image \
---volume hosts,kind=host,source=/etc/hosts,readOnly=true \
---mount volume=hosts,target=/etc/hosts \
---volume=volume-vault-file,kind=host,source=/var/lib/vault \
---volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
---volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
---mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
---volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
---mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
---volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
---mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
---volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
---mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
---volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
---mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
-docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
---name={{ vault_container_name }} --net=host \
---caps-retain=CAP_IPC_LOCK \
---exec vault -- server --config={{ vault_config_dir }}/config.json
+ --insecure-options=image \
+ --volume hosts,kind=host,source=/etc/hosts,readOnly=true \
+ --mount volume=hosts,target=/etc/hosts \
+ --volume=volume-vault-file,kind=host,source=/var/lib/vault \
+ --volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
+ --volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
+ --mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
+ --volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
+ --mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
+ --volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
+ --mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
+ --volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
+ --mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
+ --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
+ --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
+ docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
+ --uuid-file-save=/var/run/vault.uuid \
+ --name={{ vault_container_name }} \
+ --net=host \
+ --caps-retain=CAP_IPC_LOCK \
+ --exec vault -- \
+ server \
+ --config={{ vault_config_dir }}/config.json
+
+ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/vault.uuid
[Install]
WantedBy=multi-user.target