diff --git a/README.md b/README.md
index 0846f1f3e..baa1a0d45 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ To deploy the cluster you can use:
 For Vagrant we need to install python dependencies for provisioning tasks.
 Check if Python and pip are installed:

-    python -v && pip -v
+    python -V && pip -V

 If this returns the version of the software, you're good to go. If not, download and install Python from here

 Install the necessary requirements
@@ -103,6 +103,7 @@ Supported Components
 - [weave](https://github.com/weaveworks/weave) v2.3.0
 - Application
   - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.15.0
+  - [cert-manager](https://github.com/jetstack/cert-manager/releases) v0.3.0

 Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pin.
diff --git a/cluster.yml b/cluster.yml
index 9bfd2ff42..c77e9e1b5 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -51,7 +51,7 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
-    - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: true }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }

 - hosts: k8s-cluster:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
diff --git a/docs/dns-stack.md b/docs/dns-stack.md
index 1deb88776..92689eee5 100644
--- a/docs/dns-stack.md
+++ b/docs/dns-stack.md
@@ -52,13 +52,13 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
 ## dns_mode

 ``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:

-#### dnsmasq_kubedns (default)
+#### dnsmasq_kubedns

 This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns. It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``

-#### kubedns
+#### kubedns (default)

 This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for all queries.
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index c523dd5ec..68ed6c1bc 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -140,6 +140,9 @@ dns_domain: "{{ cluster_name }}"
 # Path used to store Docker data
 docker_daemon_graph: "/var/lib/docker"

+## Uncomment to set the docker daemon's --iptables option to true
+#docker_iptables_enabled: "true"
+
 ## A string of extra options to pass to the docker daemon.
 ## This string should be exactly as you wish it to appear.
 ## An obvious use case is allowing insecure-registry access
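The new ``etcd_events_cluster_enabled`` flag wired into cluster.yml above defaults to false; its definition lands in roles/etcd/defaults/main.yml further down in this diff. A minimal sketch of enabling it from an inventory override, assuming an inventory copied from inventory/sample (the file path is illustrative):

    # inventory/mycluster/group_vars/all.yml (illustrative path)
    # cluster.yml forwards this value to the etcd role as etcd_events_cluster_setup,
    # provisioning a second etcd cluster dedicated to k8s events.
    etcd_events_cluster_enabled: true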
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 1013523c9..4a3b24f80 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -17,7 +17,7 @@ dockerproject_repo_key_info:
 dockerproject_repo_info:
   repos:

-docker_dns_servers_strict: yes
+docker_dns_servers_strict: true

 docker_container_storage_setup: false
@@ -40,3 +40,6 @@ dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/
 dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
 dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
 dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
+
+# Used to set the docker daemon --iptables option
+docker_iptables_enabled: "false"
diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml
index 6fe516c2d..57e12a8ed 100644
--- a/roles/docker/tasks/set_facts_dns.yml
+++ b/roles/docker/tasks/set_facts_dns.yml
@@ -56,7 +56,7 @@
 - name: check number of nameservers
   fail:
-    msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
+    msg: "Too many nameservers. You can relax this check by setting docker_dns_servers_strict=false in all.yml and we will only use the first 3."
   when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool

 - name: rtrim number of nameservers to 3
diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2
index f1587ec4d..296f5a8a1 100644
--- a/roles/docker/templates/docker-options.conf.j2
+++ b/roles/docker/templates/docker-options.conf.j2
@@ -1,5 +1,5 @@
 [Service]
-Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables=false"
+Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
 {% if docker_mount_flags is defined and docker_mount_flags != "" %}
 MountFlags={{ docker_mount_flags }}
 {% endif %}
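With the templated --iptables flag above, the rendered systemd drop-in follows directly from two inventory variables. A rough sketch with illustrative values (the insecure-registry CIDR is only an example):

    # Illustrative inventory values
    docker_options: "--insecure-registry=10.233.0.0/18"
    docker_iptables_enabled: "true"
    # docker-options.conf would then render approximately as:
    #   [Service]
    #   Environment="DOCKER_OPTS=--insecure-registry=10.233.0.0/18 --iptables=true"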
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index f07d40483..8eee9fd2f 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -132,14 +132,14 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
 kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
 test_image_repo: busybox
 test_image_tag: latest
-elasticsearch_version: "v2.4.1"
-elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
+elasticsearch_version: "v5.6.4"
+elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
 elasticsearch_image_tag: "{{ elasticsearch_version }}"
-fluentd_version: "1.22"
-fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
+fluentd_version: "v2.0.4"
+fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
 fluentd_image_tag: "{{ fluentd_version }}"
-kibana_version: "v4.6.1"
-kibana_image_repo: "gcr.io/google_containers/kibana"
+kibana_version: "5.6.4"
+kibana_image_repo: "docker.elastic.co/kibana/kibana"
 kibana_image_tag: "{{ kibana_version }}"
 helm_version: "v2.9.1"
 helm_image_repo: "lachlanevenson/k8s-helm"
@@ -160,11 +160,9 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin
 ingress_nginx_controller_image_tag: "0.15.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.2.4"
+cert_manager_version: "v0.3.0"
 cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
 cert_manager_controller_image_tag: "{{ cert_manager_version }}"
-cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
-cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"

 downloads:
   netcheck_server:
@@ -583,14 +581,6 @@ downloads:
       sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
     groups:
     - kube-node
-  cert_manager_ingress_shim:
-    enabled: "{{ cert_manager_enabled }}"
-    container: true
-    repo: "{{ cert_manager_ingress_shim_image_repo }}"
-    tag: "{{ cert_manager_ingress_shim_image_tag }}"
-    sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
-    groups:
-    - kube-node

 download_defaults:
   container: false
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 041214903..750b710f2 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -3,6 +3,9 @@
 etcd_cluster_setup: true
 etcd_events_cluster_setup: false

+# Set to true to store k8s events in a separate etcd cluster
+etcd_events_cluster_enabled: false
+
 etcd_backup_prefix: "/var/backups"
 etcd_data_dir: "/var/lib/etcd"
 etcd_events_data_dir: "/var/lib/etcd-events"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
index dd5b9b630..4b9ab0067 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
@@ -1,9 +1,12 @@
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: efk
   namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 subjects:
   - kind: ServiceAccount
     name: efk
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
index 75d75f650..01e774e96 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
@@ -6,3 +6,4 @@ metadata:
   namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index 4cdcf33ad..51666c1f2 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -1,15 +1,17 @@
 ---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
-apiVersion: extensions/v1beta1
-kind: Deployment
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
 metadata:
-  name: elasticsearch-logging-v1
+  name: elasticsearch-logging
   namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: "{{ elasticsearch_image_tag }}"
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
+  serviceName: elasticsearch-logging
   replicas: 2
   selector:
     matchLabels:
@@ -53,4 +55,10 @@ spec:
 {% if rbac_enabled %}
       serviceAccountName: efk
 {% endif %}
+      initContainers:
+      - image: alpine:3.6
+        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
+        name: elasticsearch-logging-init
+        securityContext:
+          privileged: true
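The privileged init container added above is needed because Elasticsearch 5.x refuses to start unless the kernel parameter vm.max_map_count is at least 262144. Where privileged containers are not permitted, a host-side alternative (a hypothetical sketch, not part of this changeset) is to raise the limit on every schedulable node beforehand:

    # Hypothetical Ansible task: set vm.max_map_count on the node itself
    # instead of relying on the privileged init container.
    - name: Ensure vm.max_map_count meets the Elasticsearch 5.x minimum
      become: true
      sysctl:
        name: vm.max_map_count
        value: "262144"
        state: present
        sysctl_set: yes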
diff --git a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
index e8d93732c..0305a5f7a 100644
--- a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml
@@ -1,7 +1,7 @@
 ---
 fluentd_cpu_limit: 0m
-fluentd_mem_limit: 200Mi
+fluentd_mem_limit: 500Mi
 fluentd_cpu_requests: 100m
 fluentd_mem_requests: 200Mi
-fluentd_config_dir: /etc/kubernetes/fluentd
-fluentd_config_file: fluentd.conf
+fluentd_config_dir: /etc/fluent/config.d
+# fluentd_config_file: fluentd.conf
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
index b7de44dc0..0b0229f69 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
@@ -1,10 +1,19 @@
+---
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: fluentd-config
   namespace: "kube-system"
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
 data:
-  {{ fluentd_config_file }}: |
+  system.conf: |-
+    <system>
+      root_dir /tmp/fluentd-buffers/
+    </system>
+
+  containers.input.conf: |-
     # This configuration file for Fluentd / td-agent is used
     # to watch changes to Docker log files. The kubelet creates symlinks that
     # capture the pod name, namespace, container name & Docker container ID
@@ -18,7 +27,6 @@ data:
     # See https://github.com/uken/fluent-plugin-elasticsearch &
     # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
     # more information about the plugins.
-    # Maintainer: Jimmi Dyson
     #
     # Example
     # =======
@@ -99,63 +107,87 @@ data:
     # This makes it easier for users to search for logs by pod name or by
     # the name of the Kubernetes container regardless of how many times the
     # Kubernetes pod has been restarted (resulting in a several Docker container IDs).
-    #
-    # TODO: Propagate the labels associated with a container along with its logs
-    # so users can query logs using labels as well as or instead of the pod name
-    # and container name. This is simply done via configuration of the Kubernetes
-    # fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
-    # problem yet to be solved as secrets are not usable in static pods which the fluentd
-    # pod must be until a per-node controller is available in Kubernetes.

-    # Prevent fluentd from handling records containing its own logs. Otherwise
-    # it can lead to an infinite loop, when error in sending one message generates
-    # another message which also fails to be sent and so on.
-    <match fluent.**>
-      type null
-    </match>

-    # Example:
+    # Json Log Example:
     # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+    # CRI Log Example:
+    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
     <source>
-      type tail
+      @id fluentd-containers.log
+      @type tail
       path /var/log/containers/*.log
       pos_file /var/log/es-containers.log.pos
       time_format %Y-%m-%dT%H:%M:%S.%NZ
-      tag kubernetes.*
-      format json
+      tag raw.kubernetes.*
       read_from_head true
+      <parse>
+        @type multi_format
+        <pattern>
+          format json
+          time_key time
+          time_format %Y-%m-%dT%H:%M:%S.%NZ
+        </pattern>
+        <pattern>
+          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+          time_format %Y-%m-%dT%H:%M:%S.%N%:z
+        </pattern>
+      </parse>
+    </source>
+
+    # Detect exceptions in the log output and forward them as one log entry.
+    <match raw.kubernetes.**>
+      @id raw.kubernetes
+      @type detect_exceptions
+      remove_tag_prefix raw
+      message log
+      stream stream
+      multiline_flush_interval 5
+      max_bytes 500000
+      max_lines 1000
+    </match>
+
+  system.input.conf: |-
     # Example:
     # 2015-12-21 23:17:22,066 [salt.state       ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
     <source>
-      type tail
+      @id minion
+      @type tail
       format /^(?
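The fluentd defaults above raise the memory limit from 200Mi to 500Mi and move the config directory to /etc/fluent/config.d, which matches where the v2.0.4 fluentd-elasticsearch image loads its configuration from. If the bundled resource settings do not fit a given cluster, they can be overridden from inventory variables; the values below are illustrative:

    # Illustrative inventory override (e.g. group_vars/k8s-cluster.yml)
    fluentd_mem_limit: 1Gi
    fluentd_mem_requests: 200Mi
    fluentd_cpu_requests: 100m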