Merge branch 'master' into multi-arch-support
@@ -4,3 +4,6 @@ pip_python_coreos_modules:
- six
override_system_hostname: true
coreos_auto_upgrade: true

@@ -18,7 +18,11 @@ mv -n pypy-$PYPY_VERSION-linux64 pypy
## library fixup
mkdir -p pypy/lib
ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
if [ -f /lib64/libncurses.so.5.9 ]; then
ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
elif [ -f /lib64/libncurses.so.6.1 ]; then
ln -snf /lib64/libncurses.so.6.1 $BINDIR/pypy/lib/libtinfo.so.5
fi
cat > $BINDIR/python <<EOF
#!/bin/bash

@@ -62,3 +62,8 @@
with_items: "{{pip_python_coreos_modules}}"
environment:
PATH: "{{ ansible_env.PATH }}:{{ bin_dir }}"
- name: Bootstrap | Disable auto-upgrade
shell: "systemctl stop locksmithd.service && systemctl mask --now locksmithd.service"
when:
- not coreos_auto_upgrade
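The new `coreos_auto_upgrade` default keeps locksmithd running; the task above only stops and masks it when the variable is flipped off. A minimal sketch of how a user might opt out in their inventory (the group_vars path is illustrative, not part of this commit):

``` yaml
# inventory/my-cluster/group_vars/all.yml (hypothetical path)
# Setting this to false triggers "Bootstrap | Disable auto-upgrade" above,
# so CoreOS nodes stop rebooting automatically on OS updates.
coreos_auto_upgrade: false
```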
@@ -17,7 +17,7 @@ dockerproject_repo_key_info:
dockerproject_repo_info:
repos:
docker_dns_servers_strict: yes
docker_dns_servers_strict: true
docker_container_storage_setup: false

@@ -40,3 +40,6 @@ dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
# Used to set docker daemon iptables options
docker_iptables_enabled: "false"

@@ -9,10 +9,10 @@ docker_container_storage_setup_container_thinpool: docker-pool
docker_container_storage_setup_data_size: 40%FREE
docker_container_storage_setup_min_data_size: 2G
docker_container_storage_setup_chunk_size: 512K
docker_container_storage_setup_growpart: false
docker_container_storage_setup_auto_extend_pool: yes
docker_container_storage_setup_growpart: "false"
docker_container_storage_setup_auto_extend_pool: "yes"
docker_container_storage_setup_pool_autoextend_threshold: 60
docker_container_storage_setup_pool_autoextend_percent: 20
docker_container_storage_setup_device_wait_timeout: 60
docker_container_storage_setup_wipe_signatures: false
docker_container_storage_setup_wipe_signatures: "false"
docker_container_storage_setup_container_root_lv_size: 40%FREE
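These quoted string values are passed straight through to container-storage-setup on RHEL/CentOS hosts. A hedged sketch of enabling the feature against a dedicated data disk; the `docker_container_storage_setup_devs` variable and the device name are assumptions for illustration, not lines from this commit:

``` yaml
# group_vars/all.yml (illustrative)
docker_container_storage_setup: true
docker_container_storage_setup_devs: /dev/vdb   # assumed spare block device
docker_container_storage_setup_data_size: 80%FREE
docker_container_storage_setup_auto_extend_pool: "yes"
```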
@@ -7,6 +7,7 @@
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:

@@ -6,6 +6,7 @@
with_items:
- docker
- docker-engine
- docker.io
when:
- ansible_os_family == 'Debian'
- (docker_versioned_pkg[docker_version | string] | search('docker-ce'))

@@ -19,6 +20,12 @@
- docker-common
- docker-engine
- docker-selinux
- docker-client
- docker-client-latest
- docker-latest
- docker-latest-logrotate
- docker-logrotate
- docker-engine-selinux
when:
- ansible_os_family == 'RedHat'
- (docker_versioned_pkg[docker_version | string] | search('docker-ce'))

@@ -26,7 +26,7 @@
- name: add upstream dns servers (only when dnsmasq is not used)
set_fact:
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
when: dns_mode in ['kubedns', 'coredns', 'coreos_dual']
when: dns_mode in ['kubedns', 'coredns', 'coredns_dual']
- name: add global searchdomains
set_fact:

@@ -56,7 +56,7 @@
- name: check number of nameservers
fail:
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
msg: "Too many nameservers. You can relax this check by setting docker_dns_servers_strict=false in all.yml and we will only use the first 3."
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
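If more than three nameservers are legitimate in an environment, the updated message points at the knob to relax this check. A minimal sketch, with the all.yml placement taken from the message above:

``` yaml
# inventory/my-cluster/group_vars/all.yml
# Allow more than 3 upstream nameservers; docker will only use the first 3.
docker_dns_servers_strict: false
```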
- name: rtrim number of nameservers to 3

@@ -1,6 +1,5 @@
[Service]
Environment="DOCKER_OPTS={{ docker_options | default('') }} \
--iptables=false"
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
{% if docker_mount_flags is defined and docker_mount_flags != "" %}
MountFlags={{ docker_mount_flags }}
{% endif %}
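The systemd drop-in now renders `--iptables` from a variable instead of hardcoding it off, and optionally sets MountFlags. A sketch of the matching inventory vars; the values shown are examples, not defaults changed by this commit:

``` yaml
# group_vars/all.yml (illustrative)
docker_iptables_enabled: "true"   # rendered into DOCKER_OPTS as --iptables=true
docker_mount_flags: "shared"      # only emitted when defined and non-empty
```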
@@ -9,6 +9,7 @@ docker_versioned_pkg:
'1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
'1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
'17.03': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
'17.09': docker-ce=17.09.0~ce-0~debian-{{ ansible_distribution_release|lower }}
'stable': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=17.12.1~ce-0~debian-{{ ansible_distribution_release|lower }}

roles/docker/vars/redhat-aarch64.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
---
docker_kernel_min_version: '0'
# override defaults, missing 17.03 for aarch64
docker_version: '1.13'
# http://mirror.centos.org/altarch/7/extras/aarch64/Packages/
# or do 'yum --showduplicates list docker'
docker_versioned_pkg:
'latest': docker
'1.12': docker-1.12.6-48.git0fdc778.el7
'1.13': docker-1.13.1-63.git94f4240.el7
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# http://mirror.centos.org/altarch/7/extras/aarch64/Packages/
docker_package_info:
pkg_mgr: yum
pkgs:
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
docker_repo_key_info:
pkg_key: ''
repo_keys: []
docker_repo_info:
pkg_repo: ''
repos: []
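The new `{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml` entry added to the vars lookup earlier in this diff is what makes this file load automatically on CentOS/RHEL aarch64 hosts. A hedged sketch of that first_found pattern (paths abbreviated; not a verbatim task from this commit):

``` yaml
# Sketch: on a CentOS aarch64 node this resolves to redhat-aarch64.yml
# before falling back to redhat.yml or defaults.yml.
- include_vars: "{{ item }}"
  with_first_found:
    - "{{ ansible_os_family|lower }}-{{ ansible_architecture }}.yml"  # redhat-aarch64.yml
    - "{{ ansible_os_family|lower }}.yml"                             # redhat.yml
    - defaults.yml
```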
@@ -11,6 +11,7 @@ docker_versioned_pkg:
'1.12': docker-engine-1.12.6-1.el7.centos
'1.13': docker-engine-1.13.1-1.el7.centos
'17.03': docker-ce-17.03.2.ce-1.el7.centos
'17.09': docker-ce-17.09.0.ce-1.el7.centos
'stable': docker-ce-17.03.2.ce-1.el7.centos
'edge': docker-ce-17.12.1.ce-1.el7.centos

@@ -8,6 +8,7 @@ docker_versioned_pkg:
'1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
'1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
'17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
'17.09': docker-ce=17.09.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
'stable': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}

@@ -27,9 +27,9 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube
image_arch: amd64
# Versions
kube_version: v1.10.2
kube_version: v1.11.2
kubeadm_version: "{{ kube_version }}"
etcd_version: v3.2.16
etcd_version: v3.2.18
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v2.6.8"

@@ -39,21 +39,18 @@ calico_policy_version: "v1.0.3"
calico_rr_version: "v0.4.2"
flannel_version: "v0.10.0"
flannel_cni_version: "v0.3.0"
istio_version: "0.2.6"
vault_version: 0.10.1
weave_version: 2.3.0
weave_version: "2.4.0"
pod_infra_version: 3.0
contiv_version: 1.1.7
cilium_version: "v1.0.0-rc8"
cilium_version: "v1.1.2"
# Download URLs
istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
# Checksums
istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
kubeadm_checksum: 394d7d340214c91d669186cf4f2110d8eb840ca965399b4d8b22d0545a60e377
kubeadm_checksum: 6b17720a65b8ff46efe92a5544f149c39a221910d89939838d75581d4e6924c0
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
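Because kubeadm is fetched by URL and verified against a sha256 sum, bumping `kube_version` without the matching checksum makes the download fail. A sketch of a coordinated override; the checksum below is the v1.11.2 value added by this commit:

``` yaml
# group_vars/k8s-cluster.yml (illustrative)
kube_version: v1.11.2
kubeadm_checksum: 6b17720a65b8ff46efe92a5544f149c39a221910d89939838d75581d4e6924c0
```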
# Containers

@@ -73,22 +70,6 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
istio_proxy_image_repo: docker.io/istio/proxy
istio_proxy_image_tag: "{{ istio_version }}"
istio_proxy_init_image_repo: docker.io/istio/proxy_init
istio_proxy_init_image_tag: "{{ istio_version }}"
istio_ca_image_repo: docker.io/istio/istio-ca
istio_ca_image_tag: "{{ istio_version }}"
istio_mixer_image_repo: docker.io/istio/mixer
istio_mixer_image_tag: "{{ istio_version }}"
istio_pilot_image_repo: docker.io/istio/pilot
istio_pilot_image_tag: "{{ istio_version }}"
istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
istio_proxy_debug_image_tag: "{{ istio_version }}"
istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
istio_sidecar_initializer_image_tag: "{{ istio_version }}"
istio_statsd_image_repo: prom/statsd-exporter
istio_statsd_image_tag: latest
hyperkube_image_repo: "gcr.io/google-containers/hyperkube-{{ image_arch }}"
hyperkube_image_tag: "{{ kube_version }}"
pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"

@@ -120,7 +101,7 @@ dnsmasq_image_tag: "{{ dnsmasq_version }}"
kubedns_version: 1.14.10
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-{{ image_arch }}"
kubedns_image_tag: "{{ kubedns_version }}"
coredns_version: 1.1.2
coredns_version: 1.2.0
coredns_image_repo: "docker.io/coredns/coredns"
coredns_image_tag: "{{ coredns_version }}"
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-{{ image_arch }}"

@@ -135,14 +116,14 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
elasticsearch_version: "v2.4.1"
elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
elasticsearch_version: "v5.6.4"
elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
elasticsearch_image_tag: "{{ elasticsearch_version }}"
fluentd_version: "1.22"
fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
fluentd_version: "v2.0.4"
fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
fluentd_image_tag: "{{ fluentd_version }}"
kibana_version: "v4.6.1"
kibana_image_repo: "gcr.io/google_containers/kibana"
kibana_version: "5.6.4"
kibana_image_repo: "docker.elastic.co/kibana/kibana"
kibana_image_tag: "{{ kibana_version }}"
helm_version: "v2.9.1"
helm_image_repo: "lachlanevenson/k8s-helm"

@@ -156,18 +137,16 @@ registry_image_tag: "2.6"
registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
registry_proxy_image_tag: "0.4"
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
local_volume_provisioner_image_tag: "v2.0.0"
cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
cephfs_provisioner_image_tag: "a71a49d4"
local_volume_provisioner_image_tag: "v2.1.0"
cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner"
cephfs_provisioner_image_tag: "v1.1.0-k8s1.10"
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
ingress_nginx_controller_image_tag: "0.14.0"
ingress_nginx_controller_image_tag: "0.18.0"
ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
ingress_nginx_default_backend_image_tag: "1.4"
cert_manager_version: "v0.2.4"
cert_manager_version: "v0.4.1"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
downloads:
netcheck_server:

@@ -207,83 +186,6 @@ downloads:
mode: "0755"
groups:
- k8s-cluster
istioctl:
enabled: "{{ istio_enabled }}"
file: true
version: "{{ istio_version }}"
dest: "istio/istioctl"
sha256: "{{ istioctl_checksum }}"
source_url: "{{ istioctl_download_url }}"
url: "{{ istioctl_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- kube-master
istio_proxy:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_proxy_image_repo }}"
tag: "{{ istio_proxy_image_tag }}"
sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
groups:
- kube-node
istio_proxy_init:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_proxy_init_image_repo }}"
tag: "{{ istio_proxy_init_image_tag }}"
sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
groups:
- kube-node
istio_ca:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_ca_image_repo }}"
tag: "{{ istio_ca_image_tag }}"
sha256: "{{ istio_ca_digest_checksum|default(None) }}"
groups:
- kube-node
istio_mixer:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_mixer_image_repo }}"
tag: "{{ istio_mixer_image_tag }}"
sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
groups:
- kube-node
istio_pilot:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_pilot_image_repo }}"
tag: "{{ istio_pilot_image_tag }}"
sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
groups:
- kube-node
istio_proxy_debug:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_proxy_debug_image_repo }}"
tag: "{{ istio_proxy_debug_image_tag }}"
sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
groups:
- kube-node
istio_sidecar_initializer:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_sidecar_initializer_image_repo }}"
tag: "{{ istio_sidecar_initializer_image_tag }}"
sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
groups:
- kube-node
istio_statsd:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_statsd_image_repo }}"
tag: "{{ istio_statsd_image_tag }}"
sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
groups:
- kube-node
hyperkube:
enabled: true
container: true

@@ -569,7 +471,7 @@ downloads:
tag: "{{ ingress_nginx_controller_image_tag }}"
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
groups:
- kube-ingress
- kube-node
ingress_nginx_default_backend:
enabled: "{{ ingress_nginx_enabled }}"
container: true

@@ -577,7 +479,7 @@ downloads:
tag: "{{ ingress_nginx_default_backend_image_tag }}"
sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
groups:
- kube-ingress
- kube-node
cert_manager_controller:
enabled: "{{ cert_manager_enabled }}"
container: true

@@ -586,14 +488,6 @@ downloads:
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
groups:
- kube-node
cert_manager_ingress_shim:
enabled: "{{ cert_manager_enabled }}"
container: true
repo: "{{ cert_manager_ingress_shim_image_repo }}"
tag: "{{ cert_manager_ingress_shim_image_tag }}"
sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
groups:
- kube-node
download_defaults:
container: false

@@ -20,6 +20,6 @@
when:
- not skip_downloads|default(false)
- item.value.enabled
- item.value.container
- "{{ item.value.container | default(False) }}"
- download_run_once
- group_names | intersect(download.groups) | length
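The reworked condition only pulls a download on hosts whose group membership overlaps the download's `groups` list. A small sketch of how the intersect test behaves (a debug task for illustration only, not part of this commit):

``` yaml
# Evaluates truthy when the host belongs to any of the listed groups:
# the intersection of the host's group_names with the target list is non-empty.
- debug:
    msg: "this host would fetch the download"
  when: group_names | intersect(['kube-master', 'kube-node']) | length
```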
@@ -9,7 +9,7 @@
- name: Register docker images info
raw: >-
{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" | tr '\n' ','
{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} if .RepoTags {{ '}}' }}{{ '{{' }} (index .RepoTags 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}{{ '{{' }} if .RepoDigests {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}" | tr '\n' ','
no_log: true
register: docker_images
failed_when: false

@@ -3,6 +3,9 @@
etcd_cluster_setup: true
etcd_events_cluster_setup: false
# Set to true to separate k8s events to a different etcd cluster
etcd_events_cluster_enabled: false
etcd_backup_prefix: "/var/backups"
etcd_data_dir: "/var/lib/etcd"
etcd_events_data_dir: "/var/lib/etcd-events"
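With the new flag the dedicated events cluster is opt-in. A minimal sketch of enabling it (the group_vars path is illustrative):

``` yaml
# group_vars/all.yml (illustrative)
etcd_events_cluster_enabled: true
# Events members then keep their state under the new default:
etcd_events_data_dir: "/var/lib/etcd-events"
```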
@@ -95,4 +95,9 @@ if [ -n "$HOSTS" ]; then
fi
# Install certs
if [ -e "$SSLDIR/ca-key.pem" ]; then
# No pass existing CA
rm -f ca.pem ca-key.pem
fi
mv *.pem ${SSLDIR}/

@@ -62,5 +62,3 @@
with_items: "{{ etcd_node_certs_needed|d([]) }}"
when: inventory_hostname in etcd_node_cert_hosts
notify: set etcd_secret_changed
- fail:

@@ -19,11 +19,17 @@
register: "etcd_client_cert_serial_result"
changed_when: false
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
tags:
- master
- network
- name: Set etcd_client_cert_serial
set_fact:
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout }}"
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
tags:
- master
- network
- include_tasks: "install_{{ etcd_deployment_type }}.yml"
when: is_etcd_master

@@ -8,13 +8,15 @@
"member-" + inventory_hostname + ".pem"
] }}
#- include_tasks: ../../vault/tasks/shared/sync_file.yml
# vars:
# sync_file: "{{ item }}"
# sync_file_dir: "{{ etcd_cert_dir }}"
# sync_file_hosts: [ "{{ inventory_hostname }}" ]
# sync_file_is_cert: true
# with_items: "{{ etcd_master_cert_list|d([]) }}"
- include_tasks: ../../vault/tasks/shared/sync_file.yml
vars:
sync_file: "{{ item }}"
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: [ "{{ inventory_hostname }}" ]
sync_file_owner: kube
sync_file_group: root
sync_file_is_cert: true
with_items: "{{ etcd_master_cert_list|d([]) }}"
- name: sync_etcd_certs | Set facts for etcd sync_file results
set_fact:

@@ -22,16 +24,16 @@
with_items: "{{ sync_file_results|d([]) }}"
when: item.no_srcs|bool
#- name: sync_etcd_certs | Unset sync_file_results after etcd certs sync
# set_fact:
# sync_file_results: []
#
#- include_tasks: ../../vault/tasks/shared/sync_file.yml
# vars:
# sync_file: ca.pem
# sync_file_dir: "{{ etcd_cert_dir }}"
# sync_file_hosts: [ "{{ inventory_hostname }}" ]
#
#- name: sync_etcd_certs | Unset sync_file_results after ca.pem sync
# set_fact:
# sync_file_results: []
- name: sync_etcd_certs | Unset sync_file_results after etcd certs sync
set_fact:
sync_file_results: []
- include_tasks: ../../vault/tasks/shared/sync_file.yml
vars:
sync_file: ca.pem
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: [ "{{ inventory_hostname }}" ]
- name: sync_etcd_certs | Unset sync_file_results after ca.pem sync
set_fact:
sync_file_results: []
@@ -4,30 +4,30 @@
set_fact:
etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + inventory_hostname + '.pem'] }}"
#- include_tasks: ../../vault/tasks/shared/sync_file.yml
# vars:
# sync_file: "{{ item }}"
# sync_file_dir: "{{ etcd_cert_dir }}"
# sync_file_hosts: [ "{{ inventory_hostname }}" ]
# sync_file_is_cert: true
# with_items: "{{ etcd_node_cert_list|d([]) }}"
#
- include_tasks: ../../vault/tasks/shared/sync_file.yml
vars:
sync_file: "{{ item }}"
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: [ "{{ inventory_hostname }}" ]
sync_file_is_cert: true
with_items: "{{ etcd_node_cert_list|d([]) }}"
- name: sync_etcd_node_certs | Set facts for etcd sync_file results
set_fact:
etcd_node_certs_needed: "{{ etcd_node_certs_needed|default([]) + [item.path] }}"
with_items: "{{ sync_file_results|d([]) }}"
when: item.no_srcs|bool
#- name: sync_etcd_node_certs | Unset sync_file_results after etcd node certs
# set_fact:
# sync_file_results: []
#
#- include_tasks: ../../vault/tasks/shared/sync_file.yml
# vars:
# sync_file: ca.pem
# sync_file_dir: "{{ etcd_cert_dir }}"
# sync_file_hosts: "{{ groups['etcd'] }}"
#
#- name: sync_etcd_node_certs | Unset sync_file_results after ca.pem
# set_fact:
# sync_file_results: []
- name: sync_etcd_node_certs | Unset sync_file_results after etcd node certs
set_fact:
sync_file_results: []
- include_tasks: ../../vault/tasks/shared/sync_file.yml
vars:
sync_file: ca.pem
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: "{{ groups['etcd'] }}"
- name: sync_etcd_node_certs | Unset sync_file_results after ca.pem
set_fact:
sync_file_results: []

roles/etcd/templates/etcd-events-rkt.service.j2 (new file, 31 lines)
@@ -0,0 +1,31 @@
[Unit]
Description=etcd events rkt wrapper
Documentation=https://github.com/coreos/etcd
Wants=network.target

[Service]
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000

ExecStart=/usr/bin/rkt run \
--uuid-file-save=/var/run/etcd-events.uuid \
--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
--mount volume=hosts,target=/etc/hosts \
--volume=etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
--mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
--volume=etcd-data-dir,kind=host,source={{ etcd_events_data_dir }},readOnly=false \
--mount=volume=etcd-data-dir,target={{ etcd_events_data_dir }} \
--set-env-file=/etc/etcd-events.env \
--stage1-from-dir=stage1-fly.aci \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
--name={{ etcd_member_name | default("etcd-events") }}

ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/etcd-events.uuid
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/etcd-events.uuid

[Install]
WantedBy=multi-user.target
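This file is only the unit template; elsewhere the role is expected to render it and start the service. A hedged sketch of the kind of task that would activate it (the task name is an assumption, not a line from this commit):

``` yaml
# Hypothetical task illustrating how the rendered unit would be activated.
- name: Configure | Start etcd-events
  systemd:
    name: etcd-events
    state: started
    enabled: yes
    daemon_reload: yes
```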
@@ -60,6 +60,9 @@ dashboard_certs_secret_name: kubernetes-dashboard-certs
dashboard_tls_key_file: dashboard.key
dashboard_tls_cert_file: dashboard.crt
# Override dashboard default settings
dashboard_token_ttl: 900
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"

@@ -19,6 +19,7 @@
- rbac_enabled or item.type not in rbac_resources
tags:
- dnsmasq
- kubedns
# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
- name: Kubernetes Apps | Patch system:kube-dns ClusterRole

@@ -39,3 +40,4 @@
- rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
tags:
- dnsmasq
- kubedns

@@ -17,6 +17,9 @@
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
- dnsmasq
- coredns
- kubedns
- name: Kubernetes Apps | CoreDNS
import_tasks: "tasks/coredns.yml"

@@ -56,6 +59,8 @@
delay: 5
tags:
- dnsmasq
- coredns
- kubedns
- name: Kubernetes Apps | Netchecker
import_tasks: tasks/netchecker.yml

@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Check if netchecker-server manifest already exists
stat:
path: "{{ kube_config_dir }}/netchecker-server-deployment.yml.j2"
path: "{{ kube_config_dir }}/netchecker-server-deployment.yml"
register: netchecker_server_manifest
tags:
- facts

@@ -22,16 +22,16 @@
- name: Kubernetes Apps | Lay Down Netchecker Template
template:
src: "{{item.file}}"
src: "{{item.file}}.j2"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
- {file: netchecker-server-sa.yml.j2, type: sa, name: netchecker-server}
- {file: netchecker-server-clusterrole.yml.j2, type: clusterrole, name: netchecker-server}
- {file: netchecker-server-clusterrolebinding.yml.j2, type: clusterrolebinding, name: netchecker-server}
- {file: netchecker-server-deployment.yml.j2, type: deployment, name: netchecker-server}
- {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
- {file: netchecker-agent-ds.yml, type: ds, name: netchecker-agent}
- {file: netchecker-agent-hostnet-ds.yml, type: ds, name: netchecker-agent-hostnet}
- {file: netchecker-server-sa.yml, type: sa, name: netchecker-server}
- {file: netchecker-server-clusterrole.yml, type: clusterrole, name: netchecker-server}
- {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server}
- {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server}
- {file: netchecker-server-svc.yml, type: svc, name: netchecker-service}
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]

@@ -11,7 +11,7 @@ data:
.:53 {
errors
health
kubernetes {{ cluster_name }} in-addr.arpa ip6.arpa {
kubernetes {{ dns_domain }} in-addr.arpa ip6.arpa {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
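Switching the Corefile from `cluster_name` to `dns_domain` matters because the two can differ: CoreDNS must answer for the cluster DNS suffix, not the inventory's cluster name. A sketch of the variable that now drives the stanza; the value shown is the usual kubespray default and is stated here as an assumption:

``` yaml
# group_vars/k8s-cluster.yml (illustrative)
dns_domain: cluster.local
# Renders in the Corefile as:
#   kubernetes cluster.local in-addr.arpa ip6.arpa { ... }
```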
@@ -34,6 +34,22 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- "true"
containers:
- name: coredns
image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"

@@ -166,6 +166,7 @@ spec:
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
- --token-ttl={{ dashboard_token_ttl }}
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
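The dashboard's token lifetime is now template-driven through `dashboard_token_ttl` (default 900 seconds, added earlier in this diff). A sketch of overriding it; per upstream dashboard documentation a value of 0 is treated as never-expiring, which is stated here as an assumption:

``` yaml
# group_vars/k8s-cluster.yml (illustrative)
dashboard_token_ttl: 3600   # keep dashboard sessions alive for an hour
```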
@@ -30,7 +30,24 @@ spec:
spec:
tolerations:
- effect: NoSchedule
operator: Exists
operator: Equal
key: node-role.kubernetes.io/master
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
k8s-app: kubedns-autoscaler
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- "true"
containers:
- name: autoscaler
image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"

@@ -30,8 +30,25 @@ spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoSchedule
operator: Exists
- effect: "NoSchedule"
operator: "Equal"
key: "node-role.kubernetes.io/master"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
k8s-app: kube-dns
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- "true"
volumes:
- name: kube-dns-config
configMap:

@@ -1,9 +1,12 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: efk
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: efk

@@ -6,3 +6,4 @@ metadata:
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile

@@ -1,15 +1,17 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-logging-v1
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
serviceName: elasticsearch-logging
replicas: 2
selector:
matchLabels:

@@ -53,4 +55,10 @@ spec:
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
initContainers:
- image: alpine:3.6
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-logging-init
securityContext:
privileged: true
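Elasticsearch 5.x refuses to start unless `vm.max_map_count` is at least 262144, which is why the privileged initContainer above exists. An equivalent host-level alternative, sketched as an Ansible task (not part of this commit):

``` yaml
# Hypothetical host-preparation task making the initContainer's sysctl persistent.
- name: Raise vm.max_map_count for Elasticsearch
  sysctl:
    name: vm.max_map_count
    value: 262144
    state: present
    reload: yes
```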
@@ -1,7 +1,7 @@
---
fluentd_cpu_limit: 0m
fluentd_mem_limit: 200Mi
fluentd_mem_limit: 500Mi
fluentd_cpu_requests: 100m
fluentd_mem_requests: 200Mi
fluentd_config_dir: /etc/kubernetes/fluentd
fluentd_config_file: fluentd.conf
fluentd_config_dir: /etc/fluent/config.d
# fluentd_config_file: fluentd.conf
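The raised memory limit and relocated config dir track the upstream v2.0.4 image, which reads every file under /etc/fluent/config.d rather than one named config file. A sketch of a per-cluster override (values are illustrative):

``` yaml
# group_vars/k8s-cluster.yml (illustrative)
fluentd_mem_limit: 1Gi              # more headroom on log-heavy nodes
fluentd_config_dir: /etc/fluent/config.d
```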
@@ -1,10 +1,19 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluentd-config
namespace: "kube-system"
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
{{ fluentd_config_file }}: |
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID

@@ -18,7 +27,6 @@ data:
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
# Maintainer: Jimmi Dyson <jimmidyson@gmail.com>
#
# Example
# =======

@@ -99,63 +107,87 @@ data:
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
#
# TODO: Propagate the labels associated with a container along with its logs
# so users can query logs using labels as well as or instead of the pod name
# and container name. This is simply done via configuration of the Kubernetes
# fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
# problem yet to be solved as secrets are not usable in static pods which the fluentd
# pod must be until a per-node controller is available in Kubernetes.
# Prevent fluentd from handling records containing its own logs. Otherwise
# it can lead to an infinite loop, when error in sending one message generates
# another message which also fails to be sent and so on.
<match fluent.**>
type null
</match>
# Example:
# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
type tail
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag kubernetes.*
format json
tag raw.kubernetes.*
read_from_head true
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/es-salt.pos
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type tail
@id docker.log
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none

@@ -163,13 +195,16 @@ data:
pos_file /var/log/es-etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -179,10 +214,12 @@ data:
pos_file /var/log/es-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -192,10 +229,12 @@ data:
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -205,10 +244,12 @@ data:
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -218,10 +259,12 @@ data:
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -231,10 +274,12 @@ data:
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -244,10 +289,12 @@ data:
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -257,10 +304,12 @@ data:
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/

@@ -270,59 +319,123 @@ data:
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag docker
</source>
# <source>
# @id journald-container-runtime
# @type systemd
# filters [{ "_SYSTEMD_UNIT": "{% raw %}{{ container_runtime }} {% endraw %}.service" }]
# <storage>
# @type local
# persistent true
# </storage>
# read_from_head true
# tag container-runtime
# </source>
<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag kubelet
</source>
<source>
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag node-problem-detector
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@type prometheus
</source>
<source>
@type monitor_agent
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
output.conf: |-
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
type kubernetes_metadata
@type kubernetes_metadata
</filter>
## Prometheus Exporter Plugin
## input plugin that exports metrics
#<source>
# type prometheus
#</source>
#<source>
# type monitor_agent
#</source>
#<source>
# type forward
#</source>
## input plugin that collects metrics from MonitorAgent
#<source>
# @type prometheus_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
## input plugin that collects metrics for output plugin
#<source>
# @type prometheus_output_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
## input plugin that collects metrics for in_tail plugin
#<source>
# @type prometheus_tail_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
<match **>
type elasticsearch
user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
# Set the chunk limit the same as for fluentd-gcp.
buffer_chunk_limit 2M
# Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
buffer_queue_limit 32
flush_interval 5s
# Never wait longer than 5 minutes between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 8
</match>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
queue_limit_length 8
overflow_action block
</buffer>
</match>
@@ -1,32 +1,42 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
apiVersion: extensions/v1beta1
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "fluentd-es-v{{ fluentd_version }}"
name: "fluentd-es-{{ fluentd_version }}"
namespace: "kube-system"
labels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
kubernetes.io/cluster-service: "true"
version: "v{{ fluentd_version }}"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: "v{{ fluentd_version }}"
version: "{{ fluentd_version }}"
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- effect: NoSchedule
operator: Exists
priorityClassName: system-node-critical
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
command:
- '/bin/sh'
- '-c'
- '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
env:
- name: FLUENTD_ARGS
value: "--no-supervisor -q"
resources:
limits:
{% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}

@@ -34,27 +44,24 @@ spec:
{% endif %}
memory: {{ fluentd_mem_limit }}
requests:
cpu: {{ fluentd_cpu_requests }}
cpu: {{ fluentd_cpu_requests }}
memory: {{ fluentd_mem_requests }}
volumeMounts:
- name: varlog
mountPath: /var/log
- name: dockercontainers
- name: varlibdockercontainers
mountPath: "{{ docker_daemon_graph }}/containers"
readOnly: true
- name: config
- name: config-volume
mountPath: "{{ fluentd_config_dir }}"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: dockercontainers
- name: varlibdockercontainers
hostPath:
path: {{ docker_daemon_graph }}/containers
- name: config
configMap:
- name: config-volume
configMap:
name: fluentd-config
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}

@@ -4,3 +4,4 @@ kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M
kibana_service_port: 5601
kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"

@@ -1,6 +1,6 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-kibana/kibana-controller.yaml
apiVersion: extensions/v1beta1
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging

@@ -36,10 +36,12 @@ spec:
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
{% if kibana_base_url is defined and kibana_base_url != "" %}
- name: "KIBANA_BASE_URL"
- name: "SERVER_BASEPATH"
value: "{{ kibana_base_url }}"
{% endif %}
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui

@@ -1,7 +1,10 @@
---
cephfs_provisioner_namespace: "kube-system"
cephfs_provisioner_namespace: "cephfs-provisioner"
cephfs_provisioner_cluster: ceph
cephfs_provisioner_monitors: []
cephfs_provisioner_monitors: ~
cephfs_provisioner_admin_id: admin
cephfs_provisioner_secret: secret
cephfs_provisioner_storage_class: cephfs
cephfs_provisioner_reclaim_policy: Delete
cephfs_provisioner_claim_root: /volumes
cephfs_provisioner_deterministic_names: true
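`cephfs_provisioner_monitors` changes from an empty list to `~` (null) because the StorageClass template later in this diff now emits the value verbatim instead of `join`-ing a list. A sketch of the comma-separated string form it now expects (hostnames are placeholders):

``` yaml
# group_vars/k8s-cluster.yml (illustrative)
cephfs_provisioner_monitors: "mon1.example.local:6789,mon2.example.local:6789"
```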
@@ -1,5 +1,32 @@
---
- name: CephFS Provisioner | Remove legacy addon dir and manifests
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
- name: CephFS Provisioner | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
- name: CephFS Provisioner | Remove legacy storageclass
shell: |
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
- name: CephFS Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"

@@ -7,22 +34,24 @@
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- name: CephFS Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
with_items:
- { name: cephfs-provisioner-ns, file: cephfs-provisioner-ns.yml, type: ns }
- { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa }
- { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role }
- { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding }
- { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole }
- { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
- { name: cephfs-provisioner-rs, file: cephfs-provisioner-rs.yml, type: rs }
- { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret }
- { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc }
register: cephfs_manifests
- { name: 00-namespace, file: 00-namespace.yml, type: ns }
- { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret }
- { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa }
- { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole }
- { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding }
- { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role }
- { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding }
- { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: rs }
- { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc }
register: cephfs_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
- name: CephFS Provisioner | Apply manifests

@@ -33,5 +62,5 @@
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ cephfs_manifests.results }}"
with_items: "{{ cephfs_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]

@@ -1,6 +1,6 @@
---
apiVersion: apps/v1
kind: ReplicaSet
kind: Deployment
metadata:
name: cephfs-provisioner-v{{ cephfs_provisioner_image_tag }}
namespace: {{ cephfs_provisioner_namespace }}

@@ -4,9 +4,12 @@ kind: StorageClass
metadata:
name: {{ cephfs_provisioner_storage_class }}
provisioner: ceph.com/cephfs
reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }}
parameters:
cluster: {{ cephfs_provisioner_cluster }}
monitors: {{ cephfs_provisioner_monitors | join(',') }}
monitors: {{ cephfs_provisioner_monitors }}
adminId: {{ cephfs_provisioner_admin_id }}
adminSecretName: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
adminSecretName: cephfs-provisioner
adminSecretNamespace: {{ cephfs_provisioner_namespace }}
claimRoot: {{ cephfs_provisioner_claim_root }}
deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}"
|
||||
@@ -2,7 +2,7 @@
|
||||
kind: Secret
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
|
||||
name: cephfs-provisioner
|
||||
namespace: {{ cephfs_provisioner_namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
@@ -46,18 +46,20 @@ to limit the quota of persistent volumes.
|
||||
|
||||
### Simple directories
|
||||
|
||||
``` bash
|
||||
for vol in vol6 vol7 vol8; do
|
||||
mkdir /mnt/disks/$vol
|
||||
done
|
||||
```
|
||||
|
||||
This is also acceptable in a development environment, but there is no capacity
|
||||
In a development environment using `mount --bind` works also, but there is no capacity
|
||||
management.
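
A minimal sketch of that bind-mount approach (paths are illustrative, assuming `/mnt/disks` is the discovery directory):

``` bash
# Bind-mount an existing directory into the discovery directory
# so the provisioner picks it up as a local PV.
mkdir -p /data/vol9 /mnt/disks/vol9
mount --bind /data/vol9 /mnt/disks/vol9
```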

### Block volumeMode PVs

Create a symbolic link under the discovery directory to the block device on the node. To use
raw block devices in pods, the BlockVolume feature gate must be enabled.
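
For example (device name illustrative):

``` bash
# Expose /dev/sdb to the provisioner as a raw block PV.
ln -s /dev/sdb /mnt/disks/sdb
```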

Usage notes
-----------

Beta PV.NodeAffinity field is used by default. If running against an older K8s
version, the useAlphaAPI flag must be set in the configMap.

The volume provisioner cannot calculate volume sizes correctly, so you should
delete the daemonset pod on the relevant host after creating volumes. The pod
will be recreated and read the size correctly.
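
Assuming the provisioner pods carry an `app=local-volume-provisioner` label (an assumption; verify with `kubectl get pods --show-labels`), that looks like:

``` bash
# Delete the provisioner pod on the affected node; the DaemonSet recreates it
# and the new pod re-reads the volume size.
kubectl -n kube-system delete pod -l app=local-volume-provisioner \
  --field-selector spec.nodeName=node1   # node1 is a placeholder
```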

@@ -19,6 +19,9 @@ spec:
        version: {{ local_volume_provisioner_image_tag }}
    spec:
      serviceAccountName: local-volume-provisioner
      tolerations:
        - effect: NoSchedule
          operator: Exists
      containers:
        - name: provisioner
          image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
@@ -30,12 +33,17 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
          - name: MY_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          volumeMounts:
            - name: local-volume-provisioner
              mountPath: /etc/provisioner/config
              readOnly: true
            - name: local-volume-provisioner-hostpath-mnt-disks
              mountPath: {{ local_volume_provisioner_mount_dir }}
              mountPropagation: "HostToContainer"
      volumes:
        - name: local-volume-provisioner
          configMap:

@@ -18,3 +18,6 @@ helm_skip_refresh: false

# Override values for the Tiller Deployment manifest.
# tiller_override: "key1=val1,key2=val2"

# Limit the maximum number of revisions saved per release. Use 0 for no limit.
# tiller_max_history: 0

@@ -34,6 +34,7 @@
    {% if rbac_enabled %} --service-account=tiller{% endif %}
    {% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
    {% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
    {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
  when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)
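
For reference, a rough sketch of the `helm init` invocation this template renders when all of the optional variables are set (values illustrative):

``` bash
helm init \
  --service-account=tiller \
  --node-selectors "node-role.kubernetes.io/master=true" \
  --override "key1=val1,key2=val2" \
  --history-max=5
```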

- name: Helm | Set up bash completion

@@ -1,6 +1,2 @@
---
cert_manager_namespace: "cert-manager"
cert_manager_cpu_requests: 10m
cert_manager_cpu_limits: 30m
cert_manager_memory_requests: 32Mi
cert_manager_memory_limits: 200Mi

@@ -1,5 +1,23 @@
---

- name: Cert Manager | Remove legacy addon dir and manifests
  file:
    path: "{{ kube_config_dir }}/addons/cert_manager"
    state: absent
  when:
    - inventory_hostname == groups['kube-master'][0]
  tags:
    - upgrade

- name: Cert Manager | Remove legacy namespace
  shell: |
    {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
  ignore_errors: yes
  when:
    - inventory_hostname == groups['kube-master'][0]
  tags:
    - upgrade

- name: Cert Manager | Create addon dir
  file:
    path: "{{ kube_config_dir }}/addons/cert_manager"
@@ -7,20 +25,22 @@
    owner: root
    group: root
    mode: 0755
  when:
    - inventory_hostname == groups['kube-master'][0]

- name: Cert Manager | Create manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
  with_items:
    - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns }
    - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa }
    - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole }
    - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding }
    - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd }
    - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd }
    - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd }
    - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy }
    - { name: 00-namespace, file: 00-namespace.yml, type: ns }
    - { name: sa-cert-manager, file: sa-cert-manager.yml, type: sa }
    - { name: crd-certificate, file: crd-certificate.yml, type: crd }
    - { name: crd-clusterissuer, file: crd-clusterissuer.yml, type: crd }
    - { name: crd-issuer, file: crd-issuer.yml, type: crd }
    - { name: clusterrole-cert-manager, file: clusterrole-cert-manager.yml, type: clusterrole }
    - { name: clusterrolebinding-cert-manager, file: clusterrolebinding-cert-manager.yml, type: clusterrolebinding }
    - { name: deploy-cert-manager, file: deploy-cert-manager.yml, type: deploy }
  register: cert_manager_manifests
  when:
    - inventory_hostname == groups['kube-master'][0]

@@ -5,7 +5,7 @@ metadata:
  name: cert-manager
  labels:
    app: cert-manager
    chart: cert-manager-0.2.8
    chart: cert-manager-v0.4.1
    release: cert-manager
    heritage: Tiller
rules:
@@ -5,7 +5,7 @@ metadata:
  name: cert-manager
  labels:
    app: cert-manager
    chart: cert-manager-0.2.8
    chart: cert-manager-v0.4.1
    release: cert-manager
    heritage: Tiller
roleRef:
@@ -5,7 +5,7 @@ metadata:
  name: certificates.certmanager.k8s.io
  labels:
    app: cert-manager
    chart: cert-manager-0.2.8
    chart: cert-manager-v0.4.1
    release: cert-manager
    heritage: Tiller
spec:
@@ -5,7 +5,7 @@ metadata:
  name: clusterissuers.certmanager.k8s.io
  labels:
    app: cert-manager
    chart: cert-manager-0.2.8
    chart: cert-manager-v0.4.1
    release: cert-manager
    heritage: Tiller
spec:
@@ -5,7 +5,7 @@ metadata:
  name: issuers.certmanager.k8s.io
  labels:
    app: cert-manager
    chart: cert-manager-0.2.8
    chart: cert-manager-v0.4.1
    release: cert-manager
    heritage: Tiller
spec:
@@ -6,15 +6,19 @@ metadata:
  namespace: {{ cert_manager_namespace }}
  labels:
    app: cert-manager
    chart: cert-manager-0.2.8
    chart: cert-manager-v0.4.1
    release: cert-manager
    heritage: Tiller
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cert-manager
      release: cert-manager
  template:
    metadata:
      labels:
        k8s-app: cert-manager
        app: cert-manager
        release: cert-manager
      annotations:
    spec:
@@ -25,6 +29,7 @@ spec:
        imagePullPolicy: {{ k8s_image_pull_policy }}
        args:
          - --cluster-resource-namespace=$(POD_NAMESPACE)
          - --leader-election-namespace=$(POD_NAMESPACE)
        env:
          - name: POD_NAMESPACE
            valueFrom:
@@ -32,20 +37,5 @@ spec:
                fieldPath: metadata.namespace
        resources:
          requests:
            cpu: {{ cert_manager_cpu_requests }}
            memory: {{ cert_manager_memory_requests }}
          limits:
            cpu: {{ cert_manager_cpu_limits }}
            memory: {{ cert_manager_memory_limits }}

      - name: ingress-shim
        image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
        imagePullPolicy: {{ k8s_image_pull_policy }}
        resources:
          requests:
            cpu: {{ cert_manager_cpu_requests }}
            memory: {{ cert_manager_memory_requests }}
          limits:
            cpu: {{ cert_manager_cpu_limits }}
            memory: {{ cert_manager_memory_limits }}

            cpu: 10m
            memory: 32Mi
@@ -6,6 +6,6 @@ metadata:
  namespace: {{ cert_manager_namespace }}
  labels:
    app: cert-manager
    chart: cert-manager-0.2.8
    chart: cert-manager-v0.4.1
    release: cert-manager
    heritage: Tiller
@@ -1,6 +1,8 @@
---
ingress_nginx_namespace: "ingress-nginx"
ingress_nginx_host_network: false
ingress_nginx_nodeselector:
  node-role.kubernetes.io/master: "true"
ingress_nginx_insecure_port: 80
ingress_nginx_secure_port: 443
ingress_nginx_configmap: {}

@@ -1,5 +1,23 @@
---

- name: NGINX Ingress Controller | Remove legacy addon dir and manifests
  file:
    path: "{{ kube_config_dir }}/addons/ingress_nginx"
    state: absent
  when:
    - inventory_hostname == groups['kube-master'][0]
  tags:
    - upgrade

- name: NGINX Ingress Controller | Remove legacy namespace
  shell: |
    {{ bin_dir }}/kubectl delete namespace {{ ingress_nginx_namespace }}
  ignore_errors: yes
  when:
    - inventory_hostname == groups['kube-master'][0]
  tags:
    - upgrade

- name: NGINX Ingress Controller | Create addon dir
  file:
    path: "{{ kube_config_dir }}/addons/ingress_nginx"
@@ -7,24 +25,26 @@
    owner: root
    group: root
    mode: 0755
  when:
    - inventory_hostname == groups['kube-master'][0]

- name: NGINX Ingress Controller | Create manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}"
  with_items:
    - { name: ingress-nginx-ns, file: ingress-nginx-ns.yml, type: ns }
    - { name: ingress-nginx-sa, file: ingress-nginx-sa.yml, type: sa }
    - { name: ingress-nginx-role, file: ingress-nginx-role.yml, type: role }
    - { name: ingress-nginx-rolebinding, file: ingress-nginx-rolebinding.yml, type: rolebinding }
    - { name: ingress-nginx-clusterrole, file: ingress-nginx-clusterrole.yml, type: clusterrole }
    - { name: ingress-nginx-clusterrolebinding, file: ingress-nginx-clusterrolebinding.yml, type: clusterrolebinding }
    - { name: ingress-nginx-cm, file: ingress-nginx-cm.yml, type: cm }
    - { name: ingress-nginx-tcp-servicecs-cm, file: ingress-nginx-tcp-servicecs-cm.yml, type: cm }
    - { name: ingress-nginx-udp-servicecs-cm, file: ingress-nginx-udp-servicecs-cm.yml, type: cm }
    - { name: ingress-nginx-default-backend-svc, file: ingress-nginx-default-backend-svc.yml, type: svc }
    - { name: ingress-nginx-default-backend-rs, file: ingress-nginx-default-backend-rs.yml, type: rs }
    - { name: ingress-nginx-controller-ds, file: ingress-nginx-controller-ds.yml, type: ds }
    - { name: 00-namespace, file: 00-namespace.yml, type: ns }
    - { name: deploy-default-backend, file: deploy-default-backend.yml, type: deploy }
    - { name: svc-default-backend, file: svc-default-backend.yml, type: svc }
    - { name: cm-ingress-nginx, file: cm-ingress-nginx.yml, type: cm }
    - { name: cm-tcp-services, file: cm-tcp-services.yml, type: cm }
    - { name: cm-udp-services, file: cm-udp-services.yml, type: cm }
    - { name: sa-ingress-nginx, file: sa-ingress-nginx.yml, type: sa }
    - { name: clusterrole-ingress-nginx, file: clusterrole-ingress-nginx.yml, type: clusterrole }
    - { name: clusterrolebinding-ingress-nginx, file: clusterrolebinding-ingress-nginx.yml, type: clusterrolebinding }
    - { name: role-ingress-nginx, file: role-ingress-nginx.yml, type: role }
    - { name: rolebinding-ingress-nginx, file: rolebinding-ingress-nginx.yml, type: rolebinding }
    - { name: ds-ingress-nginx-controller, file: ds-ingress-nginx-controller.yml, type: ds }
  register: ingress_nginx_manifests
  when:
    - inventory_hostname == groups['kube-master'][0]

@@ -6,5 +6,7 @@ metadata:
  namespace: {{ ingress_nginx_namespace }}
  labels:
    k8s-app: ingress-nginx
{% if ingress_nginx_configmap %}
data:
  {{ ingress_nginx_configmap | to_nice_yaml | indent(2) }}
{%- endif %}
@@ -2,9 +2,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-tcp-services
  name: tcp-services
  namespace: {{ ingress_nginx_namespace }}
  labels:
    k8s-app: ingress-nginx
{% if ingress_nginx_configmap_tcp_services %}
data:
  {{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }}
{%- endif %}
@@ -2,9 +2,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-udp-services
  name: udp-services
  namespace: {{ ingress_nginx_namespace }}
  labels:
    k8s-app: ingress-nginx
{% if ingress_nginx_configmap_udp_services %}
data:
  {{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }}
{%- endif %}
@@ -1,27 +1,27 @@
---
apiVersion: apps/v1
kind: ReplicaSet
kind: Deployment
metadata:
  name: ingress-nginx-default-backend-v{{ ingress_nginx_default_backend_image_tag }}
  name: default-backend-v{{ ingress_nginx_default_backend_image_tag }}
  namespace: {{ ingress_nginx_namespace }}
  labels:
    k8s-app: ingress-nginx-default-backend
    k8s-app: default-backend
    version: v{{ ingress_nginx_default_backend_image_tag }}
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: ingress-nginx-default-backend
      k8s-app: default-backend
      version: v{{ ingress_nginx_default_backend_image_tag }}
  template:
    metadata:
      labels:
        k8s-app: ingress-nginx-default-backend
        k8s-app: default-backend
        version: v{{ ingress_nginx_default_backend_image_tag }}
    spec:
      terminationGracePeriodSeconds: 60
      containers:
        - name: ingress-nginx-default-backend
        - name: default-backend
          # Any image is permissible as long as:
          # 1. It serves a 404 page at /
          # 2. It serves 200 on a /healthz endpoint
@@ -35,3 +35,10 @@ spec:
            timeoutSeconds: 5
          ports:
            - containerPort: 8080
          resources:
            limits:
              cpu: 10m
              memory: 20Mi
            requests:
              cpu: 10m
              memory: 20Mi
@@ -7,9 +7,6 @@ metadata:
  labels:
    k8s-app: ingress-nginx
    version: v{{ ingress_nginx_controller_image_tag }}
  annotations:
    prometheus.io/port: '10254'
    prometheus.io/scrape: 'true'
spec:
  selector:
    matchLabels:
@@ -24,23 +21,35 @@ spec:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
{% if rbac_enabled %}
      serviceAccountName: ingress-nginx
{% endif %}
{% if ingress_nginx_host_network %}
      hostNetwork: true
{% endif %}
{% if ingress_nginx_nodeselector %}
      nodeSelector:
        node-role.kubernetes.io/ingress: "true"
      terminationGracePeriodSeconds: 60
        {{ ingress_nginx_nodeselector | to_nice_yaml }}
{%- endif %}
      containers:
        - name: ingress-nginx-controller
          image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          args:
            - /nginx-ingress-controller
            - --default-backend-service=$(POD_NAMESPACE)/ingress-nginx-default-backend
            - --default-backend-service=$(POD_NAMESPACE)/default-backend
            - --configmap=$(POD_NAMESPACE)/ingress-nginx
            - --tcp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-udp-services
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 33
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
@@ -76,7 +85,3 @@ spec:
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
{% if rbac_enabled %}
      serviceAccountName: ingress-nginx
{% endif %}

@@ -2,13 +2,13 @@
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-default-backend
  name: default-backend
  namespace: {{ ingress_nginx_namespace }}
  labels:
    k8s-app: ingress-nginx-default-backend
    k8s-app: default-backend
spec:
  ports:
    - port: 80
      targetPort: 8080
  selector:
    k8s-app: ingress-nginx-default-backend
    k8s-app: default-backend
@@ -1,2 +0,0 @@
---
istio_namespace: istio-system
@@ -1,45 +0,0 @@
---
- name: istio | Create addon dir
  file:
    path: "{{ kube_config_dir }}/addons/istio"
    owner: root
    group: root
    mode: 0755
    recurse: yes

- name: istio | Lay out manifests
  template:
    src: "{{item.file}}.j2"
    dest: "{{kube_config_dir}}/addons/istio/{{item.file}}"
  with_items:
    - {name: istio-mixer, file: istio.yml, type: deployment }
    - {name: istio-initializer, file: istio-initializer.yml, type: deployment }
  register: manifests
  when: inventory_hostname == groups['kube-master'][0]

- name: istio | Copy istioctl binary from download dir
  command: rsync -piu "{{ local_release_dir }}/istio/istioctl" "{{ bin_dir }}/istioctl"
  changed_when: false

- name: istio | Set up bash completion
  shell: "{{ bin_dir }}/istioctl completion >/etc/bash_completion.d/istioctl.sh"
  when: ansible_os_family in ["Debian","RedHat"]

- name: istio | Set bash completion file
  file:
    path: /etc/bash_completion.d/istioctl.sh
    owner: root
    group: root
    mode: 0755
  when: ansible_os_family in ["Debian","RedHat"]

- name: istio | apply manifests
  kube:
    name: "{{item.item.name}}"
    namespace: "{{ istio_namespace }}"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/addons/istio/{{item.item.file}}"
    state: "latest"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]
@@ -1,84 +0,0 @@
# GENERATED FILE. Use with Kubernetes 1.7+
# TO UPDATE, modify files in install/kubernetes/templates and run install/updateVersion.sh
################################
# Istio initializer
################################
apiVersion: v1
kind: ConfigMap
metadata:
  name: istio-inject
  namespace: {{ istio_namespace }}
data:
  config: |-
    policy: "enabled"
    namespaces: [""] # everything, aka v1.NamespaceAll, aka cluster-wide
    initializerName: "sidecar.initializer.istio.io"
    params:
      initImage: {{ istio_proxy_init_image_repo }}:{{ istio_proxy_init_image_tag }}
      proxyImage: {{ istio_proxy_image_repo }}:{{ istio_proxy_image_tag }}
      verbosity: 2
      version: 0.2.6
      meshConfigMapName: istio
      imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: istio-initializer-service-account
  namespace: {{ istio_namespace }}
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: istio-initializer
  namespace: {{ istio_namespace }}
  annotations:
    sidecar.istio.io/inject: "false"
  initializers:
    pending: []
  labels:
    istio: istio-initializer
spec:
  replicas: 1
  template:
    metadata:
      name: istio-initializer
      labels:
        istio: initializer
      annotations:
        sidecar.istio.io/inject: "false"
    spec:
      serviceAccountName: istio-initializer-service-account
      containers:
        - name: initializer
          image: {{ istio_sidecar_initializer_image_repo }}:{{ istio_sidecar_initializer_image_tag }}
          imagePullPolicy: IfNotPresent
          args:
            - --port=8083
            - --namespace={{ istio_namespace }}
            - -v=2
          volumeMounts:
            - name: config-volume
              mountPath: /etc/istio/config
      volumes:
        - name: config-volume
          configMap:
            name: istio
---
apiVersion: admissionregistration.k8s.io/v1alpha1
kind: InitializerConfiguration
metadata:
  name: istio-sidecar
initializers:
  - name: sidecar.initializer.istio.io
    rules:
      - apiGroups:
          - "*"
        apiVersions:
          - "*"
        resources:
          - deployments
          - statefulsets
          - jobs
          - daemonsets
---
(File diff suppressed because it is too large.)
@@ -22,14 +22,6 @@ dependencies:
      - apps
      - registry

  # istio role should be last because it takes a long time to initialize and
  # will cause timeouts trying to start other addons.
  - role: kubernetes-apps/istio
    when: istio_enabled
    tags:
      - apps
      - istio

  - role: kubernetes-apps/persistent_volumes
    when: persistent_volumes_enabled
    tags:

@@ -1,4 +1,5 @@
---

- name: Weave | Start Resources
  kube:
    name: "weave-net"
@@ -9,13 +10,12 @@
    state: "latest"
  when: inventory_hostname == groups['kube-master'][0]

- name: "Weave | wait for weave to become available"
- name: Weave | Wait for Weave to become available
  uri:
    url: http://127.0.0.1:6784/status
    return_content: yes
  register: weave_status
  retries: 180
  delay: 5
  until: "{{ weave_status.status == 200 and
    'Status: ready' in weave_status.content }}"
  until: "{{ weave_status.status == 200 and 'Status: ready' in weave_status.content }}"
  when: inventory_hostname == groups['kube-master'][0]
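
The same readiness check can be reproduced by hand on any node:

``` bash
# Exits 0 once Weave Net reports readiness on its local status endpoint.
curl -s http://127.0.0.1:6784/status | grep -q 'Status: ready'
```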

@@ -1,2 +1,7 @@
---
persistent_volumes_enabled: false
storage_classes:
  - name: standard
    is_default: true
    parameters:
      availability: nova

@@ -1,21 +1,19 @@
---
- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
  template:
    src: "{{item.file}}"
    dest: "{{kube_config_dir}}/{{item.file}}"
  with_items:
    - {file: openstack-storage-class.yml, type: StorageClass, name: storage-class }
    src: "openstack-storage-class.yml.j2"
    dest: "{{kube_config_dir}}/openstack-storage-class.yml"
  register: manifests
  when:
    - inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
  kube:
    name: "{{item.item.name}}"
    name: storage-class
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
    resource: StorageClass
    filename: "{{kube_config_dir}}/openstack-storage-class.yml"
    state: "latest"
  with_items: "{{ manifests.results }}"
  when:
    - inventory_hostname == groups['kube-master'][0]
    - manifests.changed

@@ -1,10 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: standard
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
  availability: nova
@@ -0,0 +1,14 @@
{% for class in storage_classes %}
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: "{{ class.name }}"
  annotations:
    storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
provisioner: kubernetes.io/cinder
parameters:
{% for key, value in (class.parameters | default({})).items() %}
  "{{ key }}": "{{ value }}"
{% endfor %}
{% endfor %}
@@ -110,18 +110,18 @@ metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry-upstream
    k8s-app: registry
    version: v0
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry-upstream
    k8s-app: registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry-upstream
        k8s-app: registry
        version: v0
        kubernetes.io/cluster-service: "true"
    spec:
@@ -164,12 +164,12 @@ metadata:
  name: kube-registry
  namespace: kube-system
  labels:
    k8s-app: kube-registry-upstream
    k8s-app: registry
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeRegistry"
spec:
  selector:
    k8s-app: kube-registry-upstream
    k8s-app: registry
  ports:
    - name: registry
      port: 5000
@@ -257,7 +257,7 @@ You can use `kubectl` to set up a port-forward from your local node to a
running Pod:

``` console
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
    -o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
    | grep Running | head -1 | cut -f1 -d' ')
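
In the full document this snippet is followed by the forward itself; the usual next step (the registry listens on port 5000) would be:

``` bash
# Forward localhost:5000 to the registry pod selected above.
kubectl port-forward --namespace kube-system "$POD" 5000:5000
```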


@@ -5,7 +5,7 @@
      {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
      https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
      {%- else -%}
      https://{{ kube_apiserver_address }}:{{ kube_apiserver_port }}
      https://{{ kube_apiserver_access_address }}:{{ kube_apiserver_port }}
      {%- endif -%}
  tags:
    - facts

@@ -2,12 +2,11 @@
- name: Set kubeadm_discovery_address
  set_fact:
    kubeadm_discovery_address: >-
      {%- if "127.0.0.1" or "localhost" in kube_apiserver_endpoint -%}
      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
      {{ first_kube_master }}:{{ kube_apiserver_port }}
      {%- else -%}
      {{ kube_apiserver_endpoint }}
      {%- endif %}
  when: not is_kube_master
  tags:
    - facts

@@ -28,23 +27,36 @@
  register: temp_token
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: gets the kubeadm version
  command: "{{ bin_dir }}/kubeadm version -o short"
  register: kubeadm_output

- name: sets kubeadm api version to v1alpha1
  set_fact:
    kubeadmConfig_api_version: v1alpha1
  when: kubeadm_output.stdout|version_compare('v1.11.0', '<')

- name: defaults kubeadm api version to v1alpha2
  set_fact:
    kubeadmConfig_api_version: v1alpha2
  when: kubeadm_output.stdout|version_compare('v1.11.0', '>=')
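
The version gate above is equivalent to this shell sketch, with `sort -V` standing in for Ansible's `version_compare`:

``` bash
KUBEADM_VERSION=$(kubeadm version -o short)   # e.g. "v1.11.1"
if [ "$(printf '%s\n' 'v1.11.0' "$KUBEADM_VERSION" | sort -V | head -n1)" = 'v1.11.0' ]; then
  KUBEADM_CONFIG_API=v1alpha2   # kubeadm >= v1.11.0
else
  KUBEADM_CONFIG_API=v1alpha1   # kubeadm < v1.11.0
fi
```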

- name: Create kubeadm client config
  template:
    src: kubeadm-client.conf.j2
    dest: "{{ kube_config_dir }}/kubeadm-client.conf"
    src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
    dest: "{{ kube_config_dir }}/kubeadm-client.{{ kubeadmConfig_api_version }}.conf"
    backup: yes
  when: not is_kube_master
  vars:
    kubeadm_token: "{{ temp_token.stdout }}"
  register: kubeadm_client_conf

- name: Join to cluster if needed
  command: >-
    {{ bin_dir }}/kubeadm join
    --config {{ kube_config_dir}}/kubeadm-client.conf
    --config {{ kube_config_dir}}/kubeadm-client.{{ kubeadmConfig_api_version }}.conf
    --ignore-preflight-errors=all
  register: kubeadm_join
  when: not is_kube_master and (kubeadm_client_conf.changed or not kubelet_conf.stat.exists)
  when: not is_kube_master and (not kubelet_conf.stat.exists)

- name: Wait for kubelet bootstrap to create config
  wait_for:
@@ -53,18 +65,33 @@
    timeout: 60

- name: Update server field in kubelet kubeconfig
  replace:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '(\s+)https://{{ first_kube_master }}:{{ kube_apiserver_port }}(\s+.*)?$'
    replace: '\1{{ kube_apiserver_endpoint }}\2'
  lineinfile:
    dest: "{{ kube_config_dir }}/kubelet.conf"
    regexp: 'server:'
    line: '    server: {{ kube_apiserver_endpoint }}'
    backup: yes
  when: not is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint
  notify: restart kubelet

- name: Update server field in kube-proxy kubeconfig
  shell: >-
    {{ bin_dir }}/kubectl get configmap kube-proxy -n kube-system -o yaml
    | sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g'
    | {{ bin_dir }}/kubectl replace -f -
  delegate_to: "{{groups['kube-master']|first}}"
  run_once: true
  when: is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint

- name: Restart all kube-proxy pods to ensure that they load the new configmap
  shell: "{{ bin_dir }}/kubectl delete pod -n kube-system -l k8s-app=kube-proxy"
  delegate_to: "{{groups['kube-master']|first}}"
  run_once: true
  when: is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint

# FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes
- name: Symlink kubelet kubeconfig for calico/canal
  file:
    src: "{{ kube_config_dir }}//kubelet.conf"
    src: "{{ kube_config_dir }}/kubelet.conf"
    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
    state: link
    force: yes

@@ -0,0 +1,13 @@
apiVersion: kubeadm.k8s.io/v1alpha2
kind: NodeConfiguration
clusterName: {{ cluster_name }}
discoveryFile: ""
caCertPath: {{ kube_config_dir }}/ssl/ca.crt
discoveryToken: {{ kubeadm_token }}
tlsBootstrapToken: {{ kubeadm_token }}
token: {{ kubeadm_token }}
discoveryTokenAPIServers:
  - {{ kubeadm_discovery_address | replace("https://", "")}}
discoveryTokenUnsafeSkipCAVerification: true
nodeRegistration:
  name: {{ inventory_hostname }}
@@ -24,6 +24,29 @@ kube_apiserver_storage_backend: etcd3
# By default, force back to etcd2. Set to true to force etcd3 (experimental!)
force_etcd3: false

# audit support
kubernetes_audit: false
audit_log_path: /var/log/audit/kube-apiserver-audit.log
# number of days to retain old audit log files
audit_log_maxage: 30
# number of rotated audit log files to retain
audit_log_maxbackups: 1
# maximum size in MB of an audit log file before rotation
audit_log_maxsize: 100
# policy file
audit_policy_file: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"

# audit log hostpath
audit_log_name: audit-logs
audit_log_hostpath: /var/log/kubernetes/audit
audit_log_mountpath: /var/log/audit
audit_log_writable: true

# audit policy hostpath
audit_policy_name: audit-policy
audit_policy_hostpath: /etc/kubernetes/audit-policy
audit_policy_mountpath: "{{ audit_policy_hostpath }}"
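
These defaults presumably map onto the standard kube-apiserver audit flags in the generated manifest (the manifest template is not part of this hunk); rendered, the relevant excerpt would look roughly like:

``` bash
# Audit-related portion of the kube-apiserver command line.
kube-apiserver \
  --audit-log-path=/var/log/audit/kube-apiserver-audit.log \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=1 \
  --audit-log-maxsize=100 \
  --audit-policy-file=/etc/kubernetes/audit-policy/apiserver-audit-policy.yaml
```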

# Limits for kube components
kube_controller_memory_limit: 512M
kube_controller_cpu_limit: 250m
@@ -41,7 +64,7 @@ kube_apiserver_cpu_limit: 800m
kube_apiserver_memory_requests: 256M
kube_apiserver_cpu_requests: 100m

# Admission control plug-ins
# 1.9 and below Admission control plug-ins
kube_apiserver_admission_control:
  - Initializers
  - NamespaceLifecycle
@@ -56,6 +79,12 @@ kube_apiserver_admission_control:
{%- endif -%}
  - ResourceQuota

# 1.10+ admission plugins
kube_apiserver_enable_admission_plugins: []

# 1.10+ list of disabled admission plugins
kube_apiserver_disable_admission_plugins: []

# extra runtime config
kube_api_runtime_config:
  - admissionregistration.k8s.io/v1alpha1

@@ -65,14 +65,38 @@
  command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
  changed_when: false

- name: Create audit-policy directory
  file: path={{ kube_config_dir }}/audit-policy state=directory
  when: kubernetes_audit|default(false)

- name: Write api audit policy yaml
  template:
    src: apiserver-audit-policy.yaml.j2
    dest: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
  when: kubernetes_audit|default(false)

- name: gets the kubeadm version
  command: "{{ bin_dir }}/kubeadm version -o short"
  register: kubeadm_output

- name: sets kubeadm api version to v1alpha1
  set_fact:
    kubeadmConfig_api_version: v1alpha1
  when: kubeadm_output.stdout|version_compare('v1.11.0', '<')

- name: defaults kubeadm api version to v1alpha2
  set_fact:
    kubeadmConfig_api_version: v1alpha2
  when: kubeadm_output.stdout|version_compare('v1.11.0', '>=')

- name: kubeadm | Create kubeadm config
  template:
    src: kubeadm-config.yaml.j2
    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
    src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2"
    dest: "{{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml"
  register: kubeadm_config

- name: kubeadm | Initialize first master
  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml --ignore-preflight-errors=all
  register: kubeadm_init
  # Retry is because upload config sometimes fails
  retries: 3
@@ -85,7 +109,7 @@
    timeout -k 240s 240s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --allow-release-candidate-upgrades
@@ -98,7 +122,7 @@

# FIXME(mattymo): remove when https://github.com/kubernetes/kubeadm/issues/433 is fixed
- name: kubeadm | Enable kube-proxy
  command: "{{ bin_dir }}/kubeadm alpha phase addon kube-proxy --config={{ kube_config_dir }}/kubeadm-config.yaml"
  command: "{{ bin_dir }}/kubeadm alpha phase addon kube-proxy --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml"
  when: inventory_hostname == groups['kube-master']|first
  changed_when: false

@@ -135,7 +159,7 @@
  when: inventory_hostname != groups['kube-master']|first

- name: kubeadm | Init other uninitialized masters
  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
  command: timeout -k 240s 240s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml --ignore-preflight-errors=all
  register: kubeadm_init
  when: inventory_hostname != groups['kube-master']|first and not kubeadm_ca.stat.exists
  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
@@ -146,7 +170,7 @@
    timeout -k 240s 240s
    {{ bin_dir }}/kubeadm
    upgrade apply -y {{ kube_version }}
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --config={{ kube_config_dir }}/kubeadm-config.{{ kubeadmConfig_api_version }}.yaml
    --ignore-preflight-errors=all
    --allow-experimental-upgrades
    --allow-release-candidate-upgrades

@@ -3,12 +3,6 @@
  tags:
    - k8s-pre-upgrade

# upstream bug: https://github.com/kubernetes/kubeadm/issues/441
- name: Disable kube_basic_auth until kubeadm/441 is fixed
  set_fact:
    kube_basic_auth: false
  when: kubeadm_enabled|bool|default(false)

- import_tasks: users-file.yml
  when: kube_basic_auth|default(true)

@@ -29,7 +23,7 @@
    - upgrade

- name: Copy kubectl from hyperkube container
  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp /hyperkube /systembindir/kubectl"
  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -f /hyperkube /systembindir/kubectl"
  when: kubectl_task_compare_result.rc != 0
  register: kubectl_task_result
  until: kubectl_task_result.rc == 0

@@ -1,4 +1,19 @@
---
- name: Create audit-policy directory
  file: path={{ kube_config_dir }}/audit-policy state=directory
  tags:
    - kube-apiserver
  when: kubernetes_audit|default(false)

- name: Write api audit policy yaml
  template:
    src: apiserver-audit-policy.yaml.j2
    dest: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"
  notify: Master | Restart apiserver
  tags:
    - kube-apiserver
  when: kubernetes_audit|default(false)

- name: Write kube-apiserver manifest
  template:
    src: manifests/kube-apiserver.manifest.j2

roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2 (new file, 125 lines)
@@ -0,0 +1,125 @@
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*
  # Don't log events requests.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]
  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
@@ -2,17 +2,26 @@
  "kind" : "Policy",
  "apiVersion" : "v1",
  "predicates" : [
    {"name" : "PodFitsHostPorts"},
    {"name" : "PodFitsResources"},
    {"name" : "MaxEBSVolumeCount"},
    {"name" : "MaxGCEPDVolumeCount"},
    {"name" : "MaxAzureDiskVolumeCount"},
    {"name" : "MatchInterPodAffinity"},
    {"name" : "NoDiskConflict"},
    {"name" : "MatchNodeSelector"},
    {"name" : "HostName"}
    {"name" : "GeneralPredicates"},
    {"name" : "CheckNodeMemoryPressure"},
    {"name" : "CheckNodeDiskPressure"},
    {"name" : "CheckNodeCondition"},
    {"name" : "PodToleratesNodeTaints"},
    {"name" : "CheckVolumeBinding"}
  ],
  "priorities" : [
    {"name" : "SelectorSpreadPriority", "weight" : 1},
    {"name" : "InterPodAffinityPriority", "weight" : 1},
    {"name" : "LeastRequestedPriority", "weight" : 1},
    {"name" : "BalancedResourceAllocation", "weight" : 1},
    {"name" : "ServiceSpreadingPriority", "weight" : 1},
    {"name" : "EqualPriority", "weight" : 1}
    {"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
    {"name" : "NodeAffinityPriority", "weight" : 1},
    {"name" : "TaintTolerationPriority", "weight" : 1}
  ],
  "hardPodAffinitySymmetricWeight" : 10
}