woopstar authored 2018-05-02 13:37:15 +02:00
committed by Andreas Kruger
104 changed files with 663 additions and 294 deletions

View File

@@ -22,7 +22,6 @@
failed_when: false
changed_when: false
check_mode: no
when: need_bootstrap.rc != 0
tags:
- facts
@@ -30,24 +29,24 @@
copy:
src: get-pip.py
dest: ~/get-pip.py
when: need_pip != 0
when: need_pip.rc != 0
- name: Bootstrap | Install pip
shell: "{{ansible_python_interpreter}} ~/get-pip.py"
when: need_pip != 0
when: need_pip.rc != 0
- name: Bootstrap | Remove get-pip.py
file:
path: ~/get-pip.py
state: absent
when: need_pip != 0
when: need_pip.rc != 0
- name: Bootstrap | Install pip launcher
copy:
src: runner
dest: /opt/bin/pip
mode: 0755
when: need_pip != 0
when: need_pip.rc != 0
- name: Install required python modules
pip:

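The need_pip != 0 comparisons above were bugs: need_pip is a registered task result, i.e. a dict, so the return code has to be read from its rc attribute, which is what the corrected lines do. A minimal sketch of the kind of probe that presumably registers need_pip earlier in this file (the task name and command are assumptions; the hunk does not show them):

- name: Bootstrap | Check if pip is installed
  shell: "{{ ansible_python_interpreter }} -m pip --version"
  register: need_pip
  failed_when: false
  changed_when: false
  check_mode: no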
View File

@@ -0,0 +1,7 @@
---
- name: Install required packages (SUSE)
package:
name: "{{ item }}"
state: present
with_items:
- python-cryptography

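Not part of this commit, but worth noting: with_items on the package module runs one module invocation per item, while passing the list directly to name lets the underlying package manager handle everything in a single transaction on Ansible versions whose backend supports it. An equivalent sketch:

- name: Install required packages (SUSE)
  package:
    name:
      - python-cryptography
    state: present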
View File

@@ -11,6 +11,9 @@
- import_tasks: bootstrap-centos.yml
when: bootstrap_os == "centos"
- import_tasks: bootstrap-opensuse.yml
when: bootstrap_os == "opensuse"
- import_tasks: setup-pipelining.yml
- name: check if atomic host
@@ -26,18 +29,25 @@
gather_subset: '!all'
filter: ansible_*
- name: Assign inventory name to unconfigured hostnames (non-CoreOS)
- name: Assign inventory name to unconfigured hostnames (non-CoreOS and Tumbleweed)
hostname:
name: "{{inventory_hostname}}"
when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname
when:
- override_system_hostname
- ansible_distribution not in ['openSUSE Tumbleweed']
- ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']
- name: Assign inventory name to unconfigured hostnames (CoreOS only)
- name: Assign inventory name to unconfigured hostnames (CoreOS and Tumbleweed only)
command: "hostnamectl set-hostname {{inventory_hostname}}"
register: hostname_changed
when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and override_system_hostname
when:
- ansible_hostname == 'localhost'
- ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
- override_system_hostname
- name: Update hostname fact (CoreOS only)
- name: Update hostname fact (CoreOS and Tumbleweed only)
setup:
gather_subset: '!all'
filter: ansible_hostname
when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed
when:
- hostname_changed.changed

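A side note on the conditional rewrite above: a YAML list under when: is AND-joined, while an "or" still has to be written inline within a single list item, as the CoreOS/Tumbleweed branch shows. The same pattern in isolation (hypothetical task):

- name: Example | list items AND together, "or" stays inline
  debug:
    msg: "hostname will be set"
  when:
    - override_system_hostname
    - ansible_hostname == 'localhost'
    - ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']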
View File

@@ -15,6 +15,14 @@
tags:
- facts
# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL
# openSUSE version so we can't use it. The only alternative is to use the docker
# packages from the distribution repositories.
- name: Warn about Docker version on SUSE
debug:
msg: "SUSE distributions always install Docker from the distro repos"
when: ansible_pkg_mgr == 'zypper'
- include_tasks: set_facts_dns.yml
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
tags:
@@ -43,7 +51,7 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
with_items: "{{ docker_repo_key_info.repo_keys }}"
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
- name: ensure docker-ce repository is enabled
action: "{{ docker_repo_info.pkg_repo }}"
@@ -51,7 +59,7 @@
repo: "{{item}}"
state: present
with_items: "{{ docker_repo_info.repos }}"
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0)
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0)
- name: ensure docker-engine repository public key is installed
action: "{{ dockerproject_repo_key_info.pkg_key }}"
@@ -64,7 +72,7 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic)
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
- name: ensure docker-engine repository is enabled
action: "{{ dockerproject_repo_info.pkg_repo }}"
@@ -72,7 +80,7 @@
repo: "{{item}}"
state: present
with_items: "{{ dockerproject_repo_info.repos }}"
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
- name: Configure docker repository on RedHat/CentOS
template:
@@ -110,6 +118,12 @@
notify: restart docker
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)
- name: ensure service is started if docker packages are already present
service:
name: docker
state: started
when: docker_task_result is not changed
- name: flush handlers so we can wait for docker to come up
meta: flush_handlers

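The new "ensure service is started" task only works if the package-install task registers its result: docker_task_result is not changed then means the packages were already present, no handler fires, and the service must be started explicitly. A sketch of the registering side, assuming the surrounding install task (not shown in this hunk) looks roughly like this:

- name: ensure docker packages are installed
  action: "{{ docker_package_info.pkg_mgr }}"
  args:
    pkg: "{{ item.name }}"
    state: present
  register: docker_task_result
  with_items: "{{ docker_package_info.pkgs }}"
  notify: restart docker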
View File

@@ -6,7 +6,9 @@
with_items:
- docker
- docker-engine
when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
when:
- ansible_os_family == 'Debian'
- (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
- name: Ensure old versions of Docker are not installed. | RedHat
package:
@@ -17,4 +19,7 @@
- docker-common
- docker-engine
- docker-selinux
when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
when:
- ansible_os_family == 'RedHat'
- (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
- not is_atomic

View File

@@ -7,6 +7,9 @@ Wants=docker-storage-setup.service
{% elif ansible_os_family == "Debian" %}
After=network.target docker.socket
Wants=docker.socket
{% elif ansible_os_family == "Suse" %}
After=network.target containerd.socket containerd.service
Requires=containerd.socket containerd.service
{% endif %}
[Service]
@@ -19,6 +22,9 @@ ExecReload=/bin/kill -s HUP $MAINPID
Delegate=yes
KillMode=process
ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \
{% if ansible_os_family == "Suse" %}
--containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \
{% endif %}
$DOCKER_OPTS \
$DOCKER_STORAGE_OPTIONS \
$DOCKER_NETWORK_OPTIONS \

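Rendered on a SUSE host running Docker 17.03 or newer, the ExecStart above comes out roughly as follows (assuming docker_bin_dir is /usr/bin, which this hunk does not show):

ExecStart=/usr/bin/dockerd \
  --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \
  $DOCKER_OPTS \
  $DOCKER_STORAGE_OPTIONS \
  $DOCKER_NETWORK_OPTIONS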
View File

@@ -0,0 +1,15 @@
---
docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: zypper
pkgs:
- name: docker
docker_repo_key_info:
pkg_key: ''
repo_keys: []
docker_repo_info:
pkg_repo: ''
repos: []

View File

@@ -38,7 +38,7 @@ flannel_version: "v0.10.0"
flannel_cni_version: "v0.3.0"
istio_version: "0.2.6"
vault_version: 0.8.1
weave_version: 2.2.1
weave_version: 2.3.0
pod_infra_version: 3.0
contiv_version: 1.1.7
cilium_version: "v1.0.0-rc8"
@@ -70,16 +70,32 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
istio_proxy_image_repo: docker.io/istio/proxy
istio_proxy_image_tag: "{{ istio_version }}"
istio_proxy_init_image_repo: docker.io/istio/proxy_init
istio_proxy_init_image_tag: "{{ istio_version }}"
istio_ca_image_repo: docker.io/istio/istio-ca
istio_ca_image_tag: "{{ istio_version }}"
istio_mixer_image_repo: docker.io/istio/mixer
istio_mixer_image_tag: "{{ istio_version }}"
istio_pilot_image_repo: docker.io/istio/pilot
istio_pilot_image_tag: "{{ istio_version }}"
istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
istio_proxy_debug_image_tag: "{{ istio_version }}"
istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
istio_sidecar_initializer_image_tag: "{{ istio_version }}"
istio_statsd_image_repo: prom/statsd-exporter
istio_statsd_image_tag: latest
hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
hyperkube_image_tag: "{{ kube_version }}"
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "xueshanf/install-socat"
install_socat_image_tag: "latest"
netcheck_version: "v1.0"
netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
netcheck_version: "v1.2.2"
netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
netcheck_agent_tag: "{{ netcheck_version }}"
netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server"
netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
netcheck_server_tag: "{{ netcheck_version }}"
weave_kube_image_repo: "weaveworks/weave-kube"
weave_kube_image_tag: "{{ weave_version }}"
@@ -134,13 +150,15 @@ registry_image_repo: "registry"
registry_image_tag: "2.6"
registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
registry_proxy_image_tag: "0.4"
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
local_volume_provisioner_image_tag: "v2.0.0"
cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
cephfs_provisioner_image_tag: "92295a30"
cephfs_provisioner_image_tag: "a71a49d4"
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
ingress_nginx_controller_image_tag: "0.11.0"
ingress_nginx_controller_image_tag: "0.14.0"
ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
ingress_nginx_default_backend_image_tag: "1.4"
cert_manager_version: "v0.2.3"
cert_manager_version: "v0.2.4"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
@@ -197,6 +215,70 @@ downloads:
mode: "0755"
groups:
- kube-master
istio_proxy:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_proxy_image_repo }}"
tag: "{{ istio_proxy_image_tag }}"
sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
groups:
- kube-node
istio_proxy_init:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_proxy_init_image_repo }}"
tag: "{{ istio_proxy_init_image_tag }}"
sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
groups:
- kube-node
istio_ca:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_ca_image_repo }}"
tag: "{{ istio_ca_image_tag }}"
sha256: "{{ istio_ca_digest_checksum|default(None) }}"
groups:
- kube-node
istio_mixer:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_mixer_image_repo }}"
tag: "{{ istio_mixer_image_tag }}"
sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
groups:
- kube-node
istio_pilot:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_pilot_image_repo }}"
tag: "{{ istio_pilot_image_tag }}"
sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
groups:
- kube-node
istio_proxy_debug:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_proxy_debug_image_repo }}"
tag: "{{ istio_proxy_debug_image_tag }}"
sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
groups:
- kube-node
istio_sidecar_initializer:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_sidecar_initializer_image_repo }}"
tag: "{{ istio_sidecar_initializer_image_tag }}"
sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
groups:
- kube-node
istio_statsd:
enabled: "{{ istio_enabled }}"
container: true
repo: "{{ istio_statsd_image_repo }}"
tag: "{{ istio_statsd_image_tag }}"
sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
groups:
- kube-node
hyperkube:
enabled: true
container: true
@@ -451,6 +533,14 @@ downloads:
sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
groups:
- kube-node
local_volume_provisioner:
enabled: "{{ local_volume_provisioner_enabled }}"
container: true
repo: "{{ local_volume_provisioner_image_repo }}"
tag: "{{ local_volume_provisioner_image_tag }}"
sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
groups:
- kube-node
cephfs_provisioner:
enabled: "{{ cephfs_provisioner_enabled }}"
container: true

View File

@@ -2,12 +2,11 @@
- name: container_download | Make download decision if pull is required by tag or sha256
include_tasks: set_docker_image_facts.yml
delegate_to: "{{ download_delegate if download_run_once or omit }}"
delegate_facts: no
delegate_facts: yes
run_once: "{{ download_run_once }}"
when:
- download.enabled
- download.container
- group_names | intersect(download.groups) | length
tags:
- facts
@@ -24,7 +23,6 @@
- download.enabled
- download.container
- pull_required|default(download_always_pull)
- group_names | intersect(download.groups) | length
delegate_to: "{{ download_delegate }}"
delegate_facts: yes
run_once: yes

View File

@@ -22,3 +22,4 @@
- item.value.enabled
- item.value.container
- download_run_once
- group_names | intersect(download.groups) | length

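The group_names | intersect(download.groups) | length condition, consolidated here and removed from the per-task files below, is truthy only when the current host belongs to at least one of the groups the download targets. The filter in isolation (hypothetical values):

- name: Example | does this host need the download?
  debug:
    msg: "download applies here"
  vars:
    download:
      groups: ['kube-node']
  when: group_names | intersect(download.groups) | length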
View File

@@ -7,7 +7,6 @@
when:
- download.enabled
- download.container
- group_names | intersect(download.groups) | length
tags:
- facts
@@ -18,7 +17,7 @@
- download.enabled
- download.container
- download_run_once
- group_names | intersect(download.groups) | length
tags:
- facts
@@ -29,7 +28,6 @@
- download.enabled
- download.container
- download_run_once
- group_names | intersect(download.groups) | length
- name: "container_download | Update the 'container_changed' fact"
set_fact:
@@ -39,14 +37,13 @@
- download.container
- download_run_once
- pull_required|default(download_always_pull)
- group_names | intersect(download.groups) | length
run_once: "{{ download_run_once }}"
tags:
- facts
- name: container_download | Stat saved container image
stat:
path: "{{fname}}"
path: "{{ fname }}"
register: img
changed_when: false
delegate_to: "{{ download_delegate }}"
@@ -57,7 +54,6 @@
- download.enabled
- download.container
- download_run_once
- group_names | intersect(download.groups) | length
tags:
- facts
@@ -73,7 +69,6 @@
- download_run_once
- (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
- (container_changed or not img.stat.exists)
- group_names | intersect(download.groups) | length
- name: container_download | copy container images to ansible host
synchronize:
@@ -93,7 +88,6 @@
- inventory_hostname == download_delegate
- download_delegate != "localhost"
- saved.changed
- group_names | intersect(download.groups) | length
- name: container_download | upload container images to nodes
synchronize:
@@ -115,7 +109,6 @@
- (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
inventory_hostname != download_delegate or
download_delegate == "localhost")
- group_names | intersect(download.groups) | length
tags:
- upload
- upgrade
@@ -128,7 +121,6 @@
- download_run_once
- (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
inventory_hostname != download_delegate or download_delegate == "localhost")
- group_names | intersect(download.groups) | length
tags:
- upload
- upgrade

View File

@@ -32,6 +32,12 @@ etcd_election_timeout: "5000"
etcd_metrics: "basic"
## A dictionary of extra environment variables to add to etcd.env, formatted like:
## etcd_extra_vars:
## ETCD_VAR1: "value1"
## ETCD_VAR2: "value2"
etcd_extra_vars: {}
# Limits
# Limit memory only if <4GB memory on host. 0=unlimited
etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}"

View File

@@ -8,6 +8,8 @@
/etc/pki/ca-trust/source/anchors/etcd-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/etcd-ca.pem
{%- elif ansible_os_family == "Suse" -%}
/etc/pki/trust/anchors/etcd-ca.pem
{%- endif %}
tags:
- facts
@@ -19,9 +21,9 @@
remote_src: true
register: etcd_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
command: update-ca-certificates
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract

View File

@@ -27,3 +27,7 @@ ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}
{% for key, value in etcd_extra_vars.iteritems() %}
{{ key }}={{ value }}
{% endfor %}

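One caveat worth flagging: iteritems() only exists on Python 2 dicts, so this template breaks once Ansible runs under Python 3. The Python-3-safe spelling (an editorial note, not part of the commit) would be:

{% for key, value in etcd_extra_vars.items() %}
{{ key }}={{ value }}
{% endfor %}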
View File

@@ -7,3 +7,6 @@ rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ['*']

View File

@@ -104,6 +104,7 @@
- rbac_enabled
- cloud_provider is defined
- cloud_provider == 'vsphere'
- vsphere_cloud_provider.rc is defined
- vsphere_cloud_provider.rc != 0
- kube_version | version_compare('v1.9.0', '>=')
- kube_version | version_compare('v1.9.3', '<=')
@@ -121,6 +122,7 @@
- rbac_enabled
- cloud_provider is defined
- cloud_provider == 'vsphere'
- vsphere_cloud_provider.rc is defined
- vsphere_cloud_provider.rc != 0
- kube_version | version_compare('v1.9.0', '>=')
- kube_version | version_compare('v1.9.3', '<=')

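The added vsphere_cloud_provider.rc is defined guard protects against the registered task having been skipped: a skipped task still registers a result, but one without an rc attribute, so evaluating .rc != 0 alone would raise an undefined-attribute error. The same pattern in miniature (hypothetical command):

- name: probe something, possibly skipped
  command: /bin/true
  register: probe
  when: some_condition | default(false)

- name: act only on a real non-zero rc
  debug:
    msg: "probe failed"
  when:
    - probe.rc is defined
    - probe.rc != 0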
View File

@@ -30,12 +30,12 @@ spec:
limits:
cpu: {{ elasticsearch_cpu_limit }}
{% if elasticsearch_mem_limit is defined and elasticsearch_mem_limit != "0M" %}
mem: {{ elasticsearch_mem_limit }}
memory: "{{ elasticsearch_mem_limit }}"
{% endif %}
requests:
cpu: {{ elasticsearch_cpu_requests }}
{% if elasticsearch_mem_requests is defined and elasticsearch_mem_requests != "0M" %}
mem: {{ elasticsearch_mem_requests }}
memory: "{{ elasticsearch_mem_requests }}"
{% endif %}
ports:
- containerPort: 9200

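mem is not a resource name the Kubernetes API accepts; the valid compute resources here are cpu and memory, which is what both this template and the kibana one below now emit. With values filled in (illustrative, not the role defaults), the corrected block renders along these lines:

resources:
  limits:
    cpu: 100m
    memory: "512M"
  requests:
    cpu: 100m
    memory: "256M"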
View File

@@ -26,12 +26,12 @@ spec:
limits:
cpu: {{ kibana_cpu_limit }}
{% if kibana_mem_limit is defined and kibana_mem_limit != "0M" %}
mem: {{ kibana_mem_limit }}
memory: "{{ kibana_mem_limit }}"
{% endif %}
requests:
cpu: {{ kibana_cpu_requests }}
{% if kibana_mem_requests is defined and kibana_mem_requests != "0M" %}
mem: {{ kibana_mem_requests }}
memory: "{{ kibana_mem_requests }}"
{% endif %}
env:
- name: "ELASTICSEARCH_URL"

View File

@@ -1,7 +1,4 @@
---
local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
local_volume_provisioner_image_tag: v2.0.0
local_volume_provisioner_namespace: "kube-system"
local_volume_provisioner_base_dir: /mnt/disks
local_volume_provisioner_mount_dir: /mnt/disks

View File

@@ -5,7 +5,7 @@ metadata:
name: certificates.certmanager.k8s.io
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-0.2.8
release: cert-manager
heritage: Tiller
spec:

View File

@@ -5,7 +5,7 @@ metadata:
name: clusterissuers.certmanager.k8s.io
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-0.2.8
release: cert-manager
heritage: Tiller
spec:

View File

@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-0.2.8
release: cert-manager
heritage: Tiller
rules:

View File

@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-0.2.8
release: cert-manager
heritage: Tiller
roleRef:

View File

@@ -6,7 +6,7 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-0.2.8
release: cert-manager
heritage: Tiller
spec:

View File

@@ -5,7 +5,7 @@ metadata:
name: issuers.certmanager.k8s.io
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-0.2.8
release: cert-manager
heritage: Tiller
spec:

View File

@@ -6,6 +6,6 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-0.2.8
release: cert-manager
heritage: Tiller

View File

@@ -1,32 +1,2 @@
---
istio_enabled: false
istio_namespace: istio-system
istio_version: "0.2.6"
istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
istio_proxy_image_repo: docker.io/istio/proxy
istio_proxy_image_tag: "{{ istio_version }}"
istio_proxy_init_image_repo: docker.io/istio/proxy_init
istio_proxy_init_image_tag: "{{ istio_version }}"
istio_ca_image_repo: docker.io/istio/istio-ca
istio_ca_image_tag: "{{ istio_version }}"
istio_mixer_image_repo: docker.io/istio/mixer
istio_mixer_image_tag: "{{ istio_version }}"
istio_pilot_image_repo: docker.io/istio/pilot
istio_pilot_image_tag: "{{ istio_version }}"
istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
istio_proxy_debug_image_tag: "{{ istio_version }}"
istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
istio_sidecar_initializer_image_tag: "{{ istio_version }}"
istio_statsd_image_repo: prom/statsd-exporter
istio_statsd_image_tag: latest

View File

@@ -27,12 +27,6 @@ dependencies:
- apps
- registry
- role: kubernetes-apps/metrics
when: prometheus_operator_enabled
tags:
- apps
- metrics
# istio role should be last because it takes a long time to initialize and
# will cause timeouts trying to start other addons.
- role: kubernetes-apps/istio

View File

@@ -1,9 +0,0 @@
---
# Prometheus Operator. Needs for k8s metrics. Installed Helm is required.
prometheus_operator_enabled: false
# K8s cluster metrics. Installed Helm and Prometheus Operators are required.
k8s_metrics_enabled: false
# Separate namespace for monitoring/metrics
monitoring_namespace: "monitoring"

View File

@@ -1,32 +0,0 @@
---
- name: Metrics | Make sure Helm is installed
command: "{{ bin_dir }}/helm version"
register: helm_ready_result
until: helm_ready_result|succeeded
retries: 4
delay: 5
when:
- prometheus_operator_enabled
- inventory_hostname == groups['kube-master'][0]
- name: Metrics | Add coreos repo
command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/"
when:
- prometheus_operator_enabled
- inventory_hostname == groups['kube-master'][0]
run_once: true
- name: Metrics | Install Prometheus Operator
command: "{{ bin_dir }}/helm upgrade --install prometheus-operator coreos/prometheus-operator --namespace {{ monitoring_namespace }}"
when:
- prometheus_operator_enabled
- inventory_hostname == groups['kube-master'][0]
run_once: true
- name: Metrics | Install K8s cluster metrics
command: "{{ bin_dir }}/helm upgrade --install kube-prometheus coreos/kube-prometheus --namespace {{ monitoring_namespace }}"
when:
- prometheus_operator_enabled
- k8s_metrics_enabled
- inventory_hostname == groups['kube-master'][0]
run_once: true

View File

@@ -34,7 +34,7 @@
{{ bin_dir }}/kubectl get secrets --all-namespaces
-o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
| grep kubernetes.io/service-account-token
| egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller'
| egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner'
register: tokens_to_delete
when: needs_rotation

View File

@@ -55,7 +55,7 @@
- name: Copy kubectl binary to ansible host
fetch:
src: "{{ bin_dir }}/kubectl"
dest: "{{ bin_dir }}/kubectl"
dest: "{{ artifacts_dir }}/kubectl"
flat: yes
validate_checksum: no
become: no
@@ -68,8 +68,6 @@
#!/bin/bash
kubectl --kubeconfig=admin.conf $@
dest: "{{ artifacts_dir }}/kubectl.sh"
owner: root
group: root
mode: 0755
become: no
run_once: yes

View File

@@ -52,7 +52,7 @@ kube_apiserver_admission_control:
{%- if kube_version | version_compare('v1.9', '<') -%}
GenericAdmissionWebhook
{%- else -%}
ValidatingAdmissionWebhook
MutatingAdmissionWebhook,ValidatingAdmissionWebhook
{%- endif -%}
- ResourceQuota
@@ -73,7 +73,9 @@ kube_oidc_auth: false
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: oidc:
## Variables for custom flags
apiserver_custom_flags: []

View File

@@ -9,6 +9,10 @@
- {src: apiserver-key.pem, dest: apiserver.key}
- {src: ca.pem, dest: ca.crt}
- {src: ca-key.pem, dest: ca.key}
- {src: front-proxy-ca.pem, dest: front-proxy-ca.crt}
- {src: front-proxy-ca-key.pem, dest: front-proxy-ca.key}
- {src: front-proxy-client.pem, dest: front-proxy-client.crt}
- {src: front-proxy-client-key.pem, dest: front-proxy-client.key}
- {src: service-account-key.pem, dest: sa.pub}
- {src: service-account-key.pem, dest: sa.key}
register: kubeadm_copy_old_certs

View File

@@ -73,9 +73,15 @@ spec:
{% if kube_oidc_username_claim is defined %}
- --oidc-username-claim={{ kube_oidc_username_claim }}
{% endif %}
{% if kube_oidc_username_prefix is defined %}
- "--oidc-username-prefix={{ kube_oidc_username_prefix }}"
{% endif %}
{% if kube_oidc_groups_claim is defined %}
- --oidc-groups-claim={{ kube_oidc_groups_claim }}
{% endif %}
{% if kube_oidc_groups_prefix is defined %}
- "--oidc-groups-prefix={{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %}
- --secure-port={{ kube_apiserver_port }}
- --insecure-port={{ kube_apiserver_insecure_port }}
@@ -111,7 +117,7 @@ spec:
- --feature-gates={{ kube_feature_gates|join(',') }}
{% endif %}
{% if kube_version | version_compare('v1.9', '>=') %}
- --requestheader-client-ca-file={{ kube_cert_dir }}/ca.pem
- --requestheader-client-ca-file={{ kube_cert_dir }}/front-proxy-ca.pem
- --requestheader-allowed-names=front-proxy-client
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group

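With, for example, kube_oidc_username_claim: sub, kube_oidc_username_prefix: 'oidc:', kube_oidc_groups_claim: groups and kube_oidc_groups_prefix: 'oidc:' set (illustrative values), the new OIDC branches above render as:

- --oidc-username-claim=sub
- "--oidc-username-prefix=oidc:"
- --oidc-groups-claim=groups
- "--oidc-groups-prefix=oidc:"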
View File

@@ -29,6 +29,7 @@ spec:
- --leader-elect=true
- --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
{% if volume_cross_zone_attachment %}
- --use-legacy-policy-config
- --policy-config-file={{ kube_config_dir }}/kube-scheduler-policy.yaml
{% endif %}
- --profiling=false

View File

@@ -92,3 +92,48 @@ kube_cadvisor_port: 0
# The read-only port for the Kubelet to serve on with no authentication/authorization.
kube_read_only_port: 0
# sysctl_file_path to add sysctl conf to
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
# For the OpenStack integration, kubelet needs credentials to access
# OpenStack APIs like Nova and Cinder. By default these values are
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')| default(lookup('env','OS_PROJECT_ID')|default(lookup('env','OS_PROJECT_NAME'),true)) }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
# Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
vsphere_scsi_controller_type: pvscsi
# vsphere_public_network is the name of the network the VMs are joined to
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_route_table_name:

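All of these lookup('env', ...) calls read the environment of the machine running Ansible at play time, so the working assumption is that an OpenStack RC file (or the vSphere equivalents) is sourced before the playbook starts. A typical session, with placeholder values:

export OS_AUTH_URL=https://keystone.example.com:5000/v3
export OS_USERNAME=deployer
export OS_PASSWORD=secret
export OS_REGION_NAME=RegionOne
export OS_PROJECT_ID=0123456789abcdef
ansible-playbook -i inventory/hosts.ini cluster.yml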
View File

@@ -61,6 +61,7 @@
name: net.ipv4.ip_local_reserved_ports
value: "{{ kube_apiserver_node_port_range }}"
sysctl_set: yes
sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: yes
when: kube_apiserver_node_port_range is defined
@@ -96,6 +97,7 @@
sysctl:
name: "{{ item }}"
state: present
sysctl_file: "{{ sysctl_file_path }}"
value: 1
reload: yes
when: sysctl_bridge_nf_call_iptables.rc == 0
@@ -118,6 +120,19 @@
tags:
- kube-proxy
- name: Persist ip_vs modules
copy:
dest: /etc/modules-load.d/kube_proxy-ipvs.conf
content: |
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
when: kube_proxy_mode == 'ipvs'
tags:
- kube-proxy
- name: Write proxy manifest
template:
src: manifests/kube-proxy.manifest.j2
@@ -134,6 +149,14 @@
tags:
- kube-proxy
- include_tasks: "{{ cloud_provider }}-credential-check.yml"
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags:
- cloud-provider
- facts
- name: Write cloud-config
template:
src: "{{ cloud_provider }}-cloud-config.j2"

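The modules-load.d file above only takes effect at the next boot (or when systemd-modules-load is re-run); presumably a companion task loads the modules immediately, e.g. with the stock modprobe module. A sketch, not shown in this hunk:

- name: Load ip_vs modules now
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
    - nf_conntrack_ipv4
  when: kube_proxy_mode == 'ipvs'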
View File

@@ -24,6 +24,15 @@
-v /var/lib/kubelet:/var/lib/kubelet:shared \
-v /var/lib/cni:/var/lib/cni:shared \
-v /var/run:/var/run:rw \
{# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
-v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \
{% endif -%}
{% if local_volume_provisioner_enabled -%}
-v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:rw \
-v {{ local_volume_provisioner_mount_dir }}:{{ local_volume_provisioner_mount_dir }}:rw \
{% endif %}
-v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-v /etc/os-release:/etc/os-release:ro \
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \

View File

@@ -23,9 +23,7 @@ ExecStart={{ bin_dir }}/kubelet \
Restart=always
RestartSec=10s
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
ExecReload={{ docker_bin_dir }}/docker restart kubelet

View File

@@ -7,9 +7,7 @@ Wants=docker.socket
[Service]
User=root
EnvironmentFile=-{{kube_config_dir}}/kubelet.env
{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \

View File

@@ -12,10 +12,7 @@ LimitNOFILE=40000
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
{% if kubelet_flexvolumes_plugins_dir is defined %}
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
{% endif %}
EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
@@ -41,8 +38,17 @@ ExecStart=/usr/bin/rkt run \
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
--volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
--volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
{% if kubelet_flexvolumes_plugins_dir is defined %}
{# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
--volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \
{% endif -%}
{% if local_volume_provisioner_enabled %}
--volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false \
{# Not pretty, but needed to avoid double mount #}
{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
--volume local-volume-provisioner-mount-dir,kind=host,source={{ local_volume_provisioner_mount_dir }},readOnly=false \
{% endif %}
{% endif %}
{% if kubelet_load_modules == true %}
--mount volume=modprobe,target=/usr/sbin/modprobe \
@@ -65,8 +71,17 @@ ExecStart=/usr/bin/rkt run \
--mount volume=var-lib-kubelet,target=/var/lib/kubelet \
--mount volume=var-log,target=/var/log \
--mount volume=hosts,target=/etc/hosts \
{% if kubelet_flexvolumes_plugins_dir is defined %}
{# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
--mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
{% endif -%}
{% if local_volume_provisioner_enabled %}
--mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \
{# Not pretty, but needed to avoid double mount #}
{% if local_volume_provisioner_base_dir not in local_volume_provisioner_mount_dir and local_volume_provisioner_mount_dir not in local_volume_provisioner_base_dir %}
--mount volume=local-volume-provisioner-mount-dir,target={{ local_volume_provisioner_mount_dir }} \
{% endif %}
{% endif %}
--stage1-from-dir=stage1-fly.aci \
{% if kube_hyperkube_image_repo == "docker" %}

View File

@@ -83,21 +83,21 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{# Kubelet node labels #}
{% set role_node_labels = [] %}
{% if inventory_hostname in groups['kube-master'] %}
{% do role_node_labels.append('node-role.kubernetes.io/master=true') %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/master=true') %}
{% if not standalone_kubelet|bool %}
{% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
{% else %}
{% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
{% if inventory_hostname in groups['kube-ingress']|default([]) %}
{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
{% set dummy = role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
{% endif %}
{% set inventory_node_labels = [] %}
{% if node_labels is defined %}
{% for labelname, labelvalue in node_labels.iteritems() %}
{% do inventory_node_labels.append(labelname + '=' + labelvalue) %}
{% endfor %}
{% for labelname, labelvalue in node_labels.iteritems() %}
{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
{% endfor %}
{% endif %}
{% set all_node_labels = role_node_labels + inventory_node_labels %}
@@ -110,9 +110,7 @@ DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %}
{% if kubelet_flexvolumes_plugins_dir is defined %}
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
{% endif %}
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"

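The switch from {% do ... %} to {% set dummy = ... %} is about portability: the do statement requires the jinja2.ext.do extension to be enabled, while assigning the (discarded) return value of append() to a throwaway variable works in any Jinja2 environment. In isolation:

{% set labels = [] %}
{% set dummy = labels.append('node-role.kubernetes.io/node=true') %}
{{ labels | join(',') }}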
View File

@@ -8,7 +8,7 @@ epel_enabled: false
common_required_pkgs:
- python-httplib2
- openssl
- "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1_0', 'openssl') }}"
- curl
- rsync
- bash-completion
@@ -23,35 +23,6 @@ disable_ipv6_dns: false
kube_cert_group: kube-cert
kube_config_dir: /etc/kubernetes
# For the openstack integration kubelet will need credentials to access
# openstack apis like nova and cinder. Per default this values will be
# read from the environment.
openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}"
openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}"
# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
# Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}"
vsphere_scsi_controller_type: pvscsi
# vsphere_public_network is name of the network the VMs are joined to
vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
@@ -60,3 +31,5 @@ resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
populate_inventory_to_hosts_file: true
preinstall_selinux_state: permissive
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

View File

@@ -15,7 +15,7 @@
notify: Preinstall | restart network
when: dhclientconffile is defined
- name: Configue dhclient hooks for resolv.conf (non-RH)
- name: Configure dhclient hooks for resolv.conf (non-RH)
template:
src: dhclient_dnsupdate.sh.j2
dest: "{{ dhclienthookfile }}"
@@ -24,7 +24,7 @@
notify: Preinstall | restart network
when: ansible_os_family != "RedHat"
- name: Configue dhclient hooks for resolv.conf (RH-only)
- name: Configure dhclient hooks for resolv.conf (RH-only)
template:
src: dhclient_dnsupdate_rh.sh.j2
dest: "{{ dhclienthookfile }}"

View File

@@ -3,6 +3,12 @@
tags:
- asserts
# This is run before bin_dir is pinned because these tasks are run on localhost
- import_tasks: pre_upgrade.yml
run_once: true
tags:
- upgrade
- name: Force binaries directory for Container Linux by CoreOS
set_fact:
bin_dir: "/opt/bin"
@@ -71,14 +77,6 @@
- cloud-provider
- facts
- include_tasks: "{{ cloud_provider }}-credential-check.yml"
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags:
- cloud-provider
- facts
- name: Create cni directories
file:
path: "{{ item }}"
@@ -99,6 +97,20 @@
- contiv
- bootstrap-os
- name: Create local volume provisioner directories
file:
path: "{{ item }}"
state: directory
owner: kube
with_items:
- "{{ local_volume_provisioner_base_dir }}"
- "{{ local_volume_provisioner_mount_dir }}"
when:
- inventory_hostname in groups['k8s-cluster']
- local_volume_provisioner_enabled
tags:
- persistent_volumes
- import_tasks: resolvconf.yml
when:
- dns_mode != 'none'
@@ -146,6 +158,15 @@
- not is_atomic
tags: bootstrap-os
- name: Update package management cache (zypper) - SUSE
shell: zypper -n --gpg-auto-import-keys ref
register: make_cache_output
until: make_cache_output|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when:
- ansible_pkg_mgr == 'zypper'
tags: bootstrap-os
- name: Update package management cache (APT)
apt:
@@ -224,12 +245,6 @@
tags:
- bootstrap-os
- name: set default sysctl file path
set_fact:
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
tags:
- bootstrap-os
- name: Stat sysctl file configuration
stat:
path: "{{sysctl_file_path}}"

View File

@@ -0,0 +1,28 @@
---
- name: "Pre-upgrade | check if old credential dir exists"
local_action:
module: stat
path: "{{ inventory_dir }}/../credentials"
vars:
ansible_python_interpreter: "/usr/bin/env python"
register: old_credential_dir
become: no
- name: "Pre-upgrade | check if new credential dir exists"
local_action:
module: stat
path: "{{ inventory_dir }}/credentials"
vars:
ansible_python_interpreter: "/usr/bin/env python"
register: new_credential_dir
become: no
when: old_credential_dir.stat.exists
- name: "Pre-upgrade | move data from old credential dir to new"
local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials
args:
creates: "{{ inventory_dir }}/credentials"
vars:
ansible_python_interpreter: "/usr/bin/env python"
become: no
when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists

View File

@@ -12,7 +12,7 @@
- name: Stop if unknown OS
assert:
that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS']
that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'CoreOS', 'Container Linux by CoreOS', 'openSUSE Leap', 'openSUSE Tumbleweed']
ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if unknown network plugin
@@ -94,4 +94,4 @@
assert:
that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=')
when: kube_network_plugin == 'cilium'
ignore_errors: "{{ ignore_assert_errors }}"

View File

@@ -0,0 +1,4 @@
---
required_pkgs:
- device-mapper
- ebtables

View File

@@ -1,3 +1,4 @@
---
kube_cert_group: kube-cert
kube_vault_mount_path: kube
front_proxy_vault_mount_path: front-proxy

View File

@@ -72,6 +72,15 @@ else
openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
fi
# Front proxy client CA
if [ -e "$SSLDIR/front-proxy-ca-key.pem" ]; then
# Reuse existing front proxy CA
cp $SSLDIR/{front-proxy-ca.pem,front-proxy-ca-key.pem} .
else
openssl genrsa -out front-proxy-ca-key.pem 2048 > /dev/null 2>&1
openssl req -x509 -new -nodes -key front-proxy-ca-key.pem -days 36500 -out front-proxy-ca.pem -subj "/CN=front-proxy-ca" > /dev/null 2>&1
fi
gen_key_and_cert() {
local name=$1
local subject=$2
@@ -80,6 +89,14 @@ gen_key_and_cert() {
openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
}
gen_key_and_cert_front_proxy() {
local name=$1
local subject=$2
openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in ${name}.csr -CA front-proxy-ca.pem -CAkey front-proxy-ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
}
# Admins
if [ -n "$MASTERS" ]; then
@@ -105,7 +122,7 @@ if [ -n "$MASTERS" ]; then
# kube-controller-manager
gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
# metrics aggregator
gen_key_and_cert "front-proxy-client" "/CN=front-proxy-client"
gen_key_and_cert_front_proxy "front-proxy-client" "/CN=front-proxy-client"
for host in $MASTERS; do
cn="${host%%.*}"

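The point of a separate front-proxy CA: the API server treats any client cert signed by the --requestheader-client-ca-file CA as an authentication proxy allowed to set X-Remote-* headers, so reusing the main cluster CA there (as the old requestheader line in the apiserver manifest did) would let every client cert impersonate arbitrary users. A quick way to check which CA actually signed the client cert (illustrative paths):

openssl verify -CAfile front-proxy-ca.pem front-proxy-client.pem   # should succeed
openssl verify -CAfile ca.pem front-proxy-client.pem               # should now fail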
View File

@@ -48,8 +48,11 @@
'{{ kube_cert_dir }}/kube-scheduler-key.pem',
'{{ kube_cert_dir }}/kube-controller-manager.pem',
'{{ kube_cert_dir }}/kube-controller-manager-key.pem',
'{{ kube_cert_dir }}/front-proxy-ca.pem',
'{{ kube_cert_dir }}/front-proxy-ca-key.pem',
'{{ kube_cert_dir }}/front-proxy-client.pem',
'{{ kube_cert_dir }}/front-proxy-client-key.pem',
'{{ kube_cert_dir }}/service-account-key.pem',
{% for host in groups['kube-master'] %}
'{{ kube_cert_dir }}/admin-{{ host }}.pem'
'{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
@@ -71,7 +74,9 @@
{% for cert in ['apiserver.pem', 'apiserver-key.pem',
'kube-scheduler.pem','kube-scheduler-key.pem',
'kube-controller-manager.pem','kube-controller-manager-key.pem',
'front-proxy-client.pem','front-proxy-client-key.pem'] -%}
'front-proxy-ca.pem','front-proxy-ca-key.pem',
'front-proxy-client.pem','front-proxy-client-key.pem',
'service-account-key.pem'] -%}
{% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
{% if not cert_file in existing_certs -%}
{%- set gen = True -%}

View File

@@ -73,6 +73,8 @@
'kube-scheduler-key.pem',
'kube-controller-manager.pem',
'kube-controller-manager-key.pem',
'front-proxy-ca.pem',
'front-proxy-ca-key.pem',
'front-proxy-client.pem',
'front-proxy-client-key.pem',
'service-account-key.pem',
@@ -85,6 +87,8 @@
'admin-{{ inventory_hostname }}-key.pem',
'apiserver.pem',
'apiserver-key.pem',
'front-proxy-ca.pem',
'front-proxy-ca-key.pem',
'front-proxy-client.pem',
'front-proxy-client-key.pem',
'service-account-key.pem',

View File

@@ -52,6 +52,11 @@
"{{ hostvars[host]['ip'] }}",
{%- endif -%}
{%- endfor -%}
{%- if supplementary_addresses_in_ssl_keys is defined -%}
{%- for ip_item in supplementary_addresses_in_ssl_keys -%}
"{{ ip_item }}",
{%- endfor -%}
{%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
]
issue_cert_path: "{{ item }}"
@@ -98,6 +103,8 @@
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "front-proxy-client"
issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}"
issue_cert_ca_filename: front-proxy-ca.pem
issue_cert_alt_names: "{{ kube_cert_alt_names }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
@@ -110,12 +117,17 @@
"{{ hostvars[host]['ip'] }}",
{%- endif -%}
{%- endfor -%}
{%- if supplementary_addresses_in_ssl_keys is defined -%}
{%- for ip_item in supplementary_addresses_in_ssl_keys -%}
"{{ ip_item }}",
{%- endfor -%}
{%- endif -%}
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
]
issue_cert_path: "{{ item }}"
issue_cert_role: front-proxy-client
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
issue_cert_mount_path: "{{ front_proxy_vault_mount_path }}"
with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master']
notify: set secret_changed

View File

@@ -32,7 +32,7 @@
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_is_cert: true
sync_file_owner: kube
with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem"]
with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
- name: sync_kube_master_certs | Set facts for kube master components sync_file results
set_fact:
@@ -44,6 +44,18 @@
set_fact:
sync_file_results: []
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
vars:
sync_file: front-proxy-ca.pem
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_owner: kube
- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
set_fact:
sync_file_results: []
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
vars:
sync_file: "{{ item }}"

View File

@@ -8,6 +8,8 @@
/etc/pki/ca-trust/source/anchors/kube-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/kube-ca.pem
{%- elif ansible_os_family == "Suse" -%}
/etc/pki/trust/anchors/kube-ca.pem
{%- endif %}
tags:
- facts
@@ -19,9 +21,9 @@
remote_src: true
register: kube_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
command: update-ca-certificates
when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract

View File

@@ -5,7 +5,7 @@ bootstrap_os: none
# Use proxycommand if bastion host is in group all
# This change obsoletes editing the ansible.cfg file depending on bastion existence
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
kube_api_anonymous_auth: false
@@ -129,6 +129,10 @@ kube_apiserver_insecure_port: 8080
# Aggregator
kube_api_aggregator_routing: false
# Docker options
# Optionally do not run docker role
manage_docker: true
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
@@ -219,6 +223,10 @@ vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
# Local volume provisioner dirs
local_volume_provisioner_base_dir: /mnt/disks
local_volume_provisioner_mount_dir: /mnt/disks
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue

View File

@@ -1,7 +1,7 @@
---
# Limits
weave_memory_limits: 400M
weave_cpu_limits: 30m
weave_cpu_limits: 300m
weave_memory_requests: 64M
weave_cpu_requests: 10m

View File

@@ -34,3 +34,13 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family == "RedHat"
- name: install rkt pkg on openSUSE
zypper:
name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
state: present
register: rkt_task_result
until: rkt_task_result|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family == "Suse"

roles/rkt/vars/suse.yml
View File

@@ -0,0 +1,2 @@
---
rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm"

View File

@@ -97,6 +97,11 @@ vault_ca_options:
format: pem
ttl: "{{ vault_max_lease_ttl }}"
exclude_cn_from_sans: true
front_proxy:
common_name: front-proxy
format: pem
ttl: "{{ vault_max_lease_ttl }}"
exclude_cn_from_sans: true
vault_client_headers:
Accept: "application/json"
@@ -164,11 +169,18 @@ vault_pki_mounts:
allow_any_name: true
enforce_hostnames: false
organization: "system:node-proxier"
front_proxy:
name: front-proxy
default_lease_ttl: "{{ vault_default_lease_ttl }}"
max_lease_ttl: "{{ vault_max_lease_ttl }}"
description: "Kubernetes Front Proxy CA"
cert_dir: "{{ vault_kube_cert_dir }}"
roles:
- name: front-proxy-client
group: k8s-cluster
password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}"
password: "{{ lookup('password', inventory_dir + '/credentials/vault/front-proxy-client.creds length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
enforce_hostnames: false
organization: "system:front-proxy"

View File

@@ -57,6 +57,7 @@
gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.etcd }}"
gen_ca_copy_group: "etcd"
when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed
- import_tasks: gen_vault_certs.yml

View File

@@ -6,8 +6,9 @@
create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}"
create_mount_description: "{{ item.description }}"
create_mount_cert_dir: "{{ item.cert_dir }}"
create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name
create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name and item.name != vault_pki_mounts.front_proxy.name
with_items:
- "{{ vault_pki_mounts.vault }}"
- "{{ vault_pki_mounts.etcd }}"
- "{{ vault_pki_mounts.kube }}"
- "{{ vault_pki_mounts.front_proxy }}"

View File

@@ -32,6 +32,15 @@
gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.kube }}"
gen_ca_copy_group: "kube-master"
when: inventory_hostname in groups.vault
- include_tasks: ../shared/gen_ca.yml
vars:
gen_ca_cert_dir: "{{ vault_pki_mounts.front_proxy.cert_dir }}"
gen_ca_mount_path: "{{ vault_pki_mounts.front_proxy.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.front_proxy }}"
when: inventory_hostname in groups.vault
- include_tasks: ../shared/auth_backend.yml
@@ -46,6 +55,7 @@
- "{{ vault_pki_mounts.vault }}"
- "{{ vault_pki_mounts.etcd }}"
- "{{ vault_pki_mounts.kube }}"
- "{{ vault_pki_mounts.front_proxy }}"
loop_control:
loop_var: mount
when: inventory_hostname in groups.vault

View File

@@ -24,9 +24,12 @@
mode: 0644
when: vault_ca_gen.status == 200
- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key locally"
- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key to necessary hosts"
copy:
content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}"
dest: "{{ gen_ca_cert_dir }}/ca-key.pem"
mode: 0640
when: vault_ca_gen.status == 200
delegate_to: "{{ item }}"
with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"

View File

@@ -6,6 +6,7 @@
# issue_cert_alt_name: Requested Subject Alternative Names, in a list.
# issue_cert_common_name: Common Name included in the cert
# issue_cert_copy_ca: Copy issuing CA cert needed
# issue_cert_ca_filename: Filename for copied issuing CA cert (default ca.pem)
# issue_cert_dir_mode: Mode of the placed cert directory
# issue_cert_file_group: Group of the placed cert file and directory
# issue_cert_file_mode: Mode of the placed cert file
@@ -100,7 +101,7 @@
- name: issue_cert | Copy issuing CA cert
copy:
content: "{{ issue_cert_result['json']['data']['issuing_ca'] }}\n"
dest: "{{ issue_cert_path | dirname }}/ca.pem"
dest: "{{ issue_cert_path | dirname }}/{{ issue_cert_ca_filename | default('ca.pem') }}"
group: "{{ issue_cert_file_group | d('root' )}}"
mode: "{{ issue_cert_file_mode | d('0644') }}"
owner: "{{ issue_cert_file_owner | d('root') }}"