diff --git a/Vagrantfile b/Vagrantfile
index 9db4be3a1..536bbff2b 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -3,7 +3,7 @@

 require 'fileutils'

-Vagrant.require_version ">= 1.9.0"
+Vagrant.require_version ">= 2.0.0"

 CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")

@@ -135,12 +135,6 @@ Vagrant.configure("2") do |config|

     config.vm.network :private_network, ip: ip

-    # workaround for Vagrant 1.9.1 and centos vm
-    # https://github.com/hashicorp/vagrant/issues/8096
-    if Vagrant::VERSION == "1.9.1" && $os == "centos"
-      config.vm.provision "shell", inline: "service network restart", run: "always"
-    end
-
     # Disable swap for each vm
     config.vm.provision "shell", inline: "swapoff -a"

@@ -164,7 +158,7 @@ Vagrant.configure("2") do |config|
       if File.exist?(File.join(File.dirname($inventory), "hosts"))
         ansible.inventory_path = $inventory
       end
-      ansible.sudo = true
+      ansible.become = true
       ansible.limit = "all"
       ansible.host_key_checking = false
       ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
diff --git a/ansible.cfg b/ansible.cfg
index d3102a6f4..6f381690e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -13,3 +13,4 @@ callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+jinja2_extensions = jinja2.ext.do
diff --git a/contrib/terraform/openstack/ansible_bastion_template.txt b/contrib/terraform/openstack/ansible_bastion_template.txt
index cdf012066..a304b2c9d 100644
--- a/contrib/terraform/openstack/ansible_bastion_template.txt
+++ b/contrib/terraform/openstack/ansible_bastion_template.txt
@@ -1 +1 @@
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
+ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf
index a426202b9..e56a792c2 100644
--- a/contrib/terraform/openstack/modules/network/outputs.tf
+++ b/contrib/terraform/openstack/modules/network/outputs.tf
@@ -2,6 +2,6 @@ output "router_id" {
   value = "${openstack_networking_router_interface_v2.k8s.id}"
 }

-output "network_id" {
+output "subnet_id" {
   value = "${openstack_networking_subnet_v2.k8s.id}"
 }
diff --git a/docs/vagrant.md b/docs/vagrant.md
index 042e8137b..de47159fa 100644
--- a/docs/vagrant.md
+++ b/docs/vagrant.md
@@ -1,7 +1,7 @@
 Vagrant Install
 =================

-Assuming you have Vagrant (1.9+) installed with virtualbox (it may work
+Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
 with vmware, but is untested) you should be able to launch a 3 node
 Kubernetes cluster by simply running `$ vagrant up`.
diff --git a/docs/vars.md b/docs/vars.md
index 5ea76b0e5..a4ae65678 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -118,6 +118,14 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
 * *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
   By default autodetection is used to match Docker configuration.

+* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
+  For example, labels can be set in the inventory as variables or more widely in group_vars.
+  *node_labels* must be defined as a dict:
+```
+node_labels:
+  label1_name: label1_value
+  label2_name: label2_value
+```

 ##### Custom flags for Kube Components

 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index 3e208bdaf..031108767 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -6,7 +6,6 @@
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system

 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
@@ -214,6 +213,10 @@ ingress_nginx_enabled: false
 # ingress_nginx_configmap_udp_services:
 #   53: "kube-system/kube-dns:53"

+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+
 # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
 persistent_volumes_enabled: false
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
index b6574fd27..831330175 100644
--- a/roles/dnsmasq/tasks/main.yml
+++ b/roles/dnsmasq/tasks/main.yml
@@ -91,7 +91,7 @@
 - name: Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml
index 817de877b..0fa300989 100644
--- a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml
+++ b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 subjects:
   - kind: ServiceAccount
     name: dnsmasq
-    namespace: "{{ system_namespace}}"
+    namespace: "kube-system"
 roleRef:
   kind: ClusterRole
   name: cluster-admin
diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml
index 838471050..0fb6045e8 100644
--- a/roles/dnsmasq/templates/dnsmasq-deploy.yml
+++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: dnsmasq
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     k8s-app: dnsmasq
     kubernetes.io/cluster-service: "true"
diff --git a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml
index bce8a232f..91e98feee 100644
--- a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml
+++ b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/dnsmasq/templates/dnsmasq-svc.yml b/roles/dnsmasq/templates/dnsmasq-svc.yml
index 54dc0aa97..f00d3d3dd 100644
--- a/roles/dnsmasq/templates/dnsmasq-svc.yml
+++ b/roles/dnsmasq/templates/dnsmasq-svc.yml
@@ -6,7 +6,7 @@ metadata:
     kubernetes.io/cluster-service: 'true'
     k8s-app: dnsmasq
   name: dnsmasq
-  namespace: {{system_namespace}}
+  namespace: kube-system
 spec:
   ports:
     - port: 53
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index aa10371f5..3ed3e9ce7 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -21,6 +21,10 @@ docker_dns_servers_strict: yes

 docker_container_storage_setup: false

+# Used to override obsoletes=0
+yum_conf: /etc/yum.conf
+docker_yum_conf: /etc/yum_docker.conf
+
 # CentOS/RedHat docker-ce repo
 docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
 docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 80b917114..729397b44 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -30,6 +30,8 @@
   tags:
     - facts

+- import_tasks: pre-upgrade.yml
+
 - name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:
@@ -78,11 +80,27 @@
     dest: "/etc/yum.repos.d/docker.repo"
   when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

+- name: Copy yum.conf for editing
+  copy:
+    src: "{{ yum_conf }}"
+    dest: "{{ docker_yum_conf }}"
+    remote_src: yes
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
+- name: Edit copy of yum.conf to set obsoletes=0
+  lineinfile:
+    path: "{{ docker_yum_conf }}"
+    state: present
+    regexp: '^obsoletes='
+    line: 'obsoletes=0'
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     pkg: "{{item.name}}"
     force: "{{item.force|default(omit)}}"
+    conf_file: "{{item.yum_conf|default(omit)}}"
     state: present
   register: docker_task_result
   until: docker_task_result|succeeded
diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml
new file mode 100644
index 000000000..9315da305
--- /dev/null
+++ b/roles/docker/tasks/pre-upgrade.yml
@@ -0,0 +1,20 @@
+---
+- name: Ensure old versions of Docker are not installed. | Debian
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-engine
+  when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+
+- name: Ensure old versions of Docker are not installed. | RedHat
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-common
+    - docker-engine
+    - docker-selinux
+  when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
\ No newline at end of file
diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml
index 39ba211d8..cd53e284c 100644
--- a/roles/docker/vars/redhat.yml
+++ b/roles/docker/vars/redhat.yml
@@ -28,7 +28,9 @@ docker_package_info:
   pkg_mgr: yum
   pkgs:
     - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
     - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"

 docker_repo_key_info:
   pkg_key: ''
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 26f4c89e0..21b6bc72d 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -70,8 +70,8 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "quay.io/coreos/hyperkube"
-hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"
@@ -124,7 +124,6 @@ fluentd_image_tag: "{{ fluentd_version }}"
 kibana_version: "v4.6.1"
 kibana_image_repo: "gcr.io/google_containers/kibana"
 kibana_image_tag: "{{ kibana_version }}"
-
 helm_version: "v2.8.1"
 helm_image_repo: "lachlanevenson/k8s-helm"
 helm_image_tag: "{{ helm_version }}"
@@ -132,6 +131,11 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
 tiller_image_tag: "{{ helm_version }}"
 vault_image_repo: "vault"
 vault_image_tag: "{{ vault_version }}"
+cert_manager_version: "v0.2.3"
+cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
+cert_manager_controller_image_tag: "{{ cert_manager_version }}"
+cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
+cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"

 downloads:
   netcheck_server:
@@ -140,18 +144,24 @@ downloads:
   netcheck_server:
     enabled: "{{ deploy_netchecker }}"
     container: true
     repo: "{{ netcheck_server_img_repo }}"
     tag: "{{ netcheck_server_tag }}"
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   netcheck_agent:
     enabled: "{{ deploy_netchecker }}"
     container: true
     repo: "{{ netcheck_agent_img_repo }}"
     tag: "{{ netcheck_agent_tag }}"
     sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   etcd:
     enabled: true
     container: true
     repo: "{{ etcd_image_repo }}"
     tag: "{{ etcd_image_tag }}"
     sha256: "{{ etcd_digest_checksum|default(None) }}"
+    groups:
+      - etcd
   kubeadm:
     enabled: "{{ kubeadm_enabled }}"
     file: true
@@ -163,6 +173,8 @@
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - k8s-cluster
   istioctl:
     enabled: "{{ istio_enabled }}"
     file: true
@@ -174,140 +186,186 @@
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - kube-master
   hyperkube:
     enabled: true
     container: true
     repo: "{{ hyperkube_image_repo }}"
     tag: "{{ hyperkube_image_tag }}"
     sha256: "{{ hyperkube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   cilium:
     enabled: "{{ kube_network_plugin == 'cilium' }}"
     container: true
     repo: "{{ cilium_image_repo }}"
}}" tag: "{{ cilium_image_tag }}" sha256: "{{ cilium_digest_checksum|default(None) }}" + groups: + - k8s-cluster flannel: enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" container: true repo: "{{ flannel_image_repo }}" tag: "{{ flannel_image_tag }}" sha256: "{{ flannel_digest_checksum|default(None) }}" + groups: + - k8s-cluster flannel_cni: enabled: "{{ kube_network_plugin == 'flannel' }}" container: true repo: "{{ flannel_cni_image_repo }}" tag: "{{ flannel_cni_image_tag }}" sha256: "{{ flannel_cni_digest_checksum|default(None) }}" + groups: + - k8s-cluster calicoctl: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calicoctl_image_repo }}" tag: "{{ calicoctl_image_tag }}" sha256: "{{ calicoctl_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_node: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_node_image_repo }}" tag: "{{ calico_node_image_tag }}" sha256: "{{ calico_node_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_cni: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_cni_image_repo }}" tag: "{{ calico_cni_image_tag }}" sha256: "{{ calico_cni_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_policy: enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_policy_image_repo }}" tag: "{{ calico_policy_image_tag }}" sha256: "{{ calico_policy_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_rr: enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}" container: true repo: "{{ calico_rr_image_repo }}" tag: "{{ calico_rr_image_tag }}" sha256: "{{ calico_rr_digest_checksum|default(None) }}" + groups: + - calico-rr weave_kube: enabled: "{{ kube_network_plugin == 'weave' }}" container: true repo: "{{ weave_kube_image_repo }}" tag: "{{ weave_kube_image_tag }}" sha256: "{{ weave_kube_digest_checksum|default(None) }}" + groups: + - k8s-cluster weave_npc: enabled: "{{ kube_network_plugin == 'weave' }}" container: true repo: "{{ weave_npc_image_repo }}" tag: "{{ weave_npc_image_tag }}" sha256: "{{ weave_npc_digest_checksum|default(None) }}" + groups: + - k8s-cluster contiv: enabled: "{{ kube_network_plugin == 'contiv' }}" container: true repo: "{{ contiv_image_repo }}" tag: "{{ contiv_image_tag }}" sha256: "{{ contiv_digest_checksum|default(None) }}" + groups: + - k8s-cluster contiv_auth_proxy: enabled: "{{ kube_network_plugin == 'contiv' }}" container: true repo: "{{ contiv_auth_proxy_image_repo }}" tag: "{{ contiv_auth_proxy_image_tag }}" sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}" + groups: + - k8s-cluster pod_infra: enabled: true container: true repo: "{{ pod_infra_image_repo }}" tag: "{{ pod_infra_image_tag }}" sha256: "{{ pod_infra_digest_checksum|default(None) }}" + groups: + - k8s-cluster install_socat: enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}" container: true repo: "{{ install_socat_image_repo }}" tag: "{{ install_socat_image_tag }}" sha256: "{{ install_socat_digest_checksum|default(None) }}" + groups: + - k8s-cluster nginx: - enabled: true + enabled: "{{ loadbalancer_apiserver_localhost }}" container: true repo: "{{ nginx_image_repo }}" tag: "{{ nginx_image_tag }}" sha256: "{{ nginx_digest_checksum|default(None) }}" + groups: + - 
   dnsmasq:
     enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
     container: true
     repo: "{{ dnsmasq_image_repo }}"
     tag: "{{ dnsmasq_image_tag }}"
     sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubedns:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubedns_image_repo }}"
     tag: "{{ kubedns_image_tag }}"
     sha256: "{{ kubedns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   coredns:
     enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
     container: true
     repo: "{{ coredns_image_repo }}"
     tag: "{{ coredns_image_tag }}"
     sha256: "{{ coredns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_nanny:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_nanny_image_repo }}"
     tag: "{{ dnsmasq_nanny_image_tag }}"
     sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_sidecar:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_sidecar_image_repo }}"
     tag: "{{ dnsmasq_sidecar_image_tag }}"
     sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubednsautoscaler:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubednsautoscaler_image_repo }}"
     tag: "{{ kubednsautoscaler_image_tag }}"
     sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   testbox:
-    enabled: true
+    enabled: false
     container: true
     repo: "{{ test_image_repo }}"
     tag: "{{ test_image_tag }}"
@@ -318,30 +376,40 @@
     repo: "{{ elasticsearch_image_repo }}"
     tag: "{{ elasticsearch_image_tag }}"
     sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   fluentd:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ fluentd_image_repo }}"
     tag: "{{ fluentd_image_tag }}"
     sha256: "{{ fluentd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kibana:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ kibana_image_repo }}"
     tag: "{{ kibana_image_tag }}"
     sha256: "{{ kibana_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   helm:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ helm_image_repo }}"
     tag: "{{ helm_image_tag }}"
     sha256: "{{ helm_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   tiller:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ tiller_image_repo }}"
     tag: "{{ tiller_image_tag }}"
     sha256: "{{ tiller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   vault:
     enabled: "{{ cert_management == 'vault' }}"
     container: "{{ vault_deployment_type != 'host' }}"
@@ -356,6 +424,24 @@
     unarchive: true
     url: "{{ vault_download_url }}"
     version: "{{ vault_version }}"
+    groups:
+      - vault
+  cert_manager_controller:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_controller_image_repo }}"
+    tag: "{{ cert_manager_controller_image_tag }}"
+    sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  cert_manager_ingress_shim:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_ingress_shim_image_repo }}"
+    tag: "{{ cert_manager_ingress_shim_image_tag }}"
+    sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
+    groups:
+      - kube-node

 download_defaults:
   container: false
diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml
index bbf7cec85..a5659619c 100644
--- a/roles/download/tasks/download_container.yml
+++ b/roles/download/tasks/download_container.yml
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
@@ -23,6 +24,7 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   delegate_to: "{{ download_delegate }}"
   delegate_facts: yes
   run_once: yes
@@ -38,3 +40,4 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml
index 664fa4728..832fec41e 100644
--- a/roles/download/tasks/download_file.yml
+++ b/roles/download/tasks/download_file.yml
@@ -13,6 +13,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length

 - name: file_download | Download item
   get_url:
@@ -28,6 +29,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length

 - name: file_download | Extract archives
   unarchive:
@@ -40,3 +42,4 @@
     - download.enabled
     - download.file
     - download.unarchive|default(False)
+    - group_names | intersect(download.groups) | length
diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml
index a15f78cde..1ca84ad67 100644
--- a/roles/download/tasks/sync_container.yml
+++ b/roles/download/tasks/sync_container.yml
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
@@ -17,6 +18,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
@@ -27,6 +29,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length

 - name: "container_download | Update the 'container_changed' fact"
   set_fact:
@@ -36,6 +39,7 @@
     - download.container
     - download_run_once
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   run_once: "{{ download_run_once }}"
   tags:
     - facts
@@ -53,6 +57,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
@@ -68,6 +73,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
     - (container_changed or not img.stat.exists)
+    - group_names | intersect(download.groups) | length

 - name: container_download | copy container images to ansible host
   synchronize:
@@ -87,6 +93,7 @@
     - inventory_hostname == download_delegate
     - download_delegate != "localhost"
     - saved.changed
+    - group_names | intersect(download.groups) | length

 - name: container_download | upload container images to nodes
   synchronize:
@@ -108,6 +115,7 @@
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
@@ -120,6 +128,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 5f16db1d1..6c13810c5 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -12,9 +12,9 @@ etcd_cert_group: root
 # Note: This does not set up DNS entries. It simply adds the following DNS
 # entries to the certificate
 etcd_cert_alt_names:
-  - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
-  - "etcd.{{ system_namespace }}.svc"
-  - "etcd.{{ system_namespace }}"
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
   - "etcd"

 etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
@@ -22,12 +22,12 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"

 etcd_heartbeat_interval: "250"
 etcd_election_timeout: "5000"

-#etcd_snapshot_count: "10000"
+# etcd_snapshot_count: "10000"

 # Parameters for ionice
 # -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
 # -n takes an integer between 0 (highest priority) and 7 (lowest priority)
-#etcd_ionice: "-c2 -n0"
+# etcd_ionice: "-c2 -n0"

 etcd_metrics: "basic"
diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml
index 247b2ae00..a0a80e108 100644
--- a/roles/etcd/handlers/backup.yml
+++ b/roles/etcd/handlers/backup.yml
@@ -48,7 +48,7 @@
     snapshot save {{ etcd_backup_directory }}/snapshot.db
   environment:
     ETCDCTL_API: 3
-    ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
   retries: 3
   delay: "{{ retry_stagger | random + 3 }}"
diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml
index d7d3920c6..d39ba62d4 100644
--- a/roles/etcd/tasks/configure.yml
+++ b/roles/etcd/tasks/configure.yml
@@ -9,8 +9,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - name: Configure | Check if member is in etcd-events cluster
   shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"
@@ -22,8 +22,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - name: Configure | Copy etcd.service systemd file
   template:
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
index 104ef22df..5a7061880 100644
--- a/roles/etcd/tasks/join_etcd-events_member.yml
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - include_tasks: refresh_config.yml
   vars:
@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
index b7801f0c9..d11037151 100644
--- a/roles/etcd/tasks/join_etcd_member.yml
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - include_tasks: refresh_config.yml
   vars:
@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/join_member.yml b/roles/etcd/tasks/join_member.yml
index b7801f0c9..d11037151 100644
--- a/roles/etcd/tasks/join_member.yml
+++ b/roles/etcd/tasks/join_member.yml
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - include_tasks: refresh_config.yml
   vars:
@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index bb299126b..a64d9b097 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -29,13 +29,13 @@
   tags:
     - upgrade

-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: configure.yml
+- include_tasks: configure.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup

 - name: Restart etcd if certs changed
@@ -68,8 +68,8 @@
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state insted of `new`.
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml
index 68e738031..d0202943c 100644
--- a/roles/etcd/tasks/set_cluster_health.yml
+++ b/roles/etcd/tasks/set_cluster_health.yml
@@ -9,8 +9,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - name: Configure | Check if etcd-events cluster is healthy
   shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
@@ -22,5 +22,5 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/templates/openssl.conf.j2 b/roles/etcd/templates/openssl.conf.j2
index 48327f0bf..2f4f7e262 100644
--- a/roles/etcd/templates/openssl.conf.j2
+++ b/roles/etcd/templates/openssl.conf.j2
@@ -1,4 +1,4 @@
-[req]
+{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name

@@ -25,19 +25,18 @@ authorityKeyIdentifier=keyid:always,issuer

 [alt_names]
 DNS.1 = localhost
 {% for host in groups['etcd'] %}
-DNS.{{ 1 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
 {% endfor %}
-{% if loadbalancer_apiserver is defined %}
-{% set idx = groups['etcd'] | length | int + 2 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
 {% endif %}
-{% set idx = groups['etcd'] | length | int + 3 %}
 {% for etcd_alt_name in etcd_cert_alt_names %}
-DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }}
+DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
 {% endfor %}
 {% for host in groups['etcd'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
 {% endfor %}
-{% set idx = groups['etcd'] | length | int * 2 + 1 %}
-IP.{{ idx }} = 127.0.0.1
+IP.{{ counter["ip"] }} = 127.0.0.1
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 5f8356cf9..e77f1e799 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -2,7 +2,7 @@
 - name: Kubernetes Apps | Delete old CoreDNS resources
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
@@ -16,7 +16,7 @@
 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "deploy"
     state: absent
@@ -28,7 +28,7 @@
 - name: Kubernetes Apps | Delete old KubeDNS resources
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
@@ -41,7 +41,7 @@
 - name: Kubernetes Apps | Delete kubeadm KubeDNS
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
index ce56bd5d1..4c9ad5c74 100644
--- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml
+++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
@@ -22,7 +22,7 @@
 - name: Kubernetes Apps | Start dashboard
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index 55d417982..ceb667f69 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -37,7 +37,7 @@
 - name: Kubernetes Apps | Start Resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
@@ -50,6 +50,10 @@
     - dns_mode != 'none'
     - inventory_hostname == groups['kube-master'][0]
     - not item|skipped
+  register: resource_result
+  until: resource_result|succeeded
+  retries: 4
+  delay: 5
   tags:
     - dnsmasq
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
index 6c49d047f..89becd5b4 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
@@ -15,4 +15,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
index 983d2579f..360480c1e 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: EnsureExists
 data:
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
index 30128d566..5cba6f1f0 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
index db5682354..64d9c4dae 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
index c5b76b0b5..193de10eb 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
index b1ba1481d..5f0a40cb3 100644
--- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
@@ -25,7 +25,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard-certs
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 type: Opaque

 ---
@@ -37,7 +37,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system

 ---
 # ------------------- Dashboard Role & Role Binding ------------------- #
@@ -46,7 +46,7 @@ kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
 - apiGroups: [""]
@@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -89,7 +89,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system

 ---
 # ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
@@ -103,7 +103,7 @@ rules:
     resources: ["services/proxy"]
     resourceNames: ["https:kubernetes-dashboard:"]
     verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
+- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

 ---
@@ -128,7 +128,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   replicas: 1
   revisionHistoryLimit: 10
@@ -200,7 +200,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   ports:
     - port: 443
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
index f80d3d90c..e29ed4dac 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
@@ -17,7 +17,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
index eb76f2d4e..3b11c6b9f 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
@@ -17,11 +17,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: cluster-proportional-autoscaler
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-proportional-autoscaler
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
index 542ae86ce..4c440f653 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
@@ -17,4 +17,4 @@ kind: ServiceAccount
 apiVersion: v1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
index df92ee615..d7c30eceb 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
@@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"
@@ -40,7 +40,7 @@ spec:
             memory: "10Mi"
         command:
           - /cluster-proportional-autoscaler
-          - --namespace={{ system_namespace }}
+          - --namespace=kube-system
           - --configmap=kubedns-autoscaler
           # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
           - --target=Deployment/kube-dns
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
index 682bdf491..cfce65f0e 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kube-dns
-  namespace: "{{system_namespace}}"
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
index f399fd6f4..296a3a938 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
index 1c4710db1..6bc5f9240 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index c576586a2..fefa7caeb 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -126,32 +126,3 @@
     - kube_version | version_compare('v1.9.3', '<=')
     - inventory_hostname == groups['kube-master'][0]
   tags: vsphere
-
-# This is not a cluster role, but should be run after kubeconfig is set on master
-- name: Write kube system namespace manifest
-  template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
-  tags:
-    - apps
diff --git a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
index 9bdf201a2..f2e115a6a 100644
--- a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
+++ b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
@@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: "{{system_namespace}}"
+  name: "kube-system"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
index 8abbe2317..b6055132b 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
@@ -10,7 +10,7 @@
   when: rbac_enabled

 - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
   with_items:
     - "efk-sa.yml"
     - "efk-clusterrolebinding.yml"
@@ -24,7 +24,7 @@
   register: es_deployment_manifest

 - name: "ElasticSearch | Create ES deployment"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
   run_once: true
   when: es_deployment_manifest.changed

@@ -35,6 +35,6 @@
   register: es_service_manifest

 - name: "ElasticSearch | Create ES service"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
   run_once: true
   when: es_service_manifest.changed
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
index a5aba61ae..dd5b9b630 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: efk
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
index e79e26be8..75d75f650 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index 6d5382e09..ee2eb8b21 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: elasticsearch-logging-v1
-  namespace: "{{ system_namespace }}"
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: "{{ elasticsearch_image_tag }}"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
index b7558f9d9..789ecb215 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
index c91bf6827..f444c79b6 100644
--- a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
@@ -17,6 +17,6 @@
   register: fluentd_ds_manifest

 - name: "Fluentd | Create fluentd daemonset"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
   run_once: true
   when: fluentd_ds_manifest.changed
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
index 8a8ebbcec..b7de44dc0 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: fluentd-config
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 data:
   {{ fluentd_config_file }}: |
     # This configuration file for Fluentd / td-agent is used
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
index 960a79e89..f23a8851c 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: "fluentd-es-v{{ fluentd_version }}"
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: fluentd-es
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
index ea8568286..424b313b8 100644
--- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
@@ -10,7 +10,7 @@
     filename: "{{kube_config_dir}}/kibana-deployment.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "deployment"
     state: "latest"
   with_items: "{{ kibana_deployment_manifest.changed }}"
@@ -27,7 +27,7 @@
     filename: "{{kube_config_dir}}/kibana-service.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "svc"
     state: "latest"
   with_items: "{{ kibana_service_manifest.changed }}"
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
index c48413bd0..4fdf54c04 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: k8s-app: kibana-logging kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 index 241b896f0..5cff3c628 100644 --- a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 +++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: kibana-logging - namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: k8s-app: kibana-logging kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml index 9a3bca1ef..3b80ecbb2 100644 --- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml @@ -2,7 +2,7 @@ cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner cephfs_provisioner_image_tag: 92295a30 -cephfs_provisioner_namespace: "{{ system_namespace }}" +cephfs_provisioner_namespace: "kube-system" cephfs_provisioner_cluster: ceph cephfs_provisioner_monitors: [] cephfs_provisioner_admin_id: admin diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml index dd2e8a147..ea5dcb079 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -2,7 +2,7 @@ local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner local_volume_provisioner_image_tag: v2.0.0 -local_volume_provisioner_namespace: "{{ system_namespace }}" +local_volume_provisioner_namespace: "kube-system" local_volume_provisioner_base_dir: /mnt/disks local_volume_provisioner_mount_dir: /mnt/disks local_volume_provisioner_storage_class: local-storage diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 06e97aff2..e7b387944 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -18,7 +18,7 @@ - name: Helm | Apply Helm Manifests (RBAC) kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" @@ -28,7 +28,7 @@ - name: Helm | Install/upgrade helm command: > - {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }} + {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace=kube-system {% if helm_skip_refresh %} --skip-refresh{% endif %} {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %} {% if rbac_enabled %} --service-account=tiller{% endif %} diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml index 0c8db4c78..00694181e 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml 
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: tiller - namespace: {{ system_namespace }} + namespace: kube-system subjects: - kind: ServiceAccount name: tiller - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: ClusterRole name: cluster-admin diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml index 26e575fb6..606dbb147 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-sa.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: tiller - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md new file mode 100644 index 000000000..b0f008676 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md @@ -0,0 +1,17 @@ +Deployment files +================ + +This directory contains example deployment manifests for cert-manager that can +be used in place of the official Helm chart. + +This is useful if you are deploying cert-manager into an environment without +Helm, or want to inspect a 'bare minimum' deployment. + +Where do these come from? +------------------------- + +The manifests in these subdirectories are generated from the Helm chart +automatically. The `values.yaml` files used to configure cert-manager can be +found in [`hack/deploy`](../../hack/deploy/). + +They are automatically generated by running `./hack/update-deploy-gen.sh`. diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml new file mode 100644 index 000000000..bc6bceb15 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml @@ -0,0 +1,6 @@ +--- +cert_manager_namespace: "cert-manager" +cert_manager_cpu_requests: 10m +cert_manager_cpu_limits: 30m +cert_manager_memory_requests: 32Mi +cert_manager_memory_limits: 200Mi diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml new file mode 100644 index 000000000..eeb29da2d --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml @@ -0,0 +1,38 @@ +--- + +- name: Cert Manager | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/cert_manager" + state: directory + owner: root + group: root + mode: 0755 + +- name: Cert Manager | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}" + with_items: + - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns } + - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa } + - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole } + - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding } + - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd } + - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd } + - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd } + - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy } + register: 
cert_manager_manifests + when: + - inventory_hostname == groups['kube-master'][0] + +- name: Cert Manager | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ cert_manager_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}" + state: "latest" + with_items: "{{ cert_manager_manifests.results }}" + when: + - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 new file mode 100644 index 000000000..48d0c5b49 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 @@ -0,0 +1,21 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + scope: Namespaced + names: + kind: Certificate + plural: certificates + shortNames: + - cert + - certs + diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 new file mode 100644 index 000000000..86601e098 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + names: + kind: ClusterIssuer + plural: clusterissuers + scope: Cluster diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 new file mode 100644 index 000000000..9d36de5cb --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 @@ -0,0 +1,25 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "issuers", "clusterissuers"] + verbs: ["*"] + - apiGroups: [""] + # TODO: remove endpoints once 0.4 is released. We include it here in case + # users use the 'master' version of the Helm chart with a 0.2.x release of + # cert-manager that still performs leader election with Endpoint resources. + # We advise users don't do this, but some will anyway and this will reduce + # friction. 
+ resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"] + verbs: ["*"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["*"] diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 new file mode 100644 index 000000000..d0e481c6c --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 new file mode 100644 index 000000000..ef66bef05 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + replicas: 1 + template: + metadata: + labels: + k8s-app: cert-manager + release: cert-manager + annotations: + spec: + serviceAccountName: cert-manager + containers: + - name: cert-manager + image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --cluster-resource-namespace=$(POD_NAMESPACE) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: {{ cert_manager_cpu_requests }} + memory: {{ cert_manager_memory_requests }} + limits: + cpu: {{ cert_manager_cpu_limits }} + memory: {{ cert_manager_memory_limits }} + + - name: ingress-shim + image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ cert_manager_cpu_requests }} + memory: {{ cert_manager_memory_requests }} + limits: + cpu: {{ cert_manager_cpu_limits }} + memory: {{ cert_manager_memory_limits }} + diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 new file mode 100644 index 000000000..7e344d9f9 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: issuers.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + names: + kind: Issuer + plural: issuers + scope: Namespaced diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 new file mode 
100644 index 000000000..7cf3a282d --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cert_manager_namespace }} + labels: + name: {{ cert_manager_namespace }} diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 new file mode 100644 index 000000000..ccdd5f430 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 index b88bb9d6f..e65a440b0 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 @@ -20,6 +20,9 @@ spec: labels: k8s-app: ingress-nginx version: v{{ ingress_nginx_controller_image_tag }} + annotations: + prometheus.io/port: '10254' + prometheus.io/scrape: 'true' spec: {% if ingress_nginx_host_network %} hostNetwork: true @@ -78,3 +81,4 @@ spec: {% if rbac_enabled %} serviceAccountName: ingress-nginx {% endif %} + diff --git a/roles/kubernetes-apps/ingress_controller/meta/main.yml b/roles/kubernetes-apps/ingress_controller/meta/main.yml index da2e03ecc..617e9d9a7 100644 --- a/roles/kubernetes-apps/ingress_controller/meta/main.yml +++ b/roles/kubernetes-apps/ingress_controller/meta/main.yml @@ -6,3 +6,10 @@ dependencies: - apps - ingress-nginx - ingress-controller + + - role: kubernetes-apps/ingress_controller/cert_manager + when: cert_manager_enabled + tags: + - apps + - cert-manager + - ingress-controller diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml index f17e45c7a..4c8295c1e 100644 --- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -2,7 +2,7 @@ - name: Start Calico resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index cbe4f0ac7..3640fe762 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -2,7 +2,7 @@ - name: Canal | Start Resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml index 2359fe2d4..5d90bdb01 100755 --- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml +++ 
b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml @@ -2,7 +2,7 @@ - name: Cilium | Start Resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" @@ -11,7 +11,7 @@ when: inventory_hostname == groups['kube-master'][0] and not item|skipped - name: Cilium | Wait for pods to run - command: "{{bin_dir}}/kubectl -n {{system_namespace}} get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" + command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" register: pods_not_ready until: pods_not_ready.stdout.find("cilium")==-1 retries: 30 diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml index 330acc1cd..5289296dc 100644 --- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml @@ -3,7 +3,7 @@ - name: Contiv | Create Kubernetes resources kube: name: "{{ item.item.name }}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item.item.type }}" filename: "{{ contiv_config_dir }}/{{ item.item.file }}" diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml index 09603a794..bdf954bf9 100644 --- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -2,7 +2,7 @@ - name: Flannel | Start Resources kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml index 66d900d55..53ad953b5 100644 --- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -5,7 +5,7 @@ kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/weave-net.yml" resource: "ds" - namespace: "{{system_namespace}}" + namespace: "kube-system" state: "latest" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index ba1162799..62e929f41 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -12,7 +12,7 @@ name: calico-policy-controller kubectl: "{{bin_dir}}/kubectl" resource: rs - namespace: "{{ system_namespace }}" + namespace: "kube-system" state: absent run_once: true @@ -32,7 +32,7 @@ - name: Start of Calico kube controllers kube: name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 index 7e1311b92..d7083e3e6 100644 --- 
a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 @@ -2,7 +2,7 @@ apiVersion: apps/v1beta2 kind: Deployment metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: calico-kube-controllers kubernetes.io/cluster-service: "true" @@ -15,7 +15,7 @@ spec: template: metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" k8s-app: calico-kube-controllers diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 index 82c2f3e44..d05e986a4 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 @@ -3,7 +3,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - "" diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 index 38853a413..2e5118481 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 index bf8958976..e42e89d18 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: calico-kube-controllers - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/registry/defaults/main.yml b/roles/kubernetes-apps/registry/defaults/main.yml index 93d1cfa2a..a626435d5 100644 --- a/roles/kubernetes-apps/registry/defaults/main.yml +++ b/roles/kubernetes-apps/registry/defaults/main.yml @@ -4,6 +4,6 @@ registry_image_tag: 2.6 registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy registry_proxy_image_tag: 0.4 -registry_namespace: "{{ system_namespace }}" +registry_namespace: "kube-system" registry_storage_class: "" registry_disk_size: "10Gi" diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml index 52101ae16..3884a3a65 100644 --- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml +++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml @@ -44,5 +44,5 @@ when: needs_rotation - name: Rotate Tokens | Delete pods in system namespace - command: "{{ bin_dir }}/kubectl delete pods -n {{ system_namespace }} --all" + command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all" when: needs_rotation diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 303c1a88a..6325bb31c 100644 --- 
a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -96,4 +96,5 @@ volume_cross_zone_attachment: false ## Encrypting Secret Data at Rest kube_encrypt_secret_data: false kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}" -kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm +# Must be either: aescbc, secretbox or aesgcm +kube_encryption_algorithm: "aescbc" diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml index a9f938318..58eaaa66f 100644 --- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml +++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml @@ -9,4 +9,6 @@ - {src: apiserver-key.pem, dest: apiserver.key} - {src: ca.pem, dest: ca.crt} - {src: ca-key.pem, dest: ca.key} + - {src: service-account-key.pem, dest: sa.pub} + - {src: service-account-key.pem, dest: sa.key} register: kubeadm_copy_old_certs diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index 3a9fe6417..56e57b015 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -30,4 +30,7 @@ with_items: - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] when: kube_apiserver_manifest_replaced.changed - run_once: true + register: remove_master_container + retries: 4 + until: remove_master_container.rc == 0 + delay: 5 \ No newline at end of file diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index 844421d32..0eccb4918 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -38,7 +38,7 @@ apiServerExtraArgs: apiserver-count: "{{ kube_apiserver_count }}" {% if kube_version | version_compare('v1.9', '>=') %} endpoint-reconciler-type: lease -{% endif %} +{% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" {% if kube_basic_auth|default(true) %} @@ -90,3 +90,6 @@ apiServerCertSANs: {% endfor %} certificatesDir: {{ kube_config_dir }}/ssl unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" +{% if kube_override_hostname|default('') %} +nodeName: {{ kube_override_hostname }} +{% endif %} diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 350a27a18..687ca415d 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-apiserver - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-apiserver kubespray: v2 @@ -63,7 +63,7 @@ spec: {% if kube_token_auth|default(true) %} - --token-auth-file={{ kube_token_dir }}/known_tokens.csv {% endif %} - - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} - --oidc-issuer-url={{ kube_oidc_url }} - --oidc-client-id={{ kube_oidc_client_id }} diff --git 
a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 2b4282a2e..012372496 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-controller-manager - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-controller-manager annotations: @@ -29,7 +29,7 @@ spec: - controller-manager - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml - --leader-elect=true - - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index b13fc7fa3..a4023365e 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-scheduler - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: kube-scheduler annotations: diff --git a/roles/kubernetes/master/vars/main.yml b/roles/kubernetes/master/vars/main.yml deleted file mode 100644 index a5eba4f2b..000000000 --- a/roles/kubernetes/master/vars/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -namespace_kubesystem: - apiVersion: v1 - kind: Namespace - metadata: - name: "{{system_namespace}}" diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 4d5fa5df5..78e6d92d6 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -134,6 +134,19 @@ tags: - kube-proxy +- name: Write cloud-config + template: + src: "{{ cloud_provider }}-cloud-config.j2" + dest: "{{ kube_config_dir }}/cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + notify: restart kubelet + tags: + - cloud-provider + # reload-systemd - meta: flush_handlers diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 08fe1644b..cd48fca9c 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -81,18 +81,26 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% endif %} {# Kubelet node labels #} +{% set role_node_labels = [] %} {% if inventory_hostname in groups['kube-master'] %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/master=true') %} {% if not standalone_kubelet|bool %} -{% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} {% elif inventory_hostname in groups['kube-ingress']|default([]) %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/ingress=true{% endset %} +{% do 
role_node_labels.append('node-role.kubernetes.io/ingress=true') %} {% else %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} +{% set inventory_node_labels = [] %} +{% if node_labels is defined %} +{% for labelname, labelvalue in node_labels.iteritems() %} +{% do inventory_node_labels.append(labelname + '=' + labelvalue) %} +{% endfor %} +{% endif %} +{% set all_node_labels = role_node_labels + inventory_node_labels %} -KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" +KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 7c8e0062d..18e51069f 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-proxy - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-proxy annotations: @@ -48,7 +48,6 @@ spec: {% elif kube_proxy_mode == 'ipvs' %} - --masquerade-all - --feature-gates=SupportIPVSProxyMode=true - - --proxy-mode=ipvs - --ipvs-min-sync-period=5s - --ipvs-sync-period=5s - --ipvs-scheduler=rr diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 index 2d566cad1..a1e9a7815 100644 --- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: nginx-proxy - namespace: {{system_namespace}} + namespace: kube-system labels: k8s-app: kube-nginx spec: diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index f23040751..aca0c9606 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -256,19 +256,6 @@ tags: - bootstrap-os -- name: Write cloud-config - template: - src: "{{ cloud_provider }}-cloud-config.j2" - dest: "{{ kube_config_dir }}/cloud_config" - group: "{{ kube_cert_group }}" - mode: 0640 - when: - - inventory_hostname in groups['k8s-cluster'] - - cloud_provider is defined - - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] - tags: - - cloud-provider - - import_tasks: etchosts.yml tags: - bootstrap-os diff --git a/roles/kubernetes/secrets/files/make-ssl.sh 
b/roles/kubernetes/secrets/files/make-ssl.sh index 724c6f369..1c34fc69d 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -82,6 +82,17 @@ gen_key_and_cert() { # Admins if [ -n "$MASTERS" ]; then + + # service-account + # If --service-account-private-key-file was previously configured to use apiserver-key.pem then copy that to the new dedicated service-account signing key location to avoid disruptions + if [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then + cp $SSLDIR/apiserver-key.pem $SSLDIR/service-account-key.pem + fi + # Generate dedicated service account signing key if one doesn't exist + if ! [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then + openssl genrsa -out service-account-key.pem 2048 > /dev/null 2>&1 + fi + # kube-apiserver # Generate only if we don't have existing ca and apiserver certs if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 627889771..4780b14d6 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -105,9 +105,9 @@ {%- set certs = {'sync': False} -%} {% if gen_node_certs[inventory_hostname] or (not kubecert_node.results[0].stat.exists|default(False)) or - (not kubecert_node.results[10].stat.exists|default(False)) or - (not kubecert_node.results[7].stat.exists|default(False)) or - (kubecert_node.results[10].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[10].stat.path)|map(attribute="checksum")|first|default('')) -%} + (not kubecert_node.results[12].stat.exists|default(False)) or + (not kubecert_node.results[8].stat.exists|default(False)) or + (kubecert_node.results[12].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[12].stat.path)|map(attribute="checksum")|first|default('')) -%} {%- set _ = certs.update({'sync': True}) -%} {% endif %} {{ certs.sync }} diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 011575358..c39f606ad 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -75,6 +75,7 @@ 'kube-controller-manager-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', + 'service-account-key.pem', {% for node in groups['kube-master'] %} 'admin-{{ node }}.pem', 'admin-{{ node }}-key.pem', @@ -86,6 +87,7 @@ 'apiserver-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', + 'service-account-key.pem', 'kube-scheduler.pem', 'kube-scheduler-key.pem', 'kube-controller-manager.pem', diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index adc875ba6..38902aeef 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -1,4 +1,4 @@ -[req] +{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] @@ -13,31 +13,30 @@ DNS.3 = kubernetes.default.svc DNS.4 = kubernetes.default.svc.{{ dns_domain }} DNS.5 = localhost {% for host in groups['kube-master'] %} -DNS.{{ 5 
+ loop.index }} = {{ host }} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} {% endfor %} -{% set idns = groups['kube-master'] | length | int + 5 %} -{% if loadbalancer_apiserver is defined %} -{% set idns = idns + 1 %} -DNS.{{ idns | string }} = {{ apiserver_loadbalancer_domain_name }} +{% if apiserver_loadbalancer_domain_name is defined %} +DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }} {% endif %} {% for host in groups['kube-master'] %} -IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -{% endfor %} -{% set idx = groups['kube-master'] | length | int * 2 + 1 %} -IP.{{ idx }} = {{ kube_apiserver_ip }} -{% if loadbalancer_apiserver is defined %} -IP.{{ idx + 1 }} = {{ loadbalancer_apiserver.address }} -{% set idx = idx + 1 %} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }} +{% endfor %} +{% if kube_apiserver_ip is defined %} +IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }} +{% endif %} +{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined %} +IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }} {% endif %} -IP.{{ idx + 1 }} = 127.0.0.1 {% if supplementary_addresses_in_ssl_keys is defined %} -{% set is = idx + 1 %} {% for addr in supplementary_addresses_in_ssl_keys %} {% if addr | ipaddr %} -IP.{{ is + loop.index }} = {{ addr }} +IP.{{ counter["ip"] }} = {{ addr }}{{ increment(counter, 'ip') }} {% else %} -DNS.{{ idns + loop.index }} = {{ addr }} +DNS.{{ counter["dns"] }} = {{ addr }}{{ increment(counter, 'dns') }} {% endif %} {% endfor %} {% endif %} +IP.{{ counter["ip"] }} = 127.0.0.1 diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 4828de6af..d6217d654 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -61,7 +61,6 @@ dns_domain: "{{ cluster_name }}" kube_config_dir: /etc/kubernetes kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" kube_manifest_dir: "{{ kube_config_dir }}/manifests" -system_namespace: kube-system # This is where all the cert scripts and certs will be located kube_cert_dir: "{{ kube_config_dir }}/ssl" @@ -175,6 +174,7 @@ local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') } persistent_volumes_enabled: false cephfs_provisioner_enabled: false ingress_nginx_enabled: false +cert_manager_enabled: false ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) # openstack_blockstorage_version: "v1/v2/auto (default)" @@ -241,6 +241,7 @@ weave_peers: uninitialized ## Set no_proxy to all assigned cluster IPs and hostnames no_proxy: >- + {%- if http_proxy is defined or https_proxy is defined %} {%- if loadbalancer_apiserver is defined -%} {{ apiserver_loadbalancer_domain_name| default('') }}, {{ loadbalancer_apiserver.address | default('') }}, @@ -254,11 +255,12 @@ no_proxy: >- {{ item }},{{ item }}.{{ dns_domain }}, {%- endfor 
-%} 127.0.0.1,localhost + {%- endif %} proxy_env: http_proxy: "{{ http_proxy| default ('') }}" https_proxy: "{{ https_proxy| default ('') }}" - no_proxy: "{{ no_proxy }}" + no_proxy: "{{ no_proxy| default ('') }}" # Vars for pointing to kubernetes api endpoints is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}" diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 1b0cd0421..857ebd11a 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -50,4 +50,4 @@ rbac_resources: # * can-reach=DESTINATION # * interface=INTERFACE-REGEX # see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods -#calico_ip_auto_method: "interface=eth.*" +# calico_ip_auto_method: "interface=eth.*" diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 index 92d2f1f0a..3be65deaa 100644 --- a/roles/network_plugin/calico/templates/calico-config.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -2,7 +2,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: calico-config - namespace: {{ system_namespace }} + namespace: kube-system data: etcd_endpoints: "{{ etcd_access_addresses }}" etcd_ca: "/calico-secrets/ca_cert.crt" diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2 index 47d626659..cef8331f3 100644 --- a/roles/network_plugin/calico/templates/calico-cr.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2 @@ -3,7 +3,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: [""] resources: diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2 index 2e132a0dc..1b4e8fe00 100644 --- a/roles/network_plugin/calico/templates/calico-crb.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2 @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 index 5cce29793..68b1c286f 100644 --- a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 index 6ec3cd20b..849ea0afb 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -6,7 +6,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: calico-node - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: calico-node spec: diff --git a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 index e3b048c64..2e92b7b2b 100644 --- a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 @@ 
-3,7 +3,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: [""] resources: diff --git a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 index e1c1f5050..016e5193e 100644 --- a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 @@ -11,4 +11,4 @@ roleRef: subjects: - kind: ServiceAccount name: canal - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 index 3b00017b1..097b1538e 100644 --- a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 @@ -11,4 +11,4 @@ roleRef: subjects: - kind: ServiceAccount name: canal - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 index d5b9a6e97..aa168d15c 100644 --- a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: canal - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2 index d63bf99b0..8535360a1 100644 --- a/roles/network_plugin/canal/templates/canal-node.yaml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: canal-node - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: canal-node spec: diff --git a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 b/roles/network_plugin/cilium/templates/cilium-config.yml.j2 index a96bb8531..c5051e2ca 100755 --- a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-config.yml.j2 @@ -2,7 +2,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: cilium-config - namespace: {{ system_namespace }} + namespace: kube-system data: # This etcd-config contains the etcd endpoints of your cluster. 
If you use # TLS please make sure you uncomment the ca-file line and add the respective diff --git a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 index dcfe4d471..04d603d57 100755 --- a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 @@ -10,6 +10,6 @@ roleRef: subjects: - kind: ServiceAccount name: cilium - namespace: {{ system_namespace }} + namespace: kube-system - kind: Group name: system:nodes diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 index 3d877a5cb..8eaa24f32 100755 --- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: cilium - namespace: {{ system_namespace }} + namespace: kube-system spec: template: metadata: diff --git a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 index d6ef2a431..c03ac59b4 100755 --- a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 @@ -3,4 +3,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: cilium - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 index 140379b13..3ccaffaf8 100644 --- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: contiv-api-proxy - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-api-proxy spec: @@ -12,7 +12,7 @@ spec: template: metadata: name: contiv-api-proxy - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-api-proxy annotations: diff --git a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 b/roles/network_plugin/contiv/templates/contiv-config.yml.j2 index 0505cd1f1..249d9d88e 100644 --- a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-config.yml.j2 @@ -5,7 +5,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: contiv-config - namespace: {{ system_namespace }} + namespace: kube-system data: # The location of your cluster store. This is set to the # avdertise-client value below from the contiv-etcd service. 
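Note: the hunks above, like most of this change, replace the removed system_namespace variable with a hard-coded kube-system namespace across the network plugin templates. A quick way to confirm that no role, template or inventory file still references the removed variable is a recursive search; this is a minimal sketch, assuming it is run from the repository root:

    # Any matches point at files that still reference the removed variable
    # and would break now that kubespray-defaults no longer defines it.
    grep -rn "system_namespace" roles/ inventory/ docs/
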
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 index a9690cc2f..75946d821 100644 --- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-etcd-proxy - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-etcd-proxy spec: diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 index 8060f4c01..a6e9121d4 100644 --- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-etcd - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-etcd spec: diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 index 82ca00437..6ccd4f9b4 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 @@ -2,7 +2,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - "" diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 index 74c5e3145..73d636775 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 index 0c1bfb3e5..758ea4493 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 index 56be2d93d..d41259ec1 100644 --- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 @@ -3,7 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-netmaster spec: @@ -12,7 +12,7 @@ spec: template: metadata: name: contiv-netmaster - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-netmaster annotations: diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 index c26e094ed..af4c6e584 100644 --- 
a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 @@ -2,7 +2,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - "" diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 index 0c989008a..6cac217fc 100644 --- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 index edfac8bb3..8d00ec8cb 100644 --- a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system labels: kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 index 9c2c0a036..2a7bf71cb 100644 --- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 +++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 @@ -5,7 +5,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: contiv-netplugin - namespace: {{ system_namespace }} + namespace: kube-system labels: k8s-app: contiv-netplugin spec: diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 index aafe2a0f5..6f5c9a211 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: flannel - namespace: "{{system_namespace}}" + namespace: "kube-system" --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 @@ -41,4 +41,4 @@ roleRef: subjects: - kind: ServiceAccount name: flannel - namespace: "{{system_namespace}}" \ No newline at end of file + namespace: "kube-system" \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index bb2a6a7f8..7ecb21ad0 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -3,7 +3,7 @@ kind: ConfigMap apiVersion: v1 metadata: name: kube-flannel-cfg - namespace: "{{system_namespace}}" + namespace: "kube-system" labels: tier: node app: flannel @@ -41,7 +41,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: kube-flannel - namespace: "{{system_namespace}}" + namespace: "kube-system" labels: tier: node k8s-app: flannel diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 index 699ba3128..9a7da7377 100644 --- a/roles/network_plugin/weave/templates/weave-net.yml.j2 +++ 
b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -8,14 +8,14 @@ items: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - '' @@ -41,7 +41,7 @@ items: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: ClusterRole name: weave-net @@ -49,14 +49,14 @@ items: subjects: - kind: ServiceAccount name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system rules: - apiGroups: - '' @@ -79,7 +79,7 @@ items: name: weave-net labels: name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system roleRef: kind: Role name: weave-net @@ -87,7 +87,7 @@ items: subjects: - kind: ServiceAccount name: weave-net - namespace: {{ system_namespace }} + namespace: kube-system - apiVersion: extensions/v1beta1 kind: DaemonSet metadata: @@ -95,7 +95,7 @@ items: labels: name: weave-net version: v{{ weave_version }} - namespace: {{ system_namespace }} + namespace: kube-system spec: minReadySeconds: 5 template: diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 9a3e83035..8e5ad08a0 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -86,7 +86,7 @@ vault_ca_options: format: pem ttl: "{{ vault_max_lease_ttl }}" exclude_cn_from_sans: true - alt_names: "vault.{{ system_namespace }}.svc.{{ dns_domain }},vault.{{ system_namespace }}.svc,vault.{{ system_namespace }},vault" + alt_names: "vault.kube-system.svc.{{ dns_domain }},vault.kube-system.svc,vault.kube-system,vault" etcd: common_name: etcd format: pem diff --git a/roles/vault/tasks/cluster/systemd.yml b/roles/vault/tasks/cluster/systemd.yml index 8df52f982..f7139d336 100644 --- a/roles/vault/tasks/cluster/systemd.yml +++ b/roles/vault/tasks/cluster/systemd.yml @@ -55,3 +55,4 @@ register: vault_health_check until: vault_health_check|succeeded retries: 10 + delay: "{{ retry_stagger | random + 3 }}" diff --git a/tests/ansible.cfg b/tests/ansible.cfg index 9e734403e..9c4057529 100644 --- a/tests/ansible.cfg +++ b/tests/ansible.cfg @@ -10,3 +10,4 @@ fact_caching_connection = /tmp stdout_callback = skippy library = ./library:../library callback_whitelist = profile_tasks +jinja2_extensions = jinja2.ext.do diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml index 467bee2d0..c12092011 100644 --- a/tests/files/gce_centos7-flannel-addons.yml +++ b/tests/files/gce_centos7-flannel-addons.yml @@ -18,3 +18,5 @@ cloud_provider: gce kube_encrypt_secret_data: true prometheus_operator_enabled: true k8s_metrics_enabled: true +ingress_nginx_enabled: true +cert_manager_enabled: true
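
With the two variables above, the gce_centos7-flannel-addons CI job now exercises both the ingress-nginx controller and the new cert-manager addon. The same toggles can be switched on for a regular deployment; this is a minimal sketch, assuming the standard cluster.yml playbook and the sample inventory shipped with the repository (adjust the inventory path to your own setup):

    # Enable the ingress controller and cert-manager addons for this run,
    # overriding the defaults added in roles/kubespray-defaults/defaults/main.yaml.
    ansible-playbook -i inventory/sample/hosts.ini -b cluster.yml \
      -e ingress_nginx_enabled=true \
      -e cert_manager_enabled=true

Setting the same variables in the inventory group_vars instead makes the choice persistent across runs; cert_manager_namespace can also be overridden there if the default cert-manager namespace is not wanted.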