From af8cc4dc4a9d39c84c11606388541d4ba3dcda74 Mon Sep 17 00:00:00 2001 From: TAKAHASHI Yuto Date: Mon, 8 May 2017 22:55:34 +0900 Subject: [PATCH 01/26] Typo --- library/kube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/kube.py b/library/kube.py index 2922c6212..fdc783fff 100644 --- a/library/kube.py +++ b/library/kube.py @@ -66,7 +66,7 @@ options: description: - present handles checking existence or creating if definition file provided, absent handles deleting resource(s) based on other options, - latest handles creating ore updating based on existence, + latest handles creating or updating based on existence, reloaded handles updating resource(s) definition using definition file, stopped handles stopping resource(s) based on other options. requirements: From 3e97299a46dbf2afccbb6e8281d62fbc81f948e6 Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Fri, 9 Jun 2017 17:19:28 -0400 Subject: [PATCH 02/26] Sync folders on the vagrant machine --- Vagrantfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Vagrantfile b/Vagrantfile index a2c2c1c8f..f0fd92ce4 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -100,6 +100,10 @@ Vagrant.configure("2") do |config| end end + $shared_folders.each do |src, dst| + config.vm.synced_folder src, dst + end + config.vm.provider :virtualbox do |vb| vb.gui = $vm_gui vb.memory = $vm_memory From 266ca9318d9ad735b0ac7ad95362b548caa57a54 Mon Sep 17 00:00:00 2001 From: Gregory Storme Date: Tue, 6 Jun 2017 18:36:04 +0200 Subject: [PATCH 03/26] Use the kube_apiserver_insecure_port variable instead of static 8080 --- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- roles/kubernetes/master/handlers/main.yml | 2 +- .../master/templates/manifests/kube-apiserver.manifest.j2 | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 9ec3b7ddc..4e7236df6 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Kubernetes Apps | Wait for kube-apiserver uri: - url: http://localhost:8080/healthz + url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz register: result until: result.status == 200 retries: 10 diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml index 94cec7d1b..e408ce04e 100644 --- a/roles/kubernetes/master/handlers/main.yml +++ b/roles/kubernetes/master/handlers/main.yml @@ -39,7 +39,7 @@ - name: Master | wait for the apiserver to be running uri: - url: http://localhost:8080/healthz + url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz register: result until: result.status == 200 retries: 20 diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 1cee58282..bf4979596 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -92,7 +92,7 @@ spec: httpGet: host: 127.0.0.1 path: /healthz - port: 8080 + port: {{ kube_apiserver_insecure_port }} initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: @@ -124,4 +124,4 @@ spec: - hostPath: path: /etc/ssl/certs/ca-bundle.crt name: rhel-ca-bundle -{% endif %} \ No newline at end of file +{% endif %} From 67eeccb31ff83cc6e56a14ae30343d4ac113c6ce Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Mon, 12 Jun 2017 
13:20:15 +0200 Subject: [PATCH 04/26] Create ansible.md fixing a typo --- docs/ansible.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ansible.md b/docs/ansible.md index 4da6edb48..3481ffc3f 100644 --- a/docs/ansible.md +++ b/docs/ansible.md @@ -84,7 +84,7 @@ Layer | Comment inventory vars | Unused **inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things inventory host_vars | Unused -playbook group_vars | Unuses +playbook group_vars | Unused playbook host_vars | Unused **host facts** | Kargo overrides for internal roles' logic, like state flags play vars | Unused From a3c88a0de55dcf86caf7d4f05cb4a5240dd9d5db Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 16 Jun 2017 12:18:35 -0400 Subject: [PATCH 05/26] rename kargo mentions in top-level yml files --- cluster.yml | 24 ++++++++++++------------ reset.yml | 2 +- scale.yml | 4 ++-- upgrade-cluster.yml | 28 ++++++++++++++-------------- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/cluster.yml b/cluster.yml index 75296646a..b973d6c14 100644 --- a/cluster.yml +++ b/cluster.yml @@ -2,7 +2,7 @@ - hosts: localhost gather_facts: False roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} - hosts: k8s-cluster:etcd:calico-rr @@ -13,7 +13,7 @@ # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. ansible_ssh_pipelining: false roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: bootstrap-os, tags: bootstrap-os} - hosts: k8s-cluster:etcd:calico-rr @@ -25,7 +25,7 @@ - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kubernetes/preinstall, tags: preinstall } - { role: docker, tags: docker } @@ -36,38 +36,38 @@ - hosts: etcd:k8s-cluster:vault any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults, when: "cert_management == 'vault'" } + - { role: kubespray-defaults, when: "cert_management == 'vault'" } - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" } - hosts: etcd any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: etcd, tags: etcd, etcd_cluster_setup: true } - hosts: k8s-cluster any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: etcd, tags: etcd, etcd_cluster_setup: false } - hosts: etcd:k8s-cluster:vault any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: vault, tags: vault, when: "cert_management == 'vault'"} - hosts: k8s-cluster any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kubernetes/node, tags: node } - { role: network_plugin, tags: network } - hosts: kube-master any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kubernetes/master, tags: master } - { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/policy_controller, tags: policy-controller } @@ -75,18 +75,18 @@ - hosts: 
calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: network_plugin/calico/rr, tags: network } - hosts: k8s-cluster any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq } - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf } - hosts: kube-master[0] any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kubernetes-apps, tags: apps } diff --git a/reset.yml b/reset.yml index b6e15d828..859ca6264 100644 --- a/reset.yml +++ b/reset.yml @@ -14,5 +14,5 @@ when: reset_confirmation != "yes" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: reset, tags: reset } diff --git a/scale.yml b/scale.yml index 02e79aa37..49445cabc 100644 --- a/scale.yml +++ b/scale.yml @@ -7,7 +7,7 @@ vars: ansible_ssh_pipelining: false roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: bootstrap-os, tags: bootstrap-os} ##We still have to gather facts about our masters and etcd nodes @@ -21,7 +21,7 @@ - hosts: kube-node any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kubernetes/preinstall, tags: preinstall } - { role: docker, tags: docker } diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index 0b4613820..09f268ecf 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -2,7 +2,7 @@ - hosts: localhost gather_facts: False roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} - hosts: k8s-cluster:etcd:calico-rr @@ -13,7 +13,7 @@ # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. 
ansible_ssh_pipelining: false roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: bootstrap-os, tags: bootstrap-os} - hosts: k8s-cluster:etcd:calico-rr @@ -25,7 +25,7 @@ - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kubernetes/preinstall, tags: preinstall } - { role: docker, tags: docker } @@ -36,25 +36,25 @@ - hosts: etcd:k8s-cluster:vault any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults, when: "cert_management == 'vault'" } + - { role: kubespray-defaults, when: "cert_management == 'vault'" } - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" } - hosts: etcd any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: etcd, tags: etcd, etcd_cluster_setup: true } - hosts: k8s-cluster any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: etcd, tags: etcd, etcd_cluster_setup: false } - hosts: etcd:k8s-cluster:vault any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults, when: "cert_management == 'vault'"} + - { role: kubespray-defaults, when: "cert_management == 'vault'"} - { role: vault, tags: vault, when: "cert_management == 'vault'"} #Handle upgrades to master components first to maintain backwards compat. @@ -62,7 +62,7 @@ any_errors_fatal: "{{ any_errors_fatal | default(true) }}" serial: 1 roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: kubernetes/node, tags: node } - { role: kubernetes/master, tags: master } @@ -73,35 +73,35 @@ any_errors_fatal: "{{ any_errors_fatal | default(true) }}" serial: "{{ serial | default('20%') }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: kubernetes/node, tags: node } - { role: network_plugin, tags: network } - { role: upgrade/post-upgrade, tags: post-upgrade } - - { role: kargo-defaults} + - { role: kubespray-defaults} - hosts: kube-master any_errors_fatal: true roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/policy_controller, tags: policy-controller } - hosts: calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: network_plugin/calico/rr, tags: network } - hosts: k8s-cluster any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq } - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf } - hosts: kube-master[0] any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kubernetes-apps, tags: apps } From 8203383c037b4d8bdc9eeca0f736ecaa6be3cd4b Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 16 Jun 2017 13:25:46 -0400 Subject: [PATCH 06/26] rename almost all mentions of kargo --- 
.github/ISSUE_TEMPLATE.md | 2 +- README.md | 10 +++--- RELEASE.md | 18 +++++------ ...nventory.py => kubespray-aws-inventory.py} | 4 +-- contrib/azurerm/README.md | 8 ++--- contrib/network-storage/glusterfs/README.md | 14 ++++---- contrib/terraform/aws/README.md | 4 +-- .../terraform/aws/create-infrastructure.tf | 2 +- .../docs/{aws_kargo.png => aws_kubespray.png} | Bin docs/ansible.md | 10 +++--- docs/aws.md | 8 ++--- docs/calico.md | 6 ++-- docs/comparisons.md | 12 +++---- docs/dns-stack.md | 16 +++++----- docs/downloads.md | 2 +- ...-calico-rr.png => kubespray-calico-rr.png} | Bin docs/getting-started.md | 6 ++-- docs/ha-mode.md | 6 ++-- docs/netcheck.md | 4 +-- docs/roadmap.md | 30 +++++++++--------- docs/upgrades.md | 8 ++--- docs/vars.md | 16 +++++----- docs/vault.md | 2 +- extra_playbooks/upgrade-only-k8s.yml | 12 +++---- roles/dnsmasq/templates/dnsmasq-deploy.yml | 2 +- roles/kargo-defaults/tasks/main.yaml | 5 --- .../manifests/kube-apiserver.manifest.j2 | 2 +- .../preinstall/tasks/dhclient-hooks-undo.yml | 6 ++-- .../defaults/main.yaml | 0 roles/kubespray-defaults/tasks/main.yaml | 5 +++ 30 files changed, 110 insertions(+), 110 deletions(-) rename contrib/aws_inventory/{kargo-aws-inventory.py => kubespray-aws-inventory.py} (96%) rename contrib/terraform/aws/docs/{aws_kargo.png => aws_kubespray.png} (100%) rename docs/figures/{kargo-calico-rr.png => kubespray-calico-rr.png} (100%) delete mode 100644 roles/kargo-defaults/tasks/main.yaml rename roles/{kargo-defaults => kubespray-defaults}/defaults/main.yaml (100%) create mode 100644 roles/kubespray-defaults/tasks/main.yaml diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index b82daadde..0f81faeac 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -24,7 +24,7 @@ explain why. - **Version of Ansible** (`ansible --version`): -**Kargo version (commit) (`git rev-parse --short HEAD`):** +**Kubespray version (commit) (`git rev-parse --short HEAD`):** **Network plugin used**: diff --git a/README.md b/README.md index aa1360a77..50f6e3977 100644 --- a/README.md +++ b/README.md @@ -14,12 +14,12 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), To deploy the cluster you can use : [**kargo-cli**](https://github.com/kubespray/kargo-cli)
-**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
+**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
**vagrant** by simply running `vagrant up` (for tests purposes)
* [Requirements](#requirements) -* [Kargo vs ...](docs/comparisons.md) +* [Kubespray vs ...](docs/comparisons.md) * [Getting started](docs/getting-started.md) * [Ansible inventory and tags](docs/ansible.md) * [Deployment data variables](docs/vars.md) @@ -100,13 +100,13 @@ See also [Network checker](docs/netcheck.md). - [kubernetes.io/docs/getting-started-guides/kargo/](https://kubernetes.io/docs/getting-started-guides/kargo/) - [kargo, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty - - [Deploy a Kubernets Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8) + - [Deploy a Kubernetes Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8) -## Tools and projects on top of Kargo +## Tools and projects on top of Kubespray - [Digital Rebar](https://github.com/digitalrebar/digitalrebar) - [Kargo-cli](https://github.com/kubespray/kargo-cli) - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer) - - [Terraform Contrib](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/terraform) + - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform) ## CI Tests diff --git a/RELEASE.md b/RELEASE.md index 56f0b2e0f..0679667a1 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,16 +1,16 @@ # Release Process -The Kargo Project is released on an as-needed basis. The process is as follows: +The Kubespray Project is released on an as-needed basis. The process is as follows: 1. An issue is proposing a new release with a changelog since the last release 2. At least on of the [OWNERS](OWNERS) must LGTM this release 3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` 4. The release issue is closed -5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released` +5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released` ## Major/minor releases, merge freezes and milestones -* Kargo does not maintain stable branches for releases. Releases are tags, not +* Kubespray does not maintain stable branches for releases. Releases are tags, not branches, and there are no backports. Therefore, there is no need for merge freezes as well. @@ -20,21 +20,21 @@ The Kargo Project is released on an as-needed basis. The process is as follows: support lifetime, which ends once the milestone closed. Then only a next major or minor release can be done. -* Kargo major and minor releases are bound to the given ``kube_version`` major/minor +* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor version numbers and other components' arbitrary versions, like etcd or network plugins. Older or newer versions are not supported and not tested for the given release. -* There is no unstable releases and no APIs, thus Kargo doesn't follow +* There is no unstable releases and no APIs, thus Kubespray doesn't follow [semver](http://semver.org/). Every version describes only a stable release. Breaking changes, if any introduced by changed defaults or non-contrib ansible roles' playbooks, shall be described in the release notes. 
Other breaking changes, if any in the contributed addons or bound versions of Kubernetes and other components, are - considered out of Kargo scope and are up to the components' teams to deal with and + considered out of Kubespray scope and are up to the components' teams to deal with and document. * Minor releases can change components' versions, but not the major ``kube_version``. - Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0 + Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0 is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``, - then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1 + then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1 and *any* changes to other components, like etcd v4, or calico 1.2.3. - And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively. + And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively. diff --git a/contrib/aws_inventory/kargo-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py similarity index 96% rename from contrib/aws_inventory/kargo-aws-inventory.py rename to contrib/aws_inventory/kubespray-aws-inventory.py index d379be349..65741bbda 100755 --- a/contrib/aws_inventory/kargo-aws-inventory.py +++ b/contrib/aws_inventory/kubespray-aws-inventory.py @@ -33,10 +33,10 @@ class SearchEC2Tags(object): hosts = {} hosts['_meta'] = { 'hostvars': {} } - ##Search ec2 three times to find nodes of each group type. Relies on kargo-role key/value. + ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value. for group in ["kube-master", "kube-node", "etcd"]: hosts[group] = [] - tag_key = "kargo-role" + tag_key = "kubespray-role" tag_value = ["*"+group+"*"] region = os.environ['REGION'] diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md index d8cd28e7f..ac2548c85 100644 --- a/contrib/azurerm/README.md +++ b/contrib/azurerm/README.md @@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou ## Status This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified -Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course). +Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kubespray of course). ## Requirements @@ -47,7 +47,7 @@ $ ./clear-rg.sh **WARNING** this really deletes everything from your resource group, including everything that was later created by you! 
-## Generating an inventory for kargo +## Generating an inventory for kubespray After you have applied the templates, you can generate an inventory with this call: @@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca $ ./generate-inventory.sh ``` -It will create the file ./inventory which can then be used with kargo, e.g.: +It will create the file ./inventory which can then be used with kubespray, e.g.: ```shell -$ cd kargo-root-dir +$ cd kubespray-root-dir $ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml ``` diff --git a/contrib/network-storage/glusterfs/README.md b/contrib/network-storage/glusterfs/README.md index d09c10c7f..d7aea26aa 100644 --- a/contrib/network-storage/glusterfs/README.md +++ b/contrib/network-storage/glusterfs/README.md @@ -1,4 +1,4 @@ -# Deploying a Kargo Kubernetes Cluster with GlusterFS +# Deploying a Kubespray Kubernetes Cluster with GlusterFS You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section. @@ -6,7 +6,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group. -Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kargo root folder, and execute (supposing that the machines are all using ubuntu): +Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu): ``` ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml @@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us ## Using Terraform and Ansible -First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like: +First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. 
An example with all required variables would look like: ``` cluster_name = "cluster1" @@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \ export TF_VAR_auth_url=${OS_AUTH_URL} ``` -Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster: +Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster: ``` -terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack +terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack ``` This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping). -Then, provision your Kubernetes (Kargo) cluster with the following ansible call: +Then, provision your Kubernetes (kubespray) cluster with the following ansible call: ``` ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml @@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co If you need to destroy the cluster, you can run: ``` -terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack +terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack ``` diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md index de858b2a9..451fc58a7 100644 --- a/contrib/terraform/aws/README.md +++ b/contrib/terraform/aws/README.md @@ -33,7 +33,7 @@ export AWS_DEFAULT_REGION="zzz" - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` -- Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag. +- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. **Troubleshooting** @@ -54,4 +54,4 @@ It could happen that Terraform doesnt create an Ansible Inventory file automatic Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones. 
-![AWS Infrastructure with Terraform ](docs/aws_kargo.png) +![AWS Infrastructure with Terraform ](docs/aws_kubespray.png) diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf index 781edea86..a58bca53c 100644 --- a/contrib/terraform/aws/create-infrastructure.tf +++ b/contrib/terraform/aws/create-infrastructure.tf @@ -157,7 +157,7 @@ resource "aws_instance" "k8s-worker" { /* -* Create Kargo Inventory File +* Create Kubespray Inventory File * */ data "template_file" "inventory" { diff --git a/contrib/terraform/aws/docs/aws_kargo.png b/contrib/terraform/aws/docs/aws_kubespray.png similarity index 100% rename from contrib/terraform/aws/docs/aws_kargo.png rename to contrib/terraform/aws/docs/aws_kubespray.png diff --git a/docs/ansible.md b/docs/ansible.md index 4da6edb48..a9e38bf53 100644 --- a/docs/ansible.md +++ b/docs/ansible.md @@ -75,25 +75,25 @@ According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variab those cannot be overriden from the group vars. In order to override, one should use the `-e ` runtime flags (most simple way) or other layers described in the docs. -Kargo uses only a few layers to override things (or expect them to +Kubespray uses only a few layers to override things (or expect them to be overriden for roles): Layer | Comment ------|-------- -**role defaults** | provides best UX to override things for Kargo deployments +**role defaults** | provides best UX to override things for Kubespray deployments inventory vars | Unused **inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things inventory host_vars | Unused playbook group_vars | Unuses playbook host_vars | Unused -**host facts** | Kargo overrides for internal roles' logic, like state flags +**host facts** | Kubespray overrides for internal roles' logic, like state flags play vars | Unused play vars_prompt | Unused play vars_files | Unused registered vars | Unused -set_facts | Kargo overrides those, for some places +set_facts | Kubespray overrides those, for some places **role and include vars** | Provides bad UX to override things! Use extra vars to enforce -block vars (only for tasks in block) | Kargo overrides for internal roles' logic +block vars (only for tasks in block) | Kubespray overrides for internal roles' logic task vars (only for the task) | Unused for roles, but only for helper scripts **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` diff --git a/docs/aws.md b/docs/aws.md index 91bded11c..8bdbc06fa 100644 --- a/docs/aws.md +++ b/docs/aws.md @@ -3,7 +3,7 @@ AWS To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. -Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role. +Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". 
You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role. The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`. @@ -45,12 +45,12 @@ This will produce an inventory that is passed into Ansible that looks like the f Guide: - Create instances in AWS as needed. -- Either during or after creation, add tags to the instances with a key of `kargo-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd` -- Copy the `kargo-aws-inventory.py` script from `kargo/contrib/aws_inventory` to the `kargo/inventory` directory. +- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd` +- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory. - Set the following AWS credentials and info as environment variables in your terminal: ``` export AWS_ACCESS_KEY_ID="xxxxx" export AWS_SECRET_ACCESS_KEY="yyyyy" export REGION="us-east-2" ``` -- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kargo-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml` +- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml` diff --git a/docs/calico.md b/docs/calico.md index eefbcb6e2..00ff748c1 100644 --- a/docs/calico.md +++ b/docs/calico.md @@ -96,7 +96,7 @@ You need to edit your inventory and add: * `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/)) -Here's an example of Kargo inventory with route reflectors: +Here's an example of Kubespray inventory with route reflectors: ``` [all] @@ -145,11 +145,11 @@ cluster_id="1.0.0.1" The inventory above will deploy the following topology assuming that calico's `global_as_num` is set to `65400`: -![Image](figures/kargo-calico-rr.png?raw=true) +![Image](figures/kubespray-calico-rr.png?raw=true) ##### Optional : Define default endpoint to host action -By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. 
When using it in kubernetes the action has to be changed to RETURN (default in kargo) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped. +By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped. To re-define default action please set the following variable in your inventory: diff --git a/docs/comparisons.md b/docs/comparisons.md index 63cb60102..cf34e37d5 100644 --- a/docs/comparisons.md +++ b/docs/comparisons.md @@ -1,25 +1,25 @@ -Kargo vs [Kops](https://github.com/kubernetes/kops) +Kubespray vs [Kops](https://github.com/kubernetes/kops) --------------- -Kargo runs on bare metal and most clouds, using Ansible as its substrate for +Kubespray runs on bare metal and most clouds, using Ansible as its substrate for provisioning and orchestration. Kops performs the provisioning and orchestration itself, and as such is less flexible in deployment platforms. For people with familiarity with Ansible, existing Ansible deployments or the desire to run a -Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops, +Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops, however, is more tightly integrated with the unique features of the clouds it supports so it could be a better choice if you know that you will only be using one platform for the foreseeable future. -Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm) +Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm) ------------------ Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle management, including self-hosted layouts, dynamic discovery services and so on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html), -it would've likely been named a "Kubernetes cluster operator". Kargo however, +it would've likely been named a "Kubernetes cluster operator". Kubespray however, does generic configuration management tasks from the "OS operators" ansible world, plus some initial K8s clustering (with networking plugins included) and -control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553) +control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553) to adopt kubeadm as a tool in order to consume life cycle management domain knowledge from it and offload generic OS configuration things from it, which hopefully benefits both sides. 
diff --git a/docs/dns-stack.md b/docs/dns-stack.md index 67afb6b43..f4de31544 100644 --- a/docs/dns-stack.md +++ b/docs/dns-stack.md @@ -1,7 +1,7 @@ -K8s DNS stack by Kargo +K8s DNS stack by Kubespray ====================== -For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/) +For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/) [cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md) to serve as an authoritative DNS server for a given ``dns_domain`` and its ``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels). @@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``). -DNS modes supported by kargo +DNS modes supported by Kubespray ============================ -You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``. +You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``. ## dns_mode -``dns_mode`` configures how kargo will setup cluster DNS. There are three modes available: +``dns_mode`` configures how Kubespray will setup cluster DNS. There are three modes available: #### dnsmasq_kubedns (default) This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some @@ -67,7 +67,7 @@ This does not install any of dnsmasq and kubedns/skydns. This basically disables leaves you with a non functional cluster. ## resolvconf_mode -``resolvconf_mode`` configures how kargo will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers. +``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers. There are three modes available: #### docker_dns (default) @@ -100,7 +100,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be a servers, which in turn will forward queries to the system nameserver if required. #### host_resolvconf -This activates the classic kargo behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient +This activates the classic Kubespray behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode). As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first @@ -120,7 +120,7 @@ cluster service names. Limitations ----------- -* Kargo has yet ways to configure Kubedns addon to forward requests SkyDns can +* Kubespray has yet ways to configure Kubedns addon to forward requests SkyDns can not answer with authority to arbitrary recursive resolvers. This task is left for future. See [official SkyDns docs](https://github.com/skynetservices/skydns) for details. diff --git a/docs/downloads.md b/docs/downloads.md index 2c3f3085f..50d9ef107 100644 --- a/docs/downloads.md +++ b/docs/downloads.md @@ -1,7 +1,7 @@ Downloading binaries and containers =================================== -Kargo supports several download/upload modes. The default is: +Kubespray supports several download/upload modes. The default is: * Each node downloads binaries and container images on its own, which is ``download_run_once: False``. 
diff --git a/docs/figures/kargo-calico-rr.png b/docs/figures/kubespray-calico-rr.png similarity index 100% rename from docs/figures/kargo-calico-rr.png rename to docs/figures/kubespray-calico-rr.png diff --git a/docs/getting-started.md b/docs/getting-started.md index 6e323d9cd..167dcaea2 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -23,12 +23,12 @@ Building your own inventory Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is an example inventory located -[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example). +[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example). You can use an -[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) +[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) to create or modify an Ansible inventory. Currently, it is limited in -functionality and is only use for making a basic Kargo cluster, but it does +functionality and is only use for making a basic Kubespray cluster, but it does support creating large clusters. It now supports separated ETCD and Kubernetes master roles from node role if the size exceeds a certain threshold. Run inventory.py help for more information. diff --git a/docs/ha-mode.md b/docs/ha-mode.md index 20578f705..5036345b7 100644 --- a/docs/ha-mode.md +++ b/docs/ha-mode.md @@ -22,7 +22,7 @@ Kube-apiserver -------------- K8s components require a loadbalancer to access the apiservers via a reverse -proxy. Kargo includes support for an nginx-based proxy that resides on each +proxy. Kubespray includes support for an nginx-based proxy that resides on each non-master Kubernetes node. This is referred to as localhost loadbalancing. It is less efficient than a dedicated load balancer because it creates extra health checks on the Kubernetes apiserver, but is more practical for scenarios @@ -30,12 +30,12 @@ where an external LB or virtual IP management is inconvenient. This option is configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`). You may also define the port the local internal loadbalancer users by changing, `nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`. -It is also import to note that Kargo will only configure kubelet and kube-proxy +It is also import to note that Kubespray will only configure kubelet and kube-proxy on non-master nodes to use the local internal loadbalancer. If you choose to NOT use the local internal loadbalancer, you will need to configure your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to -a user and is not covered by ansible roles in Kargo. By default, it only configures +a user and is not covered by ansible roles in Kubespray. By default, it only configures a non-HA endpoint, which points to the `access_ip` or IP address of the first server node in the `kube-master` group. It can also configure clients to use endpoints for a given loadbalancer type. 
The following diagram shows how traffic to the diff --git a/docs/netcheck.md b/docs/netcheck.md index bee04cbb3..80679cd73 100644 --- a/docs/netcheck.md +++ b/docs/netcheck.md @@ -1,7 +1,7 @@ Network Checker Application =========================== -With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a +With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a Network Checker Application from the 3rd side `l23network/k8s-netchecker` docker images. It consists of the server and agents trying to reach the server by usual for Kubernetes applications network connectivity meanings. Therefore, this @@ -17,7 +17,7 @@ any of the cluster nodes: ``` curl http://localhost:31081/api/v1/connectivity_check ``` -Note that Kargo does not invoke the check but only deploys the application, if +Note that Kubespray does not invoke the check but only deploys the application, if requested. There are related application specifc variables: diff --git a/docs/roadmap.md b/docs/roadmap.md index c0a3a7527..fb038ae1f 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -1,23 +1,23 @@ -Kargo's roadmap +Kubespray's roadmap ================= ### Kubeadm - Propose kubeadm as an option in order to setup the kubernetes cluster. -That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553) +That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kubespray/issues/553) -### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320) +### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320) - the playbook would install and configure docker/rkt and the etcd cluster - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars. - a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm) - to be discussed, a way to provide the inventory -- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321) +- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321) ### Provisionning and cloud providers - [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure** - [ ] On AWS autoscaling, multi AZ -- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297) -- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280) -- [x] **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234) +- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297) +- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280) +- [x] **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kubespray/issues/234) (related issues: https://github.com/kubernetes/kubernetes/pull/20439
https://github.com/kubernetes/kubernetes/issues/18112) @@ -37,14 +37,14 @@ That would probably improve deployment speed and certs management [#553](https:/ - [ ] test scale up cluster: +1 etcd, +1 master, +1 node ### Lifecycle -- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553) -- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154) +- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kubespray/issues/553) +- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kubespray/issues/154) - [ ] Drain worker node when shutting down/deleting an instance - [ ] Upgrade granularity: select components to upgrade and skip others ### Networking -- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160) -- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159) +- [ ] romana.io support [#160](https://github.com/kubespray/kubespray/issues/160) +- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kubespray/issues/159) - [ ] Opencontrail - [x] Canal - [x] Cloud Provider native networking (instead of our network plugins) @@ -60,7 +60,7 @@ While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kub - switch to Terraform instead of Ansible for provisionning - update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context -### Kargo API +### Kubespray API - Perform all actions through an **API** - Store inventories / configurations of mulltiple clusters - make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory @@ -87,8 +87,8 @@ Include optionals deployments to init the cluster: ### Others - remove nodes (adding is already supported) - being able to choose any k8s version (almost done) -- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59) +- **rkt** support [#59](https://github.com/kubespray/kubespray/issues/59) - Review documentation (split in categories) - **consul** -> if officialy supported by k8s -- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312) -- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329) +- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312) +- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329) diff --git a/docs/upgrades.md b/docs/upgrades.md index cb431d4c0..9a21cbdc4 100644 --- a/docs/upgrades.md +++ b/docs/upgrades.md @@ -1,11 +1,11 @@ -Upgrading Kubernetes in Kargo +Upgrading Kubernetes in Kubespray ============================= #### Description -Kargo handles upgrades the same way it handles initial deployment. That is to +Kubespray handles upgrades the same way it handles initial deployment. That is to say that each component is laid down in a fixed order. You should be able to -upgrade from Kargo tag 2.0 up to the current master without difficulty. You can +upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can also individually control versions of components by explicitly defining their versions. 
Here are all version vars for each component: @@ -35,7 +35,7 @@ ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6 #### Graceful upgrade -Kargo also supports cordon, drain and uncordoning of nodes when performing +Kubespray also supports cordon, drain and uncordoning of nodes when performing a cluster upgrade. There is a separate playbook used for this purpose. It is important to note that upgrade-cluster.yml can only be used for upgrading an existing cluster. That means there must be at least 1 kube-master already diff --git a/docs/vars.md b/docs/vars.md index 603a614b2..4b9da186e 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -1,4 +1,4 @@ -Configurable Parameters in Kargo +Configurable Parameters in Kubespray ================================ #### Generic Ansible variables @@ -12,7 +12,7 @@ Some variables of note include: * *ansible_default_ipv4.address*: IP address Ansible automatically chooses. Generated based on the output from the command ``ip -4 route get 8.8.8.8`` -#### Common vars that are used in Kargo +#### Common vars that are used in Kubespray * *calico_version* - Specify version of Calico to use * *calico_cni_version* - Specify version of Calico CNI plugin to use @@ -35,16 +35,16 @@ Some variables of note include: * *access_ip* - IP for other hosts to use to connect to. Often required when deploying from a cloud, such as OpenStack or GCE and you have separate public/floating and private IPs. -* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip +* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip and access_ip are undefined * *loadbalancer_apiserver* - If defined, all hosts will connect to this address instead of localhost for kube-masters and kube-master[0] for kube-nodes. See more details in the - [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md). + [HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md). * *loadbalancer_apiserver_localhost* - makes all hosts to connect to the apiserver internally load balanced endpoint. Mutual exclusive to the `loadbalancer_apiserver`. See more details in the - [HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md). + [HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md). #### Cluster variables @@ -79,13 +79,13 @@ other settings from your existing /etc/resolv.conf are lost. Set the following variables to match your requirements. * *upstream_dns_servers* - Array of upstream DNS servers configured on host in - addition to Kargo deployed DNS + addition to Kubespray deployed DNS * *nameservers* - Array of DNS servers configured for use in dnsmasq * *searchdomains* - Array of up to 4 search domains * *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS) For more information, see [DNS -Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md). +Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md). #### Other service variables @@ -114,5 +114,5 @@ The possible vars are: #### User accounts -Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their +Kubespray sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their passwords default to changeme. You can set this by changing ``kube_api_pwd``. diff --git a/docs/vault.md b/docs/vault.md index 446d914c9..3850d04b5 100644 --- a/docs/vault.md +++ b/docs/vault.md @@ -39,7 +39,7 @@ vault group. 
It is *highly* recommended that these secrets are removed from the servers after your cluster has been deployed, and kept in a safe location of your choosing. Naturally, the seriousness of the situation depends on what you're doing with -your Kargo cluster, but with these secrets, an attacker will have the ability +your Kubespray cluster, but with these secrets, an attacker will have the ability to authenticate to almost everything in Kubernetes and decode all private (HTTPS) traffic on your network signed by Vault certificates. diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml index f10259b07..c2af6a301 100644 --- a/extra_playbooks/upgrade-only-k8s.yml +++ b/extra_playbooks/upgrade-only-k8s.yml @@ -11,7 +11,7 @@ - hosts: localhost gather_facts: False roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} - hosts: k8s-cluster:etcd:calico-rr @@ -22,7 +22,7 @@ # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. ansible_ssh_pipelining: false roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: bootstrap-os, tags: bootstrap-os} - hosts: k8s-cluster:etcd:calico-rr @@ -34,7 +34,7 @@ - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: kubernetes/preinstall, tags: preinstall } #Handle upgrades to master components first to maintain backwards compat. @@ -42,7 +42,7 @@ any_errors_fatal: "{{ any_errors_fatal | default(true) }}" serial: 1 roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: kubernetes/node, tags: node } - { role: kubernetes/master, tags: master } @@ -53,8 +53,8 @@ any_errors_fatal: "{{ any_errors_fatal | default(true) }}" serial: "{{ serial | default('20%') }}" roles: - - { role: kargo-defaults} + - { role: kubespray-defaults} - { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: kubernetes/node, tags: node } - { role: upgrade/post-upgrade, tags: post-upgrade } - - { role: kargo-defaults} + - { role: kubespray-defaults} diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml index ed74c3e06..e811e1995 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml @@ -19,7 +19,7 @@ spec: labels: k8s-app: dnsmasq kubernetes.io/cluster-service: "true" - kargo/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}" + kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}" spec: containers: - name: dnsmasq diff --git a/roles/kargo-defaults/tasks/main.yaml b/roles/kargo-defaults/tasks/main.yaml deleted file mode 100644 index 91d0bc463..000000000 --- a/roles/kargo-defaults/tasks/main.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- name: Configure defaults - debug: - msg: "Check roles/kargo-defaults/defaults/main.yml" - tags: - - always diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 851cca060..1cee58282 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -5,7 +5,7 @@ metadata: namespace: {{system_namespace}} labels: k8s-app: kube-apiserver - kargo: v2 + kubespray: v2 spec: hostNetwork: true {% if kube_version | 
version_compare('v1.6', '>=') %} diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml index 10e5bba68..91fb9c694 100644 --- a/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml +++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks-undo.yml @@ -1,9 +1,9 @@ --- -# These tasks will undo changes done by kargo in the past if needed (e.g. when upgrading from kargo 2.0.x +# These tasks will undo changes done by kubespray in the past if needed (e.g. when upgrading from kubespray 2.0.x # or when changing resolvconf_mode) -- name: Remove kargo specific config from dhclient config +- name: Remove kubespray specific config from dhclient config blockinfile: dest: "{{dhclientconffile}}" state: absent @@ -13,7 +13,7 @@ when: dhclientconffile is defined notify: Preinstall | restart network -- name: Remove kargo specific dhclient hook +- name: Remove kubespray specific dhclient hook file: path: "{{ dhclienthookfile }}" state: absent diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml similarity index 100% rename from roles/kargo-defaults/defaults/main.yaml rename to roles/kubespray-defaults/defaults/main.yaml diff --git a/roles/kubespray-defaults/tasks/main.yaml b/roles/kubespray-defaults/tasks/main.yaml new file mode 100644 index 000000000..5b2cb96a0 --- /dev/null +++ b/roles/kubespray-defaults/tasks/main.yaml @@ -0,0 +1,5 @@ +- name: Configure defaults + debug: + msg: "Check roles/kubespray-defaults/defaults/main.yml" + tags: + - always From d85f98d2a984828f5c983b35802bc66335dc7b0e Mon Sep 17 00:00:00 2001 From: vgkowski Date: Wed, 21 Jun 2017 11:00:11 +0200 Subject: [PATCH 07/26] change documentation from "self hosted" to "static pod" for the control plane --- docs/ansible.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/ansible.md b/docs/ansible.md index 4da6edb48..a6dd759f6 100644 --- a/docs/ansible.md +++ b/docs/ansible.md @@ -124,12 +124,12 @@ The following tags are defined in playbooks: | k8s-pre-upgrade | Upgrading K8s cluster | k8s-secrets | Configuring K8s certs/keys | kpm | Installing K8s apps definitions with KPM -| kube-apiserver | Configuring self-hosted kube-apiserver -| kube-controller-manager | Configuring self-hosted kube-controller-manager +| kube-apiserver | Configuring static pod kube-apiserver +| kube-controller-manager | Configuring static pod kube-controller-manager | kubectl | Installing kubectl and bash completion | kubelet | Configuring kubelet service -| kube-proxy | Configuring self-hosted kube-proxy -| kube-scheduler | Configuring self-hosted kube-scheduler +| kube-proxy | Configuring static pod kube-proxy +| kube-scheduler | Configuring static pod kube-scheduler | localhost | Special steps for the localhost (ansible runner) | master | Configuring K8s master node role | netchecker | Installing netchecker K8s app From 83265b7f751f9b1012b13a9dc1f6e9977fbccc3f Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 23 Jun 2017 12:35:10 -0400 Subject: [PATCH 08/26] renaming kargo-cli to kubespray-cli --- README.md | 14 +++++++------- contrib/inventory_builder/inventory.py | 4 ++-- contrib/inventory_builder/setup.cfg | 2 +- contrib/inventory_builder/tests/test_inventory.py | 2 +- contrib/kvm-setup/README.md | 8 ++++---- contrib/kvm-setup/group_vars/all | 2 +- contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml | 4 ++-- docs/cloud.md | 8 ++++---- docs/coreos.md | 4 ++-- docs/getting-started.md | 8 ++++---- 
docs/roadmap.md | 8 ++++---- 11 files changed, 32 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 50f6e3977..ab03ca853 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ## Deploy a production ready kubernetes cluster -If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**. +If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kubespray**. - Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal** - **High available** cluster @@ -13,7 +13,7 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), To deploy the cluster you can use : -[**kargo-cli**](https://github.com/kubespray/kargo-cli)
+[**kubespray-cli**](https://github.com/kubespray/kubespray-cli)
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
**vagrant** by simply running `vagrant up` (for tests purposes)
@@ -97,14 +97,14 @@ option to leverage built-in cloud provider networking instead. See also [Network checker](docs/netcheck.md). ## Community docs and resources - - [kubernetes.io/docs/getting-started-guides/kargo/](https://kubernetes.io/docs/getting-started-guides/kargo/) - - [kargo, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr + - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/) + - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty - - [Deploy a Kubernetes Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8) + - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8) ## Tools and projects on top of Kubespray - [Digital Rebar](https://github.com/digitalrebar/digitalrebar) - - [Kargo-cli](https://github.com/kubespray/kargo-cli) + - [Kubespray-cli](https://github.com/kubespray/kubespray-cli) - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer) - [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform) @@ -112,7 +112,7 @@ See also [Network checker](docs/netcheck.md). ![Gitlab Logo](https://s27.postimg.org/wmtaig1wz/gitlabci.png) -[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines)
+[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack). See the [test matrix](docs/test_cases.md) for details. diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py index 7e0a89f09..04c71aecc 100644 --- a/contrib/inventory_builder/inventory.py +++ b/contrib/inventory_builder/inventory.py @@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node") # Configurable as shell vars end -class KargoInventory(object): +class KubesprayInventory(object): def __init__(self, changed_hosts=None, config_file=None): self.config = configparser.ConfigParser(allow_no_value=True, @@ -337,7 +337,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200 def main(argv=None): if not argv: argv = sys.argv[1:] - KargoInventory(argv, CONFIG_FILE) + KubesprayInventory(argv, CONFIG_FILE) if __name__ == "__main__": sys.exit(main()) diff --git a/contrib/inventory_builder/setup.cfg b/contrib/inventory_builder/setup.cfg index a09927305..a775367e2 100644 --- a/contrib/inventory_builder/setup.cfg +++ b/contrib/inventory_builder/setup.cfg @@ -1,3 +1,3 @@ [metadata] -name = kargo-inventory-builder +name = kubespray-inventory-builder version = 0.1 diff --git a/contrib/inventory_builder/tests/test_inventory.py b/contrib/inventory_builder/tests/test_inventory.py index ad393079d..43f6b2bb6 100644 --- a/contrib/inventory_builder/tests/test_inventory.py +++ b/contrib/inventory_builder/tests/test_inventory.py @@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase): sys_mock.exit = mock.Mock() super(TestInventory, self).setUp() self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4'] - self.inv = inventory.KargoInventory() + self.inv = inventory.KubesprayInventory() def test_get_ip_from_opts(self): optstring = "ansible_host=10.90.3.2 ip=10.90.3.2" diff --git a/contrib/kvm-setup/README.md b/contrib/kvm-setup/README.md index 61e626590..b77299a78 100644 --- a/contrib/kvm-setup/README.md +++ b/contrib/kvm-setup/README.md @@ -1,11 +1,11 @@ -# Kargo on KVM Virtual Machines hypervisor preparation +# Kubespray on KVM Virtual Machines hypervisor preparation -A simple playbook to ensure your system has the right settings to enable Kargo +A simple playbook to ensure your system has the right settings to enable Kubespray deployment on VMs. -This playbook does not create Virtual Machines, nor does it run Kargo itself. +This playbook does not create Virtual Machines, nor does it run Kubespray itself. ### User creation -If you want to create a user for running Kargo deployment, you should specify +If you want to create a user for running Kubespray deployment, you should specify both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`. 
diff --git a/contrib/kvm-setup/group_vars/all b/contrib/kvm-setup/group_vars/all index d08c2c3d3..6edfd8fd1 100644 --- a/contrib/kvm-setup/group_vars/all +++ b/contrib/kvm-setup/group_vars/all @@ -1,3 +1,3 @@ -#k8s_deployment_user: kargo +#k8s_deployment_user: kubespray #k8s_deployment_user_pkey_path: /tmp/ssh_rsa diff --git a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml index 11f464bdf..5417708ac 100644 --- a/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml +++ b/contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml @@ -12,9 +12,9 @@ line: 'br_netfilter' when: br_netfilter is defined and ansible_os_family == 'Debian' -- name: Add br_netfilter into /etc/modules-load.d/kargo.conf +- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf copy: - dest: /etc/modules-load.d/kargo.conf + dest: /etc/modules-load.d/kubespray.conf content: |- ### This file is managed by Ansible br-netfilter diff --git a/docs/cloud.md b/docs/cloud.md index f0db21c70..7d966bafa 100644 --- a/docs/cloud.md +++ b/docs/cloud.md @@ -3,17 +3,17 @@ Cloud providers #### Provisioning -You can use kargo-cli to start new instances on cloud providers +You can use kubespray-cli to start new instances on cloud providers here's an example ``` -kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana +kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana ``` #### Deploy kubernetes -With kargo-cli +With kubespray-cli ``` -kargo deploy [--aws|--gce] -u admin +kubespray deploy [--aws|--gce] -u admin ``` Or ansible-playbook command diff --git a/docs/coreos.md b/docs/coreos.md index 546ad0e89..e8db71b2c 100644 --- a/docs/coreos.md +++ b/docs/coreos.md @@ -1,10 +1,10 @@ CoreOS bootstrap =============== -Example with **kargo-cli**: +Example with **kubespray-cli**: ``` -kargo deploy --gce --coreos +kubespray deploy --gce --coreos ``` Or with Ansible: diff --git a/docs/getting-started.md b/docs/getting-started.md index 167dcaea2..25bcbfaad 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -1,21 +1,21 @@ Getting started =============== -The easiest way to run the deployement is to use the **kargo-cli** tool. -A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli). +The easiest way to run the deployement is to use the **kubespray-cli** tool. +A complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli). Here is a simple example on AWS: * Create instances and generate the inventory ``` -kargo aws --instances 3 +kubespray aws --instances 3 ``` * Run the deployment ``` -kargo deploy --aws -u centos -n calico +kubespray deploy --aws -u centos -n calico ``` Building your own inventory diff --git a/docs/roadmap.md b/docs/roadmap.md index fb038ae1f..9b23ffc1c 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -8,7 +8,7 @@ That would probably improve deployment speed and certs management [#553](https:/ ### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320) - the playbook would install and configure docker/rkt and the etcd cluster - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars. 
-- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm) +- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm) - to be discussed, a way to provide the inventory - **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321) @@ -53,10 +53,10 @@ That would probably improve deployment speed and certs management [#553](https:/ - (to be discussed) option to set a loadbalancer for the apiservers like ucarp/packemaker/keepalived While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed. -### Kargo-cli +### Kubespray-cli - Delete instances -- `kargo vagrant` to setup a test cluster locally -- `kargo azure` for Microsoft Azure support +- `kubespray vagrant` to setup a test cluster locally +- `kubespray azure` for Microsoft Azure support - switch to Terraform instead of Ansible for provisionning - update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context From 1a8e92c922c8c7925935d71fcf251191d5e90b0b Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Thu, 22 Jun 2017 01:56:31 +0200 Subject: [PATCH 09/26] Fixing cordoning condition that cause fail for upgrading the cluster --- roles/upgrade/pre-upgrade/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml index a2b34927f..decc9d05b 100644 --- a/roles/upgrade/pre-upgrade/tasks/main.yml +++ b/roles/upgrade/pre-upgrade/tasks/main.yml @@ -7,11 +7,11 @@ - set_fact: needs_cordoning: >- - {% if " Ready" in kubectl_nodes.stdout %} + {% if " Ready" in kubectl_nodes.stdout -%} true - {% else %} + {%- else -%} false - {% endif %} + {%- endif %} - name: Cordon node command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}" From ec2255764a0879bb0b1d86fc4336a27d8b0ee8c3 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Mon, 26 Jun 2017 17:29:12 +0800 Subject: [PATCH 10/26] docker_dns_servers_strict to control docker_dns_servers rtrim --- inventory/group_vars/all.yml | 3 +++ roles/docker/defaults/main.yml | 2 ++ roles/docker/tasks/set_facts_dns.yml | 9 +++++++-- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml index 6388ca7af..a30055367 100644 --- a/inventory/group_vars/all.yml +++ b/inventory/group_vars/all.yml @@ -83,6 +83,9 @@ bin_dir: /usr/local/bin ## Please note that overlay2 is only supported on newer kernels #docker_storage_options: -s overlay2 +# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3. +#docker_dns_servers_strict: false + ## Default packages to install within the cluster, f.e: #kpm_packages: # - name: kube-system/grafana diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index c771953ff..e262d908a 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -8,3 +8,5 @@ docker_repo_key_info: docker_repo_info: repos: + +docker_dns_servers_strict: yes diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml index f17c1bde2..64a09bff2 100644 --- a/roles/docker/tasks/set_facts_dns.yml +++ b/roles/docker/tasks/set_facts_dns.yml @@ -52,8 +52,13 @@ - name: check number of nameservers fail: - msg: "Too many nameservers" - when: docker_dns_servers|length > 3 + msg: "Too many nameservers. 
You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3." + when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool + +- name: rtrim number of nameservers to 3 + set_fact: + docker_dns_servers: "{{ docker_dns_servers[0:3] }}" + when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool - name: check number of search domains fail: From 4ba237c5d80994cab0851a53333c3ad72d09cf6c Mon Sep 17 00:00:00 2001 From: gdmelloatpoints Date: Mon, 26 Jun 2017 09:42:30 -0400 Subject: [PATCH 11/26] Make etcd_backup_prefix configurable. Ensures that backups can be stored on a different location other than ${HOST}/var/backups, say an EBS volume on AWS. --- inventory/group_vars/all.yml | 3 +++ roles/etcd/handlers/backup.yml | 5 ----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml index 6388ca7af..997386728 100644 --- a/inventory/group_vars/all.yml +++ b/inventory/group_vars/all.yml @@ -4,6 +4,9 @@ bootstrap_os: none #Directory where etcd data stored etcd_data_dir: /var/lib/etcd +#Directory where etcd backups are stored on the host +etcd_backup_prefix: /var/backups + # Directory where the binaries will be installed bin_dir: /usr/local/bin diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml index 9a611296b..68fe71f07 100644 --- a/roles/etcd/handlers/backup.yml +++ b/roles/etcd/handlers/backup.yml @@ -3,7 +3,6 @@ command: /bin/true notify: - Refresh Time Fact - - Set etcd Backup Directory Prefix - Set Backup Directory - Create Backup Directory - Backup etcd v2 data @@ -13,10 +12,6 @@ - name: Refresh Time Fact setup: filter=ansible_date_time -- name: Set etcd Backup Directory Prefix - set_fact: - etcd_backup_prefix: '/var/backups' - - name: Set Backup Directory set_fact: etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}" From b2a409fd4d651cb95117e332b1a7539f26c292cb Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Mon, 26 Jun 2017 14:24:52 -0400 Subject: [PATCH 12/26] turn off coreos updates --- .gitlab-ci.yml | 17 ++++++++++++++++- tests/cloud_playbooks/create-gce.yml | 4 ++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1cd419951..b64dd2a4e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -90,6 +90,7 @@ before_script: - pwd - ls - echo ${PWD} + - echo "${STARTUP_SCRIPT}" - > ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local ${LOG_LEVEL} @@ -103,6 +104,7 @@ before_script: -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} -e mode=${CLUSTER_MODE} -e test_id=${TEST_ID} + -e startup_script="'${STARTUP_SCRIPT}'" # Check out latest tag if testing upgrade # Uncomment when gitlab kargo repo has tags @@ -261,6 +263,8 @@ before_script: CLUSTER_MODE: separate BOOTSTRAP_OS: coreos RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12 + ##User-data to simply turn off coreos upgrades + STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd' .ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables # stage: deploy-gce-part1 @@ -271,6 +275,7 @@ before_script: UPGRADE_TEST: "basic" CLUSTER_MODE: ha UPGRADE_TEST: "graceful" + STARTUP_SCRIPT: "" .rhel7_weave_variables: &rhel7_weave_variables # stage: deploy-gce-part1 @@ -278,6 +283,7 @@ before_script: CLOUD_IMAGE: rhel-7 CLOUD_REGION: europe-west1-b CLUSTER_MODE: default + 
STARTUP_SCRIPT: "" .centos7_flannel_variables: ¢os7_flannel_variables # stage: deploy-gce-part2 @@ -285,13 +291,15 @@ before_script: CLOUD_IMAGE: centos-7 CLOUD_REGION: us-west1-a CLUSTER_MODE: default - + STARTUP_SCRIPT: "" + .debian8_calico_variables: &debian8_calico_variables # stage: deploy-gce-part2 KUBE_NETWORK_PLUGIN: calico CLOUD_IMAGE: debian-8-kubespray CLOUD_REGION: us-central1-b CLUSTER_MODE: default + STARTUP_SCRIPT: "" .coreos_canal_variables: &coreos_canal_variables # stage: deploy-gce-part2 @@ -302,6 +310,7 @@ before_script: BOOTSTRAP_OS: coreos IDEMPOT_CHECK: "true" RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12 + STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd' .rhel7_canal_sep_variables: &rhel7_canal_sep_variables # stage: deploy-gce-special @@ -309,6 +318,7 @@ before_script: CLOUD_IMAGE: rhel-7 CLOUD_REGION: us-east1-b CLUSTER_MODE: separate + STARTUP_SCRIPT: "" .ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables # stage: deploy-gce-special @@ -317,6 +327,7 @@ before_script: CLOUD_REGION: us-central1-b CLUSTER_MODE: separate IDEMPOT_CHECK: "false" + STARTUP_SCRIPT: "" .centos7_calico_ha_variables: ¢os7_calico_ha_variables # stage: deploy-gce-special @@ -327,6 +338,7 @@ before_script: CLOUD_REGION: europe-west1-b CLUSTER_MODE: ha-scale IDEMPOT_CHECK: "true" + STARTUP_SCRIPT: "" .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables # stage: deploy-gce-special @@ -336,6 +348,7 @@ before_script: CLUSTER_MODE: ha-scale BOOTSTRAP_OS: coreos RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12 + STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd' .ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables # stage: deploy-gce-part1 @@ -345,6 +358,7 @@ before_script: CLUSTER_MODE: separate ETCD_DEPLOYMENT: rkt KUBELET_DEPLOYMENT: rkt + STARTUP_SCRIPT: "" .ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables # stage: deploy-gce-part1 @@ -353,6 +367,7 @@ before_script: CLOUD_IMAGE: ubuntu-1604-xenial CLOUD_REGION: us-central1-b CLUSTER_MODE: separate + STARTUP_SCRIPT: "" # Builds for PRs only (premoderated by unit-tests step) and triggers (auto) coreos-calico-sep: diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml index d1e7c011e..1a82c50d7 100644 --- a/tests/cloud_playbooks/create-gce.yml +++ b/tests/cloud_playbooks/create-gce.yml @@ -30,7 +30,7 @@ credentials_file: "{{gce_credentials_file | default(omit)}}" project_id: "{{ gce_project_id }}" zone: "{{cloud_region}}" - metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}"}' + metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script}}"}' tags: "build-{{test_name}},{{kube_network_plugin}}" register: gce @@ -52,5 +52,5 @@ when: mode in ['scale', 'separate-scale', 'ha-scale'] - name: Wait for SSH to come up - wait_for: host={{item.public_ip}} port=22 delay=10 timeout=180 state=started + wait_for: host={{item.public_ip}} port=22 delay=30 timeout=180 state=started with_items: "{{gce.instance_data}}" From d5516a4ca9c42cc6cfb4c1aef692e0ef323abfcc Mon Sep 17 00:00:00 2001 From: Seungkyu Ahn Date: Thu, 22 Jun 2017 17:05:37 +0900 Subject: [PATCH 13/26] Make kubedns up to date Update kube-dns version to 1.14.2 https://github.com/kubernetes/kubernetes/pull/45684 --- .../kubernetes-apps/ansible/defaults/main.yml | 20 ++- 
roles/kubernetes-apps/ansible/tasks/main.yml | 4 +- .../ansible/templates/kubedns-autoscaler.yml | 2 +- .../ansible/templates/kubedns-deploy.yml | 132 +++++++++++------- .../ansible/templates/kubedns-svc.yml | 10 +- 5 files changed, 103 insertions(+), 65 deletions(-) diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 89bdd4277..2255ba416 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -1,23 +1,20 @@ # Versions -kubedns_version: 1.9 -kubednsmasq_version: 1.3 -exechealthz_version: 1.1 +kubedns_version : 1.14.2 # Limits for dnsmasq/kubedns apps -dns_cpu_limit: 100m dns_memory_limit: 170Mi -dns_cpu_requests: 70m -dns_memory_requests: 50Mi +dns_cpu_requests: 100m +dns_memory_requests: 70Mi kubedns_min_replicas: 1 kubedns_nodes_per_replica: 10 # Images -kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64" +kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64" kubedns_image_tag: "{{ kubedns_version }}" -kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64" -kubednsmasq_image_tag: "{{ kubednsmasq_version }}" -exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64" -exechealthz_image_tag: "{{ exechealthz_version }}" +dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64" +dnsmasq_nanny_image_tag: "{{ kubedns_version }}" +dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64" +dnsmasq_sidecar_image_tag: "{{ kubedns_version }}" # Netchecker deploy_netchecker: false @@ -40,3 +37,4 @@ netchecker_server_memory_requests: 64M # SSL etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" + diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index ed0d11f28..9ec3b7ddc 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -13,8 +13,8 @@ src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" with_items: - - {name: kubedns, file: kubedns-deploy.yml, type: deployment} - - {name: kubedns, file: kubedns-svc.yml, type: svc} + - {name: kube-dns, file: kubedns-deploy.yml, type: deployment} + - {name: kube-dns, file: kubedns-svc.yml, type: svc} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} register: manifests when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml index c0f519e2c..6ef0f2f7a 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml @@ -42,7 +42,7 @@ spec: - --namespace=kube-system - --configmap=kubedns-autoscaler # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - - --target=Deployment/kubedns + - --target=Deployment/kube-dns - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} - --logtostderr=true - --v=2 diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml index a2150cc70..3f07aa905 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml @@ -1,25 +1,39 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: 
- name: kubedns - namespace: {{ system_namespace }} + name: kube-dns + namespace: "{{system_namespace}}" labels: - k8s-app: kubedns - version: v19 + k8s-app: kube-dns kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: - replicas: {{ kubedns_min_replicas }} + # replicas: not specified here: + # 1. In order to make Addon Manager do not reconcile this replicas parameter. + # 2. Default is 1. + # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 selector: matchLabels: - k8s-app: kubedns - version: v19 + k8s-app: kube-dns template: metadata: labels: - k8s-app: kubedns - version: v19 - kubernetes.io/cluster-service: "true" + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true containers: - name: kubedns image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}" @@ -30,15 +44,14 @@ spec: # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: - cpu: {{ dns_cpu_limit }} memory: {{ dns_memory_limit }} requests: cpu: {{ dns_cpu_requests }} memory: {{ dns_memory_requests }} livenessProbe: httpGet: - path: /healthz - port: 8080 + path: /healthcheck/kubedns + port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 @@ -51,13 +64,16 @@ spec: scheme: HTTP # we poll on pod startup for the Kubernetes master service and # only setup the /readiness HTTP server once that's available. - initialDelaySeconds: 30 + initialDelaySeconds: 3 timeoutSeconds: 5 args: - # command = "/kube-dns" - --domain={{ dns_domain }}. 
- --dns-port=10053 + - --config-dir=/kube-dns-config - --v={{ kube_log_level }} + env: + - name: PROMETHEUS_PORT + value: "10055" ports: - containerPort: 10053 name: dns-local @@ -65,25 +81,36 @@ spec: - containerPort: 10053 name: dns-tcp-local protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config - name: dnsmasq - image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}" + image: "{{ dnsmasq_nanny_image_repo }}:{{ dnsmasq_nanny_image_tag }}" imagePullPolicy: {{ k8s_image_pull_policy }} - resources: - limits: - cpu: {{ dns_cpu_limit }} - memory: {{ dns_memory_limit }} - requests: - cpu: {{ dns_cpu_requests }} - memory: {{ dns_memory_requests }} + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 args: - - --log-facility=- + - -v={{ kube_log_level }} + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k - --cache-size=1000 - - --no-resolv - - --server=127.0.0.1#10053 -{% if kube_log_level == '4' %} - - --log-queries -{% endif %} - - --local=/{{ bogus_domains }} + - --log-facility=- + - --server=/{{ dns_domain }}/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 ports: - containerPort: 53 name: dns @@ -91,26 +118,37 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP - - name: healthz - image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}" - imagePullPolicy: {{ k8s_image_pull_policy }} + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 10m - memory: 50Mi requests: - cpu: 10m - # Note that this container shouldn't really need 50Mi of memory. The - # limits are set higher than expected pending investigation on #29688. - # The extra memory was stolen from the kubedns container to keep the - # net memory requested by the pod constant. - memory: 50Mi + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: "{{ dnsmasq_sidecar_image_repo }}:{{ dnsmasq_sidecar_image_tag }}" + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 args: - - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null - - -port=8080 - - -quiet + - --v={{ kube_log_level }} + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ dns_domain }},5,A + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ dns_domain }},5,A ports: - - containerPort: 8080 + - containerPort: 10054 + name: metrics protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m dnsPolicy: Default # Don't use cluster DNS. 
+ diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml index ce8779326..0565a01e8 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml @@ -1,15 +1,16 @@ apiVersion: v1 kind: Service metadata: - name: kubedns + name: kube-dns namespace: {{ system_namespace }} labels: - k8s-app: kubedns + k8s-app: kube-dns kubernetes.io/cluster-service: "true" - kubernetes.io/name: "kubedns" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" spec: selector: - k8s-app: kubedns + k8s-app: kube-dns clusterIP: {{ skydns_server }} ports: - name: dns @@ -18,3 +19,4 @@ spec: - name: dns-tcp port: 53 protocol: TCP + From 17d54cffbba9688df68bf10b0756e4d8832d5e04 Mon Sep 17 00:00:00 2001 From: vincent gromakowski Date: Tue, 27 Jun 2017 10:08:57 +0200 Subject: [PATCH 14/26] add six package to bootstrap role --- roles/bootstrap-os/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/bootstrap-os/defaults/main.yml b/roles/bootstrap-os/defaults/main.yml index cf40f692d..1bc47cf1e 100644 --- a/roles/bootstrap-os/defaults/main.yml +++ b/roles/bootstrap-os/defaults/main.yml @@ -2,3 +2,4 @@ pypy_version: 2.4.0 pip_python_modules: - httplib2 + - six \ No newline at end of file From 3123502f4c5aed29938252c62f61fa10a032eba3 Mon Sep 17 00:00:00 2001 From: gdmelloatpoints Date: Tue, 27 Jun 2017 09:12:34 -0400 Subject: [PATCH 15/26] move `etcd_backup_prefix` to new home. --- inventory/group_vars/all.yml | 3 --- roles/etcd/defaults/main.yml | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml index 997386728..6388ca7af 100644 --- a/inventory/group_vars/all.yml +++ b/inventory/group_vars/all.yml @@ -4,9 +4,6 @@ bootstrap_os: none #Directory where etcd data stored etcd_data_dir: /var/lib/etcd -#Directory where etcd backups are stored on the host -etcd_backup_prefix: /var/backups - # Directory where the binaries will be installed bin_dir: /usr/local/bin diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 6326741b3..8fd72f3db 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -2,6 +2,7 @@ # Set to false to only do certificate management etcd_cluster_setup: true +etcd_backup_prefix: "/var/backups" etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/" etcd_data_dir: "/var/lib/etcd" From a69de8be4066d100b02310e5483463b298e8f31c Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Tue, 27 Jun 2017 16:42:18 +0200 Subject: [PATCH 16/26] changing username from "ubuntu" to the correct one "vagrant" for ubuntu --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index f0fd92ce4..ab8073280 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -13,7 +13,7 @@ SUPPORTED_OS = { "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]}, "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]}, "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]}, - "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "ubuntu"}, + "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"}, } # Defaults for config options defined in CONFIG From 
8467bce2a6d0f4f842209eb2b40a7e457c2a27f6 Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Thu, 15 Jun 2017 19:37:55 -0500 Subject: [PATCH 17/26] Fix inconsistent kubedns version and parameterize kubedns autoscaler image vars --- roles/download/defaults/main.yml | 42 +++++++++++-------- .../kubernetes-apps/ansible/defaults/main.yml | 3 ++ .../ansible/templates/kubedns-autoscaler.yml | 2 +- roles/kubernetes/node/meta/main.yml | 11 +++-- 4 files changed, 35 insertions(+), 23 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 9284fbbdf..334406a14 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -52,9 +52,6 @@ calico_policy_image_repo: "calico/kube-policy-controller" calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "v0.3.0" -exechealthz_version: 1.1 -exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64" -exechealthz_image_tag: "{{ exechealthz_version }}" hyperkube_image_repo: "quay.io/coreos/hyperkube" hyperkube_image_tag: "{{ kube_version }}_coreos.0" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" @@ -74,12 +71,16 @@ nginx_image_tag: 1.11.4-alpine dnsmasq_version: 2.72 dnsmasq_image_repo: "andyshinn/dnsmasq" dnsmasq_image_tag: "{{ dnsmasq_version }}" -kubednsmasq_version: 1.3 -kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64" -kubednsmasq_image_tag: "{{ kubednsmasq_version }}" -kubedns_version: 1.7 -kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64" +kubedns_version: 1.14.2 +kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64" kubedns_image_tag: "{{ kubedns_version }}" +dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64" +dnsmasq_nanny_image_tag: "{{ kubedns_version }}" +dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64" +dnsmasq_sidecar_image_tag: "{{ kubedns_version }}" +kubednsautoscaler_version: 1.1.1 +kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64" +kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}" test_image_repo: busybox test_image_tag: latest elasticsearch_version: "v2.4.1" @@ -193,26 +194,31 @@ downloads: repo: "{{ dnsmasq_image_repo }}" tag: "{{ dnsmasq_image_tag }}" sha256: "{{ dnsmasq_digest_checksum|default(None) }}" - kubednsmasq: - container: true - repo: "{{ kubednsmasq_image_repo }}" - tag: "{{ kubednsmasq_image_tag }}" - sha256: "{{ kubednsmasq_digest_checksum|default(None) }}" kubedns: container: true repo: "{{ kubedns_image_repo }}" tag: "{{ kubedns_image_tag }}" sha256: "{{ kubedns_digest_checksum|default(None) }}" + dnsmasq_nanny: + container: true + repo: "{{ dnsmasq_nanny_image_repo }}" + tag: "{{ dnsmasq_nanny_image_tag }}" + sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}" + dnsmasq_sidecar: + container: true + repo: "{{ dnsmasq_sidecar_image_repo }}" + tag: "{{ dnsmasq_sidecar_image_tag }}" + sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}" + kubednsautoscaler: + container: true + repo: "{{ kubednsautoscaler_image_repo }}" + tag: "{{ kubednsautoscaler_image_tag }}" + sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}" testbox: container: true repo: "{{ test_image_repo }}" tag: "{{ test_image_tag }}" sha256: "{{ testbox_digest_checksum|default(None) }}" - exechealthz: - container: true - repo: "{{ exechealthz_image_repo }}" - tag: "{{ exechealthz_image_tag }}" - sha256: "{{ 
exechealthz_digest_checksum|default(None) }}" elasticsearch: container: true repo: "{{ elasticsearch_image_repo }}" diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 2255ba416..3d2e7a419 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -1,5 +1,6 @@ # Versions kubedns_version : 1.14.2 +kubednsautoscaler_version: 1.1.1 # Limits for dnsmasq/kubedns apps dns_memory_limit: 170Mi @@ -15,6 +16,8 @@ dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64" dnsmasq_nanny_image_tag: "{{ kubedns_version }}" dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64" dnsmasq_sidecar_image_tag: "{{ kubedns_version }}" +kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64" +kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}" # Netchecker deploy_netchecker: false diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml index 6ef0f2f7a..a1d5455ad 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml @@ -32,7 +32,7 @@ spec: spec: containers: - name: autoscaler - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1 + image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}" resources: requests: cpu: "20m" diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml index 2ef549c90..12a7d73b7 100644 --- a/roles/kubernetes/node/meta/main.yml +++ b/roles/kubernetes/node/meta/main.yml @@ -22,12 +22,15 @@ dependencies: file: "{{ downloads.netcheck_agent }}" when: deploy_netchecker tags: [download, netchecker] - - role: download - file: "{{ downloads.kubednsmasq }}" - tags: [download, dnsmasq] - role: download file: "{{ downloads.kubedns }}" tags: [download, dnsmasq] - role: download - file: "{{ downloads.exechealthz }}" + file: "{{ downloads.dnsmasq_nanny }}" tags: [download, dnsmasq] + - role: download + file: "{{ downloads.dnsmasq_sidecar }}" + tags: [download, dnsmasq] + - role: download + file: "{{ downloads.kubednsautoscaler }}" + tags: [download, dnsmasq] \ No newline at end of file From 007ee0da8e2d6c987541fc68f22936b4c4134c22 Mon Sep 17 00:00:00 2001 From: tanshanshan Date: Thu, 29 Jun 2017 14:45:15 +0800 Subject: [PATCH 18/26] fix reset --- roles/reset/tasks/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 96984f92b..af3e66601 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -83,6 +83,15 @@ - /etc/dhcp/dhclient.d/zdnsupdate.sh - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate - "{{ bin_dir }}/kubelet" + - "{{ bin_dir }}/kubernetes-scripts" + - /run/flannel + - /etc/flannel + - /run/kubernetes + - /usr/local/share/ca-certificates/kube-ca.crt + - /usr/local/share/ca-certificates/etcd-ca.crt + - /etc/ssl/certs/kube-ca.pem + - /etc/ssl/certs/etcd-ca.pem + - /var/log/pods/ tags: ['files'] From c8258171ca4253b2dde753e83631c3d12176da43 Mon Sep 17 00:00:00 2001 From: Anton Nerozya Date: Thu, 29 Jun 2017 19:46:27 +0200 Subject: [PATCH 19/26] Better naming for recurrent tasks --- roles/vault/tasks/bootstrap/ca_trust.yml | 2 +- roles/vault/tasks/shared/issue_cert.yml | 6 +++--- roles/vault/tasks/shared/sync.yml | 4 ++-- 
roles/vault/tasks/shared/sync_file.yml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/vault/tasks/bootstrap/ca_trust.yml b/roles/vault/tasks/bootstrap/ca_trust.yml index 63ab256d5..ae67f7405 100644 --- a/roles/vault/tasks/bootstrap/ca_trust.yml +++ b/roles/vault/tasks/bootstrap/ca_trust.yml @@ -1,6 +1,6 @@ --- -- name: bootstrap/ca_trust | pull CA from cert from groups.vault|first +- name: "bootstrap/ca_trust | pull CA from cert from {{groups.vault|first}}" command: "cat {{ vault_cert_dir }}/ca.pem" register: vault_cert_file_cat delegate_to: "{{ groups['vault']|first }}" diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 0733e86a0..cb3685bf5 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -26,7 +26,7 @@ mode: "{{ issue_cert_dir_mode | d('0755') }}" owner: "{{ issue_cert_file_owner | d('root') }}" -- name: issue_cert | Generate the cert +- name: "issue_cert | Generate the cert for {{ issue_cert_role }}" uri: url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}" headers: "{{ issue_cert_headers }}" @@ -40,7 +40,7 @@ register: issue_cert_result when: inventory_hostname == issue_cert_hosts|first -- name: issue_cert | Copy the cert to all hosts +- name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts" copy: content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['certificate'] }}" dest: "{{ issue_cert_path }}" @@ -48,7 +48,7 @@ mode: "{{ issue_cert_file_mode | d('0644') }}" owner: "{{ issue_cert_file_owner | d('root') }}" -- name: issue_cert | Copy the key to all hosts +- name: "issue_cert | Copy key for {{ issue_cert_path }} to all hosts" copy: content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['private_key'] }}" dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}" diff --git a/roles/vault/tasks/shared/sync.yml b/roles/vault/tasks/shared/sync.yml index bbfedbc4c..102532f0c 100644 --- a/roles/vault/tasks/shared/sync.yml +++ b/roles/vault/tasks/shared/sync.yml @@ -28,7 +28,7 @@ state: directory when: inventory_hostname not in sync_file_srcs -- name: "sync_file | Copy the file to hosts that don't have it" +- name: "sync_file | Copy {{ sync_file_path }} to hosts that don't have it" copy: content: "{{ sync_file_contents }}" dest: "{{ sync_file_path }}" @@ -37,7 +37,7 @@ owner: "{{ sync_file_owner|d('root') }}" when: inventory_hostname not in sync_file_srcs -- name: "sync_file | Copy the key file to hosts that don't have it" +- name: "sync_file | Copy {{ sync_file_key_path }} to hosts that don't have it" copy: content: "{{ sync_file_key_contents }}" dest: "{{ sync_file_key_path }}" diff --git a/roles/vault/tasks/shared/sync_file.yml b/roles/vault/tasks/shared/sync_file.yml index ef53e9d90..be5284154 100644 --- a/roles/vault/tasks/shared/sync_file.yml +++ b/roles/vault/tasks/shared/sync_file.yml @@ -19,12 +19,12 @@ when: >- sync_file_is_cert|d() and (sync_file_key_path is not defined or sync_file_key_path == '') -- name: "sync_file | Check if file exists" +- name: "sync_file | Check if {{sync_file_path}} file exists" stat: path: "{{ sync_file_path }}" register: sync_file_stat -- name: "sync_file | Check if key file exists" +- name: "sync_file | Check if {{ sync_file_key_path }} key file exists" stat: path: "{{ sync_file_key_path }}" register: sync_file_key_stat From c2b3920b50ccc9aff694fc31935a1b22ab0283fd Mon Sep 17 
00:00:00 2001 From: Martin Joehren Date: Fri, 30 Jun 2017 12:17:03 +0000 Subject: [PATCH 20/26] added flag for not populating inventory entries to etc hosts file --- roles/kubernetes/preinstall/defaults/main.yml | 5 ++++- roles/kubernetes/preinstall/tasks/etchosts.yml | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index dd5cbf810..686c0e9bc 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -32,7 +32,7 @@ openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" # For the vsphere integration, kubelet will need credentials to access # vsphere apis -# Documentation regarting these values can be found +# Documentation regarding these values can be found # https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" @@ -49,3 +49,6 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content # for hostnet pods and infra needs resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf + +# All inventory hostnames will be written into each /etc/hosts file. +populate_inventory_to_hosts_file: true diff --git a/roles/kubernetes/preinstall/tasks/etchosts.yml b/roles/kubernetes/preinstall/tasks/etchosts.yml index df330be08..69496b7c2 100644 --- a/roles/kubernetes/preinstall/tasks/etchosts.yml +++ b/roles/kubernetes/preinstall/tasks/etchosts.yml @@ -9,6 +9,7 @@ create: yes backup: yes marker: "# Ansible inventory hosts {mark}" + when: populate_inventory_to_hosts_file - name: Hosts | populate kubernetes loadbalancer address into hosts file lineinfile: From 5f75d4c09905ab437cbd61935eca3c247064851c Mon Sep 17 00:00:00 2001 From: Abdelsalam Abbas Date: Tue, 27 Jun 2017 22:11:44 +0200 Subject: [PATCH 21/26] Uncodron Masters which have scheduling Enabled --- roles/upgrade/post-upgrade/tasks/main.yml | 3 ++- upgrade-cluster.yml | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index c32f42491..e7efa0601 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -3,4 +3,5 @@ - name: Uncordon node command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}" delegate_to: "{{ groups['kube-master'][0] }}" - when: needs_cordoning|default(false) + when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} ) + diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index 09f268ecf..1a66904ce 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -67,6 +67,7 @@ - { role: kubernetes/node, tags: node } - { role: kubernetes/master, tags: master } - { role: network_plugin, tags: network } + - { role: upgrade/post-upgrade, tags: post-upgrade } #Finally handle worker upgrades, based on given batch size - hosts: kube-node:!kube-master From 38f5d1b18e6ddb7e1b670cb432310f8d9c061c13 Mon Sep 17 00:00:00 2001 From: Hans Kristian Flaatten Date: Wed, 14 Jun 2017 12:37:35 +0200 Subject: [PATCH 22/26] Set kubedns minimum replicas to 2 --- roles/kubernetes-apps/ansible/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 3d2e7a419..2787472c8 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -6,7 +6,7 @@ kubednsautoscaler_version: 1.1.1 dns_memory_limit: 170Mi dns_cpu_requests: 100m dns_memory_requests: 70Mi -kubedns_min_replicas: 1 +kubedns_min_replicas: 2 kubedns_nodes_per_replica: 10 # Images From 5df757a4039fb4ba8ebb222a022b73c0064cb4b1 Mon Sep 17 00:00:00 2001 From: Hans Kristian Flaatten Date: Wed, 21 Jun 2017 10:37:13 +0200 Subject: [PATCH 23/26] Correct indentation and line endings for gitlab config --- .gitlab-ci.yml | 126 ++++++++++++++++++++++++------------------------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b64dd2a4e..9b890870b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -92,7 +92,7 @@ before_script: - echo ${PWD} - echo "${STARTUP_SCRIPT}" - > - ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local + ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local ${LOG_LEVEL} -e cloud_image=${CLOUD_IMAGE} -e cloud_region=${CLOUD_REGION} @@ -118,7 +118,7 @@ before_script: ${SSH_ARGS} ${LOG_LEVEL} -e ansible_python_interpreter=${PYPATH} - -e ansible_ssh_user=${SSH_USER} + -e ansible_ssh_user=${SSH_USER} -e bootstrap_os=${BOOTSTRAP_OS} -e cert_management=${CERT_MGMT:-script} -e cloud_provider=gce @@ -136,30 +136,30 @@ before_script: # Repeat deployment if testing upgrade - > - if [ "${UPGRADE_TEST}" != "false" ]; then + if [ "${UPGRADE_TEST}" != "false" ]; then test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"; test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"; - pip install ansible==2.3.0; - git checkout "${CI_BUILD_REF}"; - ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER - ${SSH_ARGS} - ${LOG_LEVEL} - -e ansible_python_interpreter=${PYPATH} - -e ansible_ssh_user=${SSH_USER} - -e bootstrap_os=${BOOTSTRAP_OS} - -e cloud_provider=gce - -e deploy_netchecker=true - -e download_localhost=${DOWNLOAD_LOCALHOST} - -e download_run_once=${DOWNLOAD_RUN_ONCE} - -e etcd_deployment_type=${ETCD_DEPLOYMENT} - -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} - -e local_release_dir=${PWD}/downloads - -e resolvconf_mode=${RESOLVCONF_MODE} - -e weave_cpu_requests=${WEAVE_CPU_LIMIT} - -e weave_cpu_limit=${WEAVE_CPU_LIMIT} - --limit "all:!fake_hosts" - $PLAYBOOK; + pip install ansible==2.3.0; + git checkout "${CI_BUILD_REF}"; + ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER + ${SSH_ARGS} + ${LOG_LEVEL} + -e ansible_python_interpreter=${PYPATH} + -e ansible_ssh_user=${SSH_USER} + -e bootstrap_os=${BOOTSTRAP_OS} + -e cloud_provider=gce + -e deploy_netchecker=true + -e download_localhost=${DOWNLOAD_LOCALHOST} + -e download_run_once=${DOWNLOAD_RUN_ONCE} + -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + -e local_release_dir=${PWD}/downloads + -e resolvconf_mode=${RESOLVCONF_MODE} + -e weave_cpu_requests=${WEAVE_CPU_LIMIT} + -e weave_cpu_limit=${WEAVE_CPU_LIMIT} + --limit "all:!fake_hosts" + $PLAYBOOK; fi # Tests Cases @@ -175,40 +175,40 @@ before_script: ## Idempotency checks 1/5 (repeat 
deployment) - > if [ "${IDEMPOT_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS - -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - --private-key=${HOME}/.ssh/id_rsa - -e bootstrap_os=${BOOTSTRAP_OS} - -e ansible_python_interpreter=${PYPATH} - -e download_localhost=${DOWNLOAD_LOCALHOST} - -e download_run_once=${DOWNLOAD_RUN_ONCE} - -e deploy_netchecker=true - -e resolvconf_mode=${RESOLVCONF_MODE} - -e local_release_dir=${PWD}/downloads - -e etcd_deployment_type=${ETCD_DEPLOYMENT} - -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} - --limit "all:!fake_hosts" + ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS + -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + --private-key=${HOME}/.ssh/id_rsa + -e bootstrap_os=${BOOTSTRAP_OS} + -e ansible_python_interpreter=${PYPATH} + -e download_localhost=${DOWNLOAD_LOCALHOST} + -e download_run_once=${DOWNLOAD_RUN_ONCE} + -e deploy_netchecker=true + -e resolvconf_mode=${RESOLVCONF_MODE} + -e local_release_dir=${PWD}/downloads + -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + --limit "all:!fake_hosts" cluster.yml; fi ## Idempotency checks 2/5 (Advanced DNS checks) - > if [ "${IDEMPOT_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} - -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root - --limit "all:!fake_hosts" + ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} + -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root + --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL; fi ## Idempotency checks 3/5 (reset deployment) - > if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS - -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - --private-key=${HOME}/.ssh/id_rsa - -e bootstrap_os=${BOOTSTRAP_OS} - -e ansible_python_interpreter=${PYPATH} - -e reset_confirmation=yes + ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS + -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + --private-key=${HOME}/.ssh/id_rsa + -e bootstrap_os=${BOOTSTRAP_OS} + -e ansible_python_interpreter=${PYPATH} + -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml; fi @@ -216,28 +216,28 @@ before_script: ## Idempotency checks 4/5 (redeploy after reset) - > if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS - -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} - --private-key=${HOME}/.ssh/id_rsa - -e bootstrap_os=${BOOTSTRAP_OS} - -e ansible_python_interpreter=${PYPATH} - -e download_localhost=${DOWNLOAD_LOCALHOST} - -e download_run_once=${DOWNLOAD_RUN_ONCE} - -e deploy_netchecker=true - -e resolvconf_mode=${RESOLVCONF_MODE} - -e local_release_dir=${PWD}/downloads - -e etcd_deployment_type=${ETCD_DEPLOYMENT} - -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} - --limit "all:!fake_hosts" + ansible-playbook -i 
inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS + -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + --private-key=${HOME}/.ssh/id_rsa + -e bootstrap_os=${BOOTSTRAP_OS} + -e ansible_python_interpreter=${PYPATH} + -e download_localhost=${DOWNLOAD_LOCALHOST} + -e download_run_once=${DOWNLOAD_RUN_ONCE} + -e deploy_netchecker=true + -e resolvconf_mode=${RESOLVCONF_MODE} + -e local_release_dir=${PWD}/downloads + -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + --limit "all:!fake_hosts" cluster.yml; fi ## Idempotency checks 5/5 (Advanced DNS checks) - > if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} - -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root - --limit "all:!fake_hosts" + ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} + -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root + --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL; fi @@ -603,7 +603,7 @@ ci-authorized: script: - /bin/sh scripts/premoderator.sh except: ['triggers', 'master'] - + syntax-check: <<: *job stage: unit-tests From 6bd27038ccd5a98c8812355b5d89d42b2c013977 Mon Sep 17 00:00:00 2001 From: Hans Kristian Flaatten Date: Wed, 21 Jun 2017 10:38:25 +0200 Subject: [PATCH 24/26] Set kubedns min replicas to 1 in gitlab config --- .gitlab-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9b890870b..259c45614 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -127,6 +127,7 @@ before_script: -e download_run_once=${DOWNLOAD_RUN_ONCE} -e etcd_deployment_type=${ETCD_DEPLOYMENT} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e local_release_dir=${PWD}/downloads -e resolvconf_mode=${RESOLVCONF_MODE} @@ -153,6 +154,7 @@ before_script: -e download_run_once=${DOWNLOAD_RUN_ONCE} -e etcd_deployment_type=${ETCD_DEPLOYMENT} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} -e local_release_dir=${PWD}/downloads -e resolvconf_mode=${RESOLVCONF_MODE} @@ -186,6 +188,7 @@ before_script: -e resolvconf_mode=${RESOLVCONF_MODE} -e local_release_dir=${PWD}/downloads -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} --limit "all:!fake_hosts" cluster.yml; @@ -227,6 +230,7 @@ before_script: -e resolvconf_mode=${RESOLVCONF_MODE} -e local_release_dir=${PWD}/downloads -e etcd_deployment_type=${ETCD_DEPLOYMENT} + -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} --limit "all:!fake_hosts" cluster.yml; From a742d10c54116538f50b35a2379ef8efcad3c3fb Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Tue, 4 Jul 2017 19:05:16 -0400 Subject: [PATCH 25/26] Allow calico ipPool to be created with mode "cross-subnet" --- roles/network_plugin/calico/defaults/main.yml | 1 + roles/network_plugin/calico/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 3ef70413f..8cd120234 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -4,6 +4,7 @@ nat_outgoing: 
true # Use IP-over-IP encapsulation across hosts ipip: false +ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets # Set to true if you want your calico cni binaries to overwrite the # ones from hyperkube while leaving other cni plugins intact. diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index fa734464e..cdd17ffa6 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -94,7 +94,7 @@ shell: > echo '{ "kind": "ipPool", - "spec": {"disabled": false, "ipip": {"enabled": {{ cloud_provider is defined or ipip }}}, + "spec": {"disabled": false, "ipip": {"enabled": {{ cloud_provider is defined or ipip }}, "mode": "{{ ipip_mode }}"}, "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}}, "apiVersion": "v1", "metadata": {"cidr": "{{ kube_pods_subnet }}"} From e26be9cb8a8119ea7cb4cf9209e782e667122323 Mon Sep 17 00:00:00 2001 From: Vladimir Kozyrev Date: Wed, 31 May 2017 12:11:47 +0300 Subject: [PATCH 26/26] add private dns server for a specific zone --- inventory/group_vars/k8s-cluster.yml | 5 +++++ roles/dnsmasq/defaults/main.yml | 3 +++ roles/dnsmasq/templates/01-kube-dns.conf.j2 | 5 +++++ 3 files changed, 13 insertions(+) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index ef5e363dc..350be8240 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -132,3 +132,8 @@ efk_enabled: false # Helm deployment helm_enabled: false + +# dnsmasq +# dnsmasq_upstream_dns_servers: +# - /resolvethiszone.with/10.0.4.250 +# - 8.8.8.8 diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml index 58b1b7f1d..bf670c788 100644 --- a/roles/dnsmasq/defaults/main.yml +++ b/roles/dnsmasq/defaults/main.yml @@ -30,3 +30,6 @@ dns_memory_requests: 50Mi # Autoscaler parameters dnsmasq_nodes_per_replica: 10 dnsmasq_min_replicas: 1 + +# Custom name servers +dnsmasq_upstream_dns_servers: [] diff --git a/roles/dnsmasq/templates/01-kube-dns.conf.j2 b/roles/dnsmasq/templates/01-kube-dns.conf.j2 index dce26d726..483be2090 100644 --- a/roles/dnsmasq/templates/01-kube-dns.conf.j2 +++ b/roles/dnsmasq/templates/01-kube-dns.conf.j2 @@ -11,6 +11,11 @@ server=/{{ dns_domain }}/{{ skydns_server }} local=/{{ bogus_domains }} #Set upstream dns servers +{% if dnsmasq_upstream_dns_servers|length > 0 %} +{% for srv in dnsmasq_upstream_dns_servers %} +server={{ srv }} +{% endfor %} +{% endif %} {% if system_and_upstream_dns_servers|length > 0 %} {% for srv in system_and_upstream_dns_servers %} server={{ srv }}