Mirror of https://github.com/kubernetes-sigs/kubespray.git, synced 2026-02-01 17:48:12 -03:30
Compare commits
314 Commits
4
.gitignore
vendored
@@ -1,6 +1,6 @@
.vagrant
*.retry
inventory/vagrant_ansible_inventory
**/vagrant_ansible_inventory
inventory/credentials/
inventory/group_vars/fake_hosts.yml
inventory/host_vars/
@@ -12,9 +12,9 @@ temp
*.tfstate
*.tfstate.backup
contrib/terraform/aws/credentials.tfvars
**/*.sw[pon]
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/

# Byte-compiled / optimized / DLL files
@@ -93,7 +93,7 @@ before_script:
# Check out latest tag if testing upgrade
# Uncomment when gitlab kubespray repo has tags
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
- test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
- test "${UPGRADE_TEST}" != "false" && git checkout 02cd5418c22d51e40261775908d55bc562206023
# Checkout the CI vars file so it is available
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
@@ -304,6 +304,10 @@ before_script:
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"

.coreos_vault_upgrade_variables: &coreos_vault_upgrade_variables
# stage: deploy-part1
UPGRADE_TEST: "basic"

.ubuntu_flannel_variables: &ubuntu_flannel_variables
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"
@@ -560,7 +564,7 @@ gce_rhel7-canal-sep:
<<: *rhel7_canal_sep_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/,]
only: ['master', /^pr-.*$/]

gce_rhel7-canal-sep-triggers:
stage: deploy-part2
@@ -638,6 +642,17 @@ gce_ubuntu-vault-sep:
except: ['triggers']
only: ['master', /^pr-.*$/]

gce_coreos-vault-upgrade:
stage: deploy-part2
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *coreos_vault_upgrade_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

gce_ubuntu-flannel-sep:
stage: deploy-special
<<: *job
55
README.md
@@ -1,14 +1,14 @@



Deploy a Production Ready Kubernetes Cluster
============================================

If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.

- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
- **High available** cluster
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Support most popular **Linux distributions**
- Supports most popular **Linux distributions**
- **Continuous integration tests**

Quick Start
@@ -18,6 +19,9 @@ To deploy the cluster you can use :

### Ansible

# Install dependencies from ``requirements.txt``
sudo pip install -r requirements.txt

# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster
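The quick start above stops at copying the sample inventory; the usual next step, sketched here under the assumption that the inventory file is named `hosts.ini` and that an SSH key lives at `~/.ssh/private_key` (both placeholders), is to review the group vars and run the `cluster.yml` playbook:

```
# Review and change parameters under inventory/mycluster/group_vars
cat inventory/mycluster/group_vars/all.yml
cat inventory/mycluster/group_vars/k8s-cluster.yml

# Deploy Kubespray with Ansible (privilege escalation is required)
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
  --private-key=~/.ssh/private_key
```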
@@ -34,7 +37,15 @@ To deploy the cluster you can use :

### Vagrant

# Simply running `vagrant up` (for tests purposes)
For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed:

python -V && pip -V

If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
Install the necessary requirements

sudo pip install -r requirements.txt
vagrant up

Documents
@@ -75,19 +86,25 @@ Supported Linux Distributions

Note: Upstart/SysV init based OS types are not supported.

Versions of supported components
--------------------------------
Supported Components
--------------------

- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5
- [etcd](https://github.com/coreos/etcd/releases) v3.2.4
- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
- [contiv](https://github.com/contiv/install/releases) v1.1.7
- [weave](http://weave.works/) v2.2.1
- [docker](https://www.docker.com/) v17.03 (see note)
- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.10.4
- [etcd](https://github.com/coreos/etcd) v3.2.18
- [docker](https://www.docker.com/) v17.03 (see note)
- [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
- Network Plugin
- [calico](https://github.com/projectcalico/calico) v2.6.8
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.1.2
- [contiv](https://github.com/contiv/install) v1.1.7
- [flanneld](https://github.com/coreos/flannel) v0.10.0
- [weave](https://github.com/weaveworks/weave) v2.4.0
- Application
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v1.1.0-k8s1.10
- [cert-manager](https://github.com/jetstack/cert-manager) v0.4.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.17.1

Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).

@@ -122,7 +139,7 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use

- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
@@ -152,8 +169,6 @@ Tools and projects on top of Kubespray
CI Tests
--------



[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)

CI/end-to-end tests sponsored by Google (GCE)
13
SECURITY_CONTACTS
Normal file
@@ -0,0 +1,13 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
@@ -33,11 +33,11 @@
roles:
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- { role: docker, tags: docker, when: manage_docker|default(true) }
- role: rkt
tags: rkt
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
- { role: download, tags: download, skip_downloads: false }
- { role: download, tags: download, when: "not skip_downloads" }
environment: "{{proxy_env}}"

- hosts: etcd:k8s-cluster:vault:calico-rr
@@ -51,13 +51,13 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }

- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
- { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }

- hosts: etcd:k8s-cluster:vault:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
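One consequence of the `manage_docker` switch introduced above is that the docker role can now be skipped on hosts where Docker is provisioned out of band; a minimal sketch (inventory path is a placeholder):

```
# Skip Kubespray's docker role when Docker is already managed externally
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
  -e manage_docker=false
```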
@@ -9,8 +9,8 @@ Resource Group. It will not install Kubernetes itself, this has to be done in a

## Requirements

- [Install azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-install)
- [Login with azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-connect)
- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest)
- Dedicated Resource Group created in the Azure Portal or through azure-cli

## Configuration through group_vars/all
@@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

@@ -1 +1 @@
../../../inventory/group_vars
../../../inventory/local/group_vars
@@ -17,21 +17,20 @@ This project will create:
- Export the variables for your AWS credentials or edit `credentials.tfvars`:

```
export AWS_ACCESS_KEY_ID="www"
export AWS_SECRET_ACCESS_KEY ="xxx"
export AWS_SSH_KEY_NAME="yyy"
export AWS_DEFAULT_REGION="zzz"
export TF_VAR_AWS_ACCESS_KEY_ID="www"
export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
export TF_VAR_AWS_DEFAULT_REGION="zzz"
```
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`

- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials

Example:
```commandline
terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
terraform apply -var-file=credentials.tfvars
```

- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
@@ -46,7 +45,7 @@ ssh -F ./ssh-bastion.conf user@$ip

Example (this one assumes you are using CoreOS)
```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
```
***Using other distrib than CoreOs***
If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
@@ -1 +1 @@
../../inventory/group_vars
../../inventory/local/group_vars
@@ -32,7 +32,11 @@ floating IP addresses or not.
- Kubernetes worker nodes

Note that the Ansible script will report an invalid configuration if you wind up
with an even number of etcd instances since that is not a valid configuration.
with an even number of etcd instances since that is not a valid configuration. This
restriction includes standalone etcd nodes that are deployed in a cluster along with
master nodes with etcd replicas. As an example, if you have three master nodes with
etcd replicas and three standalone etcd nodes, the script will fail since there are
now six total etcd replicas.

### GlusterFS
The Terraform configuration supports provisioning of an optional GlusterFS
@@ -135,7 +139,7 @@ the one you want to use with the environment variable `OS_CLOUD`:
export OS_CLOUD=mycloud
```

##### Openrc method (deprecated)
##### Openrc method

When using classic environment variables, Terraform uses default `OS_*`
environment variables. A script suitable for your environment may be available
@@ -218,6 +222,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
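As an illustration of the new `supplementary_master_groups` entry above (the value and cluster path are placeholders, not part of the change itself), adding the masters to the `kube-node` group so they also run workloads could look like:

```
# Make the masters schedulable by also placing them in the kube-node group
echo 'supplementary_master_groups = "kube-node"' >> inventory/mycluster/cluster.tf
```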
#### Terraform state files

@@ -299,11 +304,15 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil

#### Bastion host

If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
Bastion access will be determined by:

```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
```
- Your choice on the amount of bastion hosts (set by `number_of_bastions` terraform variable).
- The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).

If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned.
If you don't have a bastion host, but at least one of your masters/nodes have a floating IP, then ssh traffic will be tunneled by one of these machines.

So, either a bastion host, or at least master/node with a floating IP are required.
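A possible way to create the file described above, assuming a cluster inventory named `mycluster`, a CoreOS `core` user and a floating IP of `192.0.2.10` (all placeholders):

```
mkdir -p inventory/mycluster/group_vars
cat > inventory/mycluster/group_vars/no-floating.yml <<'EOF'
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q core@192.0.2.10"'
EOF
```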
#### Test access
@@ -3,6 +3,7 @@ module "network" {

external_net = "${var.external_net}"
network_name = "${var.network_name}"
subnet_cidr = "${var.subnet_cidr}"
cluster_name = "${var.cluster_name}"
dns_nameservers = "${var.dns_nameservers}"
}
@@ -24,6 +25,7 @@ module "compute" {
source = "modules/compute"

cluster_name = "${var.cluster_name}"
az_list = "${var.az_list}"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
number_of_etcd = "${var.number_of_etcd}"
@@ -48,6 +50,7 @@ module "compute" {
k8s_master_fips = "${module.ips.k8s_master_fips}"
k8s_node_fips = "${module.ips.k8s_node_fips}"
bastion_fips = "${module.ips.bastion_fips}"
supplementary_master_groups = "${var.supplementary_master_groups}"

network_id = "${module.network.router_id}"
}
@@ -59,6 +59,17 @@ resource "openstack_compute_secgroup_v2" "k8s" {
self = true
}
}
resource "openstack_compute_secgroup_v2" "worker" {
name = "${var.cluster_name}-k8s-worker"
description = "${var.cluster_name} - Kubernetes worker nodes"

rule {
ip_protocol = "tcp"
from_port = "30000"
to_port = "32767"
cidr = "0.0.0.0/0"
}
}

resource "openstack_compute_instance_v2" "bastion" {
name = "${var.cluster_name}-bastion-${count.index+1}"
@@ -83,7 +94,7 @@ resource "openstack_compute_instance_v2" "bastion" {
}

provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
}

}
@@ -91,6 +102,7 @@ resource "openstack_compute_instance_v2" "bastion" {
resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index+1}"
count = "${var.number_of_k8s_masters}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -107,15 +119,20 @@ resource "openstack_compute_instance_v2" "k8s_master" {

metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}"
}

provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}

}

resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
count = "${var.number_of_k8s_masters_no_etcd}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -125,20 +142,26 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
}

security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
"${openstack_compute_secgroup_v2.k8s.name}",
]

metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,k8s-cluster,vault"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}"
}

provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}

}

resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index+1}"
count = "${var.number_of_etcd}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -160,6 +183,7 @@ resource "openstack_compute_instance_v2" "etcd" {
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -175,7 +199,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {

metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
}

@@ -184,6 +208,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -198,7 +223,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {

metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
}

@@ -207,6 +232,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.number_of_k8s_nodes}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -217,6 +243,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {

security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"${openstack_compute_secgroup_v2.bastion.name}",
"${openstack_compute_secgroup_v2.worker.name}",
"default",
]

@@ -226,11 +253,16 @@ resource "openstack_compute_instance_v2" "k8s_node" {
depends_on = "${var.network_id}"
}

provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
}

}

resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.number_of_k8s_nodes_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -240,6 +272,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
}

security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
"${openstack_compute_secgroup_v2.worker.name}",
"default",
]

@@ -279,6 +312,7 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
count = "${var.number_of_gfs_nodes_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image_gfs}"
flavor_id = "${var.flavor_gfs_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -1,5 +1,9 @@
variable "cluster_name" {}

variable "az_list" {
type = "list"
}

variable "number_of_k8s_masters" {}

variable "number_of_k8s_masters_no_etcd" {}
@@ -55,3 +59,7 @@ variable "k8s_node_fips" {
variable "bastion_fips" {
type = "list"
}

variable "supplementary_master_groups" {
default = ""
}
@@ -12,7 +12,7 @@ resource "openstack_networking_network_v2" "k8s" {
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
network_id = "${openstack_networking_network_v2.k8s.id}"
cidr = "10.0.0.0/24"
cidr = "${var.subnet_cidr}"
ip_version = 4
dns_nameservers = "${var.dns_nameservers}"
}

@@ -7,3 +7,5 @@ variable "cluster_name" {}
variable "dns_nameservers" {
type = "list"
}

variable "subnet_cidr" {}

@@ -41,5 +41,6 @@ number_of_k8s_nodes_no_floating_ip = 4
# networking
network_name = "<network>"
external_net = "<UUID>"
subnet_cidr = "<cidr>"
floatingip_pool = "<pool>"
@@ -2,6 +2,12 @@ variable "cluster_name" {
default = "example"
}

variable "az_list" {
description = "List of Availability Zones available in your OpenStack cluster"
type = "list"
default = ["nova"]
}

variable "number_of_bastions" {
default = 1
}
@@ -97,6 +103,12 @@ variable "network_name" {
default = "internal"
}

variable "subnet_cidr" {
description = "Subnet CIDR block."
type = "string"
default = "10.0.0.0/24"
}

variable "dns_nameservers" {
description = "An array of DNS name server names used by hosts in this subnet."
type = "list"
@@ -111,3 +123,8 @@ variable "floatingip_pool" {
variable "external_net" {
description = "uuid of the external/public network"
}

variable "supplementary_master_groups" {
description = "supplementary kubespray ansible groups for masters, such kube-node"
default = ""
}
@@ -706,6 +706,10 @@ def query_list(hosts):

for name, attrs, hostgroups in hosts:
for group in set(hostgroups):
# Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf
# Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all"
if not group: group = "all"

groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
@@ -123,7 +123,6 @@ The following tags are defined in playbooks:
| hyperkube | Manipulations with K8s hyperkube image
| k8s-pre-upgrade | Upgrading K8s cluster
| k8s-secrets | Configuring K8s certs/keys
| kpm | Installing K8s apps definitions with KPM
| kube-apiserver | Configuring static pod kube-apiserver
| kube-controller-manager | Configuring static pod kube-controller-manager
| kubectl | Installing kubectl and bash completion
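As a hedged illustration of how the tags in the table above are typically used (the inventory path is a placeholder), a run can be limited to a subset of steps with `--tags`:

```
# Re-run only the kubectl installation and the kube-apiserver configuration steps
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
  --tags kubectl,kube-apiserver
```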
@@ -5,7 +5,7 @@ To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provi

Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.

Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
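A sketch of the tagging step described above using the AWS CLI; the resource IDs, cluster name and tag values are placeholders and the CLI is assumed to be already configured:

```
CLUSTER_NAME=mycluster
# Tag subnets, route tables and instances that belong to the cluster
aws ec2 create-tags \
  --resources subnet-0123456789abcdef0 rtb-0123456789abcdef0 i-0123456789abcdef0 \
  --tags Key=kubernetes.io/cluster/${CLUSTER_NAME},Value=owned

# Mark subnets that should receive internet-facing and internal ELBs
aws ec2 create-tags --resources subnet-0123456789abcdef0 --tags Key=kubernetes.io/role/elb,Value=1
aws ec2 create-tags --resources subnet-0fedcba9876543210 --tags Key=kubernetes.io/role/internal-elb,Value=1
```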
@@ -169,3 +169,12 @@ By default the felix agent(calico-node) will abort if the Kernel RPF setting is
```
calico_node_ignorelooserpf: true
```

Note that in OpenStack you must allow `ipip` traffic in your security groups,
otherwise you will experience timeouts.
To do this you must add a rule which allows it, for example:

```
neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
neutron security-group-rule-create --protocol 4 --direction igress k8s-a0tp4t
```
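Note that the second command above would be rejected as written because `igress` is not a valid direction; a corrected pair, still assuming the legacy `neutron` client and the same placeholder security group name, would be:

```
neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
neutron security-group-rule-create --protocol 4 --direction ingress k8s-a0tp4t
```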
@@ -52,13 +52,13 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
## dns_mode
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:

#### dnsmasq_kubedns (default)
#### dnsmasq_kubedns
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwardet to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``

#### kubedns
#### kubedns (default)
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.
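For a one-off run the mode can also be overridden on the command line instead of editing group vars; a sketch, with the inventory path as a placeholder:

```
# Switch the cluster DNS setup to plain kubedns for this run
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
  -e dns_mode=kubedns
```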
@@ -38,9 +38,9 @@ See more details in the [ansible guide](ansible.md).
Adding nodes
------------

You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.

- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:

ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
@@ -51,11 +51,26 @@ Remove nodes

You may want to remove **worker** nodes to your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then stop some kubernetes services and delete some certificates, and finally execute the kubectl command to delete these nodes. This can be combined with the add node function, This is generally helpful when doing something like autoscaling your clusters. Of course if a node is not working, you can remove the node and install it again.

- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `remove-node.yml`:
Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).

ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
--private-key=~/.ssh/private_key

We support two ways to select the nodes:

- Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
```
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
--private-key=~/.ssh/private_key
--private-key=~/.ssh/private_key \
--extra-vars "node=nodename,nodename2"
```
or
- Use `--limit nodename,nodename2` to select the node
```
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
--private-key=~/.ssh/private_key \
--limit nodename,nodename2"
```

Connecting to Kubernetes
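The trailing quote in the last `--limit` example above appears to be a typo carried over from the source file; the intended invocation is presumably:

```
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
  --private-key=~/.ssh/private_key \
  --limit nodename,nodename2
```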
BIN
docs/img/kubernetes-logo.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 6.8 KiB
@@ -25,13 +25,13 @@ There are related application specifc variables:
netchecker_port: 31081
agent_report_interval: 15
netcheck_namespace: default
agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0"
server_img: "quay.io/l23network/k8s-netchecker-server:v1.0"
agent_img: "mirantis/k8s-netchecker-agent:v1.2.2"
server_img: "mirantis/k8s-netchecker-server:v1.2.2"
```

Note that the application verifies DNS resolve for FQDNs comprising only the
combination of the ``netcheck_namespace.dns_domain`` vars, for example the
``netchecker-service.default.cluster.local``. If you want to deploy the application
``netchecker-service.default.svc.cluster.local``. If you want to deploy the application
to the non default namespace, make sure as well to adjust the ``searchdomains`` var
so the resulting search domain records to contain that namespace, like:
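For context, the netchecker agents and server listed above are only deployed when the add-on is enabled; a hedged sketch of enabling it for a run (the `deploy_netchecker` flag and inventory path are assumptions, not shown in this excerpt):

```
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
  -e deploy_netchecker=true -e netcheck_namespace=default
```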
@@ -9,7 +9,7 @@ Kubespray's roadmap
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook)
- to be discussed, a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
@@ -81,3 +81,61 @@ kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
recreated. All other invalidated service account tokens are cleaned up
automatically, but other pods are not deleted out of an abundance of caution
for impact to user deployed pods.

### Component-based upgrades

A deployer may want to upgrade specific components in order to minimize risk
or save time. This strategy is not covered by CI as of this writing, so it is
not guaranteed to work.

These commands are useful only for upgrading fully-deployed, healthy, existing
hosts. This will definitely not work for undeployed or partially deployed
hosts.

Upgrade docker:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=docker
```

Upgrade etcd:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd
```

Upgrade vault:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=vault
```

Upgrade kubelet:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens
```

Upgrade Kubernetes master components:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master
```

Upgrade network plugins:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=network
```

Upgrade all add-ons:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=apps
```

Upgrade just helm (assuming `helm_enabled` is true):

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=helm
```
@@ -8,8 +8,8 @@
version: "{{ item.version }}"
state: "{{ item.state }}"
with_items:
- { state: "present", name: "docker", version: "2.7.0" }
- { state: "present", name: "docker-compose", version: "1.18.0" }
- { state: "present", name: "docker", version: "3.4.1" }
- { state: "present", name: "docker-compose", version: "1.21.2" }

- name: CephFS Provisioner | Check Go version
shell: |
@@ -35,19 +35,19 @@
- name: CephFS Provisioner | Clone repo
git:
repo: https://github.com/kubernetes-incubator/external-storage.git
dest: "~/go/src/github.com/kubernetes-incubator"
version: 92295a30
clone: no
dest: "~/go/src/github.com/kubernetes-incubator/external-storage"
version: 06fddbe2
clone: yes
update: yes

- name: CephFS Provisioner | Build image
shell: |
cd ~/go/src/github.com/kubernetes-incubator/external-storage
REGISTRY=quay.io/kubespray/ VERSION=92295a30 make ceph/cephfs
REGISTRY=quay.io/kubespray/ VERSION=06fddbe2 make ceph/cephfs

- name: CephFS Provisioner | Push image
docker_image:
name: quay.io/kubespray/cephfs-provisioner:92295a30
name: quay.io/kubespray/cephfs-provisioner:06fddbe2
push: yes
retries: 10
@@ -110,10 +110,6 @@ bin_dir: /usr/local/bin
# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
#docker_dns_servers_strict: false

## Default packages to install within the cluster, f.e:
#kpm_packages:
# - name: kube-system/grafana

## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
@@ -19,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.9.5
kube_version: v1.10.4

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -58,32 +58,30 @@ kube_users:
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: oidc:

# Choose network plugin (cilium, calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico

# weave's network password for encryption
# if null then no network encryption
# you can use --extra-vars to pass the password in command line
weave_password: EnterPasswordHere

# Weave uses consensus mode by default
# Enabling seed mode allow to dynamically add or remove hosts
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false

# This two variable are automatically changed by the weave's role, do not manually change these values
# To reset values :
# weave_seed: uninitialized
# weave_peers: uninitialized
weave_seed: uninitialized
weave_peers: uninitialized

# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
weave_mtu: 1376
# Weave deployment
# weave_password: ~
# weave_checkpoint_disable: false
# weave_conn_limit: 100
# weave_hairpin_mode: true
# weave_ipalloc_range: {{ kube_pods_subnet }}
# weave_expect_npc: {{ enable_network_policy }}
# weave_kube_peers: ~
# weave_ipalloc_init: ~
# weave_expose_ip: ~
# weave_metrics_addr: ~
# weave_status_addr: ~
# weave_mtu: 1376
# weave_no_masq_local: true
# weave_extra_args: ~

# Enable kubernetes network policies
enable_network_policy: false
@@ -138,6 +136,9 @@ dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## Used to set docker daemon iptables options to true
#docker_iptables_enabled: "true"

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
@@ -146,6 +147,13 @@ docker_daemon_graph: "/var/lib/docker"
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
docker_bin_dir: "/usr/bin"

## If non-empty will override default system MounFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
docker_mount_flags:

# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
@@ -165,9 +173,6 @@ efk_enabled: false
# Helm deployment
helm_enabled: false

# Istio deployment
istio_enabled: false

# Registry deployment
registry_enabled: false
# registry_namespace: "{{ system_namespace }}"
@@ -183,19 +188,21 @@ local_volume_provisioner_enabled: false

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "{{ system_namespace }}"
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors:
# - 172.24.0.1:6789
# - 172.24.0.2:6789
# - 172.24.0.3:6789
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true

# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_nodeselector:
# node-role.kubernetes.io/master: "true"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
@@ -1,14 +1,14 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
# node1 ansible_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_host=95.54.0.17 # ip=10.3.0.6

# ## configure a bastion host if your nodes are not directly reachable
# bastion ansible_ssh_host=x.x.x.x
# bastion ansible_host=x.x.x.x ansible_user=some_user

# [kube-master]
# node1
@@ -26,11 +26,6 @@
# node5
# node6

# [kube-ingress]
# node2
# node3

# [k8s-cluster:children]
# kube-master
# kube-node
# kube-ingress
@@ -1,9 +1,11 @@
---

- hosts: all
vars:
ansible_ssh_pipelining: true
gather_facts: true

- hosts: etcd:k8s-cluster:vault:calico-rr
- hosts: "{{ node | default('etcd:k8s-cluster:vault:calico-rr') }}"
vars_prompt:
name: "delete_nodes_confirmation"
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
@@ -20,8 +22,9 @@
roles:
- { role: remove-node/pre-remove, tags: pre-remove }

- hosts: kube-node
- hosts: "{{ node | default('kube-node') }}"
roles:
- { role: kubespray-defaults }
- { role: reset, tags: reset }

- hosts: kube-master
@@ -1,4 +1,6 @@
pbr>=1.6
ansible>=2.4.0
netaddr
jinja2>=2.9.6
netaddr
pbr>=1.6
ansible-modules-hashivault>=3.9.4
hvac
@@ -18,7 +18,11 @@ mv -n pypy-$PYPY_VERSION-linux64 pypy

## library fixup
mkdir -p pypy/lib
ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
if [ -f /lib64/libncurses.so.5.9 ]; then
ln -snf /lib64/libncurses.so.5.9 $BINDIR/pypy/lib/libtinfo.so.5
elif [ -f /lib64/libncurses.so.6.1 ]; then
ln -snf /lib64/libncurses.so.6.1 $BINDIR/pypy/lib/libtinfo.so.5
fi

cat > $BINDIR/python <<EOF
#!/bin/bash
@@ -7,49 +7,58 @@
tags:
- facts

- name: Force binaries directory for Container Linux by CoreOS
set_fact:
bin_dir: "/opt/bin"
tags:
- facts

- name: Bootstrap | Run bootstrap.sh
script: bootstrap.sh
when: need_bootstrap.rc != 0

- set_fact:
ansible_python_interpreter: "/opt/bin/python"
ansible_python_interpreter: "{{ bin_dir }}/python"
tags:
- facts

- name: Bootstrap | Check if we need to install pip
shell: "{{ansible_python_interpreter}} -m pip --version"
shell: "pip --version"
register: need_pip
failed_when: false
changed_when: false
check_mode: no
when: need_bootstrap.rc != 0
tags:
- facts
environment:
PATH: "{{ ansible_env.PATH }}:{{ bin_dir }}"

- name: Bootstrap | Copy get-pip.py
copy:
src: get-pip.py
dest: ~/get-pip.py
when: need_pip != 0
when: need_pip.rc != 0

- name: Bootstrap | Install pip
shell: "{{ansible_python_interpreter}} ~/get-pip.py"
when: need_pip != 0
when: need_pip.rc != 0

- name: Bootstrap | Remove get-pip.py
file:
path: ~/get-pip.py
state: absent
when: need_pip != 0
when: need_pip.rc != 0

- name: Bootstrap | Install pip launcher
copy:
src: runner
dest: /opt/bin/pip
dest: "{{ bin_dir }}/pip"
mode: 0755
when: need_pip != 0
when: need_pip.rc != 0

- name: Install required python modules
pip:
name: "{{ item }}"
with_items: "{{pip_python_coreos_modules}}"
environment:
PATH: "{{ ansible_env.PATH }}:{{ bin_dir }}"
@@ -8,6 +8,7 @@
changed_when: false
with_items:
- python
- python-apt
- pip
- dbus-daemon
tags:
@@ -3,15 +3,11 @@
file:
path: /etc/dnsmasq.d
state: directory
tags:
- bootstrap-os

- name: ensure dnsmasq.d-available directory exists
file:
path: /etc/dnsmasq.d-available
state: directory
tags:
- bootstrap-os

- name: check system nameservers
shell: awk '/^nameserver/ {print $NF}' /etc/resolv.conf
@@ -17,12 +17,13 @@ dockerproject_repo_key_info:
|
||||
dockerproject_repo_info:
|
||||
repos:
|
||||
|
||||
docker_dns_servers_strict: yes
|
||||
docker_dns_servers_strict: true
|
||||
|
||||
docker_container_storage_setup: false
|
||||
|
||||
# Used to override obsoletes=0
|
||||
yum_conf: /etc/yum.conf
|
||||
yum_repo_dir: /etc/yum.repos.d
|
||||
docker_yum_conf: /etc/yum_docker.conf
|
||||
|
||||
# CentOS/RedHat docker-ce repo
|
||||
@@ -39,3 +40,6 @@ dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/
|
||||
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
|
||||
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
|
||||
dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
|
||||
|
||||
# Used to set docker daemon iptables options
|
||||
docker_iptables_enabled: "false"
|
||||
|
||||
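Since docker_dns_servers_strict is now a proper boolean and docker_iptables_enabled is exposed as a default, both can be overridden from inventory group_vars. A sketch (the file path is only an example):

# inventory/mycluster/group_vars/all.yml
docker_dns_servers_strict: false   # trim to the first 3 nameservers instead of failing
docker_iptables_enabled: "true"    # renders --iptables=true in the docker unit drop-in shown below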
@@ -85,7 +85,7 @@
|
||||
- name: Configure docker repository on RedHat/CentOS
|
||||
template:
|
||||
src: "rh_docker.repo.j2"
|
||||
dest: "/etc/yum.repos.d/docker.repo"
|
||||
dest: "{{ yum_repo_dir }}/docker.repo"
|
||||
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
|
||||
|
||||
- name: Copy yum.conf for editing
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
- name: add upstream dns servers (only when dnsmasq is not used)
|
||||
set_fact:
|
||||
docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}"
|
||||
when: dns_mode in ['kubedns', 'coredns', 'coreos_dual']
|
||||
when: dns_mode in ['kubedns', 'coredns', 'coredns_dual']
|
||||
|
||||
- name: add global searchdomains
|
||||
set_fact:
|
||||
@@ -56,7 +56,7 @@
|
||||
|
||||
- name: check number of nameservers
|
||||
fail:
|
||||
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
|
||||
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in all.yml and we will only use the first 3."
|
||||
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
|
||||
|
||||
- name: rtrim number of nameservers to 3
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
[Service]
|
||||
Environment="DOCKER_OPTS={{ docker_options | default('') }} \
|
||||
--iptables=false"
|
||||
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
|
||||
{% if docker_mount_flags is defined and docker_mount_flags != "" %}
|
||||
MountFlags={{ docker_mount_flags }}
|
||||
{% endif %}
|
||||
|
||||
@@ -9,6 +9,7 @@ docker_versioned_pkg:
|
||||
'1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'17.03': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'17.09': docker-ce=17.09.0~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'stable': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
'edge': docker-ce=17.12.1~ce-0~debian-{{ ansible_distribution_release|lower }}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ docker_versioned_pkg:
|
||||
'1.12': docker-engine-1.12.6-1.el7.centos
|
||||
'1.13': docker-engine-1.13.1-1.el7.centos
|
||||
'17.03': docker-ce-17.03.2.ce-1.el7.centos
|
||||
'17.09': docker-ce-17.09.0.ce-1.el7.centos
|
||||
'stable': docker-ce-17.03.2.ce-1.el7.centos
|
||||
'edge': docker-ce-17.12.1.ce-1.el7.centos
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ docker_versioned_pkg:
|
||||
'1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'17.09': docker-ce=17.09.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'stable': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
'edge': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }}
|
||||
|
||||
|
||||
@@ -24,9 +24,9 @@ download_always_pull: False
|
||||
download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
|
||||
|
||||
# Versions
|
||||
kube_version: v1.9.5
|
||||
kube_version: v1.10.4
|
||||
kubeadm_version: "{{ kube_version }}"
|
||||
etcd_version: v3.2.4
|
||||
etcd_version: v3.2.18
|
||||
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
|
||||
# after migration to container download
|
||||
calico_version: "v2.6.8"
|
||||
@@ -36,21 +36,18 @@ calico_policy_version: "v1.0.3"
|
||||
calico_rr_version: "v0.4.2"
|
||||
flannel_version: "v0.10.0"
|
||||
flannel_cni_version: "v0.3.0"
|
||||
istio_version: "0.2.6"
|
||||
vault_version: 0.8.1
|
||||
weave_version: 2.2.1
|
||||
vault_version: 0.10.1
|
||||
weave_version: "2.4.0"
|
||||
pod_infra_version: 3.0
|
||||
contiv_version: 1.1.7
|
||||
cilium_version: "v1.0.0-rc8"
|
||||
cilium_version: "v1.1.2"
|
||||
|
||||
# Download URLs
|
||||
istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
|
||||
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/amd64/kubeadm"
|
||||
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip"
|
||||
|
||||
# Checksums
|
||||
istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
|
||||
kubeadm_checksum: 12b6e9ac1624852b7c978bde70b9bde9ca0e4fc6581d09bddfb117bb41f93c74
|
||||
kubeadm_checksum: 7e1169bbbeed973ab402941672dec957638dea5952a1e8bc89a37d5e709cc4b4
|
||||
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
|
||||
|
||||
# Containers
|
||||
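The bumps above show that kube_version, kubeadm_version and kubeadm_checksum move together; pinning a different release in inventory means the checksum has to match the chosen kubeadm binary. Example override, with values copied from the defaults above:

kube_version: v1.10.4
kubeadm_version: "{{ kube_version }}"
kubeadm_checksum: 7e1169bbbeed973ab402941672dec957638dea5952a1e8bc89a37d5e709cc4b4
etcd_version: v3.2.18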
@@ -70,32 +67,16 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
|
||||
calico_policy_image_tag: "{{ calico_policy_version }}"
|
||||
calico_rr_image_repo: "quay.io/calico/routereflector"
|
||||
calico_rr_image_tag: "{{ calico_rr_version }}"
|
||||
istio_proxy_image_repo: docker.io/istio/proxy
|
||||
istio_proxy_image_tag: "{{ istio_version }}"
|
||||
istio_proxy_init_image_repo: docker.io/istio/proxy_init
|
||||
istio_proxy_init_image_tag: "{{ istio_version }}"
|
||||
istio_ca_image_repo: docker.io/istio/istio-ca
|
||||
istio_ca_image_tag: "{{ istio_version }}"
|
||||
istio_mixer_image_repo: docker.io/istio/mixer
|
||||
istio_mixer_image_tag: "{{ istio_version }}"
|
||||
istio_pilot_image_repo: docker.io/istio/pilot
|
||||
istio_pilot_image_tag: "{{ istio_version }}"
|
||||
istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
|
||||
istio_proxy_debug_image_tag: "{{ istio_version }}"
|
||||
istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
|
||||
istio_sidecar_initializer_image_tag: "{{ istio_version }}"
|
||||
istio_statsd_image_repo: prom/statsd-exporter
|
||||
istio_statsd_image_tag: latest
|
||||
hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
|
||||
hyperkube_image_tag: "{{ kube_version }}"
|
||||
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
|
||||
pod_infra_image_tag: "{{ pod_infra_version }}"
|
||||
install_socat_image_repo: "xueshanf/install-socat"
|
||||
install_socat_image_tag: "latest"
|
||||
netcheck_version: "v1.0"
|
||||
netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
|
||||
netcheck_version: "v1.2.2"
|
||||
netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
|
||||
netcheck_agent_tag: "{{ netcheck_version }}"
|
||||
netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server"
|
||||
netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
|
||||
netcheck_server_tag: "{{ netcheck_version }}"
|
||||
weave_kube_image_repo: "weaveworks/weave-kube"
|
||||
weave_kube_image_tag: "{{ weave_version }}"
|
||||
@@ -105,6 +86,8 @@ contiv_image_repo: "contiv/netplugin"
|
||||
contiv_image_tag: "{{ contiv_version }}"
|
||||
contiv_auth_proxy_image_repo: "contiv/auth_proxy"
|
||||
contiv_auth_proxy_image_tag: "{{ contiv_version }}"
|
||||
contiv_etcd_init_image_repo: "ferest/etcd-initer"
|
||||
contiv_etcd_init_image_tag: latest
|
||||
cilium_image_repo: "docker.io/cilium/cilium"
|
||||
cilium_image_tag: "{{ cilium_version }}"
|
||||
nginx_image_repo: nginx
|
||||
@@ -112,10 +95,10 @@ nginx_image_tag: 1.13
|
||||
dnsmasq_version: 2.78
|
||||
dnsmasq_image_repo: "andyshinn/dnsmasq"
|
||||
dnsmasq_image_tag: "{{ dnsmasq_version }}"
|
||||
kubedns_version: 1.14.8
|
||||
kubedns_version: 1.14.10
|
||||
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64"
|
||||
kubedns_image_tag: "{{ kubedns_version }}"
|
||||
coredns_version: 1.1.0
|
||||
coredns_version: 1.1.2
|
||||
coredns_image_repo: "docker.io/coredns/coredns"
|
||||
coredns_image_tag: "{{ coredns_version }}"
|
||||
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64"
|
||||
@@ -130,16 +113,16 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
|
||||
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
|
||||
test_image_repo: busybox
|
||||
test_image_tag: latest
|
||||
elasticsearch_version: "v2.4.1"
|
||||
elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
|
||||
elasticsearch_version: "v5.6.4"
|
||||
elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
|
||||
elasticsearch_image_tag: "{{ elasticsearch_version }}"
|
||||
fluentd_version: "1.22"
|
||||
fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
|
||||
fluentd_version: "v2.0.4"
|
||||
fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
|
||||
fluentd_image_tag: "{{ fluentd_version }}"
|
||||
kibana_version: "v4.6.1"
|
||||
kibana_image_repo: "gcr.io/google_containers/kibana"
|
||||
kibana_version: "5.6.4"
|
||||
kibana_image_repo: "docker.elastic.co/kibana/kibana"
|
||||
kibana_image_tag: "{{ kibana_version }}"
|
||||
helm_version: "v2.8.1"
|
||||
helm_version: "v2.9.1"
|
||||
helm_image_repo: "lachlanevenson/k8s-helm"
|
||||
helm_image_tag: "{{ helm_version }}"
|
||||
tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
|
||||
@@ -152,17 +135,15 @@ registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
|
||||
registry_proxy_image_tag: "0.4"
|
||||
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
|
||||
local_volume_provisioner_image_tag: "v2.0.0"
|
||||
cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
|
||||
cephfs_provisioner_image_tag: "92295a30"
|
||||
cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner"
|
||||
cephfs_provisioner_image_tag: "v1.1.0-k8s1.10"
|
||||
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
|
||||
ingress_nginx_controller_image_tag: "0.12.0"
|
||||
ingress_nginx_controller_image_tag: "0.17.1"
|
||||
ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
|
||||
ingress_nginx_default_backend_image_tag: "1.4"
|
||||
cert_manager_version: "v0.2.3"
|
||||
cert_manager_version: "v0.4.0"
|
||||
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
|
||||
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
|
||||
cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
|
||||
cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
|
||||
|
||||
downloads:
|
||||
netcheck_server:
|
||||
@@ -202,83 +183,6 @@ downloads:
|
||||
mode: "0755"
|
||||
groups:
|
||||
- k8s-cluster
|
||||
istioctl:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
file: true
|
||||
version: "{{ istio_version }}"
|
||||
dest: "istio/istioctl"
|
||||
sha256: "{{ istioctl_checksum }}"
|
||||
source_url: "{{ istioctl_download_url }}"
|
||||
url: "{{ istioctl_download_url }}"
|
||||
unarchive: false
|
||||
owner: "root"
|
||||
mode: "0755"
|
||||
groups:
|
||||
- kube-master
|
||||
istio_proxy:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_proxy_image_repo }}"
|
||||
tag: "{{ istio_proxy_image_tag }}"
|
||||
sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
istio_proxy_init:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_proxy_init_image_repo }}"
|
||||
tag: "{{ istio_proxy_init_image_tag }}"
|
||||
sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
istio_ca:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_ca_image_repo }}"
|
||||
tag: "{{ istio_ca_image_tag }}"
|
||||
sha256: "{{ istio_ca_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
istio_mixer:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_mixer_image_repo }}"
|
||||
tag: "{{ istio_mixer_image_tag }}"
|
||||
sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
istio_pilot:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_pilot_image_repo }}"
|
||||
tag: "{{ istio_pilot_image_tag }}"
|
||||
sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
istio_proxy_debug:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_proxy_debug_image_repo }}"
|
||||
tag: "{{ istio_proxy_debug_image_tag }}"
|
||||
sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
istio_sidecar_initializer:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_sidecar_initializer_image_repo }}"
|
||||
tag: "{{ istio_sidecar_initializer_image_tag }}"
|
||||
sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
istio_statsd:
|
||||
enabled: "{{ istio_enabled }}"
|
||||
container: true
|
||||
repo: "{{ istio_statsd_image_repo }}"
|
||||
tag: "{{ istio_statsd_image_tag }}"
|
||||
sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
hyperkube:
|
||||
enabled: true
|
||||
container: true
|
||||
@@ -383,6 +287,14 @@ downloads:
|
||||
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- k8s-cluster
|
||||
contiv_etcd_init:
|
||||
enabled: "{{ kube_network_plugin == 'contiv' }}"
|
||||
container: true
|
||||
repo: "{{ contiv_etcd_init_image_repo }}"
|
||||
tag: "{{ contiv_etcd_init_image_tag }}"
|
||||
sha256: "{{ contiv_etcd_init_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- k8s-cluster
|
||||
pod_infra:
|
||||
enabled: true
|
||||
container: true
|
||||
@@ -556,7 +468,7 @@ downloads:
|
||||
tag: "{{ ingress_nginx_controller_image_tag }}"
|
||||
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-ingress
|
||||
- kube-node
|
||||
ingress_nginx_default_backend:
|
||||
enabled: "{{ ingress_nginx_enabled }}"
|
||||
container: true
|
||||
@@ -564,7 +476,7 @@ downloads:
|
||||
tag: "{{ ingress_nginx_default_backend_image_tag }}"
|
||||
sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-ingress
|
||||
- kube-node
|
||||
cert_manager_controller:
|
||||
enabled: "{{ cert_manager_enabled }}"
|
||||
container: true
|
||||
@@ -573,14 +485,6 @@ downloads:
|
||||
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
cert_manager_ingress_shim:
|
||||
enabled: "{{ cert_manager_enabled }}"
|
||||
container: true
|
||||
repo: "{{ cert_manager_ingress_shim_image_repo }}"
|
||||
tag: "{{ cert_manager_ingress_shim_image_tag }}"
|
||||
sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
|
||||
groups:
|
||||
- kube-node
|
||||
|
||||
download_defaults:
|
||||
container: false
|
||||
|
||||
@@ -20,6 +20,6 @@
|
||||
when:
|
||||
- not skip_downloads|default(false)
|
||||
- item.value.enabled
|
||||
- item.value.container
|
||||
- "{{ item.value.container | default(False) }}"
|
||||
- download_run_once
|
||||
- group_names | intersect(download.groups) | length
|
||||
|
||||
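The condition now tolerates entries that omit the container key by defaulting it to False. For reference, the shape of a downloads entry these conditions evaluate (keys taken from the entries above; the image name is made up):

downloads:
  my_addon:
    enabled: true
    container: true            # omit for file downloads; the default(False) above handles it
    repo: "example.org/my-addon"
    tag: "v0.1.0"
    groups:
      - kube-node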
@@ -9,7 +9,7 @@
|
||||
|
||||
- name: Register docker images info
|
||||
raw: >-
|
||||
{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" | tr '\n' ','
|
||||
{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} if .RepoTags {{ '}}' }}{{ '{{' }} (index .RepoTags 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}{{ '{{' }} if .RepoDigests {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}{{ '{{' }} end {{ '}}' }}" | tr '\n' ','
|
||||
no_log: true
|
||||
register: docker_images
|
||||
failed_when: false
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
---
|
||||
# Set to false to only do certificate management
|
||||
etcd_cluster_setup: true
|
||||
etcd_events_cluster_setup: false
|
||||
|
||||
# Set to true to separate k8s events to a different etcd cluster
|
||||
etcd_events_cluster_enabled: false
|
||||
|
||||
etcd_backup_prefix: "/var/backups"
|
||||
etcd_data_dir: "/var/lib/etcd"
|
||||
@@ -31,6 +35,12 @@ etcd_election_timeout: "5000"
|
||||
|
||||
etcd_metrics: "basic"
|
||||
|
||||
## A dictionary of extra environment variables to add to etcd.env, formatted like:
|
||||
## etcd_extra_vars:
|
||||
## ETCD_VAR1: "value1"
|
||||
## ETCD_VAR2: "value2"
|
||||
etcd_extra_vars: {}
|
||||
|
||||
# Limits
|
||||
# Limit memory only if <4GB memory on host. 0=unlimited
|
||||
etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %}"
|
||||
@@ -44,7 +54,7 @@ etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr',
|
||||
|
||||
etcd_compaction_retention: "8"
|
||||
|
||||
etcd_vault_mount_path: etcd
|
||||
etcd_vault_mount_path: "/etcd"
|
||||
|
||||
# Force clients like etcdctl to use TLS certs (different than peer security)
|
||||
etcd_secure_client: true
|
||||
|
||||
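The new etcd_extra_vars dictionary is rendered verbatim into etcd.env (see the template loop near the end of the etcd hunks), so additional ETCD_* environment variables can be injected without patching the role. Example, reusing the placeholder names from the comment above:

etcd_extra_vars:
  ETCD_VAR1: "value1"   # placeholder names; any ETCD_* setting works
  ETCD_VAR2: "value2"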
@@ -95,4 +95,9 @@ if [ -n "$HOSTS" ]; then
|
||||
fi
|
||||
|
||||
# Install certs
|
||||
if [ -e "$SSLDIR/ca-key.pem" ]; then
|
||||
# No pass existing CA
|
||||
rm -f ca.pem ca-key.pem
|
||||
fi
|
||||
|
||||
mv *.pem ${SSLDIR}/
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
- name: restart etcd-events
|
||||
command: /bin/true
|
||||
notify:
|
||||
- etcd-events | reload systemd
|
||||
- etcd | reload systemd
|
||||
- reload etcd-events
|
||||
- wait for etcd-events up
|
||||
|
||||
@@ -19,9 +19,6 @@
|
||||
- name: etcd | reload systemd
|
||||
command: systemctl daemon-reload
|
||||
|
||||
- name: etcd-events | reload systemd
|
||||
command: systemctl daemon-reload
|
||||
|
||||
- name: reload etcd
|
||||
service:
|
||||
name: etcd
|
||||
|
||||
@@ -1,11 +1,104 @@
|
||||
---
|
||||
- name: Configure | Check if etcd cluster is healthy
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
register: etcd_cluster_is_healthy
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- name: Configure | Check if etcd-events cluster is healthy
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
register: etcd_events_cluster_is_healthy
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- include_tasks: refresh_config.yml
|
||||
when: is_etcd_master
|
||||
|
||||
- name: Configure | Copy etcd.service systemd file
|
||||
template:
|
||||
src: "etcd-{{ etcd_deployment_type }}.service.j2"
|
||||
dest: /etc/systemd/system/etcd.service
|
||||
backup: yes
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- name: Configure | Copy etcd-events.service systemd file
|
||||
template:
|
||||
src: "etcd-events-{{ etcd_deployment_type }}.service.j2"
|
||||
dest: /etc/systemd/system/etcd-events.service
|
||||
backup: yes
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
|
||||
- name: Configure | reload systemd
|
||||
command: systemctl daemon-reload
|
||||
when: is_etcd_master
|
||||
|
||||
- name: Configure | Ensure etcd is running
|
||||
service:
|
||||
name: etcd
|
||||
state: started
|
||||
enabled: yes
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- name: Configure | Ensure etcd-events is running
|
||||
service:
|
||||
name: etcd-events
|
||||
state: started
|
||||
enabled: yes
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
|
||||
- name: Configure | Check if etcd cluster is healthy
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
register: etcd_cluster_is_healthy
|
||||
until: etcd_cluster_is_healthy.rc == 0
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
ignore_errors: false
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- name: Configure | Check if etcd-events cluster is healthy
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
register: etcd_events_cluster_is_healthy
|
||||
until: etcd_events_cluster_is_healthy.rc == 0
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
ignore_errors: false
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- name: Configure | Check if member is in etcd cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
|
||||
register: etcd_member_in_cluster
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
@@ -25,44 +118,16 @@
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- name: Configure | Copy etcd.service systemd file
|
||||
template:
|
||||
src: "etcd-{{ etcd_deployment_type }}.service.j2"
|
||||
dest: /etc/systemd/system/etcd.service
|
||||
backup: yes
|
||||
when: is_etcd_master
|
||||
notify: restart etcd
|
||||
|
||||
- name: Configure | Copy etcd-events.service systemd file
|
||||
template:
|
||||
src: "etcd-events-host.service.j2"
|
||||
dest: /etc/systemd/system/etcd-events.service
|
||||
backup: yes
|
||||
when: is_etcd_master and etcd_deployment_type == "host" and etcd_events_cluster_setup
|
||||
notify: restart etcd-events
|
||||
|
||||
- name: Configure | Copy etcd-events.service systemd file
|
||||
template:
|
||||
src: "etcd-events-docker.service.j2"
|
||||
dest: /etc/systemd/system/etcd-events.service
|
||||
backup: yes
|
||||
when: is_etcd_master and etcd_deployment_type == "docker" and etcd_events_cluster_setup
|
||||
notify: restart etcd-events
|
||||
|
||||
- name: Configure | Join member(s) to etcd cluster one at a time
|
||||
include_tasks: join_etcd_member.yml
|
||||
vars:
|
||||
target_node: "{{ item }}"
|
||||
loop_control:
|
||||
pause: 10
|
||||
with_items: "{{ groups['etcd'] }}"
|
||||
when: inventory_hostname == item and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
|
||||
when: inventory_hostname == item and etcd_cluster_setup and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
|
||||
|
||||
- name: Configure | Join member(s) to etcd-events cluster one at a time
|
||||
include_tasks: join_etcd-evetns_member.yml
|
||||
include_tasks: join_etcd-events_member.yml
|
||||
vars:
|
||||
target_node: "{{ item }}"
|
||||
loop_control:
|
||||
pause: 10
|
||||
with_items: "{{ groups['etcd'] }}"
|
||||
when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
owner: root
|
||||
mode: 0700
|
||||
run_once: yes
|
||||
when: inventory_hostname == groups['etcd'][0]
|
||||
delegate_to: "{{groups['etcd'][0]}}"
|
||||
|
||||
- name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
|
||||
@@ -26,6 +27,7 @@
|
||||
recurse: yes
|
||||
mode: 0700
|
||||
run_once: yes
|
||||
when: inventory_hostname == groups['etcd'][0]
|
||||
delegate_to: "{{groups['etcd'][0]}}"
|
||||
|
||||
- name: Gen_certs | write openssl config
|
||||
@@ -34,7 +36,9 @@
|
||||
dest: "{{ etcd_config_dir }}/openssl.conf"
|
||||
run_once: yes
|
||||
delegate_to: "{{groups['etcd'][0]}}"
|
||||
when: gen_certs|default(false)
|
||||
when:
|
||||
- gen_certs|default(false)
|
||||
- inventory_hostname == groups['etcd'][0]
|
||||
|
||||
- name: Gen_certs | copy certs generation script
|
||||
copy:
|
||||
@@ -43,8 +47,9 @@
|
||||
mode: 0700
|
||||
run_once: yes
|
||||
delegate_to: "{{groups['etcd'][0]}}"
|
||||
when: gen_certs|default(false)
|
||||
|
||||
when:
|
||||
- gen_certs|default(false)
|
||||
- inventory_hostname == groups['etcd'][0]
|
||||
|
||||
- name: Gen_certs | run cert generation script
|
||||
command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
|
||||
@@ -61,7 +66,9 @@
|
||||
{% endfor %}"
|
||||
run_once: yes
|
||||
delegate_to: "{{groups['etcd'][0]}}"
|
||||
when: gen_certs|default(false)
|
||||
when:
|
||||
- gen_certs|default(false)
|
||||
- inventory_hostname == groups['etcd'][0]
|
||||
notify: set etcd_secret_changed
|
||||
|
||||
- set_fact:
|
||||
@@ -160,5 +167,5 @@
|
||||
group: "{{ etcd_cert_group }}"
|
||||
state: directory
|
||||
owner: kube
|
||||
mode: "u=rwX,g-rwx,o-rwx"
|
||||
mode: "640"
|
||||
recurse: yes
|
||||
|
||||
@@ -9,22 +9,22 @@
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
changed_when: false
|
||||
when: etcd_cluster_setup
|
||||
|
||||
- name: Install etcd launch script
|
||||
template:
|
||||
src: etcd.j2
|
||||
dest: "{{ bin_dir }}/etcd"
|
||||
owner: 'root'
|
||||
mode: 0755
|
||||
mode: 0750
|
||||
backup: yes
|
||||
notify: restart etcd
|
||||
when: etcd_cluster_setup
|
||||
|
||||
- name: Install etcd-events launch script
|
||||
template:
|
||||
src: etcd-events.j2
|
||||
dest: "{{ bin_dir }}/etcd-events"
|
||||
owner: 'root'
|
||||
mode: 0755
|
||||
mode: 0750
|
||||
backup: yes
|
||||
when: etcd_events_cluster_setup
|
||||
notify: restart etcd-events
|
||||
|
||||
@@ -10,3 +10,4 @@
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
changed_when: false
|
||||
when: etcd_cluster_setup
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
changed_when: false
|
||||
environment: "{{proxy_env}}"
|
||||
when: etcd_cluster_setup
|
||||
|
||||
- name: Install | Copy etcdctl binary from rkt container
|
||||
command: >-
|
||||
@@ -26,3 +27,4 @@
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
changed_when: false
|
||||
environment: "{{proxy_env}}"
|
||||
when: etcd_cluster_setup
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
- name: Join Member | Add member to cluster
|
||||
- name: Join Member | Add member to etcd-events cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} member add {{ etcd_member_name }} {{ etcd_events_peer_url }}"
|
||||
register: member_add_result
|
||||
until: member_add_result.rc == 0
|
||||
@@ -23,17 +23,6 @@
|
||||
{%- endfor -%}
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | reload systemd
|
||||
command: systemctl daemon-reload
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | Ensure etcd-events is running
|
||||
service:
|
||||
name: etcd-events
|
||||
state: started
|
||||
enabled: yes
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | Ensure member is in etcd-events cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_events_access_address }}"
|
||||
register: etcd_events_member_in_cluster
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
- name: Join Member | Add member to cluster
|
||||
- name: Join Member | Add member to etcd cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
|
||||
register: member_add_result
|
||||
until: member_add_result.rc == 0
|
||||
@@ -23,18 +23,7 @@
|
||||
{%- endfor -%}
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | reload systemd
|
||||
command: systemctl daemon-reload
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | Ensure etcd is running
|
||||
service:
|
||||
name: etcd
|
||||
state: started
|
||||
enabled: yes
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | Ensure member is in cluster
|
||||
- name: Join Member | Ensure member is in etcd cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
|
||||
register: etcd_member_in_cluster
|
||||
changed_when: false
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
---
|
||||
- name: Join Member | Add member to cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
|
||||
register: member_add_result
|
||||
until: member_add_result.rc == 0
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
when: target_node == inventory_hostname
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- include_tasks: refresh_config.yml
|
||||
vars:
|
||||
etcd_peer_addresses: >-
|
||||
{% for host in groups['etcd'] -%}
|
||||
{%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
|
||||
{{ "etcd"+loop.index|string }}=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2380,
|
||||
{%- endif -%}
|
||||
{%- if loop.last -%}
|
||||
{{ etcd_member_name }}={{ etcd_peer_url }}
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | reload systemd
|
||||
command: systemctl daemon-reload
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | Ensure etcd is running
|
||||
service:
|
||||
name: etcd
|
||||
state: started
|
||||
enabled: yes
|
||||
when: target_node == inventory_hostname
|
||||
|
||||
- name: Join Member | Ensure member is in cluster
|
||||
shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
|
||||
register: etcd_member_in_cluster
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
tags:
|
||||
- facts
|
||||
when: target_node == inventory_hostname
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
@@ -6,6 +6,7 @@
|
||||
- facts
|
||||
|
||||
- include_tasks: "gen_certs_{{ cert_management }}.yml"
|
||||
when:
|
||||
tags:
|
||||
- etcd-secrets
|
||||
|
||||
@@ -18,58 +19,45 @@
|
||||
register: "etcd_client_cert_serial_result"
|
||||
changed_when: false
|
||||
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
|
||||
tags:
|
||||
- master
|
||||
- network
|
||||
|
||||
- name: Set etcd_client_cert_serial
|
||||
set_fact:
|
||||
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout }}"
|
||||
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
|
||||
tags:
|
||||
- master
|
||||
- network
|
||||
|
||||
- include_tasks: "install_{{ etcd_deployment_type }}.yml"
|
||||
when: is_etcd_master
|
||||
tags:
|
||||
- upgrade
|
||||
|
||||
- include_tasks: set_cluster_health.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- include_tasks: configure.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
when: is_etcd_master
|
||||
|
||||
- include_tasks: refresh_config.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
when: is_etcd_master
|
||||
|
||||
- name: Restart etcd if certs changed
|
||||
command: /bin/true
|
||||
notify: restart etcd
|
||||
when: is_etcd_master and etcd_secret_changed|default(false)
|
||||
|
||||
- name: Restart etcd-events if certs changed
|
||||
command: /bin/true
|
||||
notify: restart etcd
|
||||
when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
|
||||
|
||||
# reload-systemd
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: Ensure etcd is running
|
||||
service:
|
||||
name: etcd
|
||||
state: started
|
||||
state: restarted
|
||||
enabled: yes
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed|default(false)
|
||||
|
||||
- name: Ensure etcd-events is running
|
||||
- name: Restart etcd-events if certs changed
|
||||
service:
|
||||
name: etcd-events
|
||||
state: started
|
||||
state: restarted
|
||||
enabled: yes
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
|
||||
|
||||
# After etcd cluster is assembled, make sure that
|
||||
# initial state of the cluster is in `existing`
|
||||
# state instead of `new`.
|
||||
- include_tasks: set_cluster_health.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- include_tasks: refresh_config.yml
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
when: is_etcd_master
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
src: etcd.env.j2
|
||||
dest: /etc/etcd.env
|
||||
notify: restart etcd
|
||||
when: is_etcd_master
|
||||
when: is_etcd_master and etcd_cluster_setup
|
||||
|
||||
- name: Refresh config | Create etcd-events config file
|
||||
template:
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
---
|
||||
- name: Configure | Check if etcd cluster is healthy
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
register: etcd_cluster_is_healthy
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
- name: Configure | Check if etcd-events cluster is healthy
|
||||
shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
|
||||
register: etcd_events_cluster_is_healthy
|
||||
ignore_errors: true
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: is_etcd_master and etcd_events_cluster_setup
|
||||
tags:
|
||||
- facts
|
||||
environment:
|
||||
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
|
||||
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
|
||||
@@ -13,6 +13,8 @@
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ etcd_cert_dir }}"
|
||||
sync_file_hosts: [ "{{ inventory_hostname }}" ]
|
||||
sync_file_owner: kube
|
||||
sync_file_group: root
|
||||
sync_file_is_cert: true
|
||||
with_items: "{{ etcd_master_cert_list|d([]) }}"
|
||||
|
||||
|
||||
roles/etcd/templates/etcd-events-rkt.service.j2 (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
[Unit]
|
||||
Description=etcd events rkt wrapper
|
||||
Documentation=https://github.com/coreos/etcd
|
||||
Wants=network.target
|
||||
|
||||
[Service]
|
||||
Restart=on-failure
|
||||
RestartSec=10s
|
||||
TimeoutStartSec=0
|
||||
LimitNOFILE=40000
|
||||
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--uuid-file-save=/var/run/etcd-events.uuid \
|
||||
--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
|
||||
--mount volume=hosts,target=/etc/hosts \
|
||||
--volume=etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
|
||||
--mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
|
||||
--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
|
||||
--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
|
||||
--volume=etcd-data-dir,kind=host,source={{ etcd_events_data_dir }},readOnly=false \
|
||||
--mount=volume=etcd-data-dir,target={{ etcd_events_data_dir }} \
|
||||
--set-env-file=/etc/etcd-events.env \
|
||||
--stage1-from-dir=stage1-fly.aci \
|
||||
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
|
||||
--name={{ etcd_member_name | default("etcd-events") }}
|
||||
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/etcd-events.uuid
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/etcd-events.uuid
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,7 +1,7 @@
|
||||
ETCD_DATA_DIR={{ etcd_events_data_dir }}
|
||||
ETCD_ADVERTISE_CLIENT_URLS={{ etcd_events_client_url }}
|
||||
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }}
|
||||
ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
|
||||
ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
|
||||
|
||||
ETCD_METRICS={{ etcd_metrics }}
|
||||
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2381,https://127.0.0.1:2381
|
||||
|
||||
@@ -27,3 +27,7 @@ ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
|
||||
ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem
|
||||
ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem
|
||||
ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }}
|
||||
|
||||
{% for key, value in etcd_extra_vars.items() %}
|
||||
{{ key }}={{ value }}
|
||||
{% endfor %}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
# Versions
|
||||
kubedns_version: 1.14.8
|
||||
kubedns_version: 1.14.10
|
||||
kubednsautoscaler_version: 1.1.2
|
||||
|
||||
# Limits for dnsmasq/kubedns apps
|
||||
@@ -59,6 +59,9 @@ dashboard_certs_secret_name: kubernetes-dashboard-certs
|
||||
dashboard_tls_key_file: dashboard.key
|
||||
dashboard_tls_cert_file: dashboard.crt
|
||||
|
||||
# Override dashboard default settings
|
||||
dashboard_token_ttl: 900
|
||||
|
||||
# SSL
|
||||
etcd_cert_dir: "/etc/ssl/etcd/ssl"
|
||||
canal_cert_dir: "/etc/canal/certs"
|
||||
|
||||
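dashboard_token_ttl feeds the --token-ttl flag added to the dashboard deployment further down, so the session lifetime is now tunable from inventory. Example override:

dashboard_token_ttl: 3600   # seconds; the default above is 900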
@@ -19,6 +19,7 @@
|
||||
- rbac_enabled or item.type not in rbac_resources
|
||||
tags:
|
||||
- dnsmasq
|
||||
- kubedns
|
||||
|
||||
# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
|
||||
- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
|
||||
@@ -39,3 +40,4 @@
|
||||
- rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
|
||||
tags:
|
||||
- dnsmasq
|
||||
- kubedns
|
||||
|
||||
@@ -17,6 +17,9 @@
|
||||
- inventory_hostname == groups['kube-master'][0]
|
||||
tags:
|
||||
- upgrade
|
||||
- dnsmasq
|
||||
- coredns
|
||||
- kubedns
|
||||
|
||||
- name: Kubernetes Apps | CoreDNS
|
||||
import_tasks: "tasks/coredns.yml"
|
||||
@@ -56,6 +59,8 @@
|
||||
delay: 5
|
||||
tags:
|
||||
- dnsmasq
|
||||
- coredns
|
||||
- kubedns
|
||||
|
||||
- name: Kubernetes Apps | Netchecker
|
||||
import_tasks: tasks/netchecker.yml
|
||||
|
||||
@@ -11,7 +11,7 @@ data:
|
||||
.:53 {
|
||||
errors
|
||||
health
|
||||
kubernetes {{ cluster_name }} in-addr.arpa ip6.arpa {
|
||||
kubernetes {{ dns_domain }} in-addr.arpa ip6.arpa {
|
||||
pods insecure
|
||||
upstream /etc/resolv.conf
|
||||
fallthrough in-addr.arpa ip6.arpa
|
||||
|
||||
@@ -34,6 +34,22 @@ spec:
|
||||
effect: NoSchedule
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- topologyKey: "kubernetes.io/hostname"
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
|
||||
nodeAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: In
|
||||
values:
|
||||
- "true"
|
||||
containers:
|
||||
- name: coredns
|
||||
image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
|
||||
|
||||
@@ -166,6 +166,7 @@ spec:
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
- --token-ttl={{ dashboard_token_ttl }}
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
@@ -199,6 +200,7 @@ apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
kubernetes.io/cluster-service: "true"
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
spec:
|
||||
|
||||
@@ -30,7 +30,24 @@ spec:
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
operator: Equal
|
||||
key: node-role.kubernetes.io/master
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- topologyKey: "kubernetes.io/hostname"
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
k8s-app: kubedns-autoscaler
|
||||
nodeAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: In
|
||||
values:
|
||||
- "true"
|
||||
containers:
|
||||
- name: autoscaler
|
||||
image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
|
||||
|
||||
@@ -30,8 +30,25 @@ spec:
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: "NoSchedule"
|
||||
operator: "Equal"
|
||||
key: "node-role.kubernetes.io/master"
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- topologyKey: "kubernetes.io/hostname"
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
nodeAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: In
|
||||
values:
|
||||
- "true"
|
||||
volumes:
|
||||
- name: kube-dns-config
|
||||
configMap:
|
||||
|
||||
@@ -7,3 +7,6 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["list"]
|
||||
- apiGroups: ["apiextensions.k8s.io"]
|
||||
resources: ["customresourcedefinitions"]
|
||||
verbs: ['*']
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: efk
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: efk
|
||||
|
||||
@@ -6,3 +6,4 @@ metadata:
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
|
||||
@@ -1,15 +1,17 @@
|
||||
---
|
||||
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: elasticsearch-logging-v1
|
||||
name: elasticsearch-logging
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
version: "{{ elasticsearch_image_tag }}"
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
serviceName: elasticsearch-logging
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
@@ -30,12 +32,12 @@ spec:
|
||||
limits:
|
||||
cpu: {{ elasticsearch_cpu_limit }}
|
||||
{% if elasticsearch_mem_limit is defined and elasticsearch_mem_limit != "0M" %}
|
||||
mem: {{ elasticsearch_mem_limit }}
|
||||
memory: "{{ elasticsearch_mem_limit }}"
|
||||
{% endif %}
|
||||
requests:
|
||||
cpu: {{ elasticsearch_cpu_requests }}
|
||||
{% if elasticsearch_mem_requests is defined and elasticsearch_mem_requests != "0M" %}
|
||||
mem: {{ elasticsearch_mem_requests }}
|
||||
memory: "{{ elasticsearch_mem_requests }}"
|
||||
{% endif %}
|
||||
ports:
|
||||
- containerPort: 9200
|
||||
@@ -53,4 +55,10 @@ spec:
|
||||
{% if rbac_enabled %}
|
||||
serviceAccountName: efk
|
||||
{% endif %}
|
||||
initContainers:
|
||||
- image: alpine:3.6
|
||||
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
|
||||
name: elasticsearch-logging-init
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
||||
|
||||
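The new init container exists because Elasticsearch 5.x refuses to start unless the kernel allows enough memory-mapped areas; the privileged sysctl call above sets that on the node each time the pod starts. A host-level equivalent, if you would rather set it once per node (hypothetical task, not part of this diff):

- name: Raise vm.max_map_count for Elasticsearch
  sysctl:
    name: vm.max_map_count
    value: 262144
    state: present
    reload: yes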
@@ -1,7 +1,7 @@
|
||||
---
|
||||
fluentd_cpu_limit: 0m
|
||||
fluentd_mem_limit: 200Mi
|
||||
fluentd_mem_limit: 500Mi
|
||||
fluentd_cpu_requests: 100m
|
||||
fluentd_mem_requests: 200Mi
|
||||
fluentd_config_dir: /etc/kubernetes/fluentd
|
||||
fluentd_config_file: fluentd.conf
|
||||
fluentd_config_dir: /etc/fluent/config.d
|
||||
# fluentd_config_file: fluentd.conf
|
||||
|
||||
@@ -1,10 +1,19 @@
|
||||
---
|
||||
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: fluentd-config
|
||||
namespace: "kube-system"
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
data:
|
||||
{{ fluentd_config_file }}: |
|
||||
system.conf: |-
|
||||
<system>
|
||||
root_dir /tmp/fluentd-buffers/
|
||||
</system>
|
||||
|
||||
containers.input.conf: |-
|
||||
# This configuration file for Fluentd / td-agent is used
|
||||
# to watch changes to Docker log files. The kubelet creates symlinks that
|
||||
# capture the pod name, namespace, container name & Docker container ID
|
||||
@@ -18,7 +27,6 @@ data:
|
||||
# See https://github.com/uken/fluent-plugin-elasticsearch &
|
||||
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
|
||||
# more information about the plugins.
|
||||
# Maintainer: Jimmi Dyson <jimmidyson@gmail.com>
|
||||
#
|
||||
# Example
|
||||
# =======
|
||||
@@ -99,63 +107,87 @@ data:
|
||||
# This makes it easier for users to search for logs by pod name or by
|
||||
# the name of the Kubernetes container regardless of how many times the
|
||||
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
|
||||
#
|
||||
# TODO: Propagate the labels associated with a container along with its logs
|
||||
# so users can query logs using labels as well as or instead of the pod name
|
||||
# and container name. This is simply done via configuration of the Kubernetes
|
||||
# fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
|
||||
# problem yet to be solved as secrets are not usable in static pods which the fluentd
|
||||
# pod must be until a per-node controller is available in Kubernetes.
|
||||
# Prevent fluentd from handling records containing its own logs. Otherwise
|
||||
# it can lead to an infinite loop, when error in sending one message generates
|
||||
# another message which also fails to be sent and so on.
|
||||
<match fluent.**>
|
||||
type null
|
||||
</match>
|
||||
# Example:
|
||||
|
||||
# Json Log Example:
|
||||
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
|
||||
# CRI Log Example:
|
||||
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
|
||||
<source>
|
||||
type tail
|
||||
@id fluentd-containers.log
|
||||
@type tail
|
||||
path /var/log/containers/*.log
|
||||
pos_file /var/log/es-containers.log.pos
|
||||
time_format %Y-%m-%dT%H:%M:%S.%NZ
|
||||
tag kubernetes.*
|
||||
format json
|
||||
tag raw.kubernetes.*
|
||||
read_from_head true
|
||||
<parse>
|
||||
@type multi_format
|
||||
<pattern>
|
||||
format json
|
||||
time_key time
|
||||
time_format %Y-%m-%dT%H:%M:%S.%NZ
|
||||
</pattern>
|
||||
<pattern>
|
||||
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
|
||||
time_format %Y-%m-%dT%H:%M:%S.%N%:z
|
||||
</pattern>
|
||||
</parse>
|
||||
</source>
|
||||
|
||||
# Detect exceptions in the log output and forward them as one log entry.
|
||||
<match raw.kubernetes.**>
|
||||
@id raw.kubernetes
|
||||
@type detect_exceptions
|
||||
remove_tag_prefix raw
|
||||
message log
|
||||
stream stream
|
||||
multiline_flush_interval 5
|
||||
max_bytes 500000
|
||||
max_lines 1000
|
||||
</match>
|
||||
|
||||
system.input.conf: |-
|
||||
# Example:
|
||||
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
|
||||
<source>
|
||||
type tail
|
||||
@id minion
|
||||
@type tail
|
||||
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
|
||||
time_format %Y-%m-%d %H:%M:%S
|
||||
path /var/log/salt/minion
|
||||
pos_file /var/log/es-salt.pos
|
||||
pos_file /var/log/salt.pos
|
||||
tag salt
|
||||
</source>
|
||||
|
||||
# Example:
|
||||
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
|
||||
<source>
|
||||
type tail
|
||||
@id startupscript.log
|
||||
@type tail
|
||||
format syslog
|
||||
path /var/log/startupscript.log
|
||||
pos_file /var/log/es-startupscript.log.pos
|
||||
tag startupscript
|
||||
</source>
|
||||
|
||||
# Examples:
|
||||
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
|
||||
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
|
||||
# TODO(random-liu): Remove this after cri container runtime rolls out.
|
||||
<source>
|
||||
type tail
|
||||
@id docker.log
|
||||
@type tail
|
||||
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
|
||||
path /var/log/docker.log
|
||||
pos_file /var/log/es-docker.log.pos
|
||||
tag docker
|
||||
</source>
|
||||
|
||||
# Example:
|
||||
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
|
||||
<source>
|
||||
type tail
|
||||
@id etcd.log
|
||||
@type tail
|
||||
# Not parsing this, because it doesn't have anything particularly useful to
|
||||
# parse out of it (like severities).
|
||||
format none
|
||||
@@ -163,13 +195,16 @@ data:
|
||||
pos_file /var/log/es-etcd.log.pos
|
||||
tag etcd
|
||||
</source>
|
||||
|
||||
# Multi-line parsing is required for all the kube logs because very large log
|
||||
# statements, such as those that include entire object bodies, get split into
|
||||
# multiple lines by glog.
|
||||
|
||||
# Example:
|
||||
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -179,10 +214,12 @@ data:
pos_file /var/log/es-kubelet.log.pos
tag kubelet
</source>

# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -192,10 +229,12 @@ data:
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
</source>

# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -205,10 +244,12 @@ data:
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
</source>

# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -218,10 +259,12 @@ data:
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
</source>

# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -231,10 +274,12 @@ data:
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
</source>

# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -244,10 +289,12 @@ data:
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>

# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -257,10 +304,12 @@ data:
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>

# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@@ -270,59 +319,123 @@ data:
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>

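Annotation (not part of the upstream diff): each of these tail sources sits under its own key in the fluentd-config ConfigMap, and the shared format_firstline /^\w\d{4}/ regex marks the start of a new klog record (a severity letter plus MMDD, e.g. "I0204"), so multi-line stack traces are folded into a single event. A minimal sketch of one rendered ConfigMap entry; the path value is an assumption here, the real one sits in the context lines elided from the diff:

  kubelet.log: |-
    <source>
      @id kubelet.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/   # a new record starts at e.g. "I0204 07:32:30..."
      path /var/log/kubelet.log     # assumed path, not shown in the diff hunk
      pos_file /var/log/es-kubelet.log.pos
      tag kubelet
    </source>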
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag docker
</source>

# <source>
# @id journald-container-runtime
# @type systemd
# filters [{ "_SYSTEMD_UNIT": "{% raw %}{{ container_runtime }} {% endraw %}.service" }]
# <storage>
# @type local
# persistent true
# </storage>
# read_from_head true
# tag container-runtime
# </source>

<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag kubelet
</source>

<source>
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag node-problem-detector
</source>

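Annotation (not part of the upstream diff): the commented-out journald-container-runtime block is the same pattern parameterized on the container_runtime variable. If it were enabled for containerd (an assumed value, and with the {% raw %} markers removed so the variable actually renders), the source would read roughly as below:

  <source>
    @id journald-container-runtime
    @type systemd
    filters [{ "_SYSTEMD_UNIT": "containerd.service" }]   # assumed rendering of the container_runtime variable
    <storage>
      @type local
      persistent true
    </storage>
    read_from_head true
    tag container-runtime
  </source>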
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@type forward
</source>

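Annotation (not part of the upstream diff): the forward source accepts events from other fluentd processes over TCP, by default on port 24224. A sketch of the client-side match block that would ship events to this aggregator; the service hostname is assumed for illustration:

  <match **>
    @type forward
    <server>
      host fluentd-aggregator.kube-system.svc   # assumed service name
      port 24224                                # fluentd forward default port
    </server>
  </match>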
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@type prometheus
</source>

<source>
@type monitor_agent
</source>

# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>

# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>

# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>

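Annotation (not part of the upstream diff): the prometheus source family exposes the collected counters over HTTP for scraping. To my understanding fluent-plugin-prometheus listens on port 24231 at /metrics by default; the values below only spell out those assumed defaults and can be overridden if the exporter should listen elsewhere:

  <source>
    @type prometheus
    bind 0.0.0.0          # assumed plugin default
    port 24231            # assumed plugin default
    metrics_path /metrics # assumed plugin default
  </source>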
output.conf: |-
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
type kubernetes_metadata
@type kubernetes_metadata
</filter>
## Prometheus Exporter Plugin
## input plugin that exports metrics
#<source>
# type prometheus
#</source>
#<source>
# type monitor_agent
#</source>
#<source>
# type forward
#</source>
## input plugin that collects metrics from MonitorAgent
#<source>
# @type prometheus_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
## input plugin that collects metrics for output plugin
#<source>
# @type prometheus_output_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
## input plugin that collects metrics for in_tail plugin
#<source>
# @type prometheus_tail_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>

<match **>
type elasticsearch
user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
# Set the chunk limit the same as for fluentd-gcp.
buffer_chunk_limit 2M
# Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
buffer_queue_limit 32
flush_interval 5s
# Never wait longer than 5 minutes between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 8
</match>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
queue_limit_length 8
overflow_action block
</buffer>
</match>
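Annotation (not part of the upstream diff): the old in-memory buffer was capped at buffer_chunk_limit 2M x buffer_queue_limit 32 = 64 MiB, while the new file buffer caps at chunk_limit_size 2M x queue_limit_length 8 = 16 MiB on disk before overflow_action block back-pressures the inputs. A sketch of the same buffer section tuned back to the larger queue; the numbers are illustrative, not the values this diff introduces:

  <buffer>
    @type file
    path /var/log/fluentd-buffers/kubernetes.system.buffer
    chunk_limit_size 2M
    queue_limit_length 32      # illustrative: 2M x 32 = 64 MiB of on-disk buffer
    flush_interval 5s
    overflow_action block
  </buffer>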
@@ -1,32 +1,42 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
apiVersion: extensions/v1beta1
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "fluentd-es-v{{ fluentd_version }}"
name: "fluentd-es-{{ fluentd_version }}"
namespace: "kube-system"
labels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
kubernetes.io/cluster-service: "true"
version: "v{{ fluentd_version }}"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: "v{{ fluentd_version }}"
version: "{{ fluentd_version }}"
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- effect: NoSchedule
operator: Exists
priorityClassName: system-node-critical
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
command:
- '/bin/sh'
- '-c'
- '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
env:
- name: FLUENTD_ARGS
value: "--no-supervisor -q"
resources:
limits:
{% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
@@ -34,27 +44,26 @@ spec:
{% endif %}
memory: {{ fluentd_mem_limit }}
requests:
cpu: {{ fluentd_cpu_requests }}
cpu: {{ fluentd_cpu_requests }}
memory: {{ fluentd_mem_requests }}
volumeMounts:
- name: varlog
mountPath: /var/log
- name: dockercontainers
- name: varlibdockercontainers
mountPath: "{{ docker_daemon_graph }}/containers"
readOnly: true
- name: config
- name: config-volume
mountPath: "{{ fluentd_config_dir }}"
nodeSelector:
beta.kubernetes.io/fluentd-ds-ready: "true"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: dockercontainers
- name: varlibdockercontainers
hostPath:
path: {{ docker_daemon_graph }}/containers
- name: config
configMap:
name: fluentd-config
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
- name: config-volume
configMap:
name: fluentd-config
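Annotation (not part of the upstream diff): moving the DaemonSet from extensions/v1beta1 to apps/v1 makes spec.selector required and immutable, and it must match the pod template labels, which is why the version label is pinned in both places. With fluentd_version set to, say, 2.2.0 (an illustrative value), the relevant rendered fragment would look roughly like:

  apiVersion: apps/v1
  kind: DaemonSet
  metadata:
    name: fluentd-es-2.2.0          # rendered from "fluentd-es-{{ fluentd_version }}"
  spec:
    selector:
      matchLabels:
        k8s-app: fluentd-es
        version: "2.2.0"
    template:
      metadata:
        labels:
          k8s-app: fluentd-es
          version: "2.2.0"          # must match the selector above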
@@ -4,3 +4,4 @@ kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M
kibana_service_port: 5601
kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"

@@ -1,6 +1,6 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-kibana/kibana-controller.yaml
apiVersion: extensions/v1beta1
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
@@ -26,20 +26,22 @@ spec:
limits:
cpu: {{ kibana_cpu_limit }}
{% if kibana_mem_limit is defined and kibana_mem_limit != "0M" %}
mem: {{ kibana_mem_limit }}
memory: "{{ kibana_mem_limit }}"
{% endif %}
requests:
cpu: {{ kibana_cpu_requests }}
{% if kibana_mem_requests is defined and kibana_mem_requests != "0M" %}
mem: {{ kibana_mem_requests }}
memory: "{{ kibana_mem_requests }}"
{% endif %}
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
{% if kibana_base_url is defined and kibana_base_url != "" %}
- name: "KIBANA_BASE_URL"
- name: "SERVER_BASEPATH"
value: "{{ kibana_base_url }}"
{% endif %}
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui

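Annotation (not part of the upstream diff): newer Kibana images read the reverse-proxy prefix from the SERVER_BASEPATH environment variable rather than KIBANA_BASE_URL, so with the kibana_base_url default shown above the rendered container env ends up roughly as below; the 9200 port is an assumed value for elasticsearch_service_port:

  env:
    - name: ELASTICSEARCH_URL
      value: "http://elasticsearch-logging:9200"   # assuming elasticsearch_service_port: 9200
    - name: SERVER_BASEPATH
      value: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"
    - name: XPACK_MONITORING_ENABLED
      value: "false"
    - name: XPACK_SECURITY_ENABLED
      value: "false"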
@@ -1,7 +1,10 @@
---
cephfs_provisioner_namespace: "kube-system"
cephfs_provisioner_namespace: "cephfs-provisioner"
cephfs_provisioner_cluster: ceph
cephfs_provisioner_monitors: []
cephfs_provisioner_monitors: ~
cephfs_provisioner_admin_id: admin
cephfs_provisioner_secret: secret
cephfs_provisioner_storage_class: cephfs
cephfs_provisioner_reclaim_policy: Delete
cephfs_provisioner_claim_root: /volumes
cephfs_provisioner_deterministic_names: true

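Annotation (not part of the upstream diff): cephfs_provisioner_monitors now defaults to ~ (null) and is rendered verbatim into the StorageClass, so it should be overridden with a comma-separated host:port string rather than a YAML list. A sketch of a group_vars override; the monitor addresses and secret are illustrative placeholders, not real values:

  cephfs_provisioner_namespace: "cephfs-provisioner"
  cephfs_provisioner_cluster: ceph
  cephfs_provisioner_monitors: "10.0.0.1:6789,10.0.0.2:6789,10.0.0.3:6789"   # illustrative addresses
  cephfs_provisioner_admin_id: admin
  cephfs_provisioner_secret: "REPLACE_WITH_CEPH_ADMIN_KEY"                   # illustrative placeholder
  cephfs_provisioner_storage_class: cephfs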
@@ -1,5 +1,32 @@
---

- name: CephFS Provisioner | Remove legacy addon dir and manifests
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade

- name: CephFS Provisioner | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade

- name: CephFS Provisioner | Remove legacy storageclass
shell: |
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade

- name: CephFS Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
@@ -7,22 +34,24 @@
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]

- name: CephFS Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
with_items:
- { name: cephfs-provisioner-ns, file: cephfs-provisioner-ns.yml, type: ns }
- { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa }
- { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role }
- { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding }
- { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole }
- { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
- { name: cephfs-provisioner-rs, file: cephfs-provisioner-rs.yml, type: rs }
- { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret }
- { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc }
register: cephfs_manifests
- { name: 00-namespace, file: 00-namespace.yml, type: ns }
- { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret }
- { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa }
- { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole }
- { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding }
- { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role }
- { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding }
- { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: rs }
- { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc }
register: cephfs_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]

- name: CephFS Provisioner | Apply manifests
@@ -33,5 +62,5 @@
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ cephfs_manifests.results }}"
with_items: "{{ cephfs_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]

@@ -1,6 +1,6 @@
---
apiVersion: apps/v1
kind: ReplicaSet
kind: Deployment
metadata:
name: cephfs-provisioner-v{{ cephfs_provisioner_image_tag }}
namespace: {{ cephfs_provisioner_namespace }}
@@ -4,9 +4,12 @@ kind: StorageClass
metadata:
name: {{ cephfs_provisioner_storage_class }}
provisioner: ceph.com/cephfs
reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }}
parameters:
cluster: {{ cephfs_provisioner_cluster }}
monitors: {{ cephfs_provisioner_monitors | join(',') }}
monitors: {{ cephfs_provisioner_monitors }}
adminId: {{ cephfs_provisioner_admin_id }}
adminSecretName: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
adminSecretName: cephfs-provisioner
adminSecretNamespace: {{ cephfs_provisioner_namespace }}
claimRoot: {{ cephfs_provisioner_claim_root }}
deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}"
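Annotation (not part of the upstream diff): once this StorageClass exists, a workload only needs to reference it by name to get a dynamically provisioned CephFS volume. A minimal sketch of such a claim; the claim name and size are illustrative:

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: cephfs-claim-example        # illustrative name
  spec:
    storageClassName: cephfs          # matches cephfs_provisioner_storage_class
    accessModes:
      - ReadWriteMany
    resources:
      requests:
        storage: 1Gi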
@@ -2,7 +2,7 @@
kind: Secret
apiVersion: v1
metadata:
name: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
type: Opaque
data:
@@ -18,3 +18,6 @@ helm_skip_refresh: false

# Override values for the Tiller Deployment manifest.
# tiller_override: "key1=val1,key2=val2"

# Limit the maximum number of revisions saved per release. Use 0 for no limit.
# tiller_max_history: 0

@@ -34,6 +34,7 @@
{% if rbac_enabled %} --service-account=tiller{% endif %}
{% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
{% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
{% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)

- name: Helm | Set up bash completion

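Annotation (not part of the upstream diff): tiller_max_history is optional; when defined it is appended to the Tiller flags as --history-max=<n>, which keeps Helm from accumulating unbounded release revisions. A sketch of the group_vars override with an illustrative value:

  tiller_max_history: 10    # illustrative: keep only the last 10 revisions per release; 0 means no limit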
@@ -1,5 +1,23 @@
---

- name: Cert Manager | Remove legacy addon dir and manifests
file:
path: "{{ kube_config_dir }}/addons/cert_manager"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade

- name: Cert Manager | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade

- name: Cert Manager | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/cert_manager"
@@ -7,20 +25,22 @@
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]

- name: Cert Manager | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
with_items:
- { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns }
- { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa }
- { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole }
- { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding }
- { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd }
- { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd }
- { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd }
- { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy }
- { name: 00-namespace, file: 00-namespace.yml, type: ns }
- { name: sa-cert-manager, file: sa-cert-manager.yml, type: sa }
- { name: crd-certificate, file: crd-certificate.yml, type: crd }
- { name: crd-clusterissuer, file: crd-clusterissuer.yml, type: crd }
- { name: crd-issuer, file: crd-issuer.yml, type: crd }
- { name: clusterrole-cert-manager, file: clusterrole-cert-manager.yml, type: clusterrole }
- { name: clusterrolebinding-cert-manager, file: clusterrolebinding-cert-manager.yml, type: clusterrolebinding }
- { name: deploy-cert-manager, file: deploy-cert-manager.yml, type: deploy }
register: cert_manager_manifests
when:
- inventory_hostname == groups['kube-master'][0]

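Annotation (not part of the upstream diff): after the CRDs and deployment are applied, certificates are requested through cert-manager's own resources. A minimal self-signed ClusterIssuer as a sketch; the certmanager.k8s.io/v1alpha1 API group is stated here as an assumption about what cert-manager 0.4.x served, and the name is illustrative:

  apiVersion: certmanager.k8s.io/v1alpha1   # assumed API group/version for cert-manager 0.4.x
  kind: ClusterIssuer
  metadata:
    name: selfsigning-issuer                # illustrative name
  spec:
    selfSigned: {}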
@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-v0.4.0
release: cert-manager
heritage: Tiller
rules:
@@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
chart: cert-manager-0.2.5
chart: cert-manager-v0.4.0
release: cert-manager
heritage: Tiller
roleRef: