Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2026-02-02 10:08:13 -03:30)

Compare commits: 153 commits, release-2.… → release-2.…
Commits in this comparison (abbreviated SHA1s, 153 total):

b5f3f2de38, d744e3fb92, 6921b70a22, 6f9d003999, 8a685bd9b6, 1d6a6c86f9, 0b2488dfaa, 30e1878d27,
9a506636e3, f7af33fac4, 184b15f8ae, ee2d3916f2, 75e12e8981, b35a3ff924, cd26b379fe, 0b9872cd27,
6b487d513e, e5bdb3b0b7, 552c6bb975, 5ad38a4d0d, 6c553d75d2, 3f44293f69, 84324f50be, 4577ee4a5d,
d80686acb0, f9ebd45c74, 7f527f6195, 3da6c4fc18, e744a117d6, 03372d883a, 8a961a60c2, db0138b2f9,
b0be5f2dad, 27c7dc7008, acc5e579f6, 60b323b17f, 924a979955, 5fe8714f05, 6acb44eeaf, c89ea7e4c7,
3d9e4951ce, 776b40a329, a3d0ba230d, 9a7b021eb8, 5c5421e453, 1798989f99, 961a6a8c9e, 2f84567a69,
171b0e60aa, c4338687e1, ad1ce92b41, 1093c76f9b, c7935e2988, 0306771c29, 390d74706c, ce9ba9a8bf,
fe4cbbccd1, e43e08c7d1, 28712045a5, 1968db9a52, cc03ca62be, 5f18fe739e, 343d680371, 3d1653f950,
dd51ef6f96, 4e99b94dcc, 54ac5a6de4, 2799f11475, 8d497b49a6, 86f980393c, d469503e84, 351832ba1d,
468c5641b2, 2299e49e0e, c0fabccaf6, 2ac5b37aa9, 8208a3f04f, 2d194af85e, 8022eddb55, 242edd14ff,
8f5f75211f, 5394715d9b, 56e26d6061, 513e18cb90, 5f35b66256, bab0398c1e, d993b2b8cf, c89f901595,
2615805da2, 464cc716d7, 1ebd860c13, 474b259cf8, a0d03d9fa6, 0bcedd4603, 413572eced, 0be525c76f,
fe97b99984, 348335ece5, ee3fef1051, a0587e0b8e, ff18f65a17, 35e904d7c3, 9a6922125c, 821dfbfdba,
cce585066e, 619938da95, 88b502f29d, db316a566d, 817c61695d, 0c84175e3b, cae266a045, 15b62cc7ce,
c352773737, af0ac977a5, 40f5b28302, 2d612cde4d, 27cb22cee4, b7873a0891, edce2b528d, 647092b483,
921b0c0bed, 24dc4cef56, 3e72be2f72, f85e96904d, 0c8d29462d, 351393e32a, b70eaa0470, ef6d24a49e,
6cf11a9c72, aba79d1b3c, 4b82e90dcb, dedc00661a, 0624a3061a, 3082fa3d0f, d85b29aae1, eff4eec8de,
af593465b2, 870049523f, 184b1add54, 37d824fd2d, ff48144607, 0faa805525, bc21433a05, 19851bb07c,
7f7b65d388, d50f61eae5, 77bfb53455, 0e449ca75e, f6d9ff4196, 21aba10e08, bd9d90e00c, 5616a4a3ee,
4b9349a052
.github/dependabot.yml (vendored, 4 changed lines)

@@ -4,4 +4,6 @@ updates:
     directory: "/"
     schedule:
       interval: "weekly"
-    labels: [ "dependencies" ]
+    labels:
+      - dependencies
+      - release-note-none
@@ -6,7 +6,7 @@ stages:
   - deploy-extended
 
 variables:
-  KUBESPRAY_VERSION: v2.24.1
+  KUBESPRAY_VERSION: v2.25.0
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"

@@ -88,6 +88,11 @@ packet_ubuntu20-crio:
 packet_ubuntu22-calico-all-in-one:
   extends: .packet_pr
 
+packet_ubuntu22-calico-all-in-one-upgrade:
+  extends: .packet_pr
+  variables:
+    UPGRADE_TEST: graceful
+
 packet_ubuntu24-calico-etcd-datastore:
   extends: .packet_pr
 

@@ -114,8 +119,13 @@ packet_rockylinux9-cilium:
   variables:
     RESET_CHECK: "true"
 
+# Need an update of the container image to use schema v2
+# update: quay.io/kubespray/vm-amazon-linux-2:latest
 packet_amazon-linux-2-all-in-one:
-  extends: .packet_pr
+  extends: .packet_pr_manual
+  rules:
+    - when: manual
+  allow_failure: true
 
 packet_opensuse-docker-cilium:
   extends: .packet_pr
@@ -39,14 +39,14 @@ repos:
     hooks:
       - id: ansible-lint
         additional_dependencies:
-          - ansible==9.5.1
+          - ansible==9.8.0
           - jsonschema==4.22.0
           - jmespath==1.0.1
-          - netaddr==1.2.1
+          - netaddr==1.3.0
+          - distlib
 
-  - repo: https://github.com/VannTen/misspell
-    # Waiting on https://github.com/golangci/misspell/pull/19 to get merged
-    rev: 8592a4e
+  - repo: https://github.com/golangci/misspell
+    rev: v0.6.0
     hooks:
       - id: misspell
         exclude: "OWNERS_ALIASES$"

@@ -61,14 +61,6 @@ repos:
         additional_dependencies:
           - ansible==9.5.1
 
-      - id: tox-inventory-builder
-        name: tox-inventory-builder
-        entry: bash -c "cd contrib/inventory_builder && tox"
-        language: python
-        pass_filenames: false
-        additional_dependencies:
-          - tox==4.15.0
-
       - id: check-readme-versions
         name: check-readme-versions
         entry: tests/scripts/check_readme_versions.sh

@@ -80,6 +72,7 @@ repos:
         language: python
         additional_dependencies:
           - ansible-core>=2.16.4
+          - distlib
         entry: tests/scripts/collection-build-install.sh
         pass_filenames: false
 
.yamllint (10 changed lines)

@@ -6,7 +6,7 @@ ignore: |
   .github/
   # Generated file
   tests/files/custom_cni/cilium.yaml
-
+# https://ansible.readthedocs.io/projects/lint/rules/yaml/
 rules:
   braces:
     min-spaces-inside: 0

@@ -14,9 +14,15 @@ rules:
   brackets:
     min-spaces-inside: 0
     max-spaces-inside: 1
+  comments:
+    min-spaces-from-content: 1
+  # https://github.com/adrienverge/yamllint/issues/384
+  comments-indentation: false
   indentation:
     spaces: 2
     indent-sequences: consistent
   line-length: disable
   new-line-at-end-of-file: disable
-  truthy: disable
+  octal-values:
+    forbid-implicit-octal: true # yamllint defaults to false
+    forbid-explicit-octal: true # yamllint defaults to false
README.md (34 changed lines)

@@ -75,11 +75,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
 to access the inventory and SSH key in the container, like this:
 
 ```ShellSession
-git checkout v2.25.1
-docker pull quay.io/kubespray/kubespray:v2.25.1
+git checkout v2.25.0
+docker pull quay.io/kubespray/kubespray:v2.25.0
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.25.1 bash
+  quay.io/kubespray/kubespray:v2.25.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```

@@ -143,11 +143,11 @@ vagrant up
 - **Flatcar Container Linux by Kinvolk**
 - **Debian** Bookworm, Bullseye
 - **Ubuntu** 20.04, 22.04, 24.04
-- **CentOS/RHEL** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
+- **CentOS/RHEL** [8, 9](docs/operating_systems/centos.md#centos-8)
 - **Fedora** 37, 38
 - **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
+- **Oracle Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
 - **Alma Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
 - **Rocky Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
 - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))

@@ -160,28 +160,28 @@ Note: Upstart/SysV init based OS types are not supported.
 ## Supported Components
 
 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.29.10
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.30.6
   - [etcd](https://github.com/etcd-io/etcd) v3.5.16
   - [docker](https://www.docker.com/) v26.1
-  - [containerd](https://containerd.io/) v1.7.22
-  - [cri-o](http://cri-o.io/) v1.29.1 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [containerd](https://containerd.io/) v1.7.23
+  - [cri-o](http://cri-o.io/) v1.30.3 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
   - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
-  - [calico](https://github.com/projectcalico/calico) v3.27.4
+  - [calico](https://github.com/projectcalico/calico) v3.28.1
   - [cilium](https://github.com/cilium/cilium) v1.15.4
   - [flannel](https://github.com/flannel-io/flannel) v0.22.0
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.12.21
   - [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
   - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
-  - [weave](https://github.com/weaveworks/weave) v2.8.1
+  - [weave](https://github.com/rajch/weave) v2.8.7
   - [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
 - Application
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.13.2
+  - [cert-manager](https://github.com/jetstack/cert-manager) v1.14.7
   - [coredns](https://github.com/coredns/coredns) v1.11.1
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.11.2
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.11.5
   - [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
   - [argocd](https://argoproj.github.io/) v2.11.0
-  - [helm](https://helm.sh/) v3.14.4
+  - [helm](https://helm.sh/) v3.15.4
   - [metallb](https://metallb.universe.tf/) v0.13.9
   - [registry](https://github.com/distribution/distribution) v2.8.1
 - Storage Plugin

@@ -189,11 +189,11 @@ Note: Upstart/SysV init based OS types are not supported.
   - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
   - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
   - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
-  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.29.0
+  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.30.0
   - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
   - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
   - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
-  - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.14.2
+  - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.16.4
 
 ## Container Runtime Notes
 

@@ -201,7 +201,7 @@ Note: Upstart/SysV init based OS types are not supported.
 
 ## Requirements
 
-- **Minimum required version of Kubernetes is v1.27**
+- **Minimum required version of Kubernetes is v1.28**
 - **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
@@ -16,6 +16,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
 1. The release issue is closed
 1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
 1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
+1. Create/Update Issue for upgradeing kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
 
 ## Major/minor releases and milestones
 
Vagrantfile (vendored, 5 changed lines)

@@ -1,7 +1,7 @@
 # -*- mode: ruby -*-
 # # vi: set ft=ruby :
 
-# For help on using kubespray with vagrant, check out docs/vagrant.md
+# For help on using kubespray with vagrant, check out docs/developers/vagrant.md
 
 require 'fileutils'
 

@@ -22,8 +22,6 @@ SUPPORTED_OS = {
   "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
   "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
   "ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"},
-  "centos" => {box: "centos/7", user: "vagrant"},
-  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
   "centos8" => {box: "centos/8", user: "vagrant"},
   "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
   "almalinux8" => {box: "almalinux/8", user: "vagrant"},

@@ -36,7 +34,6 @@ SUPPORTED_OS = {
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
   "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
-  "rhel7" => {box: "generic/rhel7", user: "vagrant"},
   "rhel8" => {box: "generic/rhel8", user: "vagrant"},
   "debian11" => {box: "debian/bullseye64", user: "vagrant"},
   "debian12" => {box: "debian/bookworm64", user: "vagrant"},
@@ -11,6 +11,7 @@ gathering = smart
 fact_caching = jsonfile
 fact_caching_connection = /tmp
 fact_caching_timeout = 86400
+timeout = 300
 stdout_callback = default
 display_skipped_hosts = no
 library = ./library
@@ -1,6 +1,6 @@
 ---
 - name: Generate Azure inventory
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - generate-inventory

@@ -1,6 +1,6 @@
 ---
 - name: Generate Azure inventory
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - generate-inventory_2

@@ -1,6 +1,6 @@
 ---
 - name: Generate Azure templates
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - generate-templates

@@ -12,4 +12,4 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
-    mode: 0644
+    mode: "0644"

@@ -22,10 +22,10 @@
   template:
     src: inventory.j2
     dest: "{{ playbook_dir }}/inventory"
-    mode: 0644
+    mode: "0644"
 
 - name: Generate Load Balancer variables
   template:
     src: loadbalancer_vars.j2
     dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
-    mode: 0644
+    mode: "0644"

@@ -8,13 +8,13 @@
     path: "{{ base_dir }}"
     state: directory
     recurse: true
-    mode: 0755
+    mode: "0755"
 
 - name: Store json files in base_dir
   template:
     src: "{{ item }}"
     dest: "{{ base_dir }}/{{ item }}"
-    mode: 0644
+    mode: "0644"
   with_items:
     - network.json
     - storage.json
@@ -1,7 +1,7 @@
 ---
 - name: Create nodes as docker containers
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - { role: dind-host }
 

@@ -18,7 +18,7 @@ distro_settings:
     init: |
       /sbin/init
   centos: &CENTOS
-    image: "centos:7"
+    image: "centos:8"
     user: "centos"
     pid1_exe: /usr/lib/systemd/systemd
     init: |

@@ -15,7 +15,7 @@ docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check
 
 dns_mode: coredns
 
-deploy_netchecker: True
+deploy_netchecker: true
 netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
 netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
 netcheck_agent_image_tag: v1.0

@@ -14,7 +14,7 @@
     src: "/bin/true"
     dest: "{{ item }}"
     state: link
-    force: yes
+    force: true
   with_items:
     # DIND box may have swap enable, don't bother
     - /sbin/swapoff

@@ -35,7 +35,7 @@
       path-exclude=/usr/share/doc/*
       path-include=/usr/share/doc/*/copyright
     dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
-    mode: 0644
+    mode: "0644"
   when:
     - ansible_os_family == 'Debian'
 

@@ -58,13 +58,13 @@
     name: "{{ distro_user }}"
     uid: 1000
     # groups: sudo
-    append: yes
+    append: true
 
 - name: Allow password-less sudo to "{{ distro_user }}"
   copy:
     content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
     dest: "/etc/sudoers.d/{{ distro_user }}"
-    mode: 0640
+    mode: "0640"
 
 - name: "Add my pubkey to {{ distro_user }} user authorized keys"
   ansible.posix.authorized_key:

@@ -19,7 +19,7 @@
     state: started
     hostname: "{{ item }}"
     command: "{{ distro_init }}"
-    # recreate: yes
+    # recreate: true
     privileged: true
     tmpfs:
       - /sys/module/nf_conntrack/parameters

@@ -42,7 +42,7 @@
   template:
     src: inventory_builder.sh.j2
     dest: /tmp/kubespray.dind.inventory_builder.sh
-    mode: 0755
+    mode: "0755"
   tags:
     - addresses
 
@@ -1,8 +1,8 @@
 ---
 - name: Prepare Hypervisor to later install kubespray VMs
   hosts: localhost
-  gather_facts: False
-  become: yes
+  gather_facts: false
+  become: true
   vars:
     bootstrap_os: none
   roles:

@@ -11,12 +11,12 @@
 
 - name: Install required packages
   apt:
-    upgrade: yes
-    update_cache: yes
+    upgrade: true
+    update_cache: true
     cache_valid_time: 3600
     name: "{{ item }}"
     state: present
-    install_recommends: no
+    install_recommends: false
   with_items:
     - dnsutils
     - ntp

@@ -20,7 +20,7 @@
       br-netfilter
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
   when: br_netfilter is defined
 
 

@@ -30,7 +30,7 @@
     value: 1
     sysctl_file: "{{ sysctl_file_path }}"
     state: present
-    reload: yes
+    reload: true
 
 - name: Set bridge-nf-call-{arptables,iptables} to 0
   ansible.posix.sysctl:

@@ -38,7 +38,7 @@
     state: present
     value: 0
     sysctl_file: "{{ sysctl_file_path }}"
-    reload: yes
+    reload: true
   with_items:
     - net.bridge.bridge-nf-call-arptables
     - net.bridge.bridge-nf-call-ip6tables

@@ -11,7 +11,7 @@
     state: directory
     owner: "{{ k8s_deployment_user }}"
     group: "{{ k8s_deployment_user }}"
-    mode: 0700
+    mode: "0700"
 
 - name: Configure sudo for deployment user
   copy:

@@ -20,13 +20,13 @@
     dest: "/etc/sudoers.d/55-k8s-deployment"
     owner: root
     group: root
-    mode: 0644
+    mode: "0644"
 
 - name: Write private SSH key
   copy:
     src: "{{ k8s_deployment_user_pkey_path }}"
     dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
-    mode: 0400
+    mode: "0400"
     owner: "{{ k8s_deployment_user }}"
     group: "{{ k8s_deployment_user }}"
   when: k8s_deployment_user_pkey_path is defined

@@ -41,7 +41,7 @@
 - name: Fix ssh-pub-key permissions
   file:
     path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
-    mode: 0600
+    mode: "0600"
     owner: "{{ k8s_deployment_user }}"
     group: "{{ k8s_deployment_user }}"
   when: k8s_deployment_user_pkey_path is defined
@@ -14,7 +14,7 @@
   file:
     path: "{{ item }}"
     state: directory
-    mode: 0755
+    mode: "0755"
   become: false
   loop:
     - "{{ playbook_dir }}/plugins/mitogen"

@@ -25,7 +25,7 @@
     url: "{{ mitogen_url }}"
     dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
     validate_certs: true
-    mode: 0644
+    mode: "0644"
 
 - name: Extract archive
   unarchive:

@@ -40,7 +40,7 @@
 - name: Add strategy to ansible.cfg
   community.general.ini_file:
     path: ansible.cfg
-    mode: 0644
+    mode: "0644"
     section: "{{ item.section | d('defaults') }}"
     option: "{{ item.option }}"
     value: "{{ item.value }}"

@@ -21,7 +21,7 @@ glusterfs_default_release: ""
 You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
 
 ```yaml
-glusterfs_ppa_use: yes
+glusterfs_ppa_use: true
 glusterfs_ppa_version: "3.5"
 ```
 
@@ -1,7 +1,7 @@
 ---
 # For Ubuntu.
 glusterfs_default_release: ""
-glusterfs_ppa_use: yes
+glusterfs_ppa_use: true
 glusterfs_ppa_version: "4.1"
 
 # Gluster configuration.

@@ -15,7 +15,7 @@
   file:
     path: "{{ item }}"
     state: directory
-    mode: 0775
+    mode: "0775"
   with_items:
     - "{{ gluster_mount_dir }}"
   when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined

@@ -3,7 +3,7 @@
   apt_repository:
     repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
     state: present
-    update_cache: yes
+    update_cache: true
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 

@@ -1,7 +1,7 @@
 ---
 # For Ubuntu.
 glusterfs_default_release: ""
-glusterfs_ppa_use: yes
+glusterfs_ppa_use: true
 glusterfs_ppa_version: "3.12"
 
 # Gluster configuration.

@@ -43,13 +43,13 @@
   service:
     name: "{{ glusterfs_daemon }}"
     state: started
-    enabled: yes
+    enabled: true
 
 - name: Ensure Gluster brick and mount directories exist.
   file:
     path: "{{ item }}"
     state: directory
-    mode: 0775
+    mode: "0775"
   with_items:
     - "{{ gluster_brick_dir }}"
     - "{{ gluster_mount_dir }}"

@@ -62,7 +62,7 @@
     replicas: "{{ groups['gfs-cluster'] | length }}"
     cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
-    force: yes
+    force: true
   run_once: true
   when: groups['gfs-cluster'] | length > 1
 

@@ -73,7 +73,7 @@
     brick: "{{ gluster_brick_dir }}"
     cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
-    force: yes
+    force: true
   run_once: true
   when: groups['gfs-cluster'] | length <= 1
 

@@ -101,7 +101,7 @@
   template:
     dest: "{{ gluster_mount_dir }}/.test-file.txt"
     src: test-file.txt
-    mode: 0644
+    mode: "0644"
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
 
 - name: Unmount glusterfs

@@ -3,7 +3,7 @@
   apt_repository:
     repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
     state: present
-    update_cache: yes
+    update_cache: true
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 

@@ -3,7 +3,7 @@
   template:
     src: "{{ item.file }}"
     dest: "{{ kube_config_dir }}/{{ item.dest }}"
-    mode: 0644
+    mode: "0644"
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

@@ -6,6 +6,6 @@
 
 - name: Teardown disks in heketi
   hosts: heketi-node
-  become: yes
+  become: true
   roles:
     - { role: tear-down-disks }
@@ -4,7 +4,7 @@
   template:
     src: "heketi-bootstrap.json.j2"
     dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
-    mode: 0640
+    mode: "0640"
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:

@@ -10,7 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
-    mode: 0644
+    mode: "0644"
 - name: "Copy topology configuration into container."
   changed_when: false
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"

@@ -3,7 +3,7 @@
   template:
     src: "glusterfs-daemonset.json.j2"
     dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
-    mode: 0644
+    mode: "0644"
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"

@@ -33,7 +33,7 @@
   template:
     src: "heketi-service-account.json.j2"
     dest: "{{ kube_config_dir }}/heketi-service-account.json"
-    mode: 0644
+    mode: "0644"
   become: true
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"

@@ -4,7 +4,7 @@
   template:
     src: "heketi-deployment.json.j2"
     dest: "{{ kube_config_dir }}/heketi-deployment.json"
-    mode: 0644
+    mode: "0644"
   register: "rendering"
 
 - name: "Kubernetes Apps | Install and configure Heketi"

@@ -28,7 +28,7 @@
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"
-    mode: 0644
+    mode: "0644"
 
 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout | length == 0"

@@ -5,7 +5,7 @@
   template:
     src: "heketi-storage.json.j2"
     dest: "{{ kube_config_dir }}/heketi-storage.json"
-    mode: 0644
+    mode: "0644"
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:

@@ -16,7 +16,7 @@
   template:
     src: "storageclass.yml.j2"
     dest: "{{ kube_config_dir }}/storageclass.yml"
-    mode: 0644
+    mode: "0644"
   register: "rendering"
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:

@@ -10,7 +10,7 @@
   template:
     src: "topology.json.j2"
     dest: "{{ kube_config_dir }}/topology.json"
-    mode: 0644
+    mode: "0644"
 - name: "Copy topology configuration into container." # noqa no-handler
   when: "rendering.changed"
   command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"

@@ -1,7 +1,7 @@
 ---
 - name: Collect container images for offline deployment
   hosts: localhost
-  become: no
+  become: false
 
   roles:
     # Just load default variables from roles.

@@ -16,7 +16,7 @@
   template:
     src: ./contrib/offline/temp/{{ item }}.list.template
     dest: ./contrib/offline/temp/{{ item }}.list
-    mode: 0644
+    mode: "0644"
   with_items:
     - files
     - images

@@ -146,7 +146,7 @@ function register_container_images() {
       if [ "${org_image}" == "ID:" ]; then
          org_image=$(echo "${load_image}" | awk '{print $4}')
       fi
-      image_id=$(sudo ${runtime} image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
+      image_id=$(sudo ${runtime} image inspect --format "{{.Id}}" "${org_image}")
       if [ -z "${file_name}" ]; then
          echo "Failed to get file_name for line ${line}"
          exit 1
@@ -7,17 +7,17 @@
   service_facts:
 
 - name: Disable service firewalld
-  systemd:
+  systemd_service:
     name: firewalld
     state: stopped
-    enabled: no
+    enabled: false
   when:
     "'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
 
 - name: Disable service ufw
-  systemd:
+  systemd_service:
     name: ufw
     state: stopped
-    enabled: no
+    enabled: false
   when:
     "'ufw.service' in services and services['ufw.service'].status != 'not-found'"
@@ -1,5 +1,11 @@
 terraform {
   required_version = ">= 0.12.0"
+  required_providers {
+    aws = {
+      source = "hashicorp/aws"
+      version = "~> 5.0"
+    }
+  }
 }
 
 provider "aws" {

@@ -12,8 +12,8 @@ ${list_master}
 ${list_worker}
 
 [k8s_cluster:children]
-kube-master
-kube-node
+kube_control_plane
+kube_node
 
 [k8s_cluster:vars]
 network_id=${network_id}

@@ -368,7 +368,7 @@ def iter_host_ips(hosts, ips):
             'ansible_host': ip,
         })
 
-        if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
+        if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0" and 'access_ip' in host[1]:
            host[1].pop('access_ip')
 
        yield host
@@ -1,5 +1,11 @@
 # See: https://developers.upcloud.com/1.3/5-zones/
 zone = "fi-hel1"
+private_cloud = false
+
+# Only used if private_cloud = true, public zone equivalent
+# For example use finnish public zone for finnish private zone
+public_zone = "fi-hel2"
+
 username = "ubuntu"
 
 # Prefix to use for all resources to separate them from other resources

@@ -13,6 +13,8 @@ module "kubernetes" {
 
   prefix = var.prefix
   zone = var.zone
+  private_cloud = var.private_cloud
+  public_zone = var.public_zone
 
   template_name = var.template_name
   username = var.username

@@ -56,9 +56,10 @@ resource "upcloud_server" "master" {
 
   hostname = "${local.resource-prefix}${each.key}"
   plan = each.value.plan
-  cpu = each.value.plan == null ? each.value.cpu : null
-  mem = each.value.plan == null ? each.value.mem : null
+  cpu = each.value.plan == null ? null : each.value.cpu
+  mem = each.value.plan == null ? null : each.value.mem
   zone = var.zone
+  server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
 
   template {
     storage = var.template_name

@@ -113,9 +114,11 @@ resource "upcloud_server" "worker" {
 
   hostname = "${local.resource-prefix}${each.key}"
   plan = each.value.plan
-  cpu = each.value.plan == null ? each.value.cpu : null
-  mem = each.value.plan == null ? each.value.mem : null
+  cpu = each.value.plan == null ? null : each.value.cpu
+  mem = each.value.plan == null ? null : each.value.mem
   zone = var.zone
+  server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
+
 
   template {
     storage = var.template_name
@@ -512,9 +515,19 @@ resource "upcloud_loadbalancer" "lb" {
   configured_status = "started"
   name = "${local.resource-prefix}lb"
   plan = var.loadbalancer_plan
-  zone = var.zone
-  network = upcloud_network.private.id
+  zone = var.private_cloud ? var.public_zone : var.zone
+  networks {
+    name = "Private-Net"
+    type = "private"
+    family = "IPv4"
+    network = upcloud_network.private.id
+  }
+  networks {
+    name = "Public-Net"
+    type = "public"
+    family = "IPv4"
+  }
 }
 
 resource "upcloud_loadbalancer_backend" "lb_backend" {
   for_each = var.loadbalancer_enabled ? var.loadbalancers : {}

@@ -534,6 +547,9 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {
   mode = "tcp"
   port = each.value.port
   default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name
+  networks {
+    name = "Public-Net"
+  }
 }
 
 resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {

@@ -557,5 +573,9 @@ resource "upcloud_server_group" "server_groups" {
   title = each.key
   anti_affinity_policy = each.value.anti_affinity_policy
   labels = {}
-  members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
+  # Managed upstream via upcloud_server resource
+  members = []
+  lifecycle {
+    ignore_changes = [members]
+  }
 }

@@ -6,6 +6,14 @@ variable "zone" {
   type = string
 }
 
+variable "private_cloud" {
+  type = bool
+}
+
+variable "public_zone" {
+  type = string
+}
+
 variable "template_name" {}
 
 variable "username" {}

@@ -20,6 +28,7 @@ variable "machines" {
     cpu = string
     mem = string
     disk_size = number
+    server_group : string
     additional_disks = map(object({
       size = number
       tier = string

@@ -104,6 +113,5 @@ variable "server_groups" {
 
   type = map(object({
     anti_affinity_policy = string
-    servers = list(string)
   }))
 }

@@ -3,7 +3,7 @@ terraform {
   required_providers {
     upcloud = {
       source = "UpCloudLtd/upcloud"
-      version = "~>2.12.0"
+      version = "~>5.6.0"
     }
   }
   required_version = ">= 0.13"
@@ -9,6 +9,15 @@ variable "zone" {
   description = "The zone where to run the cluster"
 }
 
+variable "private_cloud" {
+  description = "Whether the environment is in the private cloud region"
+  default = false
+}
+
+variable "public_zone" {
+  description = "The public zone equivalent if the cluster is running in a private cloud zone"
+}
+
 variable "template_name" {
   description = "Block describing the preconfigured operating system"
 }

@@ -32,6 +41,7 @@ variable "machines" {
     cpu = string
     mem = string
     disk_size = number
+    server_group : string
     additional_disks = map(object({
       size = number
       tier = string

@@ -142,7 +152,6 @@ variable "server_groups" {
 
   type = map(object({
     anti_affinity_policy = string
-    servers = list(string)
   }))
 
   default = {}

@@ -3,7 +3,7 @@ terraform {
   required_providers {
     upcloud = {
       source = "UpCloudLtd/upcloud"
-      version = "~>2.12.0"
+      version = "~>5.6.0"
     }
   }
   required_version = ">= 0.13"
@@ -424,7 +424,7 @@ calico_wireguard_enabled: true
 
 The following OSes will require enabling the EPEL repo in order to bring in wireguard tools:
 
-* CentOS 7 & 8
+* CentOS 8
 * AlmaLinux 8
 * Rocky Linux 8
 * Amazon Linux 2
@@ -132,7 +132,7 @@ Wireguard option is only available in Cilium 1.10.0 and newer.
 
 ### IPsec Encryption
 
-For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-ipsec/)
+For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/)
 
 To enable IPsec encryption, you just need to set three variables.
 

@@ -157,7 +157,7 @@ echo "cilium_ipsec_key: "$(echo -n "3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/uran
 
 ### Wireguard Encryption
 
-For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-wireguard/)
+For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-wireguard/)
 
 To enable Wireguard encryption, you just need to set two variables.
 
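For orientation, here is a minimal group-vars sketch of the two setups the Cilium doc hunks above refer to ("three variables" for IPsec, "two variables" for Wireguard). Only `cilium_ipsec_key` appears verbatim in the hunk header above; the other key names follow Kubespray's Cilium role conventions and should be treated as assumptions to verify against the Cilium documentation of the release you deploy.

```yaml
# Assumed variable names -- verify against the Cilium section of your Kubespray release.
# Wireguard encryption (the "two variables"):
cilium_encryption_enabled: true
cilium_encryption_type: "wireguard"

# IPsec encryption (the "three variables") -- generate the key with the
# cilium_ipsec_key command shown in the documentation:
# cilium_encryption_enabled: true
# cilium_encryption_type: "ipsec"
# cilium_ipsec_key: "<output of the key-generation command>"
```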
@@ -16,14 +16,6 @@ Enabling the `overlay2` graph driver:
 docker_storage_options: -s overlay2
 ```
 
-Enabling `docker_container_storage_setup`, it will configure devicemapper driver on Centos7 or RedHat7.
-Deployers must be define a disk path for `docker_container_storage_setup_devs`, otherwise docker-storage-setup will be executed incorrectly.
-
-```yaml
-docker_container_storage_setup: true
-docker_container_storage_setup_devs: /dev/vdb
-```
-
 Changing the Docker cgroup driver (native.cgroupdriver); valid options are `systemd` or `cgroupfs`, default is `systemd`:
 
 ```yaml
@@ -231,6 +231,7 @@ The following tags are defined in playbooks:
 | services | Remove services (etcd, kubelet etc...) when resetting |
 | snapshot | Enabling csi snapshot |
 | snapshot-controller | Configuring csi snapshot controller |
+| system-packages | Install packages using OS package manager |
 | upgrade | Upgrading, f.e. container images/binaries |
 | upload | Distributing images/binaries across hosts |
 | vsphere-csi-driver | Configuring csi driver: vsphere |
@@ -216,6 +216,8 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
 The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive.
 When specified, the value must be less than imageGCHighThresholdPercent. Default: 80
 
+* *kubelet_max_parallel_image_pulls* - Sets the maximum number of image pulls in parallel. The value is `1` by default which means the default is serial image pulling, set it to a integer great than `1` to enable image pulling in parallel.
+
 * *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host.
 
 * *kubelet_cpu_manager_policy* - If set to `static`, allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. And it should be set with `kube_reserved` or `system-reserved`, enable this with the following guide:[Control CPU Management Policies on the Node](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/)
@@ -243,6 +245,10 @@ kubelet_cpu_manager_policy_options:
 
 By default the `kubelet_secure_addresses` is set with the `10.0.0.110` the ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`.
 
+* *kubelet_systemd_wants_dependencies* - List of kubelet service dependencies, other than container runtime.
+
+  If you use nfs dynamically mounted volumes, sometimes rpc-statd does not start within the kubelet. You can fix it with this parameter : `kubelet_systemd_wants_dependencies: ["rpc-statd.service"]` This will add `Wants=rpc-statd.service` in `[Unit]` section of /etc/systemd/system/kubelet.service
+
 * *node_labels* - Labels applied to nodes via `kubectl label node`.
   For example, labels can be set in the inventory as variables or more widely in group_vars.
   *node_labels* can only be defined as a dict:
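To make the two new kubelet knobs documented above concrete, a minimal group_vars sketch; the values are illustrative assumptions, not recommended defaults.

```yaml
# Pull several images at once instead of serially
kubelet_max_parallel_image_pulls: 4

# Make kubelet.service wait for rpc-statd (useful with NFS-backed volumes)
kubelet_systemd_wants_dependencies:
  - rpc-statd.service
```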
@@ -8,7 +8,7 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
 |---| --- | --- | --- | --- | --- | --- | --- | --- |
 almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
 amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
+centos8 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
 debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: |
 debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
@@ -26,7 +26,7 @@ ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 |---| --- | --- | --- | --- | --- | --- | --- | --- |
 almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -44,7 +44,7 @@ ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 |---| --- | --- | --- | --- | --- | --- | --- | --- |
 almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -80,7 +80,7 @@ cat << EOF > vagrant/config.rb
 \$instance_name_prefix = "kub"
 \$vm_cpus = 1
 \$num_instances = 3
-\$os = "centos-bento"
+\$os = "centos8-bento"
 \$subnet = "10.0.20"
 \$network_plugin = "flannel"
 \$inventory = "$INV"
@@ -1,10 +1,5 @@
 # CentOS and derivatives
 
-## CentOS 7
-
-The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
-Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
-
 ## CentOS 8
 
 If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
@@ -1,6 +1,6 @@
 # cgroups
 
-To avoid the rivals for resources between containers or the impact on the host in Kubernetes, the kubelet components will rely on cgroups to limit the container’s resources usage.
+To avoid resource contention between containers and host daemons in Kubernetes, the kubelet components can use cgroups to limit resource usage.
 
 ## Enforcing Node Allocatable
 
@@ -20,8 +20,9 @@ Here is an example:
 ```yaml
 kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"
 
-# Reserve this space for kube resources
-# Set to true to reserve resources for kube daemons
+# Set kube_reserved to true to run kubelet and container-engine daemons in a dedicated cgroup.
+# This is required if you want to enforce limits on the resource usage of these daemons.
+# It is not required if you just want to make resource reservations (kube_memory_reserved, kube_cpu_reserved, etc.)
 kube_reserved: true
 kube_reserved_cgroups_for_service_slice: kube.slice
 kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
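If only plain reservations are wanted, without the dedicated cgroup shown above, a minimal sketch could look like this; the variable names come from the comment in the hunk, the sizes are illustrative assumptions.

```yaml
# Reservations only; kube_reserved can stay false in this case
kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
```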
@@ -30,12 +30,12 @@ loadbalancer. If you wish to control the name of the loadbalancer container,
 you can set the variable `loadbalancer_apiserver_pod_name`.
 
 If you choose to NOT use the local internal loadbalancer, you will need to
-use the [kube-vip](kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
+use the [kube-vip](/docs/ingress/kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
 `access_ip` or IP address of the first server node in the `kube_control_plane` group.
 It can also configure clients to use endpoints for a given loadbalancer type.
 The following diagram shows how traffic to the apiserver is directed.
 
 ![Image](figures/loadbalancer_localhost.png?raw=true)
 
 A user may opt to use an external loadbalancer (LB) instead. An external LB
 provides access for external clients, while the internal LB accepts client
@@ -103,7 +103,9 @@ If you use the settings like the one above, you'll need to define in your invent
 can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so
 that you don't need to modify this setting everytime kubespray upgrades one of these components.
 * `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending on your OS, should point to your internal
-  repository. Adjust the path accordingly.
+  repository. Adjust the path accordingly. Used only for Docker/Containerd packages (if needed); other packages might
+  be installed from other repositories. You might disable installing packages from other repositories by skipping
+  the `system-packages` tag
 
 ## Install Kubespray Python Packages
 
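A hedged sketch of what such inventory overrides might look like; the hostnames below are placeholders, not real endpoints, and the `system-packages` tag mentioned above can simply be skipped at run time if OS repositories should not be touched.

```yaml
# Illustrative offline overrides (e.g. in group_vars/all/offline.yml)
files_repo: "https://files.example.internal/kubespray"
yum_repo: "https://rpms.example.internal"
kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
```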
@@ -12,7 +12,7 @@
 
 - name: Setup ssh config to use the bastion
   hosts: localhost
-  gather_facts: False
+  gather_facts: false
   roles:
     - { role: kubespray-defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
galaxy.yml
@@ -2,49 +2,24 @@
 namespace: kubernetes_sigs
 description: Deploy a production ready Kubernetes cluster
 name: kubespray
-version: 2.25.2
+version: 2.26.0
 readme: README.md
 authors:
   - The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)
 tags:
   - infrastructure
 repository: https://github.com/kubernetes-sigs/kubespray
+issues: https://github.com/kubernetes-sigs/kubespray/issues
+documentation: https://kubespray.io
 license_file: LICENSE
 dependencies:
   ansible.utils: '>=2.5.0'
-  community.general: '>=3.0.0'
-build_ignore:
-  - .github
-  - '*.tar.gz'
-  - extra_playbooks
-  - inventory
-  - scripts
-  - test-infra
-  - .ansible-lint
-  - .editorconfig
-  - .gitignore
-  - .gitlab-ci
-  - .gitlab-ci.yml
-  - .gitmodules
-  - .markdownlint.yaml
-  - .nojekyll
-  - .pre-commit-config.yaml
-  - .yamllint
-  - Dockerfile
-  - FILES.json
-  - MANIFEST.json
-  - Makefile
-  - Vagrantfile
-  - _config.yml
-  - ansible.cfg
-  - requirements*txt
-  - setup.cfg
-  - setup.py
-  - index.html
-  - reset.yml
-  - cluster.yml
-  - scale.yml
-  - recover-control-plane.yml
-  - remove-node.yml
-  - upgrade-cluster.yml
-  - library
+  community.general: '>=7.0.0'
+  ansible.netcommon: '>=5.3.0'
+  ansible.posix: '>=1.5.4'
+  community.docker: '>=3.11.0'
+  kubernetes.core: '>=2.4.2'
+manifest:
+  directives:
+    - recursive-exclude tests **
+    - recursive-include roles **/files/*
@@ -24,8 +24,21 @@
 # containerd_grpc_max_recv_message_size: 16777216
 # containerd_grpc_max_send_message_size: 16777216
 
+# Containerd debug socket location: unix or tcp format
+# containerd_debug_address: ""
+
+# Containerd log level
 # containerd_debug_level: "info"
 
+# Containerd logs format, supported values: text, json
+# containerd_debug_format: ""
+
+# Containerd debug socket UID
+# containerd_debug_uid: 0
+
+# Containerd debug socket GID
+# containerd_debug_gid: 0
+
 # containerd_metrics_address: ""
 
 # containerd_metrics_grpc_histogram: false
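For illustration, enabling the new containerd debug options added above might look like the following; the socket path and values are assumptions, not kubespray defaults.

```yaml
containerd_debug_address: /run/containerd/debug.sock  # assumed path, adjust as needed
containerd_debug_level: debug
containerd_debug_format: json
containerd_debug_uid: 0
containerd_debug_gid: 0
```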
@@ -18,7 +18,7 @@
 # quay_image_repo: "{{ registry_host }}"
 
 ## Kubernetes components
-# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
+# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
 # kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
 # kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
 
@@ -82,7 +82,7 @@
 # krew_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz"
 
 ## CentOS/Redhat/AlmaLinux
-### For EL7, base and extras repo must be available, for EL8, baseos and appstream
+### For EL8, baseos and appstream must be available,
 ### By default we enable those repo automatically
 # rhel_enable_repos: false
 ### Docker / Containerd
@@ -33,3 +33,6 @@
 # etcd_experimental_distributed_tracing_sample_rate: 100
 # etcd_experimental_distributed_tracing_address: "localhost:4317"
 # etcd_experimental_distributed_tracing_service_name: etcd
+
+## The interval for etcd watch progress notify events
+# etcd_experimental_watch_progress_notify_interval: 5s
@@ -96,6 +96,10 @@ rbd_provisioner_enabled: false
 # rbd_provisioner_storage_class: rbd
 # rbd_provisioner_reclaim_policy: Delete
 
+# Gateway API CRDs
+gateway_api_enabled: false
+# gateway_api_experimental_channel: false
+
 # Nginx ingress controller deployment
 ingress_nginx_enabled: false
 # ingress_nginx_host_network: false
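Turning the new Gateway API toggle on is a one-line override; the experimental channel flag is optional (a sketch, not a default recommendation).

```yaml
gateway_api_enabled: true
# Also install the experimental channel CRDs
gateway_api_experimental_channel: true
```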
@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
 kube_api_anonymous_auth: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.29.10
+kube_version: v1.30.4
 
 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)
@@ -262,7 +262,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
 # kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service"
 # kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
 
-# Optionally reserve this space for kube daemons.
+# Whether to run kubelet and container-engine daemons in a dedicated cgroup.
 # kube_reserved: false
 ## Uncomment to override default values
 ## The following two items need to be set when kube_reserved is true
@@ -163,6 +163,13 @@ cilium_l2announcements: false
 ### Enable auto generate certs if cilium_hubble_install: true
 # cilium_hubble_tls_generate: false
 
+### Tune cilium_hubble_event_buffer_capacity & cilium_hubble_event_queue_size values to avoid dropping events when hubble is under heavy load
+### Capacity of Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535
+### (ie: 1, 3, ..., 2047, 4095, ..., 65535) (default 4095)
+# cilium_hubble_event_buffer_capacity: 4095
+### Buffer size of the channel to receive monitor events.
+# cilium_hubble_event_queue_size: 50
+
 # IP address management mode for v1.9+.
 # https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
 # cilium_ipam_mode: kubernetes
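When Hubble drops events under load, the two buffers introduced above can be enlarged; the figures below are illustrative and respect the 2^n - 1 constraint stated in the comments.

```yaml
cilium_hubble_event_buffer_capacity: 8191  # must be one less than a power of two, <= 65535
cilium_hubble_event_queue_size: 200
```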
@@ -2,7 +2,7 @@
 - name: Check Ansible version
   hosts: all
   gather_facts: false
-  become: no
+  become: false
   run_once: true
   vars:
     minimal_ansible_version: 2.16.4
@@ -25,7 +25,6 @@
       tags:
         - check
 
-    # CentOS 7 provides too old jinja version
     - name: "Check that jinja is not too old (install via pip)"
       assert:
         msg: "Your Jinja version is too old, install via pip"
@@ -51,7 +51,7 @@
 
 - name: Install bastion ssh config
   hosts: bastion[0]
-  gather_facts: False
+  gather_facts: false
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
@@ -7,7 +7,7 @@
 
 - name: Prepare for etcd install
   hosts: k8s_cluster:etcd
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -21,7 +21,7 @@
 
 - name: Install Kubernetes nodes
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -30,7 +30,7 @@
 
 - name: Install the control plane
   hosts: kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -41,7 +41,7 @@
 
 - name: Invoke kubeadm and install a CNI
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -54,7 +54,7 @@
 
 - name: Install Calico Route Reflector
   hosts: calico_rr
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -63,7 +63,7 @@
 
 - name: Patch Kubernetes for Windows
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -72,7 +72,7 @@
 
 - name: Install Kubernetes apps
   hosts: kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -86,7 +86,7 @@
 
 - name: Apply resolv.conf changes now that cluster DNS is up
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -15,7 +15,7 @@
 
 - name: Gather facts
   hosts: k8s_cluster:etcd:calico_rr
-  gather_facts: False
+  gather_facts: false
   tags: always
   tasks:
     - name: Gather minimal facts
@@ -16,7 +16,7 @@
 
 - name: Install etcd
   hosts: etcd:kube_control_plane:_kubespray_needs_etcd
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -4,13 +4,13 @@
 
 - name: Confirm node removal
   hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
-  gather_facts: no
+  gather_facts: false
   tasks:
     - name: Confirm Execution
       pause:
         prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
       register: pause_result
-      run_once: True
+      run_once: true
       when:
         - not (skip_confirmation | default(false) | bool)
 
@@ -25,7 +25,7 @@
 
 - name: Reset node
   hosts: "{{ node | default('kube_node') }}"
-  gather_facts: no
+  gather_facts: false
   environment: "{{ proxy_disable_env }}"
   pre_tasks:
     - name: Gather information about installed services
@@ -40,7 +40,7 @@
 # Currently cannot remove first master or etcd
 - name: Post node removal
   hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
-  gather_facts: no
+  gather_facts: false
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults, when: reset_nodes | default(True) | bool }
@@ -7,13 +7,13 @@
 
 - name: Reset cluster
   hosts: etcd:k8s_cluster:calico_rr
-  gather_facts: False
+  gather_facts: false
   pre_tasks:
     - name: Reset Confirmation
       pause:
         prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
       register: reset_confirmation_prompt
-      run_once: True
+      run_once: true
      when:
         - not (skip_confirmation | default(false) | bool)
         - reset_confirmation is not defined
@@ -7,7 +7,7 @@
 
 - name: Generate the etcd certificates beforehand
   hosts: etcd:kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -24,7 +24,7 @@
 
 - name: Download images to ansible host cache via first kube_control_plane node
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -34,7 +34,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
   hosts: kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -53,7 +53,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(node)
   hosts: kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -63,7 +63,7 @@
 - name: Upload control plane certs and retrieve encryption key
   hosts: kube_control_plane | first
   environment: "{{ proxy_disable_env }}"
-  gather_facts: False
+  gather_facts: false
   tags: kubeadm
   roles:
     - { role: kubespray-defaults }
@@ -84,7 +84,7 @@
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(network)
   hosts: kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -96,7 +96,7 @@
 
 - name: Apply resolv.conf changes now that cluster DNS is up
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -7,7 +7,7 @@
 
 - name: Download images to ansible host cache via first kube_control_plane node
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -17,7 +17,7 @@
 
 - name: Prepare nodes for upgrade
   hosts: k8s_cluster:etcd:calico_rr
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -27,7 +27,7 @@
 
 - name: Upgrade container engine on non-cluster nodes
   hosts: etcd:calico_rr:!k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   serial: "{{ serial | default('20%') }}"
@@ -39,7 +39,7 @@
   import_playbook: install_etcd.yml
 
 - name: Handle upgrades to master components first to maintain backwards compat.
-  gather_facts: False
+  gather_facts: false
   hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -62,7 +62,7 @@
 
 - name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
   hosts: kube_control_plane:calico_rr:kube_node
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   environment: "{{ proxy_disable_env }}"
@@ -75,7 +75,7 @@
 
 - name: Finally handle worker upgrades, based on given batch size
   hosts: kube_node:calico_rr:!kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   serial: "{{ serial | default('20%') }}"
@@ -93,7 +93,7 @@
 
 - name: Patch Kubernetes for Windows
   hosts: kube_control_plane[0]
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: true
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -102,7 +102,7 @@
 
 - name: Install Calico Route Reflector
   hosts: calico_rr
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -111,7 +111,7 @@
 
 - name: Install Kubernetes apps
   hosts: kube_control_plane
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -122,7 +122,7 @@
 
 - name: Apply resolv.conf changes now that cluster DNS is up
   hosts: k8s_cluster
-  gather_facts: False
+  gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
   roles:
@@ -1,10 +1,7 @@
-ansible==9.5.1
-cryptography==42.0.7
-jinja2==3.1.4
+ansible==9.8.0
+# Needed for jinja2 json_query templating
 jmespath==1.0.1
-jsonschema==4.22.0
-MarkupSafe==2.1.5
-netaddr==1.2.1
-pbr==6.0.0
-ruamel.yaml==0.18.6
-ruamel.yaml.clib==0.2.8
+# Needed for ansible.utils.validate module
+jsonschema==4.23.0
+# Needed for ansible.utils.ipaddr
+netaddr==1.3.0
@@ -7,14 +7,14 @@ addusers:
   etcd:
     name: etcd
     comment: "Etcd user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
   kube:
     name: kube
     comment: "Kubernetes user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
     group: "{{ kube_cert_group }}"
 
@@ -3,6 +3,6 @@ addusers:
   - name: kube
     comment: "Kubernetes user"
     shell: /sbin/nologin
-    system: yes
+    system: true
     group: "{{ kube_cert_group }}"
-    create_home: no
+    create_home: false
@@ -2,14 +2,14 @@
 addusers:
   - name: etcd
     comment: "Etcd user"
-    create_home: yes
+    create_home: true
     home: "{{ etcd_data_dir }}"
-    system: yes
+    system: true
     shell: /sbin/nologin
 
   - name: kube
     comment: "Kubernetes user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
     group: "{{ kube_cert_group }}"
@@ -2,14 +2,14 @@
 addusers:
   - name: etcd
     comment: "Etcd user"
-    create_home: yes
+    create_home: true
     home: "{{ etcd_data_dir }}"
-    system: yes
+    system: true
     shell: /sbin/nologin
 
   - name: kube
     comment: "Kubernetes user"
-    create_home: no
-    system: yes
+    create_home: false
+    system: true
     shell: /sbin/nologin
     group: "{{ kube_cert_group }}"
@@ -12,4 +12,4 @@
     dest: "{{ ssh_bastion_confing__name }}"
     owner: "{{ ansible_user }}"
     group: "{{ ansible_user }}"
-    mode: 0644
+    mode: "0644"
@@ -19,4 +19,4 @@
   template:
     src: "{{ ssh_bastion_confing__name }}.j2"
     dest: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}"
-    mode: 0640
+    mode: "0640"
@@ -11,6 +11,10 @@ coreos_locksmithd_disable: false
 # Install public repo on Oracle Linux
 use_oracle_public_repo: true
 
+## Ubuntu specific variables
+# Disable unattended-upgrades for Linux kernel and all packages start with linux- on Ubuntu
+ubuntu_kernel_unattended_upgrades_disabled: false
+
 fedora_coreos_packages:
   - python
   - python3-libselinux
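Opting into the new Ubuntu behaviour above is a single override, shown here only as an illustration:

```yaml
ubuntu_kernel_unattended_upgrades_disabled: true
```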
@@ -1,6 +1,6 @@
 ---
 - name: Converge
   hosts: all
-  gather_facts: no
+  gather_facts: false
   roles:
     - role: bootstrap-os
@@ -19,12 +19,6 @@ platforms:
     memory: 1024
     provider_options:
       driver: kvm
-  - name: centos7
-    box: centos/7
-    cpus: 1
-    memory: 512
-    provider_options:
-      driver: kvm
   - name: almalinux8
     box: almalinux/8
     cpus: 1
@@ -8,9 +8,9 @@
     file: epel
     description: Extra Packages for Enterprise Linux 7 - $basearch
     baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
-    gpgcheck: yes
+    gpgcheck: true
     gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
-    skip_if_unavailable: yes
-    enabled: yes
-    repo_gpgcheck: no
+    skip_if_unavailable: true
+    enabled: true
+    repo_gpgcheck: false
   when: epel_enabled
@@ -12,7 +12,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
-    mode: 0644
+    mode: "0644"
   become: true
   when: not skip_http_proxy_on_os_packages
 
@@ -21,7 +21,7 @@
   get_url:
     url: https://yum.oracle.com/public-yum-ol7.repo
     dest: /etc/yum.repos.d/public-yum-ol7.repo
-    mode: 0644
+    mode: "0644"
   when:
     - use_oracle_public_repo | default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
@@ -34,7 +34,7 @@
     section: "{{ item }}"
     option: enabled
     value: "1"
-    mode: 0644
+    mode: "0644"
   with_items:
     - ol7_latest
     - ol7_addons
@@ -59,7 +59,7 @@
     section: "ol{{ ansible_distribution_major_version }}_addons"
     option: "{{ item.option }}"
     value: "{{ item.value }}"
-    mode: 0644
+    mode: "0644"
   with_items:
     - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" }
     - { option: "enabled", value: "1" }
@@ -75,53 +75,26 @@
     section: "extras"
     option: "{{ item.option }}"
     value: "{{ item.value }}"
-    mode: 0644
+    mode: "0644"
   with_items:
     - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" }
     - { option: "enabled", value: "1" }
     - { option: "gpgcheck", value: "0" }
-    - { option: "baseurl", value: "http://vault.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" }
+    - { option: "baseurl", value: "http://mirror.centos.org/centos/{{ ansible_distribution_major_version }}/extras/$basearch/os/" }
   when:
     - use_oracle_public_repo | default(true)
     - '''ID="ol"'' in os_release.stdout_lines'
     - (ansible_distribution_version | float) >= 7.6
     - (ansible_distribution_version | float) < 9
 
-# CentOS 7 EOL at July 1, 2024.
-- name: Check CentOS-Base.repo exists for CentOS 7
-  stat:
-    path: /etc/yum.repos.d/CentOS-Base.repo
-  register: centos_base_repo_stat
-  when:
-    - ansible_distribution_major_version == "7"
-
-# CentOS 7 EOL at July 1, 2024.
-- name: Update CentOS 7 CentOS-Base.repo
-  when:
-    - ansible_distribution_major_version == "7"
-    - centos_base_repo_stat.stat.exists
-  become: true
-  block:
-    - name: Disable CentOS 7 mirrorlist in CentOS-Base.repo
-      replace:
-        path: "{{ centos_base_repo_stat.stat.path }}"
-        regexp: '^mirrorlist='
-        replace: '#mirrorlist='
-
-    - name: Update CentOS 7 baseurl in CentOS-Base.repo
-      replace:
-        path: "{{ centos_base_repo_stat.stat.path }}"
-        regexp: '^#baseurl=http:\/\/mirror.centos.org'
-        replace: 'baseurl=http:\/\/vault.centos.org'
-
 # CentOS ships with python installed
 
 - name: Check presence of fastestmirror.conf
   stat:
     path: /etc/yum/pluginconf.d/fastestmirror.conf
-    get_attributes: no
-    get_checksum: no
-    get_mime: no
+    get_attributes: false
+    get_checksum: false
+    get_mime: false
   register: fastestmirror
 
 # the fastestmirror plugin can actually slow down Ansible deployments
@@ -7,7 +7,7 @@
     state: present
 
 - name: Make sure docker service is enabled
-  systemd:
+  systemd_service:
     name: docker
     masked: false
     enabled: true
@@ -28,7 +28,7 @@
   raw: "nohup bash -c 'sleep 5s && shutdown -r now'"
   become: true
   ignore_errors: true  # noqa ignore-errors
-  ignore_unreachable: yes
+  ignore_unreachable: true
   when: need_bootstrap.rc != 0
 
 - name: Wait for the reboot to complete
@@ -17,7 +17,7 @@
     value: "{{ http_proxy | default(omit) }}"
     state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
     no_extra_spaces: true
-    mode: 0644
+    mode: "0644"
   become: true
   when: not skip_http_proxy_on_os_packages
 
@@ -26,7 +26,7 @@
     ansible_interpreter_python_fallback: "{{ ansible_interpreter_python_fallback + [ '/opt/bin/python' ] }}"
 
 - name: Disable auto-upgrade
-  systemd:
+  systemd_service:
     name: locksmithd.service
     masked: true
     state: stopped
@@ -22,7 +22,7 @@
         - "{{ os_release_dict['ID'] }}.yml"
       paths:
         - vars/
-      skip: True
+      skip: true
 - name: Include tasks
   include_tasks: "{{ included_tasks_file }}"
   with_first_found:
@@ -36,7 +36,7 @@
   file:
     path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}"
     state: directory
-    mode: 0700
+    mode: "0700"
 
 - name: Gather facts
   setup:
@@ -61,4 +61,4 @@
     state: directory
     owner: root
     group: root
-    mode: 0755
+    mode: "0755"
Some files were not shown because too many files have changed in this diff.