Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2026-02-05 03:24:55 -03:30)

Compare commits: pre-commit ... v2.20.0 (87 commits)
| SHA1 |
|---|
| 18efdc2c51 |
| 6dff39344b |
| c4de3df492 |
| f2e11f088b |
| 782f0511b9 |
| fa093ee609 |
| 612bcc4bb8 |
| 4ad67acedd |
| 467dc19cbd |
| 726711513f |
| 9468642269 |
| d387d4811f |
| 1b3c2dab2e |
| 76573bf293 |
| 5d3326b93f |
| 68dac4e181 |
| 262c96ec0b |
| 2acdc33aa1 |
| 8acd33d0df |
| a2e23c1a71 |
| 1b5cc175b9 |
| a71da25b57 |
| 5ac614f97d |
| b8b8b82ff4 |
| 7da3dbcb39 |
| 680293e79c |
| 023b16349e |
| c4976437a8 |
| 97ca2f3c78 |
| e76385e7cd |
| 7c2fb227f4 |
| 08bfa0b18f |
| 952cad8d63 |
| 5bce39abf8 |
| fc57c0b27e |
| dd4bc5fbfe |
| d2a7434c67 |
| 5fa885b150 |
| f3fb758f0c |
| 6386ec029c |
| ad7cefa352 |
| 09d9bc910e |
| e2f1f8d69d |
| be2bfd867c |
| 133a7a0e1b |
| efb47edb9f |
| 36bec19a84 |
| 6db6c8678c |
| 5603f9f374 |
| 7ebb8c3f2e |
| acb6f243fd |
| 220f149299 |
| 1baabb3c05 |
| 617b17ad46 |
| 8af86e4c1e |
| 9dc9a670a5 |
| b46ddf35fc |
| de762400ad |
| e60ece2b5e |
| e6976a54e1 |
| 64daaf1887 |
| 1c75ec9ec1 |
| c8a61ec98c |
| aeeae76750 |
| 30b062fd43 |
| 8f899a1101 |
| 386c739d5b |
| fddff783c8 |
| bbd1161147 |
| ab938602a9 |
| e31890806c |
| 30c77ea4c1 |
| 175cdba9b1 |
| ea29cd0890 |
| 68653c31c0 |
| be5fdab3aa |
| f4daf5856e |
| 49d869f662 |
| b36bb9115a |
| 9ad2d24ad8 |
| 0088fe0ab7 |
| ab93b17a7e |
| 9f1b980844 |
| 86d05ac180 |
| bf6fcf6347 |
| b9e4e27195 |
| 8585134db4 |
.gitignore (vendored): 1 change

@@ -112,3 +112,4 @@ roles/**/molecule/**/__pycache__/

 # Temp location used by our scripts
 scripts/tmp/
+tmp.md
@@ -8,7 +8,7 @@ stages:
   - deploy-special

 variables:
-  KUBESPRAY_VERSION: v2.19.0
+  KUBESPRAY_VERSION: v2.19.1
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
@@ -151,6 +151,11 @@ packet_rockylinux8-calico:
   extends: .packet_pr
   when: on_success

+packet_rockylinux9-calico:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
 packet_almalinux8-docker:
   stage: deploy-part2
   extends: .packet_pr
@@ -1,2 +1,3 @@
 ---
 MD013: false
+MD029: false
.pre-commit-config.yaml (new file): 48 lines

@@ -0,0 +1,48 @@
+---
+repos:
+  - repo: https://github.com/adrienverge/yamllint.git
+    rev: v1.27.1
+    hooks:
+      - id: yamllint
+        args: [--strict]
+
+  - repo: https://github.com/markdownlint/markdownlint
+    rev: v0.11.0
+    hooks:
+      - id: markdownlint
+        args: [ -r, "~MD013,~MD029" ]
+        exclude: "^.git"
+
+  - repo: local
+    hooks:
+      - id: ansible-lint
+        name: ansible-lint
+        entry: ansible-lint -v
+        language: python
+        pass_filenames: false
+        additional_dependencies:
+          - .[community]
+
+      - id: ansible-syntax-check
+        name: ansible-syntax-check
+        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
+        language: python
+        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
+
+      - id: tox-inventory-builder
+        name: tox-inventory-builder
+        entry: bash -c "cd contrib/inventory_builder && tox"
+        language: python
+        pass_filenames: false
+
+      - id: check-readme-versions
+        name: check-readme-versions
+        entry: tests/scripts/check_readme_versions.sh
+        language: script
+        pass_filenames: false
+
+      - id: ci-matrix
+        name: ci-matrix
+        entry: tests/scripts/md-table/test.sh
+        language: script
+        pass_filenames: false
@@ -16,7 +16,12 @@ pip install -r tests/requirements.txt

 #### Linting

-Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to add call these tools as part of your pre-commit hook and avoid a lot of back end forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
+Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.
+
+```ShellSession
+pre-commit install
+pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
+```

 #### Molecule
@@ -33,7 +38,9 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster

 1. Submit an issue describing your proposed change to the repo in question.
 2. The [repo owners](OWNERS) will respond to your issue promptly.
 3. Fork the desired repo, develop and test your code changes.
-4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
-5. Submit a pull request.
-6. Work with the reviewers on their suggestions.
-7. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
+4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
+5. Address any pre-commit validation failures.
+6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
+7. Submit a pull request.
+8. Work with the reviewers on their suggestions.
+9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
@@ -8,6 +8,7 @@ aliases:
     - floryut
     - oomichi
     - cristicalin
+    - liupeng0518
   kubespray-reviewers:
     - holmsten
     - bozzo

@@ -16,6 +17,7 @@ aliases:
     - jayonlau
     - cristicalin
     - liupeng0518
+    - yankay
   kubespray-emeritus_approvers:
     - riverzhang
     - atoms
README.md: 28 changes
@@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
 You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:

 ```ShellSession
-docker pull quay.io/kubespray/kubespray:v2.19.0
+docker pull quay.io/kubespray/kubespray:v2.19.1
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.19.0 bash
+  quay.io/kubespray/kubespray:v2.19.1 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -120,13 +120,13 @@ vagrant up
 - **Flatcar Container Linux by Kinvolk**
 - **Debian** Bullseye, Buster, Jessie, Stretch
 - **Ubuntu** 16.04, 18.04, 20.04, 22.04
-- **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
+- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
 - **Fedora** 35, 36
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** 7, [8](docs/centos.md#centos-8)
-- **Alma Linux** [8](docs/centos.md#centos-8)
-- **Rocky Linux** [8](docs/centos.md#centos-8)
+- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
+- **Alma Linux** [8, 9](docs/centos.md#centos-8)
+- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
 - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
@@ -135,29 +135,29 @@ Note: Upstart/SysV init based OS types are not supported.
 ## Supported Components

 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.3
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.6
   - [etcd](https://github.com/etcd-io/etcd) v3.5.4
   - [docker](https://www.docker.com/) v20.10 (see note)
-  - [containerd](https://containerd.io/) v1.6.6
+  - [containerd](https://containerd.io/) v1.6.8
   - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
   - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
   - [calico](https://github.com/projectcalico/calico) v3.23.3
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.11.7
-  - [flannel](https://github.com/flannel-io/flannel) v0.18.1
+  - [cilium](https://github.com/cilium/cilium) v1.12.1
+  - [flannel](https://github.com/flannel-io/flannel) v0.19.2
   - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
   - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
   - [multus](https://github.com/intel/multus-cni) v3.8
   - [weave](https://github.com/weaveworks/weave) v2.8.1
   - [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
 - Application
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
+  - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.1
   - [coredns](https://github.com/coredns/coredns) v1.8.6
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.1
   - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
-  - [argocd](https://argoproj.github.io/) v2.4.7
-  - [helm](https://helm.sh/) v3.9.2
+  - [argocd](https://argoproj.github.io/) v2.4.12
+  - [helm](https://helm.sh/) v3.9.4
   - [metallb](https://metallb.universe.tf/) v0.12.1
   - [registry](https://github.com/distribution/distribution) v2.8.1
 - Storage Plugin
@@ -9,5 +9,7 @@
 #
 # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
 # INSTRUCTIONS AT https://kubernetes.io/security/
-atoms
 mattymo
+floryut
+oomichi
+cristicalin
Vagrantfile (vendored): 2 changes
@@ -31,7 +31,7 @@ SUPPORTED_OS = {
   "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
   "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
   "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
-  "opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
+  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
   "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
@@ -35,7 +35,7 @@
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
     - { role: download, tags: download, when: "not skip_downloads" }

-- hosts: etcd
+- hosts: etcd:kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -59,7 +59,10 @@
   vars:
     etcd_cluster_setup: false
     etcd_events_cluster_setup: false
-  when: etcd_deployment_type != "kubeadm"
+  when:
+    - etcd_deployment_type != "kubeadm"
+    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
+    - kube_network_plugin != "calico" or calico_datastore == "etcd"

 - hosts: k8s_cluster
   gather_facts: False
@@ -14,12 +14,16 @@ This role performs basic installation and setup of Gluster, but it does not conf

 Available variables are listed below, along with default values (see `defaults/main.yml`):

-    glusterfs_default_release: ""
+```yaml
+glusterfs_default_release: ""
+```

 You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

-    glusterfs_ppa_use: yes
-    glusterfs_ppa_version: "3.5"
+```yaml
+glusterfs_ppa_use: yes
+glusterfs_ppa_version: "3.5"
+```

 For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.

@@ -29,9 +33,11 @@ None.

 ## Example Playbook

+```yaml
 - hosts: server
   roles:
     - geerlingguy.glusterfs
+```

 For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -36,8 +36,7 @@ terraform apply -var-file=credentials.tfvars
 ```

 - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
-- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
-  Ansible automatically detects bastion and changes ssh_args
+- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`

 ```commandline
 ssh -F ./ssh-bastion.conf user@$ip
@@ -31,9 +31,7 @@ The setup looks like following

 ## Requirements

-* Terraform 0.13.0 or newer
-
-*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
+* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)

 ## Quickstart
@@ -294,7 +294,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

 Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
 availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
 `number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
-using the `k8s_nodes` variable.
+using the `k8s_nodes` variable. The `az`, `flavor` and `floating_ip` parameters are mandatory.
+The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.

 For example:

@@ -314,6 +315,7 @@ k8s_nodes = {
     "az" = "sto3"
     "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
     "floating_ip" = true
+    "extra_groups" = "calico_rr"
   }
 }
 ```
@@ -96,6 +96,10 @@ module "compute" {
   network_router_id = module.network.router_id
   network_id = module.network.network_id
   use_existing_network = var.use_existing_network
+
+  depends_on = [
+    module.network.subnet_id
+  ]
 }

 output "private_subnet_id" {

@@ -111,7 +115,7 @@ output "router_id" {
 }

 output "k8s_master_fips" {
-  value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
+  value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address]
 }

 output "k8s_node_fips" {
@@ -742,7 +742,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}"
     depends_on = var.network_router_id
     use_access_ip = var.use_access_ip
   }
@@ -35,9 +35,7 @@ This setup assumes that the DHCP is disabled in the vSphere cluster and IP addre

 ## Requirements

-* Terraform 0.13.0 or newer
-
-*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
+* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)

 ## Quickstart
@@ -3,7 +3,7 @@
 ## Installing Ansible

 Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
-Depending on your available python version you may be limited in chooding which ansible version to use.
+Depending on your available python version you may be limited in choosing which ansible version to use.

 It is recommended to deploy the ansible version used by kubespray into a python virtual environment.
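For reference, a minimal sketch of such a virtual environment setup (the venv path is arbitrary, and the requirements file kubespray ships for your chosen ansible version may be named differently):

```ShellSession
# Sketch only: create and activate a venv, then install kubespray's pinned dependencies
python3 -m venv ~/.venvs/kubespray
source ~/.venvs/kubespray/bin/activate
pip install -U pip
pip install -r requirements.txt
```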
@@ -57,19 +57,28 @@ The name of the network security group your instances are in, can be retrieved v

 These will have to be generated first:

 - Create an Azure AD Application with:
-  `az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`
+
+  ```ShellSession
+  az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET
+  ```
+
   Display name, identifier-uri, homepage and the password can be chosen

   Note the AppId in the output.

 - Create Service principal for the application with:
-  `az ad sp create --id AppId`
+
+  ```ShellSession
+  az ad sp create --id AppId
+  ```
+
   This is the AppId from the last command

 - Create the role assignment with:
-  `az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`
+
+  ```ShellSession
+  az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID
+  ```

 azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
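Putting the pieces together, the resulting group vars could look like this sketch (both values are placeholders standing in for the AppId and the secret chosen above):

```yaml
# Placeholder values; substitute the AppId and your chosen CLIENT_SECRET
azure_csi_aad_client_id: "00000000-0000-0000-0000-000000000000"
azure_csi_aad_client_secret: "CLIENT_SECRET"
```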
@@ -71,14 +71,27 @@ The name of the resource group that contains the route table. Defaults to `azur

 These will have to be generated first:

 - Create an Azure AD Application with:
-  `az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
+
+  ```ShellSession
+  az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET
+  ```
+
   display name, identifier-uri, homepage and the password can be chosen
   Note the AppId in the output.

 - Create Service principal for the application with:
-  `az ad sp create --id AppId`
+
+  ```ShellSession
+  az ad sp create --id AppId
+  ```
+
   This is the AppId from the last command

 - Create the role assignment with:
-  `az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
+
+  ```ShellSession
+  az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID
+  ```

 azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
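As with the CSI variant above, the corresponding group vars might look like the following sketch (values are placeholders):

```yaml
# Placeholder values; substitute the AppId and your chosen CLIENT_SECRET
azure_aad_client_id: "00000000-0000-0000-0000-000000000000"
azure_aad_client_secret: "CLIENT_SECRET"
```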
@@ -48,11 +48,13 @@ The `kubespray-defaults` role is expected to be run before this role.

 Remember to disable fact gathering since Python might not be present on hosts.

-    - hosts: all
-      gather_facts: false  # not all hosts might be able to run modules yet
-      roles:
-        - kubespray-defaults
-        - bootstrap-os
+```yaml
+- hosts: all
+  gather_facts: false  # not all hosts might be able to run modules yet
+  roles:
+    - kubespray-defaults
+    - bootstrap-os
+```

 ## License
@@ -72,9 +72,14 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112

 In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
 For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
-The following variables need to be set:
-`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
-you'll need to edit the inventory and add a hostvar `local_as` by node.
+The following variables need to be set as follow:
+
+```yml
+peer_with_router: true  # enable the peering with the datacenter's border router (default value: false).
+nat_outgoing: false  # (optional) NAT outgoing (default value: true).
+```
+
+And you'll need to edit the inventory and add a hostvar `local_as` by node.

 ```ShellSession
 node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx

@@ -124,8 +129,7 @@ You need to edit your inventory and add:

 * `calico_rr` group with nodes in it. `calico_rr` can be combined with
   `kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
   group of `k8s_cluster` group.
-* `cluster_id` by route reflector node/group (see details
-  [here](https://hub.docker.com/r/calico/routereflector/))
+* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))

 Here's an example of Kubespray inventory with standalone route reflectors:

@@ -172,6 +176,8 @@ node5

 [rack0:vars]
 cluster_id="1.0.0.1"
+calico_rr_id=rr1
+calico_group_id=rr1
 ```

 The inventory above will deploy the following topology assuming that calico's
@@ -212,7 +218,7 @@ calico_node_readinessprobe_timeout: 10

 Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.

-*IP in IP* and *VXLAN* is mutualy exclusive modes.
+*IP in IP* and *VXLAN* is mutually exclusive modes.

 Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
@@ -245,14 +251,14 @@ calico_network_backend: 'bird'

 If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.

-Execute the following sters on one of the control plane nodes, ensure the cluster in healthy before proceeding.
+Execute the following steps on one of the control plane nodes, ensure the cluster in healthy before proceeding.

 ```shell
 calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
 calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
 ```

-**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool creaded by kubespray.
+**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool created by kubespray.

 Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.
@@ -369,7 +375,7 @@ use_localhost_as_kubeapi_loadbalancer: true

 ### Tunneled versus Direct Server Return

-By default Calico usese Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
+By default Calico uses Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.

 To configure DSR:
@@ -395,7 +401,7 @@ Please see [Calico eBPF troubleshooting guide](https://docs.projectcalico.org/ma

 ## Wireguard Encryption

-Calico supports using Wireguard for encryption. Please see the docs on [encryptiong cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
+Calico supports using Wireguard for encryption. Please see the docs on [encrypt cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).

 To enable wireguard support:
@@ -7,7 +7,7 @@ Kubespray supports multiple ansible versions but only the default (5.x) gets wid

 ## CentOS 8

-CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
+CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
 The only tested configuration for now is using Calico CNI
 You need to add `calico_iptables_backend: "NFT"` to your configuration.
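In practice that is a one-line override, for example (where you keep your group vars is up to you):

```yaml
calico_iptables_backend: "NFT"
```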
@@ -16,6 +16,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x
 fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
 opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
 ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
 ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |

@@ -35,6 +36,7 @@ fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@@ -54,6 +56,7 @@ fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
 opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
 ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -56,7 +56,7 @@ cilium_operator_extra_volume_mounts:

 ## Choose Cilium version

 ```yml
-cilium_version: v1.11.3
+cilium_version: v1.12.1
 ```

 ## Add variable to config
@@ -153,3 +153,32 @@ cilium_hubble_metrics:
 ```

 [More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics)
+
+## Upgrade considerations
+
+### Rolling-restart timeouts
+
+Cilium relies on the kernel's BPF support, which is extremely fast at runtime but incurs a compilation penalty on initialization and update.
+
+As a result, the Cilium DaemonSet pods can take a significant time to start, which scales with the number of nodes and endpoints in your cluster.
+
+As part of cluster.yml, this DaemonSet is restarted, and Kubespray's [default timeouts for this operation](../roles/network_plugin/cilium/defaults/main.yml)
+are not appropriate for large clusters.
+
+This means that you will likely want to update these timeouts to a value more in-line with your cluster's number of nodes and their respective CPU performance.
+This is configured by the following values:
+
+```yaml
+# Configure how long to wait for the Cilium DaemonSet to be ready again
+cilium_rolling_restart_wait_retries_count: 30
+cilium_rolling_restart_wait_retries_delay_seconds: 10
+```
+
+The total time allowed (count * delay) should be at least `($number_of_nodes_in_cluster * $cilium_pod_start_time)` for successful rolling updates. There are no
+drawbacks to making it higher and giving yourself a time buffer to accommodate transient slowdowns.
+
+Note: To find the `$cilium_pod_start_time` for your cluster, you can simply restart a Cilium pod on a node of your choice and look at how long it takes for it
+to become ready.
+
+Note 2: The default CPU requests/limits for Cilium pods is set to a very conservative 100m:500m which will likely yield very slow startup for Cilium pods. You
+probably want to significantly increase the CPU limit specifically if short bursts of CPU from Cilium are acceptable to you.
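As a worked example of that sizing rule (all numbers are illustrative, not recommendations): on a 120-node cluster where a restarted Cilium pod takes roughly 25 seconds to become ready, the budget should be at least 120 * 25 = 3000 seconds, which a 10-second delay covers with 300 retries:

```yaml
# Illustrative sizing for ~120 nodes at ~25s per pod; 300 * 10s = 3000s budget
cilium_rolling_restart_wait_retries_count: 300
cilium_rolling_restart_wait_retries_delay_seconds: 10
```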
@@ -39,4 +39,65 @@ containerd_registries:
 image_command_tool: crictl
 ```

+### Containerd Runtimes
+
+Containerd supports multiple runtime configurations that can be used with
+[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the
+details of containerd configuration.
+
+In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary:
+
+```yaml
+containerd_runc_runtime:
+  name: runc
+  type: "io.containerd.runc.v2"
+  engine: ""
+  root: ""
+  options:
+    systemdCgroup: "false"
+    binaryName: /usr/local/bin/my-runc
+  base_runtime_spec: cri-base.json
+```
+
+Further runtimes can be configured with `containerd_additional_runtimes`, which
+is a list of such dictionaries.
+
+Default runtime can be changed by setting `containerd_default_runtime`.
+
+#### base_runtime_spec
+
+`base_runtime_spec` key in a runtime dictionary can be used to explicitly
+specify a runtime spec json file. We ship the default one which is generated
+with `ctr oci spec > /etc/containerd/cri-base.json`. It will be used if you set
+`base_runtime_spec: cri-base.json`. The main advantage of doing so is the presence of
+`rlimits` section in this configuration, which will restrict the maximum number
+of file descriptors (open files) per container to 1024.
+
+You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:
+
+```yaml
+containerd_base_runtime_specs:
+  cri-spec-custom.json: |
+    {
+      "ociVersion": "1.0.2-dev",
+      "process": {
+        "user": {
+          "uid": 0,
+    ...
+```
+
+The files in this dict will be placed in containerd config directory,
+`/etc/containerd` by default. The files can then be referenced by filename in a
+runtime:
+
+```yaml
+containerd_runc_runtime:
+  name: runc
+  base_runtime_spec: cri-spec-custom.json
+  ...
+```
+
+[containerd]: https://containerd.io/
+[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
+[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
+[runtime-spec]: https://github.com/opencontainers/runtime-spec
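For illustration, an entry in the `containerd_additional_runtimes` list described above could look like the following sketch (the runsc/gVisor name, type, and values are hypothetical examples, not shipped defaults):

```yaml
# Hypothetical extra runtime; adjust name/type to the runtime you actually install
containerd_additional_runtimes:
  - name: runsc
    type: "io.containerd.runsc.v1"
    engine: ""
    root: ""
containerd_default_runtime: "runc"  # keep runc as the cluster default
```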
@@ -3,34 +3,39 @@
 Debian Jessie installation Notes:

 - Add

-  ```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
-
-  to /etc/default/grub. Then update with
-
-  ```ShellSession
-  sudo update-grub
-  sudo update-grub2
-  sudo reboot
+  ```ini
+  GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
+  ```
+
+  to `/etc/default/grub`. Then update with
+
+  ```ShellSession
+  sudo update-grub
+  sudo update-grub2
+  sudo reboot
+  ```

 - Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.

-  ```apt-get -t jessie-backports install systemd```
+  ```ShellSession
+  apt-get -t jessie-backports install systemd
+  ```

   (Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)

 - Add the Ansible repository and install Ansible to get a proper version

   ```ShellSession
   sudo add-apt-repository ppa:ansible/ansible
   sudo apt-get update
   sudo apt-get install ansible
   ```

 - Install Jinja2 and Python-Netaddr

-  ```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
+  ```ShellSession
+  sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr
+  ```

 Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
@@ -19,6 +19,14 @@ ndots value to be used in ``/etc/resolv.conf``

 It is important to note that multiple search domains combined with high ``ndots``
 values lead to poor performance of DNS stack, so please choose it wisely.

+## dns_timeout
+
+timeout value to be used in ``/etc/resolv.conf``
+
+## dns_attempts
+
+attempts value to be used in ``/etc/resolv.conf``
+
 ### searchdomains

 Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
@@ -26,6 +34,8 @@ Custom search domains to be added in addition to the cluster search domains (``d

 Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
 to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.

+`remove_default_searchdomains: true` will remove the default cluster search domains.
+
 Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
 additional search domains. Please take this into the accounts for the limits.
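Taken together, a hypothetical override block for these resolver settings might look like this (every value below is illustrative, not a default):

```yaml
# Illustrative values only
dns_timeout: 2                      # timeout in /etc/resolv.conf
dns_attempts: 2                     # attempts in /etc/resolv.conf
searchdomains:                      # hypothetical extra search domains
  - corp.example.com
remove_default_searchdomains: true  # drop the default cluster search domains
```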
@@ -62,6 +72,13 @@ coredns_external_zones:
   nameservers:
   - 192.168.0.53
   cache: 0
+- zones:
+  - mydomain.tld
+  nameservers:
+  - 10.233.0.3
+  cache: 5
+  rewrite:
+  - name stop website.tld website.namespace.svc.cluster.local
 ```

 or as INI
@@ -263,7 +280,8 @@ nodelocaldns_secondary_skew_seconds: 5

 * the ``searchdomains`` have a limitation of a 6 names and 256 chars
   length. Due to default ``svc, default.svc`` subdomains, the actual
-  limits are a 4 names and 239 chars respectively.
+  limits are a 4 names and 239 chars respectively. If `remove_default_searchdomains: true`
+  added you are back to 6 names.

 * the ``nameservers`` have a limitation of a 3 servers, although there
   is a way to mitigate that with the ``upstream_dns_servers``,
@@ -54,7 +54,7 @@ Prepare ignition and serve via http (a.e. python -m http.server )

 ### create guest

-```shell script
+```ShellSession
 machine_name=myfcos1
 ignition_url=http://mywebserver/fcos.ign
@@ -2,15 +2,19 @@

 Google Cloud Platform can be used for creation of Kubernetes Service Load Balancer.

-This feature is able to deliver by adding parameters to kube-controller-manager and kubelet. You need specify:
+This feature is able to deliver by adding parameters to `kube-controller-manager` and `kubelet`. You need specify:

 ```ShellSession
 --cloud-provider=gce
 --cloud-config=/etc/kubernetes/cloud-config
 ```

-To get working it in kubespray, you need to add tag to GCE instances and specify it in kubespray group vars and also set cloud_provider to gce. So for example, in file group_vars/all/gcp.yml:
+To get working it in kubespray, you need to add tag to GCE instances and specify it in kubespray group vars and also set `cloud_provider` to `gce`. So for example, in file `group_vars/all/gcp.yml`:

 ```yaml
 cloud_provider: gce
 gce_node_tags: k8s-lb
 ```

-When you will setup it and create SVC in Kubernetes with type=LoadBalancer, cloud provider will create public IP and will set firewall.
+When you will setup it and create SVC in Kubernetes with `type=LoadBalancer`, cloud provider will create public IP and will set firewall.
 Note: Cloud provider run under VM service account, so this account needs to have correct permissions to be able to create all GCP resources.
@@ -74,7 +74,6 @@ kube_kubeadm_scheduler_extra_args:
 etcd_deployment_type: kubeadm

 ## kubelet
-kubelet_authorization_mode_webhook: true
 kubelet_authentication_token_webhook: true
 kube_read_only_port: 0
 kubelet_rotate_server_certificates: true

@@ -85,10 +84,22 @@ kubelet_streaming_connection_idle_timeout: "5m"
 kubelet_make_iptables_util_chains: true
 kubelet_feature_gates: ["RotateKubeletServerCertificate=true","SeccompDefault=true"]
 kubelet_seccomp_default: true
+kubelet_systemd_hardening: true
+# In case you have multiple interfaces in your
+# control plane nodes and you want to specify the right
+# IP addresses, kubelet_secure_addresses allows you
+# to specify the IP from which the kubelet
+# will receive the packets.
+kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"

 # additional configurations
 kube_owner: root
 kube_cert_group: root
+
+# create a default Pod Security Configuration and deny running of insecure pods
+# kube_system namespace is exempted by default
+kube_pod_security_use_default: true
+kube_pod_security_default_enforce: restricted
 ```

 Let's take a deep look to the resultant **kubernetes** configuration:

@@ -98,6 +109,8 @@ Let's take a deep look to the resultant **kubernetes** configuration:
 * The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
 * The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>).
 * If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
+* The `kubelet_systemd_hardening`, both with `kubelet_secure_addresses` setup a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
+  

 Once you have the file properly filled, you can run the **Ansible** command to start the installation:
docs/img/kubelet-hardening.png (new binary file, not shown; size 1.5 MiB)
@@ -6,84 +6,100 @@
 * List of all forked repos could be retrieved from github page of original project.

 2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
-  ```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
-  Git will create `.gitmodules` file in your existent ansible repo:
+
+   ```ShellSession
+   git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray
+   ```
+
+   Git will create `.gitmodules` file in your existent ansible repo:

    ```ini
    [submodule "3d/kubespray"]
-   path = 3d/kubespray
-   url = https://github.com/YOUR_GITHUB/kubespray.git
+      path = 3d/kubespray
+      url = https://github.com/YOUR_GITHUB/kubespray.git
    ```

 3. Configure git to show submodule status:
-  ```git config --global status.submoduleSummary true```
+
+   ```ShellSession
+   git config --global status.submoduleSummary true
+   ```

 4. Add *original* kubespray repo as upstream:
-  ```cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```
+
+   ```ShellSession
+   cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git
+   ```

 5. Sync your master branch with upstream:

    ```ShellSession
-git checkout master
-git fetch upstream
-git merge upstream/master
-git push origin master
+   git checkout master
+   git fetch upstream
+   git merge upstream/master
+   git push origin master
    ```

 6. Create a new branch which you will use in your working environment:
-  ```git checkout -b work```
+
+   ```ShellSession
+   git checkout -b work
+   ```

    ***Never*** use master branch of your repository for your commits.

 7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have same names as kubespray project),
    if you had roles in your existing ansible project before, you can add the path to those separated with `:`:

-8. ```ini
+   ```ini
    ...
    library = ./library/:3d/kubespray/library/
    roles_path = ./roles/:3d/kubespray/roles/
    ...
    ```

-9. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
-You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
+8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
+
+   You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.

-10. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
+9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
    For example:

-```ini
-...
-#Kargo groups:
-[kube_node:children]
-kubenode
+   ```ini
+   ...
+   #Kubespray groups:
+   [kube_node:children]
+   kubenode

-[k8s_cluster:children]
-kubernetes
+   [k8s_cluster:children]
+   kubernetes

-[etcd:children]
-kubemaster
-kubemaster-ha
+   [etcd:children]
+   kubemaster
+   kubemaster-ha

-[kube_control_plane:children]
-kubemaster
-kubemaster-ha
+   [kube_control_plane:children]
+   kubemaster
+   kubemaster-ha

-[kubespray:children]
-kubernetes
-```
+   [kubespray:children]
+   kubernetes
+   ```

-* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
+   * Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.

-11. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
+10. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:

-```yml
-- name: Import kubespray playbook
-ansible.builtin.import_playbook: 3d/kubespray/cluster.yml
-```
+    ```yml
+    - name: Import kubespray playbook
+      ansible.builtin.import_playbook: 3d/kubespray/cluster.yml
+    ```

-Or your could copy separate tasks from cluster.yml into your ansible repository.
+    Or your could copy separate tasks from cluster.yml into your ansible repository.

-12. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
-When you update your "work" branch you need to commit changes to ansible repo as well.
+11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
+
+    When you update your "work" branch you need to commit changes to ansible repo as well.
    Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.

 ## Contributing
@@ -95,37 +111,78 @@ If you made useful changes or fixed a bug in existent kubespray repo, use this f
 2. Change working directory to git submodule directory (3d/kubespray).

 3. Setup desired user.name and user.email for submodule.
-If kubespray is only one submodule in your repo you could use something like:
-```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```
+
+   If kubespray is only one submodule in your repo you could use something like:
+
+   ```ShellSession
+   git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'
+   ```

 4. Sync with upstream master:

-```ShellSession
-git fetch upstream
-git merge upstream/master
-git push origin master
-```
+   ```ShellSession
+   git fetch upstream
+   git merge upstream/master
+   git push origin master
+   ```

 5. Create new branch for the specific fixes that you want to contribute:
-```git checkout -b fixes-name-date-index```
-Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
+
+   ```ShellSession
+   git checkout -b fixes-name-date-index
+   ```
+
+   Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.

 6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:

-```ShellSession
-git cherry-pick <COMMIT_HASH>
-```
+   ```ShellSession
+   git cherry-pick <COMMIT_HASH>
+   ```

-7. If you have several temporary-stage commits - squash them using [```git rebase -i```](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
-Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
+7. If you have several temporary-stage commits - squash them using [git rebase -i](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
+
+   Also you could use interactive rebase
+
+   ```ShellSession
+   git rebase -i HEAD~10
+   ```
+
+   to delete commits which you don't want to contribute into original repo.

 8. When your changes is in place, you need to check upstream repo one more time because it could be changed during your work.
-Check that you're on correct branch:
-```git status```
-And pull changes from upstream (if any):
-```git pull --rebase upstream master```
+
+   Check that you're on correct branch:
+
+   ```ShellSession
+   git status
+   ```
+
+   And pull changes from upstream (if any):
+
+   ```ShellSession
+   git pull --rebase upstream master
+   ```

-9. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exists on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
+9. Now push your changes to your **fork** repo with
+
+   ```ShellSession
+   git push
+   ```
+
+   If your branch doesn't exists on github, git will propose you to use something like
+
+   ```ShellSession
+   git push --set-upstream origin fixes-name-date-index
+   ```

-10. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start whole process from the beginning.
-If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
+10. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using
+
+    ```ShellSession
+    git push origin --delete fixes-name-date-index
+    git branch -D fixes-name-date-index
+    ```
+
+    and start whole process from the beginning.
+
+    If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
@@ -2,6 +2,14 @@

 kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.

+## Prerequisites
+
+You have to configure `kube_proxy_strict_arp` when the kube_proxy_mode is `ipvs` and kube-vip ARP is enabled.
+
+```yaml
+kube_proxy_strict_arp: true
+```
+
 ## Install

 You have to explicitly enable the kube-vip extension:

@@ -11,7 +19,7 @@ kube_vip_enabled: true
 ```

 You also need to enable
-[kube-vip as HA, Load Balancer, or both](https://kube-vip.chipzoller.dev/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
+[kube-vip as HA, Load Balancer, or both](https://kube-vip.io/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):

 ```yaml
 # HA for control-plane, requires a VIP

@@ -28,16 +36,16 @@ kube_vip_services_enabled: false
 ```

 > Note: When using `kube-vip` as LoadBalancer for services,
-[additionnal manual steps](https://kube-vip.chipzoller.dev/docs/usage/cloud-provider/)
+[additional manual steps](https://kube-vip.io/docs/usage/cloud-provider/)
 are needed.

-If using [ARP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#arp) :
+If using [ARP mode](https://kube-vip.io/docs/installation/static/#arp) :

 ```yaml
 kube_vip_arp_enabled: true
 ```

-If using [BGP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#bgp) :
+If using [BGP mode](https://kube-vip.io/docs/installation/static/#bgp) :

 ```yaml
 kube_vip_bgp_enabled: true
@@ -29,8 +29,7 @@ use Kubernetes's `PersistentVolume` abstraction. The following template is
 expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
 other situations:

-<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
-``` yaml
+```yaml
 kind: PersistentVolume
 apiVersion: v1
 metadata:

@@ -46,7 +45,6 @@ spec:
     fsType: "ext4"
 {% endif %}
 ```
-<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->

 If, for example, you wanted to use NFS you would just need to change the
 `gcePersistentDisk` block to `nfs`. See

@@ -68,8 +66,7 @@ Now that the Kubernetes cluster knows that some storage exists, you can put a
 claim on that storage. As with the `PersistentVolume` above, you can start
 with the `salt` template:

-<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
-``` yaml
+```yaml
 kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:

@@ -82,7 +79,6 @@ spec:
     requests:
       storage: {{ pillar['cluster_registry_disk_size'] }}
 ```
-<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->

 This tells Kubernetes that you want to use storage, and the `PersistentVolume`
 you created before will be bound to this claim (unless you have other

@@ -93,8 +89,7 @@ gives you the right to use this storage until you release the claim.

 Now we can run a Docker registry:

-<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
-``` yaml
+```yaml
 apiVersion: v1
 kind: ReplicationController
 metadata:

@@ -138,7 +133,6 @@ spec:
         persistentVolumeClaim:
           claimName: kube-registry-pvc
 ```
-<!-- END MUNGE: EXAMPLE registry-rc.yaml -->

 *Note:* that if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.

@@ -146,8 +140,7 @@ spec:

 Now that we have a registry `Pod` running, we can expose it as a Service:

-<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
-``` yaml
+```yaml
 apiVersion: v1
 kind: Service
 metadata:

@@ -164,7 +157,6 @@ spec:
     port: 5000
     protocol: TCP
 ```
-<!-- END MUNGE: EXAMPLE registry-svc.yaml -->

 ## Expose the registry on each node

@@ -172,8 +164,7 @@ Now that we have a running `Service`, we need to expose it onto each Kubernetes
 `Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
 node by creating following daemonset.

-<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
-``` yaml
+```yaml
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:

@@ -207,7 +198,6 @@ spec:
         containerPort: 80
         hostPort: 5000
 ```
-<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->

 When modifying replication-controller, service and daemon-set definitions, take
 care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.

@@ -219,7 +209,7 @@ This ensures that port 5000 on each node is directed to the registry `Service`.
 You should be able to verify that it is running by hitting port 5000 with a web
 browser and getting a 404 error:

-``` console
+```ShellSession
 $ curl localhost:5000
 404 page not found
 ```

@@ -229,7 +219,7 @@ $ curl localhost:5000
 To use an image hosted by this registry, simply say this in your `Pod`'s
 `spec.containers[].image` field:

-``` yaml
+```yaml
 image: localhost:5000/user/container
 ```

@@ -241,7 +231,7 @@ building locally and want to push to your cluster.
 You can use `kubectl` to set up a port-forward from your local node to a
 running Pod:

-``` console
+```ShellSession
 $ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
   -o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
   | grep Running | head -1 | cut -f1 -d' ')
@@ -2,7 +2,7 @@

MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
The default operationg mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.

## Prerequisites

@@ -19,6 +19,7 @@ You have to explicitly enable the MetalLB extension and set an IP address range

```yaml
metallb_enabled: true
metallb_speaker_enabled: true
metallb_avoid_buggy_ips: true
metallb_ip_range:
  - 10.5.0.0/16
```
@@ -69,16 +70,17 @@ metallb_peers:

When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
In this scenarion you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`

```yaml
metallb_speaker_enabled: false
metallb_avoid_buggy_ips: true
metallb_ip_range:
  - 10.5.0.0/16
calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}"
```

If you have additional loadbalancer IP pool in `metallb_additional_address_pools`, ensure to add them to the list.
If you have additional loadbalancer IP pool in `metallb_additional_address_pools` , ensure to add them to the list.

```yaml
metallb_speaker_enabled: false
@@ -90,11 +92,13 @@ metallb_additional_address_pools:
      - 10.6.0.0/16
    protocol: "bgp"
    auto_assign: false
    avoid_buggy_ips: true
  kube_service_pool_2:
    ip_range:
      - 10.10.0.0/16
    protocol: "bgp"
    auto_assign: false
    avoid_buggy_ips: true
calico_advertise_service_loadbalancer_ips:
  - 10.5.0.0/16
  - 10.6.0.0/16
```
@@ -124,7 +124,7 @@ to
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars, as in the sketch below.
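
A minimal sketch of those extra-vars (the node name is hypothetical):

```yaml
# extra-vars for remove-node.yml when the node is offline
node: node-1                    # hypothetical name of the node to remove
reset_nodes: false              # the node cannot be reached, skip resetting it
allow_ungraceful_removal: true  # proceed even though the node cannot be drained
```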

### 3) Edit cluster-info configmap in kube-system namespace
### 3) Edit cluster-info configmap in kube-public namespace

`kubectl edit cm -n kube-public cluster-info`

docs/ntp.md

@@ -12,7 +12,7 @@ ntp_enabled: true

The NTP service will be enabled and will sync time automatically.

## Custimize the NTP configure file
## Customize the NTP configuration file

In an air-gapped environment the nodes cannot reach public NTP servers over the internet, so you can point them at a customized NTP server through the ntp configuration file.

@@ -26,6 +26,15 @@ ntp_servers:
  - "3.your-ntp-server.org iburst"
```

## Setting the TimeZone

The timezone can also be set via `ntp_timezone`, e.g. "Etc/UTC" or "Asia/Shanghai". If not set, the timezone will not change.

```yaml
ntp_enabled: true
ntp_timezone: Etc/UTC
```

## Advanced Configuration

Enabling `tinker panic` is useful when running NTP in a VM environment, to avoid clock drift on VMs. It only takes effect when `ntp_manage_config` is true.
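
Assuming the role exposes the tinker panic toggle alongside `ntp_manage_config` (the variable name below is an assumption), a minimal sketch:

```yaml
ntp_manage_config: true
ntp_tinker_panic: true  # assumed variable name for the "tinker panic" toggle
```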

@@ -34,52 +34,6 @@ Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expecte

Unless you are using calico or kube-router you can now run the playbook.

**Additional step needed when using calico or kube-router:**

Being L3 CNIs, calico and kube-router do not encapsulate all packets with the hosts' IP addresses. Instead, the packets are routed with the pods' IP addresses directly.

OpenStack will filter and drop all packets from IPs it does not know, to prevent spoofing.

In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow the pods' packets by allowing the network they use.

First you will need the IDs of your OpenStack instances that will run kubernetes:

```bash
openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID                                   | Name   | Tenant ID                        | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
```

Then you can use the instance IDs to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though these are now configured through OpenStack):

```bash
openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id                                   | device_id                            |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
```

Given the port IDs on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`).

```bash
# allow kube_service_addresses and kube_pods_subnet network
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```

If all the VMs in the tenant belong to the Kubespray deployment, you can apply the above to all ports in one sweep:

```bash
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```

Now you can finally run the playbook.

## The external cloud provider

The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21 (see the sketch below for opting into the external provider).
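
A sketch of the group vars that opt into the external OpenStack provider, assuming the `cloud_provider` and `external_cloud_provider` variables:

```yaml
cloud_provider: external
external_cloud_provider: openstack
```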

@@ -156,3 +110,49 @@ The new cloud provider is configured to have Octavia by default in Kubespray.

- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
- Run the `cluster.yml` playbook

## Additional step needed when using calico or kube-router

Being L3 CNIs, calico and kube-router do not encapsulate all packets with the hosts' IP addresses. Instead, the packets are routed with the pods' IP addresses directly.

OpenStack will filter and drop all packets from IPs it does not know, to prevent spoofing.

In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow the pods' packets by allowing the network they use.

First you will need the IDs of your OpenStack instances that will run kubernetes:

```bash
openstack server list --project YOUR_PROJECT
+--------------------------------------+--------+----------------------------------+--------+-------------+
| ID                                   | Name   | Tenant ID                        | Status | Power State |
+--------------------------------------+--------+----------------------------------+--------+-------------+
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
```

Then you can use the instance IDs to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though these are now configured through OpenStack):

```bash
openstack port list -c id -c device_id --project YOUR_PROJECT
+--------------------------------------+--------------------------------------+
| id                                   | device_id                            |
+--------------------------------------+--------------------------------------+
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
```

Given the port IDs on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`).

```bash
# allow kube_service_addresses and kube_pods_subnet network
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```

If all the VMs in the tenant belong to the Kubespray deployment, you can apply the above to all ports in one sweep:

```bash
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```

Now you can finally run the playbook.

@@ -252,11 +252,7 @@ Ansible will now execute the playbook, this can take up to 20 minutes.
We will leverage a kubeconfig file from one of the controller nodes to access
the cluster as administrator from our local workstation.

> In this simplified set-up, we did not include a load balancer that usually
sits on top of the
three controller nodes for a high available API server endpoint. In this
simplified tutorial we connect directly to one of the three
controllers.
> In this simplified set-up, we did not include a load balancer that usually sits on top of the three controller nodes for a highly available API server endpoint. In this simplified tutorial we connect directly to one of the three controllers.

First, we need to edit the permission of the kubeconfig file on one of the
controller nodes:

@@ -58,7 +58,7 @@ see [download documentation](/docs/downloads.md).

The following is an example of setting up and running kubespray using `vagrant`.
For repeated runs, you could save the script to a file in the root of the
kubespray and run it by executing 'source <name_of_the_file>.
kubespray and run it by executing `source <name_of_the_file>`.

```ShellSession
# use virtualenv to install all python requirements
```
docs/vars.md

@@ -15,7 +15,7 @@ Some variables of note include:

* *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use
* *docker_version* - Specify version of Docker to used (should be quoted
* *docker_version* - Specify version of Docker to use (should be quoted
  string). Must match one of the keys defined for *docker_versioned_pkg*
  in `roles/container-engine/docker/vars/*.yml`.
* *containerd_version* - Specify version of containerd to use when setting `container_manager` to `containerd`
@@ -28,6 +28,7 @@ Some variables of note include:
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
* *kube_version* - Specify a given Kubernetes version
* *searchdomains* - Array of DNS domains to search when looking up hostnames
* *remove_default_searchdomains* - Boolean that removes the default searchdomain
* *nameservers* - Array of nameservers to use for DNS lookup
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive, enforcing and disabled.

@@ -81,7 +82,7 @@ following default cluster parameters:
  raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
  (assertion not applicable to calico, which doesn't use this as a hard limit; see
  [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes)).

* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.

* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
@@ -99,7 +100,7 @@ following default cluster parameters:

* *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled
  (default is k8s_external.local)

* *enable_coredns_k8s_endpoint_pod_names* - If enabled, configures the endpoint_pod_names option for the kubernetes plugin
  on the CoreDNS service.

@@ -166,6 +167,7 @@ variables to match your requirements.
  addition to Kubespray deployed DNS
* *nameservers* - Array of DNS servers configured for use by hosts
* *searchdomains* - Array of up to 4 search domains
* *remove_default_searchdomains* - Boolean. If enabled, `searchdomains` variable can hold 6 search domains.
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
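  As a sketch, `dns_etchosts` takes the literal hosts-file content (the hostnames below are hypothetical):

  ```yaml
  dns_etchosts: |
    192.168.0.100 mirror.example.local
    192.168.0.101 registry.example.local
  ```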

For more information, see [DNS
@@ -175,25 +177,46 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m

* *docker_options* - Commonly used to set
  ``--insecure-registry=myregistry.mydomain:5000``

* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.

* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.

* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
  [Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.

* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
  proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
  that correspond to each node.

* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
  By default autodetection is used to match container manager configuration.
  `systemd` is the preferred driver for `containerd`, though it can have issues with `cgroups v1` and `kata-containers`, in which case you may want to change to `cgroupfs` (see the sketch below).
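  A minimal sketch of that override:

  ```yaml
  kubelet_cgroup_driver: cgroupfs  # bypass autodetection, e.g. for kata-containers on cgroups v1
  ```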

* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
  from the kube-apiserver when the certificate expiration approaches.

* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
  from the kube-apiserver when the certificate expiration approaches.
  **Note** that server certificates are **not** approved automatically. Approve them manually
  (`kubectl get csr`, `kubectl certificate approve`) or implement a custom approving controller like
  [kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp).
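  A sketch enabling both rotations described above:

  ```yaml
  kubelet_rotate_certificates: true
  kubelet_rotate_server_certificates: true  # server CSRs still need manual approval
  ```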

* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed.

* *kubelet_make_iptables_util_chains* - If `true`, the kubelet ensures that a set of `iptables` rules is present on the host.

* *kubelet_systemd_hardening* - If `true`, provides the kubelet systemd service with security features for isolation.

  **N.B.** To enable this feature, ensure your system is using **`cgroup v2`**. Check with the command `sudo ls -l /sys/fs/cgroup/*.slice`. If the directory does not exist, enable cgroup v2 by following this guide: [enable cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/#enabling-cgroup-v2).

* *kubelet_secure_addresses* - By default *kubelet_systemd_hardening* sets the **control plane** `ansible_host` IPs as the `kubelet_secure_addresses`. In case you have multiple interfaces in your control plane nodes and the `kube-apiserver` is not bound to the default interface, you can override them with this variable.
  Example:

  The **control plane** node may have 2 interfaces with the following IP addresses: `eth0:10.0.0.110`, `eth1:192.168.1.110`.

  By default `kubelet_secure_addresses` is set to `10.0.0.110`, because the Ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable like this: `kubelet_secure_addresses: "192.168.1.110"`.

* *node_labels* - Labels applied to nodes via the kubelet --node-labels parameter.
  For example, labels can be set in the inventory as variables or more widely in group_vars.
  *node_labels* can only be defined as a dict:
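  For instance, a sketch with hypothetical labels:

  ```yaml
  node_labels:
    node-role.kubernetes.io/ingress: ""  # hypothetical role label
    my.example.com/tier: production      # hypothetical custom label
  ```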

@@ -31,12 +31,13 @@ You need to source the vSphere credentials you use to deploy your machines that

| vsphere_csi_controller_replicas | TRUE | integer | | 1 | Number of pods Kubernetes should deploy for the CSI controller |
| vsphere_csi_liveness_probe_image_tag | TRUE | string | | "v2.2.0" | CSI liveness probe image tag to use |
| vsphere_csi_provisioner_image_tag | TRUE | string | | "v2.1.0" | CSI provisioner image tag to use |
| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrat image tag to use |
| vsphere_csi_node_driver_registrar_image_tag | TRUE | string | | "v1.1.0" | CSI node driver registrar image tag to use |
| vsphere_csi_driver_image_tag | TRUE | string | | "v1.0.2" | CSI driver image tag to use |
| vsphere_csi_resizer_tag | TRUE | string | | "v1.1.0" | CSI resizer image tag to use |
| vsphere_csi_aggressive_node_drain | FALSE | boolean | | false | Enable aggressive node drain strategy |
| vsphere_csi_aggressive_node_unreachable_timeout | FALSE | int | 300 | | Timeout until the node is drained when it is in an unreachable state |
| vsphere_csi_aggressive_node_not_ready_timeout | FALSE | int | 300 | | Timeout until the node is drained when it is in a not-ready state |
| vsphere_csi_namespace | TRUE | string | | "kube-system" | vSphere CSI namespace to use; kube-system for backward compatibility, should be changed to vmware-system-csi in the long run |
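
A sketch of minimal inventory settings based on the table above; `vsphere_csi_enabled` is assumed to be the toggle for the whole feature:

```yaml
vsphere_csi_enabled: true
vsphere_csi_controller_replicas: 1
vsphere_csi_namespace: "kube-system"  # switch to vmware-system-csi for new setups
```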

## Usage example

@@ -130,3 +130,6 @@ ntp_servers:
  - "1.pool.ntp.org iburst"
  - "2.pool.ntp.org iburst"
  - "3.pool.ntp.org iburst"

## Used to control no_log attribute
unsafe_show_logs: false

@@ -82,8 +82,8 @@
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY'

## Ubuntu

@@ -7,13 +7,18 @@
# upcloud_csi_provisioner_image_tag: "v3.1.0"
# upcloud_csi_attacher_image_tag: "v3.4.0"
# upcloud_csi_resizer_image_tag: "v1.4.0"
# upcloud_csi_plugin_image_tag: "v0.2.1"
# upcloud_csi_plugin_image_tag: "v0.3.3"
# upcloud_csi_node_image_tag: "v2.5.0"
# upcloud_tolerations: []
## Storage class options
# expand_persistent_volumes: true
# parameters:
#   tier: maxiops # or hdd
# storage_classes:
#   - name: standard
#     is_default: true
#     expand_persistent_volumes: true
#     parameters:
#       tier: maxiops
#   - name: hdd
#     is_default: false
#     expand_persistent_volumes: true
#     parameters:
#       tier: hdd

@@ -166,6 +166,7 @@ metallb_speaker_enabled: true
#   - "10.5.0.50-10.5.0.99"
# metallb_pool_name: "loadbalanced"
# metallb_auto_assign: true
# metallb_avoid_buggy_ips: false
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
@@ -198,6 +199,7 @@ metallb_speaker_enabled: true
#     - "10.5.1.50-10.5.1.99"
#     protocol: "layer2"
#     auto_assign: false
#     avoid_buggy_ips: false
# metallb_protocol: "bgp"
# metallb_peers:
#   - peer_address: 192.0.2.1
@@ -207,9 +209,8 @@ metallb_speaker_enabled: true
#     peer_asn: 64513
#     my_asn: 4200000000

argocd_enabled: false
# argocd_version: v2.4.7
# argocd_version: v2.4.12
# argocd_namespace: argocd
# Default password:
#   - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli

@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.24.3
kube_version: v1.24.6

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -125,7 +125,7 @@ kube_apiserver_port: 6443 # (https)
kube_proxy_mode: ipvs

# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB to work
# must be set to true for MetalLB, kube-vip(ARP enabled) to work
kube_proxy_strict_arp: false

# A string slice of values which specify the addresses to use for NodePorts.
@@ -160,6 +160,14 @@ kube_encrypt_secret_data: false
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# dns_timeout: 2
# dns_attempts: 2
# Custom search domains to be added in addition to the default cluster search domains
# searchdomains:
#   - svc.{{ cluster_name }}
#   - default.svc.{{ cluster_name }}
# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
# remove_default_searchdomains: false
# Can be coredns, coredns_dual, manual or none
dns_mode: coredns
# Set manual server if using a custom cluster DNS server
@@ -185,6 +193,13 @@ nodelocaldns_secondary_skew_seconds: 5
#   nameservers:
#     - 192.168.0.53
#   cache: 0
# - zones:
#     - mydomain.tld
#   nameservers:
#     - 10.233.0.3
#   cache: 5
#   rewrite:
#     - name website.tld website.namespace.svc.cluster.local
# Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local

@@ -60,7 +60,7 @@ calico_pool_blocksize: 26
#   - x.x.x.x/24
#   - y.y.y.y/32

# Adveritse Service LoadBalancer IPs
# Advertise Service LoadBalancer IPs
# calico_advertise_service_loadbalancer_ips:
#   - x.x.x.x/24
#   - y.y.y.y/16
@@ -99,7 +99,7 @@ calico_pool_blocksize: 26
# calico_vxlan_vni: 4096
# calico_vxlan_port: 4789

# Cenable eBPF mode
# Enable eBPF mode
# calico_bpf_enabled: false

# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of:

@@ -1,5 +1,5 @@
---
# cilium_version: "v1.11.7"
# cilium_version: "v1.12.1"

# Log-level
# cilium_debug: false
@@ -118,6 +118,7 @@
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
# cilium_ip_masq_agent_enable: false

### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
# cilium_non_masquerade_cidrs:
#   - 10.0.0.0/8

@@ -7,13 +7,13 @@ addusers:
  etcd:
    name: etcd
    comment: "Etcd user"
    createhome: no
    create_home: no
    system: yes
    shell: /sbin/nologin
  kube:
    name: kube
    comment: "Kubernetes user"
    createhome: no
    create_home: no
    system: yes
    shell: /sbin/nologin
    group: "{{ kube_cert_group }}"
@@ -24,4 +24,4 @@ adduser:
    comment: "{{ user.comment|default(None) }}"
    shell: "{{ user.shell|default(None) }}"
    system: "{{ user.system|default(None) }}"
    createhome: "{{ user.createhome|default(None) }}"
    create_home: "{{ user.create_home|default(None) }}"

@@ -7,10 +7,10 @@
- name: User | Create User
  user:
    comment: "{{ user.comment|default(omit) }}"
    createhome: "{{ user.createhome|default(omit) }}"
    create_home: "{{ user.create_home|default(omit) }}"
    group: "{{ user.group|default(user.name) }}"
    home: "{{ user.home|default(omit) }}"
    shell: "{{ user.shell|default(omit) }}"
    name: "{{ user.name }}"
    system: "{{ user.system|default(omit) }}"
  when: kube_owner != "root"
  when: user.name != "root"

@@ -5,4 +5,4 @@ addusers:
    shell: /sbin/nologin
    system: yes
    group: "{{ kube_cert_group }}"
    createhome: no
    create_home: no

@@ -2,14 +2,14 @@
addusers:
  - name: etcd
    comment: "Etcd user"
    createhome: yes
    create_home: yes
    home: "{{ etcd_data_dir }}"
    system: yes
    shell: /sbin/nologin

  - name: kube
    comment: "Kubernetes user"
    createhome: no
    create_home: no
    system: yes
    shell: /sbin/nologin
    group: "{{ kube_cert_group }}"

@@ -2,14 +2,14 @@
addusers:
  - name: etcd
    comment: "Etcd user"
    createhome: yes
    create_home: yes
    home: "{{ etcd_data_dir }}"
    system: yes
    shell: /sbin/nologin

  - name: kube
    comment: "Kubernetes user"
    createhome: no
    create_home: no
    system: yes
    shell: /sbin/nologin
    group: "{{ kube_cert_group }}"

@@ -25,3 +25,8 @@ override_system_hostname: true
is_fedora_coreos: false

skip_http_proxy_on_os_packages: false

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

@@ -1,5 +1,9 @@
---
# OpenSUSE ships with Python installed
- name: Gather host facts to get ansible_distribution_version ansible_distribution_major_version
  setup:
    gather_subset: '!all'
    filter: ansible_distribution_*version

- name: Check that /etc/sysconfig/proxy file exists
  stat:
@@ -59,6 +63,17 @@
    state: present
    update_cache: true
  become: true
  when:
    - ansible_distribution_version is version('15.4', '<')

- name: Install python3-cryptography
  zypper:
    name: python3-cryptography
    state: present
    update_cache: true
  become: true
  when:
    - ansible_distribution_version is version('15.4', '>=')

# Nerdctl needs some basic packages to get an environment up
- name: Install basic dependencies

@@ -65,7 +65,7 @@
  notify: RHEL auto-attach subscription
  ignore_errors: true  # noqa ignore-errors
  become: true
  no_log: true
  no_log: "{{ not (unsafe_show_logs|bool) }}"
  when:
    - rh_subscription_username is defined
    - rh_subscription_status.changed

@@ -12,6 +12,7 @@ containerd_runc_runtime:
  type: "io.containerd.runc.v2"
  engine: ""
  root: ""
  # base_runtime_spec: cri-base.json  # use this to limit number of file descriptors per container
  options:
    systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"

@@ -22,6 +23,9 @@ containerd_additional_runtimes: []
#   engine: ""
#   root: ""

containerd_base_runtime_specs:
  cri-base.json: "{{ lookup('file', 'cri-base.json') }}"

containerd_grpc_max_recv_message_size: 16777216
containerd_grpc_max_send_message_size: 16777216

@@ -46,3 +50,9 @@ containerd_registry_auth: []
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass

# Configure containerd service
containerd_limit_proc_num: "infinity"
containerd_limit_core: "infinity"
containerd_limit_open_file_num: "infinity"
containerd_limit_mem_lock: "infinity"
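
Tying the two new defaults above together, a sketch of a runc runtime that opts into the shipped base runtime spec (values copied from the defaults shown, with the comment uncommented):

```yaml
containerd_runc_runtime:
  name: runc
  type: "io.containerd.runc.v2"
  engine: ""
  root: ""
  base_runtime_spec: cri-base.json  # rendered from containerd_base_runtime_specs into containerd_cfg_dir
  options:
    systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
```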

roles/container-engine/containerd/files/cri-base.json (new file)

@@ -0,0 +1,214 @@
{
  "ociVersion": "1.0.2-dev",
  "process": {
    "user": {
      "uid": 0,
      "gid": 0
    },
    "cwd": "/",
    "capabilities": {
      "bounding": [
        "CAP_CHOWN",
        "CAP_DAC_OVERRIDE",
        "CAP_FSETID",
        "CAP_FOWNER",
        "CAP_MKNOD",
        "CAP_NET_RAW",
        "CAP_SETGID",
        "CAP_SETUID",
        "CAP_SETFCAP",
        "CAP_SETPCAP",
        "CAP_NET_BIND_SERVICE",
        "CAP_SYS_CHROOT",
        "CAP_KILL",
        "CAP_AUDIT_WRITE"
      ],
      "effective": [
        "CAP_CHOWN",
        "CAP_DAC_OVERRIDE",
        "CAP_FSETID",
        "CAP_FOWNER",
        "CAP_MKNOD",
        "CAP_NET_RAW",
        "CAP_SETGID",
        "CAP_SETUID",
        "CAP_SETFCAP",
        "CAP_SETPCAP",
        "CAP_NET_BIND_SERVICE",
        "CAP_SYS_CHROOT",
        "CAP_KILL",
        "CAP_AUDIT_WRITE"
      ],
      "inheritable": [
        "CAP_CHOWN",
        "CAP_DAC_OVERRIDE",
        "CAP_FSETID",
        "CAP_FOWNER",
        "CAP_MKNOD",
        "CAP_NET_RAW",
        "CAP_SETGID",
        "CAP_SETUID",
        "CAP_SETFCAP",
        "CAP_SETPCAP",
        "CAP_NET_BIND_SERVICE",
        "CAP_SYS_CHROOT",
        "CAP_KILL",
        "CAP_AUDIT_WRITE"
      ],
      "permitted": [
        "CAP_CHOWN",
        "CAP_DAC_OVERRIDE",
        "CAP_FSETID",
        "CAP_FOWNER",
        "CAP_MKNOD",
        "CAP_NET_RAW",
        "CAP_SETGID",
        "CAP_SETUID",
        "CAP_SETFCAP",
        "CAP_SETPCAP",
        "CAP_NET_BIND_SERVICE",
        "CAP_SYS_CHROOT",
        "CAP_KILL",
        "CAP_AUDIT_WRITE"
      ]
    },
    "rlimits": [
      {
        "type": "RLIMIT_NOFILE",
        "hard": 1024,
        "soft": 1024
      }
    ],
    "noNewPrivileges": true
  },
  "root": {
    "path": "rootfs"
  },
  "mounts": [
    {
      "destination": "/proc",
      "type": "proc",
      "source": "proc",
      "options": [
        "nosuid",
        "noexec",
        "nodev"
      ]
    },
    {
      "destination": "/dev",
      "type": "tmpfs",
      "source": "tmpfs",
      "options": [
        "nosuid",
        "strictatime",
        "mode=755",
        "size=65536k"
      ]
    },
    {
      "destination": "/dev/pts",
      "type": "devpts",
      "source": "devpts",
      "options": [
        "nosuid",
        "noexec",
        "newinstance",
        "ptmxmode=0666",
        "mode=0620",
        "gid=5"
      ]
    },
    {
      "destination": "/dev/shm",
      "type": "tmpfs",
      "source": "shm",
      "options": [
        "nosuid",
        "noexec",
        "nodev",
        "mode=1777",
        "size=65536k"
      ]
    },
    {
      "destination": "/dev/mqueue",
      "type": "mqueue",
      "source": "mqueue",
      "options": [
        "nosuid",
        "noexec",
        "nodev"
      ]
    },
    {
      "destination": "/sys",
      "type": "sysfs",
      "source": "sysfs",
      "options": [
        "nosuid",
        "noexec",
        "nodev",
        "ro"
      ]
    },
    {
      "destination": "/run",
      "type": "tmpfs",
      "source": "tmpfs",
      "options": [
        "nosuid",
        "strictatime",
        "mode=755",
        "size=65536k"
      ]
    }
  ],
  "linux": {
    "resources": {
      "devices": [
        {
          "allow": false,
          "access": "rwm"
        }
      ]
    },
    "cgroupsPath": "/default",
    "namespaces": [
      {
        "type": "pid"
      },
      {
        "type": "ipc"
      },
      {
        "type": "uts"
      },
      {
        "type": "mount"
      },
      {
        "type": "network"
      }
    ],
    "maskedPaths": [
      "/proc/acpi",
      "/proc/asound",
      "/proc/kcore",
      "/proc/keys",
      "/proc/latency_stats",
      "/proc/timer_list",
      "/proc/timer_stats",
      "/proc/sched_debug",
      "/sys/firmware",
      "/proc/scsi"
    ],
    "readonlyPaths": [
      "/proc/bus",
      "/proc/fs",
      "/proc/irq",
      "/proc/sys",
      "/proc/sysrq-trigger"
    ]
  }
}
roles/container-engine/containerd/handlers/reset.yml (new file)

@@ -0,0 +1 @@
---

@@ -31,14 +31,6 @@ platforms:
      - kube_control_plane
      - kube_node
      - k8s_cluster
  - name: opensuse
    box: opensuse/Leap-15.3.x86_64
    cpus: 1
    memory: 1024
    groups:
      - kube_control_plane
      - kube_node
      - k8s_cluster
provisioner:
  name: ansible
  env:

@@ -84,6 +84,15 @@
  notify: restart containerd
  when: http_proxy is defined or https_proxy is defined

- name: containerd | Write base_runtime_specs
  copy:
    content: "{{ item.value }}"
    dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
    owner: "root"
    mode: 0644
  with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
  notify: restart containerd

- name: containerd | Copy containerd config file
  template:
    src: config.toml.j2

@@ -22,19 +22,15 @@ oom_score = {{ containerd_oom_score }}
    default_runtime_name = "{{ containerd_default_runtime | default('runc') }}"
    snapshotter = "{{ containerd_snapshotter | default('overlayfs') }}"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ containerd_runc_runtime.name }}]
    runtime_type = "{{ containerd_runc_runtime.type }}"
    runtime_engine = "{{ containerd_runc_runtime.engine}}"
    runtime_root = "{{ containerd_runc_runtime.root }}"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ containerd_runc_runtime.name }}.options]
{% for key, value in containerd_runc_runtime.options.items() %}
    {{ key }} = {{ value }}
{% endfor %}
{% for runtime in containerd_additional_runtimes %}
{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}]
    runtime_type = "{{ runtime.type }}"
    runtime_engine = "{{ runtime.engine }}"
    runtime_root = "{{ runtime.root }}"
{% if runtime.base_runtime_spec is defined %}
    base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
{% endif %}

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options]
{% for key, value in runtime.options.items() %}
    {{ key }} = {{ value }}
@@ -58,7 +54,9 @@ oom_score = {{ containerd_oom_score }}
{% for registry, addr in containerd_insecure_registries.items() %}
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"]
      endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"]
    [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ registry }}".tls]
{% endfor %}
{% for addr in containerd_insecure_registries.values() | flatten | unique %}
    [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ addr }}".tls]
      insecure_skip_verify = true
{% endfor %}
{% endif %}

@@ -28,9 +28,10 @@ Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
LimitNPROC={{ containerd_limit_proc_num }}
LimitCORE={{ containerd_limit_core }}
LimitNOFILE={{ containerd_limit_open_file_num }}
LimitMEMLOCK={{ containerd_limit_mem_lock }}
# Comment out TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity

@@ -1,10 +1,12 @@
---
- name: restart cri-dockerd
- name: restart and enable cri-dockerd
  command: /bin/true
  notify:
    - cri-dockerd | reload systemd
    - cri-dockerd | restart docker.service
    - cri-dockerd | reload cri-dockerd.socket
    - cri-dockerd | reload cri-dockerd.service
    - cri-dockerd | enable cri-dockerd service

- name: cri-dockerd | reload systemd
  systemd:
@@ -12,6 +14,11 @@
    daemon_reload: true
    masked: no

- name: cri-dockerd | restart docker.service
  service:
    name: docker.service
    state: restarted

- name: cri-dockerd | reload cri-dockerd.socket
  service:
    name: cri-dockerd.socket
@@ -21,3 +28,8 @@
  service:
    name: cri-dockerd.service
    state: restarted

- name: cri-dockerd | enable cri-dockerd service
  service:
    name: cri-dockerd.service
    enabled: yes

@@ -11,7 +11,7 @@
    mode: 0755
    remote_src: true
  notify:
    - restart cri-dockerd
    - restart and enable cri-dockerd

- name: Generate cri-dockerd systemd unit files
  template:
@@ -22,4 +22,7 @@
    - cri-dockerd.service
    - cri-dockerd.socket
  notify:
    - restart cri-dockerd
    - restart and enable cri-dockerd

- name: Flush handlers
  meta: flush_handlers

@@ -1,4 +1,12 @@
---
- name: runc | check if fedora coreos
  stat:
    path: /run/ostree-booted
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: ostree

- name: runc | set is_ostree
  set_fact:
    is_ostree: "{{ ostree.stat.exists }}"

@@ -90,6 +90,7 @@
  import_role:
    name: container-engine/containerd
    tasks_from: reset
    handlers_from: reset
  vars:
    service_name: containerd.service
  when:

@@ -2,6 +2,11 @@
local_release_dir: /tmp/releases
download_cache_dir: /tmp/kubespray_cache

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

# do not delete remote cache files after using them
# NOTE: Setting this parameter to TRUE is only really useful when developing kubespray
download_keep_remote_cache: false
@@ -70,11 +75,11 @@ nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and conta
# Versions
kubeadm_version: "{{ kube_version }}"
crun_version: 1.4.5
runc_version: v1.1.3
runc_version: v1.1.4
kata_containers_version: 2.4.1
youki_version: 0.0.1
gvisor_version: 20210921
containerd_version: 1.6.6
containerd_version: 1.6.8
cri_dockerd_version: 0.2.2

# this is relevant when container_manager == 'docker'
@@ -105,18 +110,18 @@ calico_apiserver_version: "{{ calico_version }}"
typha_enabled: false
calico_apiserver_enabled: false

flannel_version: "v0.18.1"
flannel_version: "v0.19.2"
flannel_cni_version: "v1.1.0"
cni_version: "v1.1.1"
weave_version: 2.8.1
pod_infra_version: "3.6"
cilium_version: "v1.11.7"
cilium_version: "v1.12.1"
kube_ovn_version: "v1.9.7"
kube_ovn_dpdk_version: "19.11-{{ kube_ovn_version }}"
kube_router_version: "v1.5.1"
multus_version: "v3.8-{{ image_arch }}"
helm_version: "v3.9.2"
nerdctl_version: "0.20.0"
helm_version: "v3.9.4"
nerdctl_version: "0.22.2"
krew_version: "v0.4.3"

# Get kubernetes major version (i.e. 1.17.4 => 1.17)
@@ -179,10 +184,16 @@ crictl_checksums:
# Kubernetes versions above Kubespray's current target version are untested and should be used with caution.
kubelet_checksums:
  arm:
    v1.24.6: 084e469d1d3b60363e5e20812ee0d909daa5496f3e6ebd305d1f23d1fe0709d4
    v1.24.5: ce55155d1aff0c72effee19c6bef534c2b7d1b23ec701d70335d181bd2d12a87
    v1.24.4: f9d387c18159a4473e7bdc290780ba1b1c92e8d8b41f558c15ee044db54636cd
    v1.24.3: fe34b1a0892cdfb015f66be8f2d3450130a5d04f9466732020e186c8da0ee799
    v1.24.2: e484fb000dcfdcf7baca79451745e29764747a27d36f3fc1dda5815b9cbc9b22
    v1.24.1: 393d130a1715205a253b2f70dbd1f00d1a52ab89b4f3684ed116a937e68116ec
    v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3
    v1.23.12: 5b7c38206ba3c04cd756062b74093548ac6309dc086c2893351b1c479f5415a3
    v1.23.11: 93bbe3a130dcd7d5732e8b949f13ba8728bb37d3d4bd58408f99352cf484f9d0
    v1.23.10: d6d5aa26f16e735962cac5f2ee8ddc0d3b9d2aa14b8e968cb55fc9745f9a8b03
    v1.23.9: f22edc9838eb3d0788d951c1fc8fdb0e1bf6c43ad638a215172f25b54ca27a8a
    v1.23.8: 53c4f44ba10d9c53a4526fccb4d20146e52473788058684ca2de74ae0e1abb11
    v1.23.7: f9910e670aea8845b6b07ecd36d43d8ac0901ee3244264d2bc0f6ea918d862ac
@@ -193,6 +204,9 @@ kubelet_checksums:
    v1.23.2: f9e83b3bd99b9e70cd98a5f8dc75a89d3d51548d51e4e05615cdc48d6144f908
    v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce
    v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba
    v1.22.15: c32ae2467733c0d61a6e9c8f042a2a14c329209b3c4f74abed338c5e83518278
    v1.22.14: b2f2bcb73bb367b7ea4834b41e01818f8aaa25c725e641008e6f320a9274851b
    v1.22.13: c2230f8ff03102502b6f9f10dcc494af6c536fd8f1f9467aa42ba684da4e9106
    v1.22.12: bb50b896769cb5e53101ef36e580095b8e546ea0dc194687e662824248b183ac
    v1.22.11: 528e01a436b1b91edaa192ecc6befff5f5a2e17f9f340e3f4908b8bed1cebbe9
    v1.22.10: 1510b508bd72c03f2576f07e652dfc0a12feda5a231a7dd792f32cd968153d8f
@@ -207,10 +221,16 @@ kubelet_checksums:
    v1.22.1: f42bc00f274be7ce0578b359cbccc48ead03894b599f5bf4d10e44c305fbab65
    v1.22.0: 4354dc8db1d8ca336eb940dd73adcd3cf17cbdefbf11889602420f6ee9c6c4bb
  arm64:
    v1.24.6: 2a7b8e131d6823462e38bc1514b5dea5dca86254b3a12ed4a0fa653c2e06dd0e
    v1.24.5: dd5dcea80828979981654ec0732b197be252a3259a527cbc299d9575bc2de3e8
    v1.24.4: 2d9817c1e9e1edd9480aa05862ea6e9655a9512d820b1933175f5d7c8253ca61
    v1.24.3: 6c04ae25ee9b434f40e0d2466eb4ef5604dc43f306ddf1e5f165fc9d3c521e12
    v1.24.2: 40a8460e104fbf97abee9763f6e1f2143debc46cc6c9a1a18e21c1ff9960d8c0
    v1.24.1: c2189c6956afda0f6002839f9f14a9b48c89dcc0228701e84856be36a3aac6bf
    v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e
    v1.23.12: b802f12c79a9797f83a366c617144d019d2994fc724c75f642a9d031ce6a3488
    v1.23.11: ce4f568c3193e8e0895062f783980da89adb6b54a399c797656a3ce172ddb2fc
    v1.23.10: 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8
    v1.23.9: c11b14ab3fa8e567c54e893c5a937f53618b26c9b62416cc8aa7760835f68350
    v1.23.8: 1b4ec707e29e8136e3516a437cb541a79c52c69b1331a7add2b47e7ac7d032e6
    v1.23.7: e96b746a77b00c04f1926035899a583ce28f02e9a5dca26c1bfb8251ca6a43bb
@@ -221,6 +241,9 @@ kubelet_checksums:
    v1.23.2: 65372ad077a660dfb8a863432c8a22cd0b650122ca98ce2e11f51a536449339f
    v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d
    v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9
    v1.22.15: 0c34cbda04ae914f342e683cf70f96d56d46033457d46ea79445e1483b501565
    v1.22.14: 663287b907c4aed4dfde55639da15a0d23fd0608b13afa9cf71fc3da850f3660
    v1.22.13: f8c1ec9fec6b36646ac05e1e26f0cd3e20395b500eca8ee3baeb3ca59935fdb0
    v1.22.12: 0e58133c153be32e8e61004cfdc18f8a02ef465f979c6d5bf3e998fbe3f89fca
    v1.22.11: d20398fa95ee724d63c3263af65eeb49e56c963fcace92efed2d2d0f6084c11a
    v1.22.10: 2376a7ecc044bc4b5cdae9a0a14d058ae5c1803450f3a8ffdce656785e9e251e
@@ -235,10 +258,16 @@ kubelet_checksums:
    v1.22.1: d5ffd67d8285fb224a1c49622fd739131f7b941e3d68f233dec96e72c9ebee63
    v1.22.0: cea637a7da4f1097b16b0195005351c07032a820a3d64c3ff326b9097cfac930
  amd64:
    v1.24.6: f8b606f542327128e404d2e66a72a40dc2ddb4175fb8e93c55effeacea60921b
    v1.24.5: 2448debe26e90341b038d7ccfcd55942c76ef3d9db48e42ceae5e8de3fbad631
    v1.24.4: 0f34d12aaa1b911adbf75dd63df03d0674dde921fa0571a51acd2b5b576ba0a4
    v1.24.3: da575ceb7c44fddbe7d2514c16798f39f8c10e54b5dbef3bcee5ac547637db11
    v1.24.2: 13da57d32be1debad3d8923e481f30aaa46bca7030b7e748b099d403b30e5343
    v1.24.1: fc352d5c983b0ccf47acd8816eb826d781f408d27263dd8f761dfb63e69abfde
    v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52
    v1.23.12: 98ffa8a736d3e43debb1aa61ae71dea3671989cde5e9e44c6ee51a3d47c63614
    v1.23.11: b0e6d413f9b4cf1007fcb9f0ea6460ed5273a50c945ae475c224036b0ab817f7
    v1.23.10: c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b
    v1.23.9: a5975920be1de0768e77ef101e4e42b179406add242c0883a7dc598f2006d387
    v1.23.8: 1ba15ad4d9d99cfc3cbef922b5101492ad74e812629837ac2e5705a68cb7af1e
    v1.23.7: 518f67200e853253ed6424488d6148476144b6b796ec7c6160cff15769b3e12a
@@ -249,6 +278,9 @@ kubelet_checksums:
    v1.23.2: c3c4be17910935d234b776288461baf7a9c6a7414d1f1ac2ef8d3a1af4e41ab6
    v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4
    v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518
    v1.22.15: 3c00f6d4e329c40c727eaf69f46eec25879ddce87c0b21b51fa1b3c6e55218b9
    v1.22.14: 76b1512da1104b4e80e23fa2c4d1cbd87b865f7bc25a41a46932cf0a219469ac
    v1.22.13: f55a72f5546ecf463f54e9220a1c38179b94b32ba561dfd6ec1f2fbe8231d640
    v1.22.12: d54539bd0fa43b43e9ad2ac4e6644bcb3f1e98b8fc371befba7ac362d93a6b00
    v1.22.11: 50fb1ede16c15dfe0bcb9fa98148d969ae8efeb8b599ce5eb5f09ab78345c9d1
    v1.22.10: c1aa6e9f59cfc765d33b382f604140699ab97c9c4212a905d5e1bcd7ef9a5c8b
@@ -263,10 +295,16 @@ kubelet_checksums:
    v1.22.1: 2079780ad2ff993affc9b8e1a378bf5ee759bf87fdc446e6a892a0bbd7353683
    v1.22.0: fec5c596f7f815f17f5d7d955e9707df1ef02a2ca5e788b223651f83376feb7f
  ppc64le:
    v1.24.6: ea9068c28a0107f5e1317ef8ba3a23965d95ee57db6fa71ee27433cdaa0fe33c
    v1.24.5: 56844b2594212e81d7cd4470f81da5d0f79876f044ee6d1707166fe76fdcb03a
    v1.24.4: 38475815448bd5d43e893b6a9ac9fd3ae8b0dbddf8a7ba92d3f83437b5c1b916
    v1.24.3: 0bfb73c1932c8593ef6281efc6d16bf440275fed1272466f76101ea0f0971907
    v1.24.2: 43e9354dfc46b6d3579a6c9a3e49a2f079fec8e63c3ed998143ab2f05790d132
    v1.24.1: c59319571efe34ad9bcc4edfe89f5e324d9026d1c3182d86cadc00cfc77f7a06
    v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931
    v1.23.12: e14a9dd3e3615e781d1de9000b250267eddfbab5ba46432ad2aa9108a5992e6a
    v1.23.11: 64b02bc0f17b9df2b7ca8006d6cb6c1345f32fe6e748fcb6cbe9c4b406b116f6
    v1.23.10: a8f742b9b1c0b1a70719da6ea52e92d276b5ad6c59db0070aacdc474292c7e7a
    v1.23.9: 6b05833c938c1d31e7450e93aebff561dfaa43eacafde1a011e0945ec2114fec
    v1.23.8: f07b6194add802e2e5c5905a79ef744118ccb82ebcbf4e402a11bdb478de2c0f
    v1.23.7: e011d7ad6aa01c5d1858ee88829d4a46b66dae10602615f46a7d4a0f9d9c2d6e
@@ -277,6 +315,9 @@ kubelet_checksums:
    v1.23.2: 6fdee30ee13149845aac8d110ad6a1894bb35f953e1ecb562ce7c59f63329dca
    v1.23.1: 9c3dc8ba6888b610e204d4066f0460d5b24037219300bb5f5b254ea7e8d5a4d1
    v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753
    v1.22.15: b0185e633d401ef24c05ada5262a3fe8c49286bce32fa1ad15b2c78988e5966f
    v1.22.14: 7d27862fdb447d9d6a3547a1c5f3fd7dd718bc4b3cebbc13af686d169910e5e9
    v1.22.13: ac81fe025a69834f872d70d696472780e8e5713e0ca2450dcfc2cd9745b55239
    v1.22.12: 50e418ff8b8d1f4746be37d5658895dfcb892b0a3a8a2dd7320e760d4159826c
    v1.22.11: 48e6b0e8d4483e7ccce02dd658b4c92be6859bbb235c58e8902182503280a14c
    v1.22.10: da53b707bd5e8b4ae9e720a4e87892e4c0713dd419f0d66cade7e4619a3d8965
@@ -292,10 +333,16 @@ kubelet_checksums:
    v1.22.0: 957dcc6ae45078ce971af183c0061d60168c15f484dcd978588cc6380236423f
kubectl_checksums:
  arm:
    v1.24.6: 7ca8fd7f5d6262668c20e3e639759e1976590ed4bd4fece62861dd376c2168de
    v1.24.5: 3ca0fcb90b715f0c13eafe15c9100495a8648d459f1281f3340875d1b0b7e78f
    v1.24.4: 060c0bb55aa3284c489cf8224ab10296d486b5a2e7f3e5d6440c9382698bf68a
    v1.24.3: 4ae94095580973931da53fd3b823909d85ca05055d6300f392d9dc9e5748d612
    v1.24.2: c342216e1d32c28953e13f28ced387feda675b969a196ed69eaeda137fa7486a
    v1.24.1: 42e880ff20a55e8ec49187d54e2c1367226d220a0a6a1797e7fbf97426762f4f
    v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc
    v1.23.12: 94e946dcd1c2f7c8c9e3e022202762a36dab604b861b50bdcbdfb2c719731bd9
    v1.23.11: 6eaffb8f64929e888137366cf2aa7fd1df2cf851de4f96f62fe70ed4d79f0ef7
    v1.23.10: b2156478b03b90c0f72fd386ceab2e78b7cf32eab9d9b4696c28d2bb45c9d3ec
    v1.23.9: 44caabd847c147ded79aa91daa49a5e0ea68ce4a0833b0733df1c8313375ff80
    v1.23.8: c4a2be3c61f40d4b1b0f61d509b0e361e85f10b7d2a98120d180c023ede7728f
    v1.23.7: bc74849aabe50feb71333e41130ecf1122c0f79705a5fdc9d1ec2fce621bf749
@@ -306,6 +353,9 @@ kubectl_checksums:
    v1.23.2: 6521719af33342f00ebb6cf020848e25152a63ed5f35a94440c08373b7a36173
    v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5
    v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644
    v1.22.15: 011486177abff9623898a268de16ad1e17f9879b619c658add43aca8b3013201
    v1.22.14: f8dd8db49ec02d9095ec0ac5409f498505748cfbb6b394addaa6401be2403680
    v1.22.13: 4228743e4e51403692cf9578b35f3550a769804011126a9be18536ac591e8dd2
    v1.22.12: 9aa6e8df0dc0c77fd546762ccc78c3f2d349049855c59b0699a3192621590754
    v1.22.11: 8e0c2a168aac356b3c84e9366ae19c26fc5ecd1344e3ef92f56377ec4ccddc3b
    v1.22.10: daadf5f7c66fdcf2aa62a8504606a058621146379ea1bb52159ea0b087b986b2
@@ -320,10 +370,16 @@ kubectl_checksums:
    v1.22.1: 50991ec4313ee42da03d60e21b90bc15e3252c97db189d1b66aad5bbb555997b
    v1.22.0: 6d7c787416a148acffd49746837df4cebb1311c652483dc3d2c8d24ce1cc897e
  arm64:
    v1.24.6: 2f62e55960b02bb63cbc9154141520ac7cf0c2d55b45dd4a72867971e24a7219
    v1.24.5: a5e348758c0f2b22adeb1b663b4b66781bded895d8ea2a714eb1de81fb00907a
    v1.24.4: 0aa4a08ff81efe3fc1a8ef880ca2f8622e3b1f93bf622583d7b9bfe3124afe61
    v1.24.3: bdad4d3063ddb7bfa5ecf17fb8b029d5d81d7d4ea1650e4369aafa13ed97149a
    v1.24.2: 5a4c3652f08b4d095b686e1323ac246edbd8b6e5edd5a2626fb71afbcd89bc79
    v1.24.1: b817b54183e089494f8b925096e9b65af3a356d87f94b73929bf5a6028a06271
    v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d
    v1.23.12: 88ebbc41252b39d49ce574a5a2bb25943bb82e55a252c27fe4fc096ce2dbb437
    v1.23.11: 9416cc7abaf03eb83f854a45a41986bf4e1232d129d7caafc3101a01ca11b0e3
    v1.23.10: d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b
    v1.23.9: 66659f614d06d0fe80c5eafdba7073940906de98ea5ee2a081d84fa37d8c5a21
    v1.23.8: b293fce0b3dec37d3f5b8875b8fddc64e02f0f54f54dd7742368973c52530890
    v1.23.7: 5d59447a5facd8623a79c2a296a68a573789d2b102b902aafb3a730fc4bb0d3b
@@ -334,6 +390,9 @@ kubectl_checksums:
    v1.23.2: 6e7bb8ddc5fc8fa89a4c31aba02942718b092a5107585bd09a83c95039c7510b
    v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524
    v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe
    v1.22.15: 206ccaa283eaf02f72d19cf7a490b71c7b9fa77c0eb265006db31ab84b56eac0
    v1.22.14: 942c5f0e7be658ed047d8691df3f80cf1dd00a642fb7eab5c5367cb2f8e937e6
    v1.22.13: e3e845bac0e1c30de20438433a8d75c64c237892245887a2818bd877b9601b41
    v1.22.12: 7d6507ecb8061f7d94d1bd6b982c56b1a1f929427bcc27a962fe66c61100f12a
    v1.22.11: 35da77af0581740aa8815c461ee912181fbb4cec09c2e0c9f6dbee58a48758a6
    v1.22.10: 6ce1a1315225d7d62f7d17083c9f87d4f3f5684c80da108799c99780ad520cb3
@@ -348,10 +407,16 @@ kubectl_checksums:
    v1.22.1: 5c7ef1e505c35a8dc0b708f6b6ecdad6723875bb85554e9f9c3fe591e030ae5c
    v1.22.0: 8d9cc92dcc942f5ea2b2fc93c4934875d9e0e8ddecbde24c7d4c4e092cfc7afc
  amd64:
    v1.24.6: 3ba7e61aecb19eadfa5de1c648af1bc66f5980526645d9dfe682d77fc313b74c
    v1.24.5: 3037f2ec62956e7146fc86defb052d8d3b28e2daa199d7e3ff06d1e06a6286ed
    v1.24.4: 4a76c70217581ba327f0ad0a0a597c1a02c62222bb80fbfea4f2f5cb63f3e2d8
    v1.24.3: 8a45348bdaf81d46caf1706c8bf95b3f431150554f47d444ffde89e8cdd712c1
    v1.24.2: f15fb430afd79f79ef7cf94a4e402cd212f02d8ec5a5e6a7ba9c3d5a2f954542
    v1.24.1: 0ec3c2dbafc6dd27fc8ad25fa27fc527b5d7356d1830c0efbb8adcf975d9e84a
    v1.24.0: 94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7
    v1.23.12: b150c7c4830cc3be4bedd8998bf36a92975c95cd1967b4ef2d1edda080ffe5d9
    v1.23.11: cf04ad2fa1cf118a951d690af0afbbe8f5fc4f02c721c848080d466e6159111e
    v1.23.10: 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7
    v1.23.9: 053561f7c68c5a037a69c52234e3cf1f91798854527692acd67091d594b616ce
    v1.23.8: 299803a347e2e50def7740c477f0dedc69fc9e18b26b2f10e9ff84a411edb894
    v1.23.7: b4c27ad52812ebf3164db927af1a01e503be3fb9dc5ffa058c9281d67c76f66e
@@ -362,6 +427,9 @@ kubectl_checksums:
    v1.23.2: 5b55b58205acbafa7f4e3fc69d9ce5a9257be63455db318e24db4ab5d651cbde
    v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e
    v1.23.0: 2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f
    v1.22.15: 239a48f1e465ecfd99dd5e3d219066ffea7bbd4cdedb98524e82ff11fd72ba12
    v1.22.14: a4408b32b9729e38c14b38a64ea6f00d67d2127f9c1314fbc2273a37a987a2d2
    v1.22.13: b96d2bc9137ec63546a29513c40c5d4f74e9f89aa11edc15e3c2f674d5fa3e02
    v1.22.12: 8e36c8fa431e454e3368c6174ce3111b7f49c28feebdae6801ab3ca45f02d352
    v1.22.11: a61c697e3c9871da7b609511248e41d9c9fb6d9e50001425876676924761586b
    v1.22.10: 225bc8d4ac86e3a9e36b85d2d9cb90cd4b4afade29ba0292f47834ecf570abf2
@@ -376,10 +444,16 @@ kubectl_checksums:
    v1.22.1: 78178a8337fc6c76780f60541fca7199f0f1a2e9c41806bded280a4a5ef665c9
    v1.22.0: 703e70d49b82271535bc66bc7bd469a58c11d47f188889bd37101c9772f14fa1
  ppc64le:
    v1.24.6: 448009693a97428aec7e60cc117079724f890e3a46d0aa54accdb56f33ca0f3d
    v1.24.5: 0861df1c77336fbe569887a884d62a24fcb6486d43798a8767dba7e5865c3c98
    v1.24.4: cfd7151471dd9878d48ab8d7bc3cf945c207e130568ee778f1aed9ceb84afd44
    v1.24.3: 893a83cd636650d1ad50be0e9a2517f2f4434c35646dacd9160b66446aee404e
||||
v1.24.2: cacf9b4a539853158b885c39fa714710767aa6c12804fccb7de6b037228b811f
|
||||
v1.24.1: 8812543e6c34101d37ad9d7a7edb91621db0fe992b16bd9beb8e5ddb4c7792c5
|
||||
v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75
|
||||
v1.23.12: f9a8efede8872c23c54c44f09657fa522e99786f3dc73ba7d6d928e9b3c7dc1a
|
||||
v1.23.11: 52556d4e8ba19e8b0a65e4ac70203922b42b054647ec59a0177a2c4f61b903e7
|
||||
v1.23.10: fc0867d7412d7698029413a8307d8e74748d47e402c075e8d6cc79ed772fb232
|
||||
v1.23.9: 141532b62ce75860975d5913bfbf784a09b0abc83ca7d31a6b1eddf28866ce67
|
||||
v1.23.8: 599ed10fc7e8fcb5884485cecf690c7645947d1f144b66d717a3f064f11c0b8f
|
||||
v1.23.7: dab46d2ede0a930f1530ebf857da538ca0879bdb72fc71070d518849c45b9fae
|
||||
@@ -390,6 +464,9 @@ kubectl_checksums:
|
||||
v1.23.2: 97d50dc4ff0a6c70bbfcbd45f6959e6201c6317392b2894008017380669f6015
|
||||
v1.23.1: 514e50afdb5b8953adfffe4941e903748348830bdd82805fd4489c3334a02a4a
|
||||
v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851
|
||||
v1.22.15: 748f110815a3781e608778736975f45d5491cd7404f1cabb89a5c7ecc9dffd7e
|
||||
v1.22.14: 808ff251575a3fc6afff52bb97a965d9fb2d01dd6d626d6b666759a35efbe612
|
||||
v1.22.13: fd4a8473a57275579eedd64a5d13aabf801cddef9f4a81f11658c40b19f559da
|
||||
v1.22.12: 3855d0a2add2a093772cb024b3cf678ddfa840b4a764f925b0c58ff94aaf13ee
|
||||
v1.22.11: e74b2c62c524b81e22a5e66bf2abe2f036d26bb541663a4383abd6655d365288
|
||||
v1.22.10: 98226e40cd93c7a23bf3dde675879207d393d886e53d0e3dfdf8a2732307711c
|
||||
@@ -405,10 +482,16 @@ kubectl_checksums:
|
||||
v1.22.0: 7ea30171a5db9dfbdc240674f5cde00fb75a8193ef73783950b8d10c810b6a5b
|
||||
kubeadm_checksums:
  arm:
    v1.24.6: 760f0fc195f00ca3d1612e0974461ab937c25aa1e7a2f8d2357cd1336b2ecf3a
    v1.24.5: 973f1ad7da9216fe3e0319a0c4fcb519a21a773cd39a0a445e689bea3d4a27c7
    v1.24.4: e0c1510ab2ed1cd555abad6f226454a3206aaaf20474da7dcf976ddc86a065d4
    v1.24.3: dc90c93e2305a7babafc41185a43435a9f3af2ef5d546bbd06e6553898e43d9e
    v1.24.2: d4bead61c1ba03113281ab96b21530b32e96eea24220bd2aebe1abdec739c266
    v1.24.1: 1c0b22c941badb40f4fb93e619b4a1c5e4bba7c1c7313f7c7e87d77150f35153
    v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2
    v1.23.12: 6da38118a7a1570ad76389f0492c11f8ae8e2068395773b89a2b0442d02e604c
    v1.23.11: 4ea0f63d245d01eccc5c3f2c849e2c799392d5e37c9bc4c0ec7a06a5d3722622
    v1.23.10: e0db03e8c4c06c3c3e5e29558fa316b0b56ac9d2801751c4a36b2e3f84455b1f
    v1.23.9: fa265d592d4f85b083919baa80b232deae20acaf2a20095a9c417c4d5324e002
    v1.23.8: 24d159ac19b519453050a977d2f238873c328e3a9dd3dfe524a32f421b64dadb
    v1.23.7: 18da04d52a05f2b1b8cd7163bc0f0515a4ee793bc0019d2cada4bbf3323d4044
@@ -419,6 +502,9 @@ kubeadm_checksums:
    v1.23.2: 63a6ca7dca76475ddef84e4ff84ef058ee2003d0e453b85a52729094025d158e
    v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369
    v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda
    v1.22.15: d60e76910ca5b76d00d9ef5d5fd211a0a2954d83b02ee65247ff0e8f6b99c436
    v1.22.14: d852fd8846253c23b86651469e970295a3930130d5005e533cdc66bf64bd413e
    v1.22.13: dc8cb74f5f427958eda265c8190c2f12877e71eb4f04269dd85dfa86a8044208
    v1.22.12: d2d1f19c74186e9247cea9ff9ba484a658bd4985060979babe5c28389e594d0a
    v1.22.11: b2a5a1c827fe18f4589628cdb69e73c1e65011381ec015e1daa7a31198199302
    v1.22.10: f1ab42fbadb0a66ba200392ee82c05b65e3d29a3d8f3e030b774cbc48915dedb
@@ -433,10 +519,16 @@ kubeadm_checksums:
    v1.22.1: cc08281c5261e860df9a0b5040b8aa2e6d202a243daf25556f5f6d3fd8f2e1e9
    v1.22.0: 6a002deb0ee191001d5c0e0435e9a995d70aa376d55075c5f61e70ce198433b8
  arm64:
    v1.24.6: 211b8d1881468bb673b26036dbcfa4b12877587b0a6260ffd55fd87c2aee6e41
    v1.24.5: a68c6dd24ef47825bb34a2ad430d76e6b4d3cbe92187363676993d0538013ac2
    v1.24.4: 18de228f6087a2e5243bffcd2cc88c40180a4fa83e4de310ad071b4620bdd8b6
    v1.24.3: ea0fb451b69d78e39548698b32fb8623fad61a1a95483fe0add63e3ffb6e31b5
    v1.24.2: bd823b934d1445a020f8df5fe544722175024af62adbf6eb27dc7250d5db0548
    v1.24.1: 04f18fe097351cd16dc91cd3bde979201916686c6f4e1b87bae69ab4479fda04
    v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004
    v1.23.12: d05f6765a65f7541d07aad989ee80cd730c395f042afbe0526f667ea1a0b2947
    v1.23.11: 329d9aa9461baf4a7b7225e664ec1ecd61512b937e1f160f9a303bc0f0d44bbb
    v1.23.10: 42e957eebef78f6462644d9debc096616054ebd2832e95a176c07c28ebed645c
    v1.23.9: a0a007023db78e5f78d3d4cf3268b83f093201847c1c107ffb3dc695f988c113
    v1.23.8: 9b3d8863ea4ab0438881ccfbe285568529462bc77ef4512b515397a002d81b22
    v1.23.7: 65fd71aa138166039b7f4f3695308064abe7f41d2f157175e6527e60fb461eae
@@ -447,6 +539,9 @@ kubeadm_checksums:
    v1.23.2: a29fcde7f92e1abfe992e99f415d3aee0fa381478b4a3987e333438b5380ddff
    v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40
    v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6
    v1.22.15: 7f34e1e96831ae4fac769caa0dfd3646d2b02f6e0516394d814ca39d2fac4625
    v1.22.14: cc03d2bdf29900244ff59614e007786f3ff4820e4d77709067298f5b2db20a9e
    v1.22.13: 2c42aadc99b46b6b5684acc7dfa630c67cb12c19b17df4cea3d2091ef5753011
    v1.22.12: d0469a3008411edb50f6562e00f1df28123cf2dc368f1538f1b41e27b0482b1c
    v1.22.11: 15e1cba65f0db4713bf45ee23dbd01dd30048d20ad97ef985d6b9197f8ae359a
    v1.22.10: 8ea22a05b428de70a430711e8f75553e1be2925977ab773b5be1c240bc5b9fcd
@@ -461,10 +556,16 @@ kubeadm_checksums:
    v1.22.1: 85df7978b2e5bb78064ed0bcce14a39d105a1a3968bb92ee5d2f96a1fa09ed12
    v1.22.0: 9fc14b993de2c275b54445255d7770bd1d6cdb49f4cf9c227c5b035f658a2351
  amd64:
    v1.24.6: 7f4443fd42e0e03f6fd0c7218ca7e2634c9255d5f9d7c581fe362e19098aec4c
    v1.24.5: 3b9c1844ec0fc3c94015d63470b073a7b219082b6a6424c6b0da9cf97e234aeb
    v1.24.4: 9ec08e0905c0a29a68676ba9f6dd7de73bef13cfa2b846a45e1c2189572dc57c
    v1.24.3: 406d5a80712c45d21cdbcc51aab298f0a43170df9477259443d48eac116998ff
    v1.24.2: 028f73b8e7c2ae389817d34e0cb829a814ce2fac0a535a3aa0708f3133e3e712
    v1.24.1: 15e3193eecbc69330ada3f340c5a47999959bc227c735fa95e4aa79470c085d0
    v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318
    v1.23.12: bf45d00062688d21ff479bf126e1259d0ce3dee1c5c2fcd803f57497cd5e9e83
    v1.23.11: 2f10bd298a694d3133ea19192b796a106c282441e4148c114c39376042097692
    v1.23.10: 43d186c3c58e3f8858c6a22bc71b5441282ac0ccbff6f1d0c2a66ee045986b64
    v1.23.9: 947571c50ab840796fdd4ffb129154c005dfcb0fe83c6eff392d46cf187fd296
    v1.23.8: edbd60fd6a7e11c71f848b3a6e5d1b5a2bb8ebd703e5490caa8db267361a7b89
    v1.23.7: d7d863213eeb4791cdbd7c5fd398cf0cc2ef1547b3a74de8285786040f75efd2
@@ -475,6 +576,9 @@ kubeadm_checksums:
    v1.23.2: 58487391ec37489bb32fe532e367995e9ecaeafdb65c2113ff3675e7a8407219
    v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34
    v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0
    v1.22.15: c84799162c33f758facbe9d6cbabfbda1ca3f74e87386e98af8711278c706872
    v1.22.14: c8343a3e8a3056d922e466733486ccbbd8efd01a453a9e93e1cf8a164281e6b2
    v1.22.13: acbb0dd67b7656d0c70049484ba31c1981b803be0ae8f430dacad67e3e06c121
    v1.22.12: 9410dcff069993caa7dfe783d35ac2d929ec258a2c3a4f0c3f269f1091931263
    v1.22.11: da3594b4e905627fd5c158531280e40a71dadf44f1f0b6c061a1b729a898dd9b
    v1.22.10: df5e090a3c0e24b92b26f22f1d7689b6ea860099ea89b97edf5d4c19fa6da0ca
@@ -489,10 +593,16 @@ kubeadm_checksums:
    v1.22.1: 50a5f0d186d7aefae309539e9cc7d530ef1a9b45ce690801655c2bee722d978c
    v1.22.0: 90a48b92a57ff6aef63ff409e2feda0713ca926b2cd243fe7e88a84c483456cc
  ppc64le:
    v1.24.6: 9d73bfde24ee9781fcca712658f297a041408b534f875f5e093222ed64c91c15
    v1.24.5: f416c45ca5826ea3ff13be393911424a0fba3aa30b5557d3d32541551566142a
    v1.24.4: 00fe93a291ddca28188056e597fc812b798706ea19b2da6f8aaf688f6ea95c0e
    v1.24.3: 1cb40441d8982362c6d4ffdd9a980a4563dcc5cccc1bb1d7370f0bd7340484d2
    v1.24.2: 452922d2ec9bfa5e085a879174d1d99adb6212598f3c8ffe15b5e7c3a4e128bb
    v1.24.1: 74e84b4e6f2c328a169dab33956bc076a2c1670c638764b9163b1080dcb68137
    v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c
    v1.23.12: ccae0a4c81a60e50219954393432c5f4d4692847c866ca497a48a1118f417d0d
    v1.23.11: 9930cfb4ae7663f145c1d08e06c49ab60e28a6613ac5c7b19d047f15c1e24c22
    v1.23.10: c9f484bd8806f50ce051a28776ef92e3634a1cdc0a47c9483ee77c34cde845c1
    v1.23.9: 03643613aa6afc6251270adc7681029d4fc10e8a75d553a1d8e63cf5b5a2a8fe
    v1.23.8: dcfb69f564b34942136cc4cc340b1c800e3e610292e517e68ab5e0157b9510af
    v1.23.7: 525d43db6d24ac048606cb63ff0f737d87473deff66d4c43ed5ae716ed4fb263
@@ -503,6 +613,9 @@ kubeadm_checksums:
    v1.23.2: 2d76c4d9795e25867b9b6fe7853f94efb8c2f2b3052adab4073fddca93eedc01
    v1.23.1: 6b645c868834197bcb25104f468c601477967341aba6326bdf5d0957dcaa9edc
    v1.23.0: 895c84055bca698f50ecdf1fc01d2f368563f77384b1dd00bdacbf6d0c825cc1
    v1.22.15: 30064634eed97957794e56b10003a43ec806ab07759297e663d93f42aedba592
    v1.22.14: 0230c40496cde3e40ed141d514869c60b0f2ad60d12a7cff9a963e6934d430b3
    v1.22.13: 066051f2efb29656a04dbb6a378b813779fedacbf3be7034286b07ad43e364c7
    v1.22.12: 70c14af98ecaa5d4ac234c827a560df9a020b346af250b6fb8ac9e50943486d3
    v1.22.11: b2a8d92de208b66e3c2bd03521e26cf84a3977c74242e4f0e6724bdebd861326
    v1.22.10: f74feaf8ea42145a668111733e8ed55a05d062ca40b0281851c2c48d28b74468
@@ -604,13 +717,13 @@ krew_archive_checksums:

helm_archive_checksums:
  arm:
    v3.9.2: fb9f0c1c9475c66c2b3579b908c181d519761bbfae963ffac860bc683a2253de
    v3.9.4: 18ce0f79dcd927fea5b714ca03299929dad05266192d4cde3de6b4c4d4544249
  arm64:
    v3.9.2: e4e2f9aad786042d903534e3131bc5300d245c24bbadf64fc46cca1728051dbc
    v3.9.4: d24163e466f7884c55079d1050968e80a05b633830047116cdfd8ae28d35b0c0
  amd64:
    v3.9.2: 3f5be38068a1829670440ccf00b3b6656fd90d0d9cfd4367539f3b13e4c20531
    v3.9.4: 31960ff2f76a7379d9bac526ddf889fb79241191f1dbe2a24f7864ddcb3f6560
  ppc64le:
    v3.9.2: 85ae9bc357095917cdb2d801b7eb62926f3fed6c2dcf07e1280809ad2af3daa9
    v3.9.4: c63a951415c192397fda07c2f52aa60639b280920381c48d58be6803eb0c22f9
cri_dockerd_archive_checksums:
  arm:
@@ -628,21 +741,25 @@ runc_checksums:
    v1.1.1: 0
    v1.1.2: 0
    v1.1.3: 0
    v1.1.4: 0
  arm64:
    v1.1.0: 9ec8e68feabc4e7083a4cfa45ebe4d529467391e0b03ee7de7ddda5770b05e68
    v1.1.1: 20c436a736547309371c7ac2a335f5fe5a42b450120e497d09c8dc3902c28444
    v1.1.2: 6ebd968d46d00a3886e9a0cae2e0a7b399e110cf5d7b26e63ce23c1d81ea10ef
    v1.1.3: 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f
    v1.1.4: dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223
  amd64:
    v1.1.0: ab1c67fbcbdddbe481e48a55cf0ef9a86b38b166b5079e0010737fd87d7454bb
    v1.1.1: 5798c85d2c8b6942247ab8d6830ef362924cd72a8e236e77430c3ab1be15f080
    v1.1.2: e0436dfc5d26ca88f00e84cbdab5801dd9829b1e5ded05dcfc162ce5718c32ce
    v1.1.3: 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01
    v1.1.4: db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce
  ppc64le:
    v1.1.0: 4a6b2f43c0f2371b1948b2eceb906fd8b9d8f5e9f6bab7d21bc037f5b300f43e
    v1.1.1: 5f14bca6e35177134251dfd3c44bccb81136d9043508e7a37494ad9485f5f0e4
    v1.1.2: 545ac8165646ed2b157fae677dd6509baf10e370ebe67c23b2f800163fa97150
    v1.1.3: 3b1b7f953fc8402dec53dcf2de05b6b72d86850737efa9766f8ffefc7cae3c0a
    v1.1.4: 0f7fb3d2426b6012d9b33c354c778c0ffbce02c329c4c16c1189433a958fd60d
crun_checksums:
  arm:
@@ -728,13 +845,13 @@ gvisor_containerd_shim_binary_checksums:

nerdctl_archive_checksums:
  arm:
    0.20.0: b179e333a6db5a96110c641926ab554b8a2daec25e0ffd3bb043600553d15ad7
    0.22.2: 3db76ae74a6fac7aa740550cdb4fad338c0297ae585aa850b638042346f260f5
  arm64:
    0.20.0: 9a38c99d23587eb89e7461a6d21fa6c6b1e492241d12f153482b4f99e611249c
    0.22.2: 15fc3f992b59d6fbadca9c71e0337dab77cdfb08d79c925502449180a13d94a4
  amd64:
    0.20.0: e23d50316f9e268ca4a21bd4614a544f53b2cecf352144ceefa038da512bb29a
    0.22.2: ad40ecf11c689fad594a05a40fef65adb4df8ecd1ffb6711e13cff5382aeaed9
  ppc64le:
    0.20.0: 5bf2ee0c9851c8a003102f64dff6dafbd34decb6579e66918e5e761987b7a7a4
    0.22.2: c2c8d2785f0c4fb169f2f5b07547785ca83a5c249560b3c19c84f1c2adb0ff87
containerd_archive_checksums:
  arm:
@@ -753,6 +870,8 @@ containerd_archive_checksums:
    1.6.4: 0
    1.6.5: 0
    1.6.6: 0
    1.6.7: 0
    1.6.8: 0
  arm64:
    1.5.5: 0
    1.5.7: 0
@@ -769,6 +888,8 @@ containerd_archive_checksums:
    1.6.4: 0205bd1907154388dc85b1afeeb550cbb44c470ef4a290cb1daf91501c85cae6
    1.6.5: 2833e2f0e8f3cb5044566d64121fdd92bbdfe523e9fe912259e936af280da62a
    1.6.6: 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb
    1.6.7: 4167bf688a0ed08b76b3ac264b90aad7d9dd1424ad9c3911e9416b45e37b0be5
    1.6.8: b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd
  amd64:
    1.5.5: 8efc527ffb772a82021800f0151374a3113ed2439922497ff08f2596a70f10f1
    1.5.7: 109fc95b86382065ea668005c376360ddcd8c4ec413e7abe220ae9f461e0e173
@@ -785,6 +906,8 @@ containerd_archive_checksums:
    1.6.4: f23c8ac914d748f85df94d3e82d11ca89ca9fe19a220ce61b99a05b070044de0
    1.6.5: cf02a2da998bfcf61727c65ede6f53e89052a68190563a1799a7298b0cea86b4
    1.6.6: 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef
    1.6.7: 52e817b712d521b193773529ff33626f47507973040c02474a2db95a37da1c37
    1.6.8: 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e
  ppc64le:
    1.5.5: 0
    1.5.7: 0
@@ -801,6 +924,8 @@ containerd_archive_checksums:
    1.6.4: 0
    1.6.5: 0
    1.6.6: 0
    1.6.7: 0db5cb6d5dd4f3b7369c6945d2ec29a9c10b106643948e3224e53885f56863a9
    1.6.8: f18769721f614828f6b778030c72dc6969ce2108f2363ddc85f6c7a147df0fb8

etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}"
cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}"
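The two lookup variables above are how a concrete checksum is picked out of the tables in this file: the maps are keyed first by CPU architecture, then by component version. A minimal standalone sketch of the resolution pattern (the version and checksum values here are illustrative, not taken from the tables above):

```yaml
# Hypothetical example of the checksum lookup pattern used above.
image_arch: amd64
etcd_version: v3.5.4              # illustrative version
etcd_binary_checksums:
  amd64:
    v3.5.4: "0000000000000000000000000000000000000000000000000000000000000000"  # illustrative sha256
# Resolves to etcd_binary_checksums['amd64']['v3.5.4']:
etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}"
```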
@@ -929,12 +1054,12 @@ local_path_provisioner_version: "v0.0.22"
local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner"
local_path_provisioner_image_tag: "{{ local_path_provisioner_version }}"
ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller"
ingress_nginx_controller_image_tag: "v1.3.0"
ingress_nginx_controller_image_tag: "v1.3.1"
ingress_nginx_kube_webhook_certgen_imae_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen"
ingress_nginx_kube_webhook_certgen_imae_tag: "v1.1.1"
ingress_nginx_kube_webhook_certgen_imae_tag: "v1.3.0"
alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller"
alb_ingress_image_tag: "v1.1.9"
cert_manager_version: "v1.9.0"
cert_manager_version: "v1.9.1"
cert_manager_controller_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
cert_manager_cainjector_image_repo: "{{ quay_image_repo }}/jetstack/cert-manager-cainjector"
@@ -993,7 +1118,7 @@ gcp_pd_csi_resizer_image_tag: "v0.4.0-gke.0"
gcp_pd_csi_registrar_image_tag: "v1.2.0-gke.0"

dashboard_image_repo: "{{ docker_image_repo }}/kubernetesui/dashboard-{{ image_arch }}"
dashboard_image_tag: "v2.6.0"
dashboard_image_tag: "v2.6.1"
dashboard_metrics_scraper_repo: "{{ docker_image_repo }}/kubernetesui/metrics-scraper"
dashboard_metrics_scraper_tag: "v1.0.8"
@@ -67,7 +67,7 @@
  retries: 4
  delay: "{{ retry_stagger | default(5) }}"
  environment: "{{ proxy_env }}"
  no_log: true
  no_log: "{{ not (unsafe_show_logs|bool) }}"
  loop: "{{ download.mirrors | default([download.url]) }}"
  loop_control:
    loop_var: mirror
@@ -100,7 +100,7 @@
  retries: 4
  delay: "{{ retry_stagger | default(5) }}"
  environment: "{{ proxy_env }}"
  no_log: true
  no_log: "{{ not (unsafe_show_logs|bool) }}"

- name: download_file | Copy file back to ansible host file cache
  synchronize:
@@ -58,7 +58,7 @@

- name: prep_download | Register docker images info
  shell: "{{ image_info_command }}"  # noqa 305 image_info_command contains pipe therefore requires shell
  no_log: true
  no_log: "{{ not (unsafe_show_logs|bool) }}"
  register: docker_images
  failed_when: false
  changed_when: false
@@ -66,7 +66,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %

etcd_blkio_weight: 1000

etcd_node_cert_hosts: "{{ groups['k8s_cluster'] | union(groups.get('calico_rr', [])) }}"
etcd_node_cert_hosts: "{{ groups['k8s_cluster'] }}"

etcd_compaction_retention: "8"

@@ -115,3 +115,8 @@ etcd_retries: 4
# ETCD 3.5.x issue
# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer
etcd_experimental_initial_corrupt_check: true

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false
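Several tasks in this release switch from a hard-coded `no_log: true` to `no_log: "{{ not (unsafe_show_logs|bool) }}"`, so the flag above becomes the single switch that reveals their output. A minimal way to enable it for a debugging run (the inventory path below is illustrative):

```yaml
# inventory/mycluster/group_vars/all/all.yml -- path is an assumption, any group_vars file works
unsafe_show_logs: true
```

The same can be done per run with `ansible-playbook ... -e unsafe_show_logs=true`.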
@@ -33,14 +33,13 @@
  stat:
    path: "{{ etcd_cert_dir }}/{{ item }}"
  register: etcd_node_certs
  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
        inventory_hostname in groups['k8s_cluster'])
  when: inventory_hostname in groups['k8s_cluster']
  with_items:
    - ca.pem
    - node-{{ inventory_hostname }}.pem
    - node-{{ inventory_hostname }}-key.pem

- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node"
- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(1/2)"
  set_fact:
    gen_certs: true
  when: force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list
@@ -56,13 +55,39 @@
      '{{ etcd_cert_dir }}/member-{{ host }}.pem',
      '{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
      {% endfor %}
      {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort %}
      {% set k8s_nodes = groups['kube_control_plane'] %}
      {% for host in k8s_nodes %}
      '{{ etcd_cert_dir }}/node-{{ host }}.pem',
      '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
      {% if not loop.last %}{{','}}{% endif %}
      {% endfor %}]

- name: "Check_certs | Set 'gen_certs' to true if expected certificates are not on the first etcd node(2/2)"
  set_fact:
    gen_certs: true
  run_once: true
  with_items: "{{ expected_files }}"
  vars:
    expected_files: >-
      ['{{ etcd_cert_dir }}/ca.pem',
      {% set etcd_members = groups['etcd'] %}
      {% for host in etcd_members %}
      '{{ etcd_cert_dir }}/admin-{{ host }}.pem',
      '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem',
      '{{ etcd_cert_dir }}/member-{{ host }}.pem',
      '{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
      {% endfor %}
      {% set k8s_nodes = groups['k8s_cluster']|unique|sort %}
      {% for host in k8s_nodes %}
      '{{ etcd_cert_dir }}/node-{{ host }}.pem',
      '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
      {% if not loop.last %}{{','}}{% endif %}
      {% endfor %}]
  when:
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
    - force_etcd_cert_refresh or not item in etcdcert_master.files|map(attribute='path') | list

- name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node"
  set_fact:
    gen_master_certs: |-
@@ -89,7 +114,7 @@
  set_fact:
    gen_node_certs: |-
      {
      {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort -%}
      {% set k8s_nodes = groups['k8s_cluster'] -%}
      {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
      {% for host in k8s_nodes -%}
      {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
@@ -125,8 +150,7 @@
  set_fact:
    kubernetes_host_requires_sync: true
  when:
    - (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
      inventory_hostname in groups['k8s_cluster']) and
    - inventory_hostname in groups['k8s_cluster'] and
      inventory_hostname not in groups['etcd']
    - (not etcd_node_certs.results[0].stat.exists|default(false)) or
      (not etcd_node_certs.results[1].stat.exists|default(false)) or

@@ -38,7 +38,7 @@
  - gen_certs|default(false)
  - inventory_hostname == groups['etcd'][0]

- name: Gen_certs | run cert generation script
- name: Gen_certs | run cert generation script for etcd and kube control plane nodes
  command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
  environment:
    - MASTERS: "{% for m in groups['etcd'] %}
@@ -46,7 +46,7 @@
      {{ m }}
      {% endif %}
      {% endfor %}"
    - HOSTS: "{% for h in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
    - HOSTS: "{% for h in groups['kube_control_plane'] %}
      {% if gen_node_certs[h] %}
      {{ h }}
      {% endif %}
@@ -56,7 +56,23 @@
  when: gen_certs|default(false)
  notify: set etcd_secret_changed

- name: Gen_certs | Gather etcd member and admin certs from first etcd node
- name: Gen_certs | run cert generation script for all clients
  command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
  environment:
    - HOSTS: "{% for h in groups['k8s_cluster'] %}
      {% if gen_node_certs[h] %}
      {{ h }}
      {% endif %}
      {% endfor %}"
  run_once: yes
  delegate_to: "{{ groups['etcd'][0] }}"
  when:
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
    - gen_certs|default(false)
  notify: set etcd_secret_changed

- name: Gen_certs | Gather etcd member/admin and kube_control_plane client certs from first etcd node
  slurp:
    src: "{{ item }}"
  register: etcd_master_certs
@@ -69,6 +85,10 @@
    '{{ etcd_cert_dir }}/member-{{ node }}.pem',
    '{{ etcd_cert_dir }}/member-{{ node }}-key.pem',
    {% endfor %}]"
  - "[{% for node in (groups['kube_control_plane']) %}
    '{{ etcd_cert_dir }}/node-{{ node }}.pem',
    '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
    {% endfor %}]"
  delegate_to: "{{ groups['etcd'][0] }}"
  when:
    - inventory_hostname in groups['etcd']
@@ -76,7 +96,7 @@
  - inventory_hostname != groups['etcd'][0]
  notify: set etcd_secret_changed

- name: Gen_certs | Write etcd member and admin certs to other etcd nodes
- name: Gen_certs | Write etcd member/admin and kube_control_plane client certs to other etcd nodes
  copy:
    dest: "{{ item.item }}"
    content: "{{ item.content | b64decode }}"
@@ -96,7 +116,7 @@
    src: "{{ item }}"
  register: etcd_master_node_certs
  with_items:
    - "[{% for node in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
    - "[{% for node in groups['k8s_cluster'] %}
      '{{ etcd_cert_dir }}/node-{{ node }}.pem',
      '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
      {% endfor %}]"
@@ -104,6 +124,8 @@
  when:
    - inventory_hostname in groups['etcd']
    - inventory_hostname != groups['etcd'][0]
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
  notify: set etcd_secret_changed

- name: Gen_certs | Write node certs to other etcd nodes
@@ -117,47 +139,21 @@
  when:
    - inventory_hostname in groups['etcd']
    - inventory_hostname != groups['etcd'][0]
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
  loop_control:
    label: "{{ item.item }}"

- name: Gen_certs | Set cert names per node
  set_fact:
    my_etcd_node_certs: [ 'ca.pem',
                          'node-{{ inventory_hostname }}.pem',
                          'node-{{ inventory_hostname }}-key.pem']
  tags:
    - facts

- name: "Check_certs | Set 'sync_certs' to true on nodes"
  set_fact:
    sync_certs: true
  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
        inventory_hostname in groups['k8s_cluster']) and
        inventory_hostname not in groups['etcd']
  with_items:
    - "{{ my_etcd_node_certs }}"

- name: Gen_certs | Gather node certs
  shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
  args:
    executable: /bin/bash
    warn: false
  no_log: true
  register: etcd_node_certs
  check_mode: no
  delegate_to: "{{ groups['etcd'][0] }}"
  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
        inventory_hostname in groups['k8s_cluster']) and
- include_tasks: gen_nodes_certs_script.yml
  when:
    - inventory_hostname in groups['kube_control_plane'] and
      sync_certs|default(false) and inventory_hostname not in groups['etcd']

- name: Gen_certs | Copy certs on nodes
  shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
  args:
    executable: /bin/bash
  no_log: true
  changed_when: false
  when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
        inventory_hostname in groups['k8s_cluster']) and
- include_tasks: gen_nodes_certs_script.yml
  when:
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
    - inventory_hostname in groups['k8s_cluster'] and
      sync_certs|default(false) and inventory_hostname not in groups['etcd']

- name: Gen_certs | check certificate permissions
roles/etcd/tasks/gen_nodes_certs_script.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
---
- name: Gen_certs | Set cert names per node
  set_fact:
    my_etcd_node_certs: [ 'ca.pem',
                          'node-{{ inventory_hostname }}.pem',
                          'node-{{ inventory_hostname }}-key.pem']
  tags:
    - facts

- name: "Check_certs | Set 'sync_certs' to true on nodes"
  set_fact:
    sync_certs: true
  with_items:
    - "{{ my_etcd_node_certs }}"

- name: Gen_certs | Gather node certs
  shell: "set -o pipefail && tar cfz - -C {{ etcd_cert_dir }} {{ my_etcd_node_certs|join(' ') }} | base64 --wrap=0"
  args:
    executable: /bin/bash
    warn: false
  no_log: "{{ not (unsafe_show_logs|bool) }}"
  register: etcd_node_certs
  check_mode: no
  delegate_to: "{{ groups['etcd'][0] }}"
  changed_when: false

- name: Gen_certs | Copy certs on nodes
  shell: "set -o pipefail && base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
  args:
    executable: /bin/bash
  no_log: "{{ not (unsafe_show_logs|bool) }}"
  changed_when: false
@@ -12,6 +12,16 @@
  - etcd-secrets

- include_tasks: upd_ca_trust.yml
  when:
    - inventory_hostname in groups['etcd']|union(groups['kube_control_plane'])|unique|sort
  tags:
    - etcd-secrets

- include_tasks: upd_ca_trust.yml
  when:
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
    - inventory_hostname in groups['k8s_cluster']
  tags:
    - etcd-secrets

@@ -21,7 +31,9 @@
  changed_when: false
  check_mode: no
  when:
    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
    - inventory_hostname in groups['k8s_cluster']
  tags:
    - master
    - network
@@ -30,7 +42,9 @@
  set_fact:
    etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
  when:
    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
    - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"
    - inventory_hostname in groups['k8s_cluster']
  tags:
    - master
    - network
@@ -13,6 +13,11 @@ data:
  {{ block['zones'] | join(' ') }} {
    log
    errors
    {% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
    {% for rewrite_match in block['rewrite'] %}
    rewrite {{ rewrite_match }}
    {% endfor %}
    {% endif %}
    forward . {{ block['nameservers'] | join(' ') }}
    loadbalance
    cache {{ block['cache'] | default(5) }}
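The template change above adds optional `rewrite` rules to each forwarded zone block. A hedged sketch of the variable structure this block consumes, assuming it iterates the usual `coredns_external_zones` list (zone names, nameserver, and the rewrite rule are all illustrative):

```yaml
coredns_external_zones:
  - zones:
      - example.corp
    nameservers:
      - 10.0.0.53
    cache: 5
    rewrite:
      - name substring example.corp internal.example.corp
```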
@@ -44,10 +49,12 @@ data:
  {% if upstream_dns_servers is defined and upstream_dns_servers|length > 0 %}
  forward . {{ upstream_dns_servers|join(' ') }} {
    prefer_udp
    max_concurrent 1000
  }
  {% else %}
  forward . /etc/resolv.conf {
    prefer_udp
    max_concurrent 1000
  }
  {% endif %}
  {% if enable_coredns_k8s_external %}
@@ -14,6 +14,11 @@ data:
  errors
  cache {{ block['cache'] | default(30) }}
  reload
  {% if block['rewrite'] is defined and block['rewrite']|length > 0 %}
  {% for rewrite_match in block['rewrite'] %}
  rewrite {{ rewrite_match }}
  {% endfor %}
  {% endif %}
  loop
  bind {{ nodelocaldns_ip }}
  forward . {{ block['nameservers'] | join(' ') }}
@@ -1,5 +1,5 @@
---
argocd_enabled: false
argocd_version: v2.4.7
argocd_version: v2.4.12
argocd_namespace: argocd
# argocd_admin_password:
@@ -2,7 +2,7 @@
- name: Kubernetes Apps | Install yq
  become: yes
  get_url:
    url: "https://github.com/mikefarah/yq/releases/download/v4.25.3/yq_linux_amd64"
    url: "https://github.com/mikefarah/yq/releases/download/v4.27.5/yq_linux_{{ host_architecture }}"
    dest: "{{ bin_dir }}/yq"
    mode: '0755'
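With this change the download URL is templated on `host_architecture`, so each node fetches a matching yq binary instead of a hard-coded amd64 build. Assuming the usual fact values (amd64, arm64, ...), on an arm64 node the parameter renders roughly to:

```yaml
url: "https://github.com/mikefarah/yq/releases/download/v4.27.5/yq_linux_arm64"
```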
@@ -25,8 +25,8 @@
  - {name: azure-csi-cloud-config-secret, file: azure-csi-cloud-config-secret.yml}
  - {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller-rbac.yml}
  - {name: azure-csi-azuredisk-controller, file: azure-csi-azuredisk-controller.yml}
  - {name: azure-csi-azuredisk-node-rbac, file: azure-csi-azuredisk-node-rbac.yml}
  - {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml}
  - {name: azure-csi-node-info-crd.yml.j2, file: azure-csi-node-info-crd.yml}
  register: azure_csi_manifests
  when: inventory_hostname == groups['kube_control_plane'][0]
@@ -165,9 +165,6 @@ spec:
  - mountPath: /etc/kubernetes/
    name: azure-cred
    readOnly: true
  - mountPath: /var/lib/waagent/ManagedIdentity-Settings
    readOnly: true
    name: msi
  resources:
    limits:
      memory: 500Mi
@@ -150,9 +150,9 @@ spec:
    path: /var/lib/kubelet/plugins_registry/
    type: DirectoryOrCreate
  name: registration-dir
- hostPath:
    path: /etc/kubernetes/
    type: DirectoryOrCreate
- secret:
    defaultMode: 0644
    secretName: cloud-config
  name: azure-cred
- hostPath:
    path: /dev
@@ -3,8 +3,14 @@ upcloud_csi_controller_replicas: 1
upcloud_csi_provisioner_image_tag: "v3.1.0"
upcloud_csi_attacher_image_tag: "v3.4.0"
upcloud_csi_resizer_image_tag: "v1.4.0"
upcloud_csi_plugin_image_tag: "v0.2.1"
upcloud_csi_plugin_image_tag: "v0.3.3"
upcloud_csi_node_image_tag: "v2.5.0"
upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}"
upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}"
upcloud_tolerations: []
upcloud_tolerations: []
upcloud_csi_enable_volume_snapshot: false
upcloud_csi_snapshot_controller_replicas: 2
upcloud_csi_snapshotter_image_tag: "v4.2.1"
upcloud_csi_snapshot_controller_image_tag: "v4.2.1"
upcloud_csi_snapshot_validation_webhook_image_tag: "v4.2.1"
upcloud_cacert: "{{ lookup('env','OS_CACERT') }}"
@@ -37,4 +37,4 @@
  - inventory_hostname == groups['kube_control_plane'][0]
  - not item is skipped
  loop_control:
    label: "{{ item.item.file }}"
    label: "{{ item.item.file }}"
@@ -23,7 +23,7 @@ spec:
  args:
    - "--csi-address=$(ADDRESS)"
    - "--v=5"
    - "--timeout=60s"
    - "--timeout=600s"
  env:
    - name: ADDRESS
      value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -36,7 +36,7 @@ spec:
  args:
    - "--v=5"
    - "--csi-address=$(ADDRESS)"
    - "--timeout=30s"
    - "--timeout=120s"
  env:
    - name: ADDRESS
      value: /var/lib/csi/sockets/pluginproxy/csi.sock
@@ -48,7 +48,7 @@ spec:
  image: registry.k8s.io/sig-storage/csi-resizer:{{ upcloud_csi_resizer_image_tag }}
  args:
    - "--v=5"
    - "--timeout=45s"
    - "--timeout=120s"
    - "--csi-address=$(ADDRESS)"
    - "--handle-volume-inuse-error=true"
  env:
@@ -68,8 +68,6 @@ spec:
  env:
    - name: CSI_ENDPOINT
      value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
    - name: UPCLOUD_API_URL
      value: https://api.upcloud.com/
    - name: UPCLOUD_USERNAME
      valueFrom:
        secretKeyRef:
@@ -92,4 +90,4 @@ spec:
  - name: regcred
  volumes:
    - name: socket-dir
      emptyDir: { }
      emptyDir: {}
@@ -23,15 +23,6 @@ spec:
  - "--v=5"
  - "--csi-address=$(ADDRESS)"
  - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
  lifecycle:
    preStop:
      exec:
        command:
          [
            "/bin/sh",
            "-c",
            "rm -rf /registration/storage.csi.upcloud.com /registration/storage.csi.upcloud.com-reg.sock",
          ]
  env:
    - name: ADDRESS
      value: /csi/csi.sock
@@ -56,8 +47,6 @@ spec:
  env:
    - name: CSI_ENDPOINT
      value: unix:///csi/csi.sock
    - name: UPCLOUD_API_URL
      value: https://api.upcloud.com/
    - name: UPCLOUD_USERNAME
      valueFrom:
        secretKeyRef:
@@ -76,7 +65,7 @@ spec:
  securityContext:
    privileged: true
    capabilities:
      add: [ "SYS_ADMIN" ]
      add: ["SYS_ADMIN"]
  allowPrivilegeEscalation: true
  volumeMounts:
    - name: plugin-dir
@@ -5,6 +5,40 @@ metadata:
  namespace: kube-system

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-upcloud-node-sa
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-upcloud-node-driver-registrar-role
  namespace: kube-system
rules:
  - apiGroups: [ "" ]
    resources: [ "events" ]
    verbs: [ "get", "list", "watch", "create", "update", "patch" ]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-upcloud-node-driver-registrar-binding
subjects:
  - kind: ServiceAccount
    name: csi-upcloud-node-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-upcloud-node-driver-registrar-role
  apiGroup: rbac.authorization.k8s.io

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -28,12 +62,6 @@ rules:
  - apiGroups: [ "" ]
    resources: [ "events" ]
    verbs: [ "list", "watch", "create", "update", "patch" ]
  - apiGroups: [ "snapshot.storage.k8s.io" ]
    resources: [ "volumesnapshots" ]
    verbs: [ "get", "list" ]
  - apiGroups: [ "snapshot.storage.k8s.io" ]
    resources: [ "volumesnapshotcontents" ]
    verbs: [ "get", "list" ]
  - apiGroups: [ "" ]
    resources: [ "nodes" ]
    verbs: [ "get", "list", "watch" ]
@@ -90,87 +118,39 @@ roleRef:
  apiGroup: rbac.authorization.k8s.io

---
kind: ClusterRole
# Provisioner must be able to work with endpoints and leases in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-upcloud-snapshotter-role
  namespace: kube-system
  name: csi-upcloud-provisioner-cfg-role
rules:
  - apiGroups: [ "" ]
    resources: [ "persistentvolumes" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "" ]
    resources: [ "persistentvolumeclaims" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "storage.k8s.io" ]
    resources: [ "storageclasses" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "" ]
    resources: [ "events" ]
    verbs: [ "list", "watch", "create", "update", "patch" ]
  - apiGroups: [ "" ]
    resources: [ "secrets" ]
    verbs: [ "get", "list" ]
  - apiGroups: [ "snapshot.storage.k8s.io" ]
    resources: [ "volumesnapshotclasses" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "snapshot.storage.k8s.io" ]
    resources: [ "volumesnapshotcontents" ]
    verbs: [ "create", "get", "list", "watch", "update", "delete" ]
  - apiGroups: [ "snapshot.storage.k8s.io" ]
    resources: [ "volumesnapshots" ]
    verbs: [ "get", "list", "watch", "update" ]
  - apiGroups: [ "apiextensions.k8s.io" ]
    resources: [ "customresourcedefinitions" ]
    verbs: [ "create", "list", "watch", "delete" ]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: ClusterRoleBinding
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-upcloud-snapshotter-binding
  name: csi-provisioner-role-cfg-binding
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-upcloud-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-upcloud-snapshotter-role
  kind: Role
  name: csi-upcloud-provisioner-cfg-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-upcloud-node-sa
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-upcloud-node-driver-registrar-role
  namespace: kube-system
rules:
  - apiGroups: [ "" ]
    resources: [ "events" ]
    verbs: [ "get", "list", "watch", "create", "update", "patch" ]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-upcloud-node-driver-registrar-binding
subjects:
  - kind: ServiceAccount
    name: csi-upcloud-node-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-upcloud-node-driver-registrar-role
  apiGroup: rbac.authorization.k8s.io
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-upcloud-resizer-role
rules:
@@ -14,6 +14,9 @@ vsphere_csi_node_driver_registrar_image_tag: "v2.5.0"
vsphere_csi_driver_image_tag: "v2.5.1"
vsphere_csi_resizer_tag: "v1.4.0"

# Set to kube-system for backward compatibility, should be changed to vmware-system-csi in the long run
vsphere_csi_namespace: "kube-system"

vsphere_csi_controller_replicas: 1

csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}'
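With the namespace now a variable, moving the driver to the namespace the comment above recommends is a one-line override (the group_vars path is illustrative):

```yaml
# e.g. inventory/mycluster/group_vars/all/vsphere.yml -- path is an assumption
vsphere_csi_namespace: "vmware-system-csi"
```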
@@ -21,3 +24,10 @@ csi_endpoint: '{% if external_vsphere_version >= "7.0u1" %}/csi{% else %}/var/li
vsphere_csi_aggressive_node_drain: False
vsphere_csi_aggressive_node_unreachable_timeout: 300
vsphere_csi_aggressive_node_not_ready_timeout: 300

vsphere_csi_node_affinity: {}

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false
@@ -16,6 +16,7 @@
  dest: "{{ kube_config_dir }}/{{ item }}"
  mode: 0644
  with_items:
    - vsphere-csi-namespace.yml
    - vsphere-csi-driver.yml
    - vsphere-csi-controller-rbac.yml
    - vsphere-csi-node-rbac.yml
@@ -27,17 +28,17 @@
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: vSphere CSI Driver | Generate a CSI secret manifest
  command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
  command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml"
  register: vsphere_csi_secret_manifest
  when: inventory_hostname == groups['kube_control_plane'][0]
  no_log: true
  no_log: "{{ not (unsafe_show_logs|bool) }}"

- name: vSphere CSI Driver | Apply a CSI secret manifest
  command:
    cmd: "{{ kubectl }} apply -f -"
    stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
  when: inventory_hostname == groups['kube_control_plane'][0]
  no_log: true
  no_log: "{{ not (unsafe_show_logs|bool) }}"

- name: vSphere CSI Driver | Apply Manifests
  kube:
@@ -21,4 +21,4 @@ data:
kind: ConfigMap
metadata:
  name: internal-feature-states.csi.vsphere.vmware.com
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"

@@ -2,7 +2,7 @@ kind: Deployment
apiVersion: apps/v1
metadata:
  name: vsphere-csi-controller
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"
spec:
  replicas: {{ vsphere_csi_controller_replicas }}
  strategy:
@@ -90,8 +90,8 @@ spec:
  image: {{ gcr_image_repo }}/cloud-provider-vsphere/csi/release/driver:{{ vsphere_csi_controller }}
  args:
    - "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
    - "--fss-namespace=kube-system"
    - "--supervisor-fss-namespace=kube-system"
    - "--fss-namespace={{ vsphere_csi_namespace }}"
    - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
    - "--use-gocsi=false"
  imagePullPolicy: {{ k8s_image_pull_policy }}
  env:
@@ -150,8 +150,8 @@ spec:
  args:
    - "--leader-election"
    - "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
    - "--fss-namespace=kube-system"
    - "--supervisor-fss-namespace=kube-system"
    - "--fss-namespace={{ vsphere_csi_namespace }}"
    - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
  imagePullPolicy: {{ k8s_image_pull_policy }}
  ports:
    - containerPort: 2113
@@ -2,7 +2,7 @@ kind: ServiceAccount
apiVersion: v1
metadata:
  name: vsphere-csi-controller
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
@@ -79,7 +79,7 @@ metadata:
subjects:
  - kind: ServiceAccount
    name: vsphere-csi-controller
    namespace: kube-system
    namespace: "{{ vsphere_csi_namespace }}"
roleRef:
  kind: ClusterRole
  name: vsphere-csi-controller-role

@@ -2,7 +2,7 @@ apiVersion: v1
kind: Service
metadata:
  name: vsphere-csi-controller
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"
  labels:
    app: vsphere-csi-controller
spec:
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: "{{ vsphere_csi_namespace }}"
@@ -3,7 +3,7 @@ kind: ServiceAccount
apiVersion: v1
metadata:
  name: vsphere-csi-node
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
@@ -24,7 +24,7 @@ metadata:
subjects:
  - kind: ServiceAccount
    name: vsphere-csi-node
    namespace: kube-system
    namespace: "{{ vsphere_csi_namespace }}"
roleRef:
  kind: ClusterRole
  name: vsphere-csi-node-cluster-role
@@ -34,7 +34,7 @@ kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: vsphere-csi-node-role
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
@@ -44,11 +44,11 @@ kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: vsphere-csi-node-binding
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"
subjects:
  - kind: ServiceAccount
    name: vsphere-csi-node
    namespace: kube-system
    namespace: "{{ vsphere_csi_namespace }}"
roleRef:
  kind: Role
  name: vsphere-csi-node-role
@@ -2,7 +2,7 @@ kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: vsphere-csi-node
  namespace: kube-system
  namespace: "{{ vsphere_csi_namespace }}"
spec:
  selector:
    matchLabels:
@@ -19,6 +19,10 @@ spec:
  spec:
    nodeSelector:
      kubernetes.io/os: linux
{% if vsphere_csi_node_affinity %}
    affinity:
      {{ vsphere_csi_node_affinity | to_nice_yaml | indent(width=8) }}
{% endif %}
    serviceAccountName: vsphere-csi-node
    hostNetwork: true
    dnsPolicy: "ClusterFirstWithHostNet"
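The new `vsphere_csi_node_affinity` hook above is rendered verbatim into the DaemonSet pod spec. A hedged example of a value it could take, using standard Kubernetes node-affinity syntax (the label key chosen here is illustrative):

```yaml
vsphere_csi_node_affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: node-role.kubernetes.io/control-plane
              operator: DoesNotExist
```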
@@ -57,8 +61,8 @@ spec:
  imagePullPolicy: {{ k8s_image_pull_policy }}
  args:
    - "--fss-name=internal-feature-states.csi.vsphere.vmware.com"
    - "--fss-namespace=kube-system"
    - "--supervisor-fss-namespace=kube-system"
    - "--fss-namespace={{ vsphere_csi_namespace }}"
    - "--supervisor-fss-namespace={{ vsphere_csi_namespace }}"
    - "--use-gocsi=false"
  imagePullPolicy: "Always"
  env:

@@ -108,7 +108,6 @@ spec:
    protocol: TCP
{% endif %}
  livenessProbe:
    failureThreshold: 3
    httpGet:
      path: /healthz
      port: 10254
Some files were not shown because too many files have changed in this diff.