mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2026-02-11 06:24:44 -03:30
Compare commits
151 Commits
component_
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6965d8ded9 | ||
|
|
8bd5045ecf | ||
|
|
8f73dc9c2f | ||
|
|
cc05dd4d14 | ||
|
|
9582ab3dcd | ||
|
|
a77221d12b | ||
|
|
57364f4085 | ||
|
|
34f43d21e3 | ||
|
|
052846aa28 | ||
|
|
a563431c68 | ||
|
|
3aa0c0cc64 | ||
|
|
9bbef44e32 | ||
|
|
03cfdbf2a9 | ||
|
|
b5b599ecf8 | ||
|
|
4245ddcee8 | ||
|
|
422e7366ec | ||
|
|
bf69e67240 | ||
|
|
c5c2cf16a0 | ||
|
|
69e042bd9e | ||
|
|
20da3bb1b0 | ||
|
|
4d4058ee8e | ||
|
|
f071fccc33 | ||
|
|
70daea701a | ||
|
|
3e42b84e94 | ||
|
|
868ff3cea9 | ||
|
|
0b69a18e35 | ||
|
|
e30076016c | ||
|
|
f4ccdb5e72 | ||
|
|
fcecaf6943 | ||
|
|
37f7a86014 | ||
|
|
fff7f10a85 | ||
|
|
dc09298f7e | ||
|
|
680db0c921 | ||
|
|
9977d4dc10 | ||
|
|
1b6129566b | ||
|
|
c3404c3685 | ||
|
|
fba8708486 | ||
|
|
8dacb9cd16 | ||
|
|
df3f0a2341 | ||
|
|
62e90b3122 | ||
|
|
6b5cc5bdfb | ||
|
|
a277cfdee7 | ||
|
|
bc5528f585 | ||
|
|
2740c13c0c | ||
|
|
52b68bccad | ||
|
|
82c4c0afdf | ||
|
|
63a43cf6db | ||
|
|
666a3a9500 | ||
|
|
28f9c126bf | ||
|
|
d41b629be3 | ||
|
|
851abbc2e3 | ||
|
|
17c72367bc | ||
|
|
d91c7d7576 | ||
|
|
14b20ad2a2 | ||
|
|
72cb1356ef | ||
|
|
51304d57e2 | ||
|
|
a0d7bef90e | ||
|
|
a1ec88e290 | ||
|
|
c9ff62944e | ||
|
|
20ab9179af | ||
|
|
5be35c811a | ||
|
|
ad522d4aab | ||
|
|
9c511069cc | ||
|
|
ed270fcab4 | ||
|
|
0615929727 | ||
|
|
48c25d9ebf | ||
|
|
0bffcacbe7 | ||
|
|
c857252225 | ||
|
|
a0f00761ac | ||
|
|
3a3e5d6954 | ||
|
|
2d6e508084 | ||
|
|
6d850a0dc5 | ||
|
|
6a517e165e | ||
|
|
aaaf82f308 | ||
|
|
e80087df93 | ||
|
|
b7491b957b | ||
|
|
5cf8f3eefc | ||
|
|
1cbccf40a5 | ||
|
|
bcdd702e19 | ||
|
|
20693afe82 | ||
|
|
1bbcfd8dd6 | ||
|
|
8d948f918f | ||
|
|
4d8d1b8aff | ||
|
|
d80318301d | ||
|
|
31cce09fbc | ||
|
|
9a90c9d6c8 | ||
|
|
b9e1e8577f | ||
|
|
5d1dd83b07 | ||
|
|
b203586d6b | ||
|
|
88df61357b | ||
|
|
2edf176294 | ||
|
|
39744146b4 | ||
|
|
118b2dce02 | ||
|
|
4c5eda9f1e | ||
|
|
2512e0c50c | ||
|
|
633d39448e | ||
|
|
4d87ac1032 | ||
|
|
2342d0cd57 | ||
|
|
e6a5266bad | ||
|
|
57f7c44718 | ||
|
|
5789dc839c | ||
|
|
3de6fa7220 | ||
|
|
9a9e8814e6 | ||
|
|
87a4f61d76 | ||
|
|
9975b5d525 | ||
|
|
9d06ce1a8d | ||
|
|
bce107ce3d | ||
|
|
7d7a42d931 | ||
|
|
5183679a89 | ||
|
|
b4fe577203 | ||
|
|
bde51ebddf | ||
|
|
381426d6d5 | ||
|
|
b3ee6d6b75 | ||
|
|
7436d63faa | ||
|
|
6138c6a1a2 | ||
|
|
6115eba3c3 | ||
|
|
1c008d79b1 | ||
|
|
b4bbec6772 | ||
|
|
5c6ee4852a | ||
|
|
8190f952c1 | ||
|
|
3edc3d7a36 | ||
|
|
2f3f1d7e65 | ||
|
|
71c69ec12c | ||
|
|
dab0947150 | ||
|
|
5488e7d805 | ||
|
|
ca9873cfcb | ||
|
|
65f33c3ef0 | ||
|
|
5eccf9ea6c | ||
|
|
db599b3475 | ||
|
|
47140083dc | ||
|
|
2d179879a0 | ||
|
|
61b8e4ce84 | ||
|
|
97a3776d8e | ||
|
|
990695de7b | ||
|
|
4059c699dc | ||
|
|
e22ce15429 | ||
|
|
452d4e63e0 | ||
|
|
d2a46b4ff8 | ||
|
|
e090c9ee26 | ||
|
|
0d6d3f5828 | ||
|
|
b9662dbd86 | ||
|
|
f5a480fdc4 | ||
|
|
5dce75d29b | ||
|
|
5acde6cfe2 | ||
|
|
c6926eb2f9 | ||
|
|
1930ab7ed6 | ||
|
|
3edc979384 | ||
|
|
cde7b2b022 | ||
|
|
0d88532f3d | ||
|
|
1fb14b7463 | ||
|
|
a66d00a535 |
@@ -33,6 +33,8 @@ skip_list:
|
|||||||
# Disable run-once check with free strategy
|
# Disable run-once check with free strategy
|
||||||
# (Disabled in June 2023 after ansible upgrade; FIXME)
|
# (Disabled in June 2023 after ansible upgrade; FIXME)
|
||||||
- 'run-once[task]'
|
- 'run-once[task]'
|
||||||
|
|
||||||
|
- 'jinja[spacing]'
|
||||||
exclude_paths:
|
exclude_paths:
|
||||||
# Generated files
|
# Generated files
|
||||||
- tests/files/custom_cni/cilium.yaml
|
- tests/files/custom_cni/cilium.yaml
|
||||||
|
|||||||
6
.github/workflows/auto-label-os.yml
vendored
6
.github/workflows/auto-label-os.yml
vendored
@@ -13,16 +13,16 @@ jobs:
|
|||||||
issues: write
|
issues: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
|
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
|
||||||
|
|
||||||
- name: Parse issue form
|
- name: Parse issue form
|
||||||
uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e
|
uses: stefanbuck/github-issue-parser@10dcc54158ba4c137713d9d69d70a2da63b6bda3
|
||||||
id: issue-parser
|
id: issue-parser
|
||||||
with:
|
with:
|
||||||
template-path: .github/ISSUE_TEMPLATE/bug-report.yaml
|
template-path: .github/ISSUE_TEMPLATE/bug-report.yaml
|
||||||
|
|
||||||
- name: Set labels based on OS field
|
- name: Set labels based on OS field
|
||||||
uses: redhat-plumbers-in-action/advanced-issue-labeler@e38e6809c5420d038eed380d49ee9a6ca7c92dbf
|
uses: redhat-plumbers-in-action/advanced-issue-labeler@b80ae64e3e156e9c111b075bfa04b295d54e8e2e
|
||||||
with:
|
with:
|
||||||
issue-form: ${{ steps.issue-parser.outputs.jsonString }}
|
issue-form: ${{ steps.issue-parser.outputs.jsonString }}
|
||||||
section: os
|
section: os
|
||||||
|
|||||||
@@ -13,14 +13,14 @@ jobs:
|
|||||||
outputs:
|
outputs:
|
||||||
branches: ${{ steps.get-branches.outputs.data }}
|
branches: ${{ steps.get-branches.outputs.data }}
|
||||||
steps:
|
steps:
|
||||||
- uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110
|
- uses: octokit/graphql-action@ddde8ebb2493e79f390e6449c725c21663a67505
|
||||||
id: get-branches
|
id: get-branches
|
||||||
with:
|
with:
|
||||||
query: |
|
query: |
|
||||||
query get_release_branches($owner:String!, $name:String!) {
|
query get_release_branches($owner:String!, $name:String!) {
|
||||||
repository(owner:$owner, name:$name) {
|
repository(owner:$owner, name:$name) {
|
||||||
refs(refPrefix: "refs/heads/",
|
refs(refPrefix: "refs/heads/",
|
||||||
first: 1, # TODO increment once we have release branch with the new checksums format
|
first: 3,
|
||||||
query: "release-",
|
query: "release-",
|
||||||
orderBy: {
|
orderBy: {
|
||||||
field: ALPHABETICAL,
|
field: ALPHABETICAL,
|
||||||
|
|||||||
6
.github/workflows/upgrade-patch-versions.yml
vendored
6
.github/workflows/upgrade-patch-versions.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
update-patch-versions:
|
update-patch-versions:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
|
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
|
||||||
with:
|
with:
|
||||||
ref: ${{ inputs.branch }}
|
ref: ${{ inputs.branch }}
|
||||||
- uses: actions/setup-python@v6
|
- uses: actions/setup-python@v6
|
||||||
@@ -22,14 +22,14 @@ jobs:
|
|||||||
- run: update-hashes
|
- run: update-hashes
|
||||||
env:
|
env:
|
||||||
API_KEY: ${{ secrets.GITHUB_TOKEN }}
|
API_KEY: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- uses: actions/cache@v4
|
- uses: actions/cache@v5
|
||||||
with:
|
with:
|
||||||
key: pre-commit-hook-propagate
|
key: pre-commit-hook-propagate
|
||||||
path: |
|
path: |
|
||||||
~/.cache/pre-commit
|
~/.cache/pre-commit
|
||||||
- run: pre-commit run --all-files propagate-ansible-variables
|
- run: pre-commit run --all-files propagate-ansible-variables
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
|
- uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0
|
||||||
with:
|
with:
|
||||||
commit-message: Patch versions updates
|
commit-message: Patch versions updates
|
||||||
title: Patch versions updates - ${{ inputs.branch }}
|
title: Patch versions updates - ${{ inputs.branch }}
|
||||||
|
|||||||
@@ -41,8 +41,10 @@ pr:
|
|||||||
- debian12-cilium
|
- debian12-cilium
|
||||||
- debian13-cilium
|
- debian13-cilium
|
||||||
- fedora39-kube-router
|
- fedora39-kube-router
|
||||||
|
- fedora41-kube-router
|
||||||
- openeuler24-calico
|
- openeuler24-calico
|
||||||
- rockylinux9-cilium
|
- rockylinux9-cilium
|
||||||
|
- rockylinux10-cilium
|
||||||
- ubuntu22-calico-all-in-one
|
- ubuntu22-calico-all-in-one
|
||||||
- ubuntu22-calico-all-in-one-upgrade
|
- ubuntu22-calico-all-in-one-upgrade
|
||||||
- ubuntu24-calico-etcd-datastore
|
- ubuntu24-calico-etcd-datastore
|
||||||
@@ -90,6 +92,8 @@ pr_full:
|
|||||||
- debian12-custom-cni-helm
|
- debian12-custom-cni-helm
|
||||||
- fedora39-calico-swap-selinux
|
- fedora39-calico-swap-selinux
|
||||||
- fedora39-crio
|
- fedora39-crio
|
||||||
|
- fedora41-calico-swap-selinux
|
||||||
|
- fedora41-crio
|
||||||
- ubuntu24-calico-ha-wireguard
|
- ubuntu24-calico-ha-wireguard
|
||||||
- ubuntu24-flannel-ha
|
- ubuntu24-flannel-ha
|
||||||
- ubuntu24-flannel-ha-once
|
- ubuntu24-flannel-ha-once
|
||||||
@@ -127,6 +131,7 @@ pr_extended:
|
|||||||
- debian12-docker
|
- debian12-docker
|
||||||
- debian13-calico
|
- debian13-calico
|
||||||
- rockylinux9-calico
|
- rockylinux9-calico
|
||||||
|
- rockylinux10-calico
|
||||||
- ubuntu22-all-in-one-docker
|
- ubuntu22-all-in-one-docker
|
||||||
- ubuntu24-all-in-one-docker
|
- ubuntu24-all-in-one-docker
|
||||||
- ubuntu24-calico-all-in-one
|
- ubuntu24-calico-all-in-one
|
||||||
@@ -148,6 +153,7 @@ periodic:
|
|||||||
- debian12-cilium-svc-proxy
|
- debian12-cilium-svc-proxy
|
||||||
- fedora39-calico-selinux
|
- fedora39-calico-selinux
|
||||||
- fedora40-docker-calico
|
- fedora40-docker-calico
|
||||||
|
- fedora41-calico-selinux
|
||||||
- ubuntu24-calico-etcd-kubeadm-upgrade-ha
|
- ubuntu24-calico-etcd-kubeadm-upgrade-ha
|
||||||
- ubuntu24-calico-ha-recover
|
- ubuntu24-calico-ha-recover
|
||||||
- ubuntu24-calico-ha-recover-noquorum
|
- ubuntu24-calico-ha-recover-noquorum
|
||||||
|
|||||||
@@ -88,11 +88,10 @@ tf-elastx_cleanup:
|
|||||||
- ./scripts/openstack-cleanup/main.py
|
- ./scripts/openstack-cleanup/main.py
|
||||||
allow_failure: true
|
allow_failure: true
|
||||||
|
|
||||||
tf-elastx_ubuntu20-calico:
|
tf-elastx_ubuntu24-calico:
|
||||||
extends: .terraform_apply
|
extends: .terraform_apply
|
||||||
stage: deploy-part1
|
stage: deploy-part1
|
||||||
when: on_success
|
when: on_success
|
||||||
allow_failure: true
|
|
||||||
variables:
|
variables:
|
||||||
<<: *elastx_variables
|
<<: *elastx_variables
|
||||||
PROVIDER: openstack
|
PROVIDER: openstack
|
||||||
@@ -115,5 +114,5 @@ tf-elastx_ubuntu20-calico:
|
|||||||
TF_VAR_az_list_node: '["sto1"]'
|
TF_VAR_az_list_node: '["sto1"]'
|
||||||
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
||||||
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
||||||
TF_VAR_image: ubuntu-20.04-server-latest
|
TF_VAR_image: ubuntu-24.04-server-latest
|
||||||
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ vagrant:
|
|||||||
policy: pull-push # TODO: change to "pull" when not on main
|
policy: pull-push # TODO: change to "pull" when not on main
|
||||||
stage: deploy-extended
|
stage: deploy-extended
|
||||||
rules:
|
rules:
|
||||||
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
|
- if: $PR_LABELS =~ /.*ci-full.*/
|
||||||
when: on_success
|
when: on_success
|
||||||
- if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
|
- if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|||||||
10
Dockerfile
10
Dockerfile
@@ -1,7 +1,7 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
|
# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
|
||||||
FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
|
FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
|
||||||
|
|
||||||
# Some tools like yamllint need this
|
# Some tools like yamllint need this
|
||||||
# Pip needs this as well at the moment to install ansible
|
# Pip needs this as well at the moment to install ansible
|
||||||
@@ -29,14 +29,14 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|||||||
|
|
||||||
RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
|
RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
|
||||||
--mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
|
--mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
|
||||||
pip install --no-compile --no-cache-dir -r requirements.txt \
|
pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt \
|
||||||
&& find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
|
&& find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
|
||||||
|
|
||||||
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||||
|
|
||||||
RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
|
RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
|
||||||
&& curl -L "https://dl.k8s.io/release/v1.33.8/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
|
&& curl -L "https://dl.k8s.io/release/v1.35.0/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
|
||||||
&& echo "$(curl -L "https://dl.k8s.io/release/v1.33.8/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
|
&& echo "$(curl -L "https://dl.k8s.io/release/v1.35.0/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
|
||||||
&& chmod a+x /usr/local/bin/kubectl
|
&& chmod a+x /usr/local/bin/kubectl
|
||||||
|
|
||||||
COPY *.yml ./
|
COPY *.yml ./
|
||||||
|
|||||||
27
README.md
27
README.md
@@ -22,7 +22,7 @@ Ensure you have installed Docker then
|
|||||||
```ShellSession
|
```ShellSession
|
||||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||||
quay.io/kubespray/kubespray:v2.29.0 bash
|
quay.io/kubespray/kubespray:v2.30.0 bash
|
||||||
# Inside the container you may now run the kubespray playbooks:
|
# Inside the container you may now run the kubespray playbooks:
|
||||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||||
```
|
```
|
||||||
@@ -89,13 +89,13 @@ vagrant up
|
|||||||
- **Flatcar Container Linux by Kinvolk**
|
- **Flatcar Container Linux by Kinvolk**
|
||||||
- **Debian** Bookworm, Bullseye, Trixie
|
- **Debian** Bookworm, Bullseye, Trixie
|
||||||
- **Ubuntu** 22.04, 24.04
|
- **Ubuntu** 22.04, 24.04
|
||||||
- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8)
|
- **CentOS Stream / RHEL** [9, 10](docs/operating_systems/rhel.md#rhel-8)
|
||||||
- **Fedora** 39, 40
|
- **Fedora** 39, 40, 41
|
||||||
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
|
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
|
||||||
- **openSUSE** Leap 15.x/Tumbleweed
|
- **openSUSE** Leap 15.x/Tumbleweed
|
||||||
- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
|
- **Oracle Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
|
||||||
- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
|
- **Alma Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
|
||||||
- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
|
- **Rocky Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8) (experimental in 10: see [Rocky Linux 10 notes](docs/operating_systems/rhel.md#rocky-linux-10))
|
||||||
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
|
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
|
||||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
|
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
|
||||||
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
|
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
|
||||||
@@ -111,24 +111,23 @@ Note:
|
|||||||
<!-- BEGIN ANSIBLE MANAGED BLOCK -->
|
<!-- BEGIN ANSIBLE MANAGED BLOCK -->
|
||||||
|
|
||||||
- Core
|
- Core
|
||||||
- [kubernetes](https://github.com/kubernetes/kubernetes) 1.33.8
|
- [kubernetes](https://github.com/kubernetes/kubernetes) 1.35.0
|
||||||
- [etcd](https://github.com/etcd-io/etcd) 3.5.26
|
- [etcd](https://github.com/etcd-io/etcd) 3.5.26
|
||||||
- [docker](https://www.docker.com/) 28.3
|
- [docker](https://www.docker.com/) 28.3
|
||||||
- [containerd](https://containerd.io/) 2.1.6
|
- [containerd](https://containerd.io/) 2.2.1
|
||||||
- [cri-o](http://cri-o.io/) 1.33.9 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
- [cri-o](http://cri-o.io/) 1.35.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||||
- Network Plugin
|
- Network Plugin
|
||||||
- [cni-plugins](https://github.com/containernetworking/plugins) 1.8.0
|
- [cni-plugins](https://github.com/containernetworking/plugins) 1.8.0
|
||||||
- [calico](https://github.com/projectcalico/calico) 3.30.6
|
- [calico](https://github.com/projectcalico/calico) 3.30.6
|
||||||
- [cilium](https://github.com/cilium/cilium) 1.18.5
|
- [cilium](https://github.com/cilium/cilium) 1.18.6
|
||||||
- [flannel](https://github.com/flannel-io/flannel) 0.27.3
|
- [flannel](https://github.com/flannel-io/flannel) 0.27.3
|
||||||
- [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
|
- [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
|
||||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
|
- [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
|
||||||
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.2.2
|
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.2.2
|
||||||
- [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
|
- [kube-vip](https://github.com/kube-vip/kube-vip) 1.0.3
|
||||||
- Application
|
- Application
|
||||||
- [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
|
- [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
|
||||||
- [coredns](https://github.com/coredns/coredns) 1.12.0
|
- [coredns](https://github.com/coredns/coredns) 1.12.4
|
||||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.13.3
|
|
||||||
- [argocd](https://argoproj.github.io/) 2.14.5
|
- [argocd](https://argoproj.github.io/) 2.14.5
|
||||||
- [helm](https://helm.sh/) 3.18.4
|
- [helm](https://helm.sh/) 3.18.4
|
||||||
- [metallb](https://metallb.universe.tf/) 0.13.9
|
- [metallb](https://metallb.universe.tf/) 0.13.9
|
||||||
@@ -202,8 +201,6 @@ See also [Network checker](docs/advanced/netcheck.md).
|
|||||||
|
|
||||||
## Ingress Plugins
|
## Ingress Plugins
|
||||||
|
|
||||||
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
|
|
||||||
|
|
||||||
- [metallb](docs/ingress/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
|
- [metallb](docs/ingress/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
|
||||||
|
|
||||||
## Community docs and resources
|
## Community docs and resources
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
|
|||||||
1. The release issue is closed
|
1. The release issue is closed
|
||||||
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||||
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
||||||
1. Create/Update Issue for upgradeing kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
|
1. Create/Update Issue for upgrading kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
|
||||||
|
|
||||||
## Major/minor releases and milestones
|
## Major/minor releases and milestones
|
||||||
|
|
||||||
|
|||||||
2
Vagrantfile
vendored
2
Vagrantfile
vendored
@@ -35,6 +35,8 @@ SUPPORTED_OS = {
|
|||||||
"fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"},
|
"fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"},
|
||||||
"fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"},
|
"fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"},
|
||||||
"fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"},
|
"fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"},
|
||||||
|
"fedora41" => {box: "fedora/41-cloud-base", user: "vagrant"},
|
||||||
|
"fedora41-bento" => {box: "bento/fedora-41", user: "vagrant"},
|
||||||
"opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"},
|
"opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"},
|
||||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||||
|
|||||||
9
contrib/collection.sh
Executable file
9
contrib/collection.sh
Executable file
@@ -0,0 +1,9 @@
|
|||||||
|
#!/bin/bash -eux
|
||||||
|
# Install collection from source assuming dependencies are present.
|
||||||
|
# Run in SemaphoreUI this bash script can install Kubespray from the repo
|
||||||
|
NAMESPACE=kubernetes_sigs
|
||||||
|
COLLECTION=kubespray
|
||||||
|
MY_VER=$(grep '^version:' galaxy.yml|cut -d: -f2|sed 's/ //')
|
||||||
|
|
||||||
|
ansible-galaxy collection build --force --output-path .
|
||||||
|
ansible-galaxy collection install --offline --force $NAMESPACE-$COLLECTION-$MY_VER.tar.gz
|
||||||
@@ -20,7 +20,6 @@ function create_container_image_tar() {
|
|||||||
|
|
||||||
kubectl describe cronjobs,jobs,pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq > "${IMAGES}"
|
kubectl describe cronjobs,jobs,pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq > "${IMAGES}"
|
||||||
# NOTE: etcd and pause cannot be seen as pods.
|
# NOTE: etcd and pause cannot be seen as pods.
|
||||||
# The pause image is used for --pod-infra-container-image option of kubelet.
|
|
||||||
kubectl cluster-info dump | grep -E "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g >> "${IMAGES}"
|
kubectl cluster-info dump | grep -E "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g >> "${IMAGES}"
|
||||||
else
|
else
|
||||||
echo "Getting images from file \"${IMAGES_FROM_FILE}\""
|
echo "Getting images from file \"${IMAGES_FROM_FILE}\""
|
||||||
|
|||||||
@@ -281,9 +281,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
|||||||
|`k8s_allowed_remote_ips_ipv6` | List of IPv6 CIDR allowed to initiate a SSH connection, empty by default |
|
|`k8s_allowed_remote_ips_ipv6` | List of IPv6 CIDR allowed to initiate a SSH connection, empty by default |
|
||||||
|`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default |
|
|`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default |
|
||||||
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|
||||||
|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}]` by default |
|
|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}, { "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
|
||||||
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|
||||||
|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default |
|
|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, `[{ "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
|
||||||
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|
||||||
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|
||||||
|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
|
|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
|
||||||
|
|||||||
@@ -1006,7 +1006,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
|||||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||||
count = var.number_of_gfs_nodes_no_floating_ip
|
count = var.number_of_gfs_nodes_no_floating_ip
|
||||||
availability_zone = element(var.az_list, count.index)
|
availability_zone = element(var.az_list, count.index)
|
||||||
image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
|
image_id = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
|
||||||
flavor_id = var.flavor_gfs_node
|
flavor_id = var.flavor_gfs_node
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
|
|
||||||
@@ -1078,7 +1078,7 @@ resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
|
|||||||
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
resource "openstack_blockstorage_volume_v3" "glusterfs_volume" {
|
||||||
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
|
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
|
||||||
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
|
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
|
||||||
description = "Non-ephemeral volume for GlusterFS"
|
description = "Non-ephemeral volume for GlusterFS"
|
||||||
@@ -1088,5 +1088,5 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
|||||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||||
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
|
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
|
||||||
instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)
|
instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)
|
||||||
volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)
|
volume_id = element(openstack_blockstorage_volume_v3.glusterfs_volume.*.id, count.index)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -271,7 +271,14 @@ variable "master_allowed_ports" {
|
|||||||
variable "master_allowed_ports_ipv6" {
|
variable "master_allowed_ports_ipv6" {
|
||||||
type = list(any)
|
type = list(any)
|
||||||
|
|
||||||
default = []
|
default = [
|
||||||
|
{
|
||||||
|
"protocol" = "ipv6-icmp"
|
||||||
|
"port_range_min" = 0
|
||||||
|
"port_range_max" = 0
|
||||||
|
"remote_ip_prefix" = "::/0"
|
||||||
|
},
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "worker_allowed_ports" {
|
variable "worker_allowed_ports" {
|
||||||
@@ -297,6 +304,12 @@ variable "worker_allowed_ports_ipv6" {
|
|||||||
"port_range_max" = 32767
|
"port_range_max" = 32767
|
||||||
"remote_ip_prefix" = "::/0"
|
"remote_ip_prefix" = "::/0"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"protocol" = "ipv6-icmp"
|
||||||
|
"port_range_min" = 0
|
||||||
|
"port_range_max" = 0
|
||||||
|
"remote_ip_prefix" = "::/0"
|
||||||
|
},
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ terraform {
|
|||||||
required_providers {
|
required_providers {
|
||||||
upcloud = {
|
upcloud = {
|
||||||
source = "UpCloudLtd/upcloud"
|
source = "UpCloudLtd/upcloud"
|
||||||
version = "~>5.9.0"
|
version = "~>5.29.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
required_version = ">= 0.13"
|
required_version = ">= 0.13"
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ terraform {
|
|||||||
required_providers {
|
required_providers {
|
||||||
upcloud = {
|
upcloud = {
|
||||||
source = "UpCloudLtd/upcloud"
|
source = "UpCloudLtd/upcloud"
|
||||||
version = "~>5.9.0"
|
version = "~>5.29.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
required_version = ">= 0.13"
|
required_version = ">= 0.13"
|
||||||
|
|||||||
@@ -1,5 +1,13 @@
|
|||||||
# Cilium
|
# Cilium
|
||||||
|
|
||||||
|
## Unprivileged agent configuration
|
||||||
|
|
||||||
|
By default, Cilium is installed with `securityContext.privileged: false`. You need to set the `kube_owner` variable to `root` in the inventory:
|
||||||
|
|
||||||
|
```yml
|
||||||
|
kube_owner: root
|
||||||
|
```
|
||||||
|
|
||||||
## IP Address Management (IPAM)
|
## IP Address Management (IPAM)
|
||||||
|
|
||||||
IP Address Management (IPAM) is responsible for the allocation and management of IP addresses used by network endpoints (container and others) managed by Cilium. The default mode is "Cluster Scope".
|
IP Address Management (IPAM) is responsible for the allocation and management of IP addresses used by network endpoints (container and others) managed by Cilium. The default mode is "Cluster Scope".
|
||||||
@@ -237,7 +245,7 @@ cilium_operator_extra_volume_mounts:
|
|||||||
## Choose Cilium version
|
## Choose Cilium version
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
cilium_version: "1.18.5"
|
cilium_version: "1.18.6"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Add variable to config
|
## Add variable to config
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
|
|||||||
|
|
||||||
* Disable nodelocaldns
|
* Disable nodelocaldns
|
||||||
|
|
||||||
The nodelocal dns IP is not reacheable.
|
The nodelocal dns IP is not reachable.
|
||||||
|
|
||||||
Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml`
|
Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml`
|
||||||
|
|
||||||
|
|||||||
@@ -65,9 +65,8 @@ In kubespray, the default runtime name is "runc", and it can be configured with
|
|||||||
containerd_runc_runtime:
|
containerd_runc_runtime:
|
||||||
name: runc
|
name: runc
|
||||||
type: "io.containerd.runc.v2"
|
type: "io.containerd.runc.v2"
|
||||||
engine: ""
|
|
||||||
root: ""
|
|
||||||
options:
|
options:
|
||||||
|
Root: ""
|
||||||
SystemdCgroup: "false"
|
SystemdCgroup: "false"
|
||||||
BinaryName: /usr/local/bin/my-runc
|
BinaryName: /usr/local/bin/my-runc
|
||||||
base_runtime_spec: cri-base.json
|
base_runtime_spec: cri-base.json
|
||||||
|
|||||||
@@ -80,7 +80,7 @@ The `crio_remap_enable` configures the `/etc/subuid` and `/etc/subgid` files to
|
|||||||
By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.
|
By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.
|
||||||
|
|
||||||
The `crio_default_capabilities` configure the default containers capabilities for the crio.
|
The `crio_default_capabilities` configure the default containers capabilities for the crio.
|
||||||
Defaults capabilties are:
|
Defaults capabilities are:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
crio_default_capabilities:
|
crio_default_capabilities:
|
||||||
|
|||||||
2
docs/_sidebar.md
generated
2
docs/_sidebar.md
generated
@@ -6,7 +6,6 @@
|
|||||||
* [Downloads](/docs/advanced/downloads.md)
|
* [Downloads](/docs/advanced/downloads.md)
|
||||||
* [Gcp-lb](/docs/advanced/gcp-lb.md)
|
* [Gcp-lb](/docs/advanced/gcp-lb.md)
|
||||||
* [Kubernetes-reliability](/docs/advanced/kubernetes-reliability.md)
|
* [Kubernetes-reliability](/docs/advanced/kubernetes-reliability.md)
|
||||||
* [Mitogen](/docs/advanced/mitogen.md)
|
|
||||||
* [Netcheck](/docs/advanced/netcheck.md)
|
* [Netcheck](/docs/advanced/netcheck.md)
|
||||||
* [Ntp](/docs/advanced/ntp.md)
|
* [Ntp](/docs/advanced/ntp.md)
|
||||||
* [Proxy](/docs/advanced/proxy.md)
|
* [Proxy](/docs/advanced/proxy.md)
|
||||||
@@ -58,7 +57,6 @@
|
|||||||
* [Setting-up-your-first-cluster](/docs/getting_started/setting-up-your-first-cluster.md)
|
* [Setting-up-your-first-cluster](/docs/getting_started/setting-up-your-first-cluster.md)
|
||||||
* Ingress
|
* Ingress
|
||||||
* [Alb Ingress Controller](/docs/ingress/alb_ingress_controller.md)
|
* [Alb Ingress Controller](/docs/ingress/alb_ingress_controller.md)
|
||||||
* [Ingress Nginx](/docs/ingress/ingress_nginx.md)
|
|
||||||
* [Kube-vip](/docs/ingress/kube-vip.md)
|
* [Kube-vip](/docs/ingress/kube-vip.md)
|
||||||
* [Metallb](/docs/ingress/metallb.md)
|
* [Metallb](/docs/ingress/metallb.md)
|
||||||
* Operating Systems
|
* Operating Systems
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
- [Create New TLS Root CA Certificate and Key](#create-new-tls-root-ca-certificate-and-key)
|
- [Create New TLS Root CA Certificate and Key](#create-new-tls-root-ca-certificate-and-key)
|
||||||
- [Install Cloudflare PKI/TLS `cfssl` Toolkit.](#install-cloudflare-pkitls-cfssl-toolkit)
|
- [Install Cloudflare PKI/TLS `cfssl` Toolkit.](#install-cloudflare-pkitls-cfssl-toolkit)
|
||||||
- [Create Root Certificate Authority (CA) Configuration File](#create-root-certificate-authority-ca-configuration-file)
|
- [Create Root Certificate Authority (CA) Configuration File](#create-root-certificate-authority-ca-configuration-file)
|
||||||
- [Create Certficate Signing Request (CSR) Configuration File](#create-certficate-signing-request-csr-configuration-file)
|
- [Create Certificate Signing Request (CSR) Configuration File](#create-certificate-signing-request-csr-configuration-file)
|
||||||
- [Create TLS Root CA Certificate and Key](#create-tls-root-ca-certificate-and-key)
|
- [Create TLS Root CA Certificate and Key](#create-tls-root-ca-certificate-and-key)
|
||||||
|
|
||||||
Cert-Manager is a native Kubernetes certificate management controller. It can help with issuing certificates from a variety of sources, such as Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, or self signed. It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry.
|
Cert-Manager is a native Kubernetes certificate management controller. It can help with issuing certificates from a variety of sources, such as Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, or self signed. It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry.
|
||||||
@@ -30,14 +30,7 @@ If you don't have a TLS Root CA certificate and key available, you can create th
|
|||||||
|
|
||||||
A common use-case for cert-manager is requesting TLS signed certificates to secure your ingress resources. This can be done by simply adding annotations to your Ingress resources and cert-manager will facilitate creating the Certificate resource for you. A small sub-component of cert-manager, ingress-shim, is responsible for this.
|
A common use-case for cert-manager is requesting TLS signed certificates to secure your ingress resources. This can be done by simply adding annotations to your Ingress resources and cert-manager will facilitate creating the Certificate resource for you. A small sub-component of cert-manager, ingress-shim, is responsible for this.
|
||||||
|
|
||||||
To enable the Nginx Ingress controller as part of your Kubespray deployment, simply edit your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and set `ingress_nginx_enabled` to true.
|
For example, if you're using the Traefik ingress controller, you can secure the Prometheus ingress by adding the annotation `cert-manager.io/cluster-issuer: ca-issuer` and the `spec.tls` section to the `Ingress` resource definition.
|
||||||
|
|
||||||
```ini
|
|
||||||
# Nginx ingress controller deployment
|
|
||||||
ingress_nginx_enabled: true
|
|
||||||
```
|
|
||||||
|
|
||||||
For example, if you're using the Nginx ingress controller, you can secure the Prometheus ingress by adding the annotation `cert-manager.io/cluster-issuer: ca-issuer` and the `spec.tls` section to the `Ingress` resource definition.
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: networking.k8s.io/v1
|
apiVersion: networking.k8s.io/v1
|
||||||
@@ -48,9 +41,9 @@ metadata:
|
|||||||
labels:
|
labels:
|
||||||
prometheus: k8s
|
prometheus: k8s
|
||||||
annotations:
|
annotations:
|
||||||
kubernetes.io/ingress.class: "nginx"
|
|
||||||
cert-manager.io/cluster-issuer: ca-issuer
|
cert-manager.io/cluster-issuer: ca-issuer
|
||||||
spec:
|
spec:
|
||||||
|
ingressClassName: "traefik"
|
||||||
tls:
|
tls:
|
||||||
- hosts:
|
- hosts:
|
||||||
- prometheus.example.com
|
- prometheus.example.com
|
||||||
@@ -72,8 +65,8 @@ Once deployed to your K8s cluster, every 3 months cert-manager will automaticall
|
|||||||
|
|
||||||
Please consult the official upstream documentation:
|
Please consult the official upstream documentation:
|
||||||
|
|
||||||
- [cert-manager Ingress Usage](https://cert-manager.io/v1.5-docs/usage/ingress/)
|
- [cert-manager Ingress Usage](https://cert-manager.io/usage/ingress/)
|
||||||
- [cert-manager Ingress Tutorial](https://cert-manager.io/v1.5-docs/tutorials/acme/ingress/#step-3-assign-a-dns-name)
|
- [cert-manager Ingress Tutorial](https://cert-manager.io/tutorials/acme/ingress/#step-3-assign-a-dns-name)
|
||||||
|
|
||||||
### ACME
|
### ACME
|
||||||
|
|
||||||
@@ -81,12 +74,12 @@ The ACME Issuer type represents a single account registered with the Automated C
|
|||||||
|
|
||||||
Certificates issued by public ACME servers are typically trusted by client’s computers by default. This means that, for example, visiting a website that is backed by an ACME certificate issued for that URL, will be trusted by default by most client’s web browsers. ACME certificates are typically free.
|
Certificates issued by public ACME servers are typically trusted by client’s computers by default. This means that, for example, visiting a website that is backed by an ACME certificate issued for that URL, will be trusted by default by most client’s web browsers. ACME certificates are typically free.
|
||||||
|
|
||||||
- [ACME Configuration](https://cert-manager.io/v1.5-docs/configuration/acme/)
|
- [ACME Configuration](https://cert-manager.io/docs/configuration/acme/)
|
||||||
- [ACME HTTP Validation](https://cert-manager.io/v1.5-docs/tutorials/acme/http-validation/)
|
- [ACME HTTP Validation](https://cert-manager.io/docs/tutorials/acme/http-validation/)
|
||||||
- [HTTP01 Challenges](https://cert-manager.io/v1.5-docs/configuration/acme/http01/)
|
- [HTTP01 Challenges](https://cert-manager.io/docs/configuration/acme/http01/)
|
||||||
- [ACME DNS Validation](https://cert-manager.io/v1.5-docs/tutorials/acme/dns-validation/)
|
- [ACME DNS Validation](https://cert-manager.io/docs/tutorials/acme/dns-validation/)
|
||||||
- [DNS01 Challenges](https://cert-manager.io/v1.5-docs/configuration/acme/dns01/)
|
- [DNS01 Challenges](https://cert-manager.io/docs/configuration/acme/dns01/)
|
||||||
- [ACME FAQ](https://cert-manager.io/v1.5-docs/faq/acme/)
|
- [ACME FAQ](https://cert-manager.io/docs/troubleshooting/acme/)
|
||||||
|
|
||||||
#### ACME With An Internal Certificate Authority
|
#### ACME With An Internal Certificate Authority
|
||||||
|
|
||||||
@@ -134,7 +127,7 @@ $ cat > ca-config.json <<EOF
|
|||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Create Certficate Signing Request (CSR) Configuration File
|
#### Create Certificate Signing Request (CSR) Configuration File
|
||||||
|
|
||||||
The TLS certificate `names` details can be updated to your own specific requirements.
|
The TLS certificate `names` details can be updated to your own specific requirements.
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# GCP Load Balancers for type=LoadBalacer of Kubernetes Services
|
# GCP Load Balancers for type=LoadBalancer of Kubernetes Services
|
||||||
|
|
||||||
> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider`. (except external cloud provider)
|
> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider`. (except external cloud provider)
|
||||||
|
|
||||||
|
|||||||
@@ -1,30 +0,0 @@
|
|||||||
# Mitogen
|
|
||||||
|
|
||||||
*Warning:* Mitogen support is now deprecated in kubespray due to upstream not releasing an updated version to support ansible 4.x (ansible-base 2.11.x) and above. The CI support has been stripped for mitogen and we are no longer validating any support or regressions for it. The supporting mitogen install playbook and integration documentation will be removed in a later version.
|
|
||||||
|
|
||||||
[Mitogen for Ansible](https://mitogen.networkgenomics.com/ansible_detailed.html) allow a 1.25x - 7x speedup and a CPU usage reduction of at least 2x, depending on network conditions, modules executed, and time already spent by targets on useful work. Mitogen cannot improve a module once it is executing, it can only ensure the module executes as quickly as possible.
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```ShellSession
|
|
||||||
ansible-playbook contrib/mitogen/mitogen.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
The above playbook sets the ansible `strategy` and `strategy_plugins` in `ansible.cfg` but you can also enable them if you use your own `ansible.cfg` by setting the environment varialbles:
|
|
||||||
|
|
||||||
```ShellSession
|
|
||||||
export ANSIBLE_STRATEGY=mitogen_linear
|
|
||||||
export ANSIBLE_STRATEGY_PLUGINS=plugins/mitogen/ansible_mitogen/plugins/strategy
|
|
||||||
```
|
|
||||||
|
|
||||||
... or `ansible.cfg` setup:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[defaults]
|
|
||||||
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
|
|
||||||
strategy=mitogen_linear
|
|
||||||
```
|
|
||||||
|
|
||||||
## Limitation
|
|
||||||
|
|
||||||
If you are experiencing problems, please see the [documentation](https://mitogen.networkgenomics.com/ansible_detailed.html#noteworthy-differences).
|
|
||||||
@@ -30,9 +30,9 @@ If the latest version supported according to pip is 6.7.0 it means you are runni
|
|||||||
|
|
||||||
Based on the table below and the available python version for your ansible host you should choose the appropriate ansible version to use with kubespray.
|
Based on the table below and the available python version for your ansible host you should choose the appropriate ansible version to use with kubespray.
|
||||||
|
|
||||||
| Ansible Version | Python Version |
|
| Ansible Version | Python Version |
|
||||||
|-----------------|----------------|
|
|-------------------|----------------|
|
||||||
| >= 2.17.3 | 3.10-3.12 |
|
| >=2.18.0, <2.19.0 | 3.11-3.13 |
|
||||||
|
|
||||||
## Customize Ansible vars
|
## Customize Ansible vars
|
||||||
|
|
||||||
@@ -42,13 +42,10 @@ Kubespray expects users to use one of the following variables sources for settin
|
|||||||
|----------------------------------------|------------------------------------------------------------------------------|
|
|----------------------------------------|------------------------------------------------------------------------------|
|
||||||
| inventory vars | |
|
| inventory vars | |
|
||||||
| - **inventory group_vars** | most used |
|
| - **inventory group_vars** | most used |
|
||||||
| - inventory host_vars | host specifc vars overrides, group_vars is usually more practical |
|
| - inventory host_vars | host specific vars overrides, group_vars is usually more practical |
|
||||||
| **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` |
|
| **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` |
|
||||||
|
|
||||||
[!IMPORTANT]
|
> Extra vars are best used to override kubespray internal variables, for instances, roles/vars/. Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray interface. Thus they can change, disappear, or break stuff unexpectedly.
|
||||||
Extra vars are best used to override kubespray internal variables, for instances, roles/vars/.
|
|
||||||
Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray
|
|
||||||
interface. Thus they can change, disappear, or break stuff unexpectedly.
|
|
||||||
|
|
||||||
## Ansible tags
|
## Ansible tags
|
||||||
|
|
||||||
@@ -81,7 +78,6 @@ The following tags are defined in playbooks:
|
|||||||
| crio | Configuring crio container engine for hosts |
|
| crio | Configuring crio container engine for hosts |
|
||||||
| crun | Configuring crun runtime |
|
| crun | Configuring crun runtime |
|
||||||
| csi-driver | Configuring csi driver |
|
| csi-driver | Configuring csi driver |
|
||||||
| dashboard | Installing and configuring the Kubernetes Dashboard |
|
|
||||||
| dns | Remove dns entries when resetting |
|
| dns | Remove dns entries when resetting |
|
||||||
| docker | Configuring docker engine runtime for hosts |
|
| docker | Configuring docker engine runtime for hosts |
|
||||||
| download | Fetching container images to a delegate host |
|
| download | Fetching container images to a delegate host |
|
||||||
@@ -122,7 +118,7 @@ The following tags are defined in playbooks:
|
|||||||
| metrics_server | Configuring metrics_server |
|
| metrics_server | Configuring metrics_server |
|
||||||
| netchecker | Installing netchecker K8s app |
|
| netchecker | Installing netchecker K8s app |
|
||||||
| network | Configuring networking plugins for K8s |
|
| network | Configuring networking plugins for K8s |
|
||||||
| mounts | Umount kubelet dirs when reseting |
|
| mounts | Umount kubelet dirs when resetting |
|
||||||
| multus | Network plugin multus |
|
| multus | Network plugin multus |
|
||||||
| nginx | Configuring LB for kube-apiserver instances |
|
| nginx | Configuring LB for kube-apiserver instances |
|
||||||
| node | Configuring K8s minion (compute) node role |
|
| node | Configuring K8s minion (compute) node role |
|
||||||
@@ -181,17 +177,13 @@ ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
|
|||||||
|
|
||||||
Note: use `--tags` and `--skip-tags` wisely and only if you're 100% sure what you're doing.
|
Note: use `--tags` and `--skip-tags` wisely and only if you're 100% sure what you're doing.
|
||||||
|
|
||||||
## Mitogen
|
|
||||||
|
|
||||||
Mitogen support is deprecated, please see [mitogen related docs](/docs/advanced/mitogen.md) for usage and reasons for deprecation.
|
|
||||||
|
|
||||||
## Troubleshooting Ansible issues
|
## Troubleshooting Ansible issues
|
||||||
|
|
||||||
Having the wrong version of ansible, ansible collections or python dependencies can cause issue.
|
Having the wrong version of ansible, ansible collections or python dependencies can cause issue.
|
||||||
In particular, Kubespray ship custom modules which Ansible needs to find, for which you should specify [ANSIBLE_LIBRAY](https://docs.ansible.com/ansible/latest/dev_guide/developing_locally.html#adding-a-module-or-plugin-outside-of-a-collection)
|
In particular, Kubespray ship custom modules which Ansible needs to find, for which you should specify [ANSIBLE_LIBRARY](https://docs.ansible.com/ansible/latest/dev_guide/developing_locally.html#adding-a-module-or-plugin-outside-of-a-collection)
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
export ANSIBLE_LIBRAY=<kubespray_dir>/library`
|
export ANSIBLE_LIBRARY=<kubespray_dir>/library`
|
||||||
```
|
```
|
||||||
|
|
||||||
A simple way to ensure you get all the correct version of Ansible is to use
|
A simple way to ensure you get all the correct version of Ansible is to use
|
||||||
@@ -200,11 +192,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
|
|||||||
to access the inventory and SSH key in the container, like this:
|
to access the inventory and SSH key in the container, like this:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git checkout v2.29.0
|
git checkout v2.30.0
|
||||||
docker pull quay.io/kubespray/kubespray:v2.29.0
|
docker pull quay.io/kubespray/kubespray:v2.30.0
|
||||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||||
quay.io/kubespray/kubespray:v2.29.0 bash
|
quay.io/kubespray/kubespray:v2.30.0 bash
|
||||||
# Inside the container you may now run the kubespray playbooks:
|
# Inside the container you may now run the kubespray playbooks:
|
||||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ See [.gitlab-ci.yml](/.gitlab-ci.yml) and the included files for an overview.
|
|||||||
|
|
||||||
## Runners
|
## Runners
|
||||||
|
|
||||||
Kubespray has 2 types of GitLab runners, both deployed on the Kubespray CI cluster (hosted on Oracle Cloud Infrastucture):
|
Kubespray has 2 types of GitLab runners, both deployed on the Kubespray CI cluster (hosted on Oracle Cloud Infrastructure):
|
||||||
|
|
||||||
- pods: use the [gitlab-ci kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes/)
|
- pods: use the [gitlab-ci kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes/)
|
||||||
- vagrant: custom executor running in pods with access to the libvirt socket on the nodes
|
- vagrant: custom executor running in pods with access to the libvirt socket on the nodes
|
||||||
@@ -145,7 +145,6 @@ upstream_dns_servers:
|
|||||||
- 1.0.0.1
|
- 1.0.0.1
|
||||||
|
|
||||||
# Extensions
|
# Extensions
|
||||||
ingress_nginx_enabled: True
|
|
||||||
helm_enabled: True
|
helm_enabled: True
|
||||||
cert_manager_enabled: True
|
cert_manager_enabled: True
|
||||||
metrics_server_enabled: True
|
metrics_server_enabled: True
|
||||||
@@ -156,7 +155,7 @@ kube_feature_gates:
|
|||||||
- "NodeSwap=True"
|
- "NodeSwap=True"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Aditional files
|
## Additional files
|
||||||
|
|
||||||
This section documents additional files used to complete a deployment of the kubespray CI, these files sit on the control-plane node and assume a working kubernetes cluster.
|
This section documents additional files used to complete a deployment of the kubespray CI, these files sit on the control-plane node and assume a working kubernetes cluster.
|
||||||
|
|
||||||
|
|||||||
@@ -13,10 +13,11 @@ debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: |
|
|||||||
debian13 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
debian13 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
||||||
fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora41 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
||||||
flatcar4081 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
flatcar4081 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
openeuler24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
openeuler24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||||
rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|
||||||
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu24 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: |
|
ubuntu24 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: |
|
||||||
|
|
||||||
@@ -31,10 +32,11 @@ debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|||||||
debian13 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian13 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora41 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|
||||||
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|
||||||
@@ -49,9 +51,10 @@ debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|||||||
debian13 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian13 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora39 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora39 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora41 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|
||||||
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|||||||
@@ -83,32 +83,6 @@ authentication. One can get a kubeconfig from kube_control_plane hosts
|
|||||||
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
|
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
|
||||||
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
|
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
|
||||||
|
|
||||||
## Accessing Kubernetes Dashboard
|
|
||||||
|
|
||||||
Supported version is kubernetes-dashboard v2.0.x :
|
|
||||||
|
|
||||||
- Login option : token/kubeconfig by default
|
|
||||||
- Deployed by default in "kube-system" namespace, can be overridden with `dashboard_namespace: kubernetes-dashboard` in inventory,
|
|
||||||
- Only serves over https
|
|
||||||
|
|
||||||
Access is described in [dashboard docs](https://github.com/kubernetes/dashboard/tree/master/docs/user/accessing-dashboard). With kubespray's default deployment in kube-system namespace, instead of kubernetes-dashboard :
|
|
||||||
|
|
||||||
- Proxy URL is <http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#/login>
|
|
||||||
- kubectl commands must be run with "-n kube-system"
|
|
||||||
|
|
||||||
Accessing through Ingress is highly recommended. For proxy access, please note that proxy must listen to [localhost](https://github.com/kubernetes/dashboard/issues/692#issuecomment-220492484) (`proxy --address="x.x.x.x"` will not work)
|
|
||||||
|
|
||||||
For token authentication, guide to create Service Account is provided in [dashboard sample user](https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md) doc. Still take care of default namespace.
|
|
||||||
|
|
||||||
Access can also by achieved via ssh tunnel on a control plane :
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# localhost:8081 will be sent to control-plane-1's own localhost:8081
|
|
||||||
ssh -L8001:localhost:8001 user@control-plane-1
|
|
||||||
sudo -i
|
|
||||||
kubectl proxy
|
|
||||||
```
|
|
||||||
|
|
||||||
## Accessing Kubernetes API
|
## Accessing Kubernetes API
|
||||||
|
|
||||||
The main client of Kubernetes is `kubectl`. It is installed on each kube_control_plane
|
The main client of Kubernetes is `kubectl`. It is installed on each kube_control_plane
|
||||||
|
|||||||
@@ -1,203 +0,0 @@
|
|||||||
# Installation Guide
|
|
||||||
|
|
||||||
## Contents
|
|
||||||
|
|
||||||
- [Prerequisite Generic Deployment Command](#prerequisite-generic-deployment-command)
|
|
||||||
- [Provider Specific Steps](#provider-specific-steps)
|
|
||||||
- [Docker for Mac](#docker-for-mac)
|
|
||||||
- [minikube](#minikube)
|
|
||||||
- [AWS](#aws)
|
|
||||||
- [GCE - GKE](#gce-gke)
|
|
||||||
- [Azure](#azure)
|
|
||||||
- [Bare-metal](#bare-metal)
|
|
||||||
- [Verify installation](#verify-installation)
|
|
||||||
- [Detect installed version](#detect-installed-version)
|
|
||||||
- [Using Helm](#using-helm)
|
|
||||||
|
|
||||||
## Prerequisite Generic Deployment Command
|
|
||||||
|
|
||||||
!!! attention
|
|
||||||
The default configuration watches Ingress object from *all the namespaces*.
|
|
||||||
To change this behavior use the flag `--watch-namespace` to limit the scope to a particular namespace.
|
|
||||||
|
|
||||||
!!! warning
|
|
||||||
If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions.
|
|
||||||
|
|
||||||
!!! attention
|
|
||||||
If you're using GKE you need to initialize your user as a cluster-admin with the following command:
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl create clusterrolebinding cluster-admin-binding \
|
|
||||||
--clusterrole cluster-admin \
|
|
||||||
--user $(gcloud config get-value account)
|
|
||||||
```
|
|
||||||
|
|
||||||
The following **Mandatory Command** is required for all deployments except for AWS. See below for the AWS version.
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.13.3/deploy/static/provider/cloud/deploy.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
### Provider Specific Steps
|
|
||||||
|
|
||||||
There are cloud provider specific yaml files.
|
|
||||||
|
|
||||||
#### Docker for Mac
|
|
||||||
|
|
||||||
Kubernetes is available in Docker for Mac (from [version 18.06.0-ce](https://docs.docker.com/docker-for-mac/release-notes/#stable-releases-of-2018))
|
|
||||||
|
|
||||||
First you need to [enable kubernetes](https://docs.docker.com/docker-for-mac/#kubernetes).
|
|
||||||
|
|
||||||
Then you have to create a service:
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### minikube
|
|
||||||
|
|
||||||
For standard usage:
|
|
||||||
|
|
||||||
```console
|
|
||||||
minikube addons enable ingress
|
|
||||||
```
|
|
||||||
|
|
||||||
For development:
|
|
||||||
|
|
||||||
1. Disable the ingress addon:
|
|
||||||
|
|
||||||
```console
|
|
||||||
minikube addons disable ingress
|
|
||||||
```
|
|
||||||
|
|
||||||
1. Execute `make dev-env`
|
|
||||||
1. Confirm the `nginx-ingress-controller` deployment exists:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ kubectl get pods -n ingress-nginx
|
|
||||||
NAME READY STATUS RESTARTS AGE
|
|
||||||
default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s
|
|
||||||
nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s
|
|
||||||
```
|
|
||||||
|
|
||||||
#### AWS
|
|
||||||
|
|
||||||
In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`.
|
|
||||||
Since Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB)
|
|
||||||
Please check the [elastic load balancing AWS details page](https://aws.amazon.com/elasticloadbalancing/details/)
|
|
||||||
|
|
||||||
##### Elastic Load Balancer - ELB
|
|
||||||
|
|
||||||
This setup requires to choose in which layer (L4 or L7) we want to configure the Load Balancer:
|
|
||||||
|
|
||||||
- [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): Use an Network Load Balancer (NLB) with TCP as the listener protocol for ports 80 and 443.
|
|
||||||
- [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): Use an Elastic Load Balancer (ELB) with HTTP as the listener protocol for port 80 and terminate TLS in the ELB
|
|
||||||
|
|
||||||
For L4:
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/deploy.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
For L7:
|
|
||||||
|
|
||||||
Change the value of `service.beta.kubernetes.io/aws-load-balancer-ssl-cert` in the file `provider/aws/deploy-tls-termination.yaml` replacing the dummy id with a valid one. The dummy value is `"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"`
|
|
||||||
|
|
||||||
Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the [ELB Idle Timeouts section](#elb-idle-timeouts) for additional information. If a change is required, users will need to update the value of `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` in `provider/aws/deploy-tls-termination.yaml`
|
|
||||||
|
|
||||||
Then execute:
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/deploy-tls-termination.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
This example creates an ELB with just two listeners, one in port 80 and another in port 443
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
##### ELB Idle Timeouts
|
|
||||||
|
|
||||||
In some scenarios users will need to modify the value of the ELB idle timeout.
|
|
||||||
Users need to ensure the idle timeout is less than the [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) that is configured for NGINX.
|
|
||||||
By default NGINX `keepalive_timeout` is set to `75s`.
|
|
||||||
|
|
||||||
The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) has been modified,
|
|
||||||
in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be modified to ensure it is less than the `keepalive_timeout` the user has configured.
|
|
||||||
|
|
||||||
*Please Note: An idle timeout of `3600s` is recommended when using WebSockets.*
|
|
||||||
|
|
||||||
More information with regards to idle timeouts for your Load Balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html).
|
|
||||||
|
|
||||||
##### Network Load Balancer (NLB)
|
|
||||||
|
|
||||||
This type of load balancer is supported since v1.10.0 as an ALPHA feature.
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/service-nlb.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### GCE-GKE
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
**Important Note:** proxy protocol is not supported in GCE/GKE
|
|
||||||
|
|
||||||
#### Azure
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Bare-metal
|
|
||||||
|
|
||||||
Using [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport):
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/baremetal/deploy.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
For extended notes regarding deployments on bare-metal, see [Bare-metal considerations](https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/baremetal.md).
|
|
||||||
|
|
||||||
### Verify installation
|
|
||||||
|
|
||||||
To check if the ingress controller pods have started, run the following command:
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch
|
|
||||||
```
|
|
||||||
|
|
||||||
Once the operator pods are running, you can cancel the above command by typing `Ctrl+C`.
|
|
||||||
Now, you are ready to create your first ingress.
|
|
||||||
|
|
||||||
### Detect installed version
|
|
||||||
|
|
||||||
To detect which version of the ingress controller is running, exec into the pod and run `nginx-ingress-controller version` command.
|
|
||||||
|
|
||||||
```console
|
|
||||||
POD_NAMESPACE=ingress-nginx
|
|
||||||
POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/component=controller -o jsonpath='{.items[0].metadata.name}')
|
|
||||||
|
|
||||||
kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using Helm
|
|
||||||
|
|
||||||
NGINX Ingress controller can be installed via [Helm](https://helm.sh/) using the chart [ingress-nginx/ingress-nginx](https://kubernetes.github.io/ingress-nginx).
|
|
||||||
Official documentation is [here](https://kubernetes.github.io/ingress-nginx/deploy/#using-helm)
|
|
||||||
|
|
||||||
To install the chart with the release name `my-nginx`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
|
|
||||||
helm install my-nginx ingress-nginx/ingress-nginx
|
|
||||||
```
|
|
||||||
|
|
||||||
Detect installed version:
|
|
||||||
|
|
||||||
```console
|
|
||||||
POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}')
|
|
||||||
kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version
|
|
||||||
```
|
|
||||||
@@ -21,6 +21,12 @@ metallb_enabled: true
|
|||||||
metallb_speaker_enabled: true
|
metallb_speaker_enabled: true
|
||||||
```
|
```
|
||||||
|
|
||||||
|
By default, MetalLB resources are deployed into the `metallb-system` namespace. You can override this namespace using a variable.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
metallb_namespace: woodenlb-system
|
||||||
|
```
|
||||||
|
|
||||||
By default only the MetalLB BGP speaker is allowed to run on control plane nodes. If you have a single node cluster or a cluster where control plane nodes are also worker nodes, you may need to enable tolerations for the MetalLB controller:
|
By default only the MetalLB BGP speaker is allowed to run on control plane nodes. If you have a single node cluster or a cluster where control plane nodes are also worker nodes, you may need to enable tolerations for the MetalLB controller:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
@@ -35,7 +41,7 @@ metallb_config:
|
|||||||
effect: "NoSchedule"
|
effect: "NoSchedule"
|
||||||
```
|
```
|
||||||
|
|
||||||
If you'd like to set additional nodeSelector and tolerations values, you can do so in the following fasion:
|
If you'd like to set additional nodeSelector and tolerations values, you can do so in the following fashion:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
metallb_config:
|
metallb_config:
|
||||||
|
|||||||
@@ -37,4 +37,12 @@ If you have containers that are using iptables in the host network namespace (`h
|
|||||||
you need to ensure they are using iptables-nft.
|
you need to ensure they are using iptables-nft.
|
||||||
An example how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
|
An example how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
|
||||||
|
|
||||||
The kernel version is lower than the kubenretes 1.32 system validation, please refer to the [kernel requirements](../operations/kernel-requirements.md).
|
The kernel version is lower than the kubernetes 1.32 system validation, please refer to the [kernel requirements](../operations/kernel-requirements.md).
|
||||||
|
|
||||||
|
## Rocky Linux 10
|
||||||
|
|
||||||
|
(Experimental in Kubespray CI)
|
||||||
|
|
||||||
|
The official Rocky Linux 10 cloud image does not include `kernel-module-extra`. Both Kube Proxy and CNI rely on this package, and since it relates to kernel version compatibility (which may require VM reboots, etc.), we haven't found an ideal solution.
|
||||||
|
|
||||||
|
However, some users report that it doesn't affect them (minimal version). Therefore, the Kubespray CI Rocky Linux 10 image is built by Kubespray maintainers using `diskimage-builder`. For detailed methods, please refer to [the comments](https://github.com/kubernetes-sigs/kubespray/pull/12355#issuecomment-3705400093).
|
||||||
|
|||||||
@@ -100,8 +100,6 @@ kubelet_make_iptables_util_chains: true
|
|||||||
kubelet_feature_gates: ["RotateKubeletServerCertificate=true"]
|
kubelet_feature_gates: ["RotateKubeletServerCertificate=true"]
|
||||||
kubelet_seccomp_default: true
|
kubelet_seccomp_default: true
|
||||||
kubelet_systemd_hardening: true
|
kubelet_systemd_hardening: true
|
||||||
# To disable kubelet's staticPodPath (for nodes that don't use static pods like worker nodes)
|
|
||||||
kubelet_static_pod_path: ""
|
|
||||||
# In case you have multiple interfaces in your
|
# In case you have multiple interfaces in your
|
||||||
# control plane nodes and you want to specify the right
|
# control plane nodes and you want to specify the right
|
||||||
# IP addresses, kubelet_secure_addresses allows you
|
# IP addresses, kubelet_secure_addresses allows you
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ kubeadm_ignore_preflight_errors:
|
|||||||
|
|
||||||
The Kernel Version Matrix:
|
The Kernel Version Matrix:
|
||||||
|
|
||||||
| OS Verion | Kernel Verion | Kernel >=4.19 |
|
| OS Version | Kernel Version | Kernel >=4.19 |
|
||||||
|--- | --- | --- |
|
|--- | --- | --- |
|
||||||
| RHEL 9 | 5.14 | :white_check_mark: |
|
| RHEL 9 | 5.14 | :white_check_mark: |
|
||||||
| RHEL 8 | 4.18 | :x: |
|
| RHEL 8 | 4.18 | :x: |
|
||||||
|
|||||||
@@ -31,6 +31,8 @@ That's it.
|
|||||||
|
|
||||||
Append the new host to the inventory and run `cluster.yml`. You can NOT use `scale.yml` for that.
|
Append the new host to the inventory and run `cluster.yml`. You can NOT use `scale.yml` for that.
|
||||||
|
|
||||||
|
**Note:** When adding new control plane nodes, always append them to the end of the `kube_control_plane` group in your inventory. Adding control plane nodes in the first position is not supported and will cause the playbook to fail.
|
||||||
|
|
||||||
### 2) Restart kube-system/nginx-proxy
|
### 2) Restart kube-system/nginx-proxy
|
||||||
|
|
||||||
In all hosts, restart nginx-proxy pod. This pod is a local proxy for the apiserver. Kubespray will update its static config, but it needs to be restarted in order to reload.
|
In all hosts, restart nginx-proxy pod. This pod is a local proxy for the apiserver. Kubespray will update its static config, but it needs to be restarted in order to reload.
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
namespace: kubernetes_sigs
|
namespace: kubernetes_sigs
|
||||||
description: Deploy a production ready Kubernetes cluster
|
description: Deploy a production ready Kubernetes cluster
|
||||||
name: kubespray
|
name: kubespray
|
||||||
version: 2.29.2
|
version: 2.31.0
|
||||||
readme: README.md
|
readme: README.md
|
||||||
authors:
|
authors:
|
||||||
- The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)
|
- The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)
|
||||||
|
|||||||
@@ -38,6 +38,7 @@
|
|||||||
loadSidebar: 'docs/_sidebar.md',
|
loadSidebar: 'docs/_sidebar.md',
|
||||||
repo: 'https://github.com/kubernetes-sigs/kubespray',
|
repo: 'https://github.com/kubernetes-sigs/kubespray',
|
||||||
auto2top: true,
|
auto2top: true,
|
||||||
|
noCompileLinks: ['.*\.ini'],
|
||||||
logo: '/logo/logo-clear.png'
|
logo: '/logo/logo-clear.png'
|
||||||
}
|
}
|
||||||
</script>
|
</script>
|
||||||
|
|||||||
@@ -11,15 +11,15 @@
|
|||||||
# containerd_runc_runtime:
|
# containerd_runc_runtime:
|
||||||
# name: runc
|
# name: runc
|
||||||
# type: "io.containerd.runc.v2"
|
# type: "io.containerd.runc.v2"
|
||||||
# engine: ""
|
# options:
|
||||||
# root: ""
|
# Root: ""
|
||||||
|
|
||||||
# containerd_additional_runtimes:
|
# containerd_additional_runtimes:
|
||||||
# Example for Kata Containers as additional runtime:
|
# Example for Kata Containers as additional runtime:
|
||||||
# - name: kata
|
# - name: kata
|
||||||
# type: "io.containerd.kata.v2"
|
# type: "io.containerd.kata.v2"
|
||||||
# engine: ""
|
# options:
|
||||||
# root: ""
|
# Root: ""
|
||||||
|
|
||||||
# containerd_grpc_max_recv_message_size: 16777216
|
# containerd_grpc_max_recv_message_size: 16777216
|
||||||
# containerd_grpc_max_send_message_size: 16777216
|
# containerd_grpc_max_send_message_size: 16777216
|
||||||
|
|||||||
@@ -1,8 +1,4 @@
|
|||||||
---
|
---
|
||||||
# Kubernetes dashboard
|
|
||||||
# RBAC required. see docs/getting-started.md for access details.
|
|
||||||
# dashboard_enabled: false
|
|
||||||
|
|
||||||
# Helm deployment
|
# Helm deployment
|
||||||
helm_enabled: false
|
helm_enabled: false
|
||||||
|
|
||||||
@@ -67,39 +63,6 @@ local_volume_provisioner_enabled: false
|
|||||||
# Gateway API CRDs
|
# Gateway API CRDs
|
||||||
gateway_api_enabled: false
|
gateway_api_enabled: false
|
||||||
|
|
||||||
# Nginx ingress controller deployment
|
|
||||||
ingress_nginx_enabled: false
|
|
||||||
# ingress_nginx_host_network: false
|
|
||||||
# ingress_nginx_service_type: LoadBalancer
|
|
||||||
# ingress_nginx_service_annotations:
|
|
||||||
# example.io/loadbalancerIPs: 1.2.3.4
|
|
||||||
# ingress_nginx_service_nodeport_http: 30080
|
|
||||||
# ingress_nginx_service_nodeport_https: 30081
|
|
||||||
ingress_publish_status_address: ""
|
|
||||||
# ingress_nginx_nodeselector:
|
|
||||||
# kubernetes.io/os: "linux"
|
|
||||||
# ingress_nginx_tolerations:
|
|
||||||
# - key: "node-role.kubernetes.io/control-plane"
|
|
||||||
# operator: "Equal"
|
|
||||||
# value: ""
|
|
||||||
# effect: "NoSchedule"
|
|
||||||
# ingress_nginx_namespace: "ingress-nginx"
|
|
||||||
# ingress_nginx_insecure_port: 80
|
|
||||||
# ingress_nginx_secure_port: 443
|
|
||||||
# ingress_nginx_configmap:
|
|
||||||
# map-hash-bucket-size: "128"
|
|
||||||
# ssl-protocols: "TLSv1.2 TLSv1.3"
|
|
||||||
# ingress_nginx_configmap_tcp_services:
|
|
||||||
# 9000: "default/example-go:8080"
|
|
||||||
# ingress_nginx_configmap_udp_services:
|
|
||||||
# 53: "kube-system/coredns:53"
|
|
||||||
# ingress_nginx_extra_args:
|
|
||||||
# - --default-ssl-certificate=default/foo-tls
|
|
||||||
# ingress_nginx_termination_grace_period_seconds: 300
|
|
||||||
# ingress_nginx_class: nginx
|
|
||||||
# ingress_nginx_without_class: true
|
|
||||||
# ingress_nginx_default: false
|
|
||||||
|
|
||||||
# ALB ingress controller deployment
|
# ALB ingress controller deployment
|
||||||
ingress_alb_enabled: false
|
ingress_alb_enabled: false
|
||||||
# alb_ingress_aws_region: "us-east-1"
|
# alb_ingress_aws_region: "us-east-1"
|
||||||
|
|||||||
@@ -22,7 +22,8 @@ local_release_dir: "/tmp/releases"
|
|||||||
# Random shifts for retrying failed ops like pushing/downloading
|
# Random shifts for retrying failed ops like pushing/downloading
|
||||||
retry_stagger: 5
|
retry_stagger: 5
|
||||||
|
|
||||||
# This is the user that owns tha cluster installation.
|
# This is the user that owns the cluster installation.
|
||||||
|
# Note: cilium needs to set kube_owner to root https://kubespray.io/#/docs/CNI/cilium?id=unprivileged-agent-configuration
|
||||||
kube_owner: kube
|
kube_owner: kube
|
||||||
|
|
||||||
# This is the group that the cert creation scripts chgrp the
|
# This is the group that the cert creation scripts chgrp the
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
---
|
---
|
||||||
requires_ansible: ">=2.17.3"
|
requires_ansible: ">=2.18.0,<2.19.0"
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
|
# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
|
||||||
FROM ubuntu:jammy-20230308
|
FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
|
||||||
# Some tools like yamllint need this
|
# Some tools like yamllint need this
|
||||||
# Pip needs this as well at the moment to install ansible
|
# Pip needs this as well at the moment to install ansible
|
||||||
# (and potentially other packages)
|
# (and potentially other packages)
|
||||||
@@ -27,14 +27,14 @@ RUN apt update -q \
|
|||||||
ca-certificates \
|
ca-certificates \
|
||||||
curl \
|
curl \
|
||||||
gnupg2 \
|
gnupg2 \
|
||||||
software-properties-common \
|
|
||||||
unzip \
|
unzip \
|
||||||
libvirt-clients \
|
libvirt-clients \
|
||||||
qemu-utils \
|
qemu-utils \
|
||||||
qemu-kvm \
|
qemu-kvm \
|
||||||
dnsmasq \
|
dnsmasq \
|
||||||
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc \
|
||||||
&& add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
|
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
|
||||||
|
$(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | tee /etc/apt/sources.list.d/docker.list \
|
||||||
&& apt update -q \
|
&& apt update -q \
|
||||||
&& apt install --no-install-recommends -yq docker-ce \
|
&& apt install --no-install-recommends -yq docker-ce \
|
||||||
&& apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/*
|
&& apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/*
|
||||||
@@ -44,11 +44,10 @@ ADD ./requirements.txt /kubespray/requirements.txt
|
|||||||
ADD ./tests/requirements.txt /kubespray/tests/requirements.txt
|
ADD ./tests/requirements.txt /kubespray/tests/requirements.txt
|
||||||
|
|
||||||
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
|
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
|
||||||
&& pip install --no-compile --no-cache-dir pip -U \
|
&& pip install --break-system-packages --ignore-installed --no-compile --no-cache-dir pip -U \
|
||||||
&& pip install --no-compile --no-cache-dir -r tests/requirements.txt \
|
&& pip install --break-system-packages --no-compile --no-cache-dir -r tests/requirements.txt \
|
||||||
&& pip install --no-compile --no-cache-dir -r requirements.txt \
|
&& curl -L https://dl.k8s.io/release/v1.35.0/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
|
||||||
&& curl -L https://dl.k8s.io/release/v1.33.8/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
|
&& echo $(curl -L https://dl.k8s.io/release/v1.35.0/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
|
||||||
&& echo $(curl -L https://dl.k8s.io/release/v1.33.8/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
|
|
||||||
&& chmod a+x /usr/local/bin/kubectl \
|
&& chmod a+x /usr/local/bin/kubectl \
|
||||||
# Install Vagrant
|
# Install Vagrant
|
||||||
&& curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
|
&& curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
|
||||||
@@ -56,5 +55,5 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
|
|||||||
&& rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
|
&& rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
|
||||||
&& vagrant plugin install vagrant-libvirt \
|
&& vagrant plugin install vagrant-libvirt \
|
||||||
# Install Kubernetes collections
|
# Install Kubernetes collections
|
||||||
&& pip install --no-compile --no-cache-dir kubernetes \
|
&& pip install --break-system-packages --no-compile --no-cache-dir kubernetes \
|
||||||
&& ansible-galaxy collection install kubernetes.core
|
&& ansible-galaxy collection install kubernetes.core
|
||||||
|
|||||||
@@ -5,8 +5,8 @@
|
|||||||
become: false
|
become: false
|
||||||
run_once: true
|
run_once: true
|
||||||
vars:
|
vars:
|
||||||
minimal_ansible_version: 2.17.3
|
minimal_ansible_version: 2.18.0
|
||||||
maximal_ansible_version: 2.18.0
|
maximal_ansible_version: 2.19.0
|
||||||
tags: always
|
tags: always
|
||||||
tasks:
|
tasks:
|
||||||
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
||||||
|
|||||||
@@ -55,7 +55,7 @@
|
|||||||
- { role: kubernetes-apps/kubelet-csr-approver, tags: kubelet-csr-approver }
|
- { role: kubernetes-apps/kubelet-csr-approver, tags: kubelet-csr-approver }
|
||||||
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
|
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
|
||||||
- { role: kubernetes/node, tags: node }
|
- { role: kubernetes/node, tags: node }
|
||||||
- { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
|
- { role: kubernetes/control-plane, tags: control-plane, upgrade_cluster_setup: true }
|
||||||
- { role: kubernetes/client, tags: client }
|
- { role: kubernetes/client, tags: client }
|
||||||
- { role: kubernetes/node-label, tags: node-label }
|
- { role: kubernetes/node-label, tags: node-label }
|
||||||
- { role: kubernetes/node-taint, tags: node-taint }
|
- { role: kubernetes/node-taint, tags: node-taint }
|
||||||
@@ -100,7 +100,7 @@
|
|||||||
environment: "{{ proxy_disable_env }}"
|
environment: "{{ proxy_disable_env }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray_defaults }
|
- { role: kubespray_defaults }
|
||||||
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
|
- { role: win_nodes/kubernetes_patch, tags: ["control-plane", "win_nodes"] }
|
||||||
|
|
||||||
- name: Install Calico Route Reflector
|
- name: Install Calico Route Reflector
|
||||||
hosts: calico_rr
|
hosts: calico_rr
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
ansible==10.7.0
|
ansible==11.13.0
|
||||||
# Needed for community.crypto module
|
# Needed for community.crypto module
|
||||||
cryptography==46.0.2
|
cryptography==46.0.4
|
||||||
# Needed for jinja2 json_query templating
|
# Needed for jinja2 json_query templating
|
||||||
jmespath==1.0.1
|
jmespath==1.1.0
|
||||||
# Needed for ansible.utils.ipaddr
|
# Needed for ansible.utils.ipaddr
|
||||||
netaddr==1.3.0
|
netaddr==1.3.0
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ platforms:
|
|||||||
vm_memory: 512
|
vm_memory: 512
|
||||||
provisioner:
|
provisioner:
|
||||||
name: ansible
|
name: ansible
|
||||||
|
env:
|
||||||
|
ANSIBLE_ROLES_PATH: ../../../
|
||||||
config_options:
|
config_options:
|
||||||
defaults:
|
defaults:
|
||||||
callbacks_enabled: profile_tasks
|
callbacks_enabled: profile_tasks
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ platforms:
|
|||||||
vm_memory: 512
|
vm_memory: 512
|
||||||
provisioner:
|
provisioner:
|
||||||
name: ansible
|
name: ansible
|
||||||
|
env:
|
||||||
|
ANSIBLE_ROLES_PATH: ../../../
|
||||||
config_options:
|
config_options:
|
||||||
defaults:
|
defaults:
|
||||||
callbacks_enabled: profile_tasks
|
callbacks_enabled: profile_tasks
|
||||||
|
|||||||
@@ -21,6 +21,8 @@ platforms:
|
|||||||
vm_memory: 512
|
vm_memory: 512
|
||||||
provisioner:
|
provisioner:
|
||||||
name: ansible
|
name: ansible
|
||||||
|
env:
|
||||||
|
ANSIBLE_ROLES_PATH: ../../../
|
||||||
config_options:
|
config_options:
|
||||||
defaults:
|
defaults:
|
||||||
callbacks_enabled: profile_tasks
|
callbacks_enabled: profile_tasks
|
||||||
|
|||||||
@@ -13,10 +13,9 @@ containerd_snapshotter: "overlayfs"
|
|||||||
containerd_runc_runtime:
|
containerd_runc_runtime:
|
||||||
name: runc
|
name: runc
|
||||||
type: "io.containerd.runc.v2"
|
type: "io.containerd.runc.v2"
|
||||||
engine: ""
|
|
||||||
root: ""
|
|
||||||
base_runtime_spec: cri-base.json
|
base_runtime_spec: cri-base.json
|
||||||
options:
|
options:
|
||||||
|
Root: ""
|
||||||
SystemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
|
SystemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
|
||||||
BinaryName: "{{ bin_dir }}/runc"
|
BinaryName: "{{ bin_dir }}/runc"
|
||||||
|
|
||||||
@@ -24,8 +23,8 @@ containerd_additional_runtimes: []
|
|||||||
# Example for Kata Containers as additional runtime:
|
# Example for Kata Containers as additional runtime:
|
||||||
# - name: kata
|
# - name: kata
|
||||||
# type: "io.containerd.kata.v2"
|
# type: "io.containerd.kata.v2"
|
||||||
# engine: ""
|
# options:
|
||||||
# root: ""
|
# Root: ""
|
||||||
|
|
||||||
containerd_base_runtime_spec_rlimit_nofile: 65535
|
containerd_base_runtime_spec_rlimit_nofile: 65535
|
||||||
|
|
||||||
@@ -36,8 +35,8 @@ containerd_default_base_runtime_spec_patch:
|
|||||||
hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
|
hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
|
||||||
soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
|
soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
|
||||||
|
|
||||||
# Can help reduce disk usage
|
# Only for containerd < 2.1; discard unpacked layers to save disk space
|
||||||
# https://github.com/containerd/containerd/discussions/6295
|
# https://github.com/containerd/containerd/blob/release/2.1/docs/cri/config.md#image-pull-configuration-since-containerd-v21
|
||||||
containerd_discard_unpacked_layers: true
|
containerd_discard_unpacked_layers: true
|
||||||
|
|
||||||
containerd_base_runtime_specs:
|
containerd_base_runtime_specs:
|
||||||
|
|||||||
@@ -52,8 +52,6 @@ oom_score = {{ containerd_oom_score }}
|
|||||||
{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
|
{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
|
||||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.{{ runtime.name }}]
|
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.{{ runtime.name }}]
|
||||||
runtime_type = "{{ runtime.type }}"
|
runtime_type = "{{ runtime.type }}"
|
||||||
runtime_engine = "{{ runtime.engine }}"
|
|
||||||
runtime_root = "{{ runtime.root }}"
|
|
||||||
{% if runtime.base_runtime_spec is defined %}
|
{% if runtime.base_runtime_spec is defined %}
|
||||||
base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
|
base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -78,7 +76,9 @@ oom_score = {{ containerd_oom_score }}
|
|||||||
|
|
||||||
[plugins."io.containerd.cri.v1.images"]
|
[plugins."io.containerd.cri.v1.images"]
|
||||||
snapshotter = "{{ containerd_snapshotter }}"
|
snapshotter = "{{ containerd_snapshotter }}"
|
||||||
|
{% if containerd_discard_unpacked_layers and containerd_version is version('2.1.0', '<') %}
|
||||||
discard_unpacked_layers = {{ containerd_discard_unpacked_layers | lower }}
|
discard_unpacked_layers = {{ containerd_discard_unpacked_layers | lower }}
|
||||||
|
{% endif %}
|
||||||
image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}"
|
image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}"
|
||||||
[plugins."io.containerd.cri.v1.images".pinned_images]
|
[plugins."io.containerd.cri.v1.images".pinned_images]
|
||||||
sandbox = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
|
sandbox = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
|
||||||
|
|||||||
@@ -25,6 +25,8 @@ provisioner:
|
|||||||
group_vars:
|
group_vars:
|
||||||
all:
|
all:
|
||||||
become: true
|
become: true
|
||||||
|
k8s_cluster:
|
||||||
|
container_manager: docker
|
||||||
playbooks:
|
playbooks:
|
||||||
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
||||||
prepare: ../../../molecule/prepare.yml
|
prepare: ../../../molecule/prepare.yml
|
||||||
|
|||||||
@@ -32,6 +32,8 @@ crio_registry_auth: []
|
|||||||
crio_seccomp_profile: ""
|
crio_seccomp_profile: ""
|
||||||
crio_selinux: "{{ (preinstall_selinux_state == 'enforcing') | lower }}"
|
crio_selinux: "{{ (preinstall_selinux_state == 'enforcing') | lower }}"
|
||||||
crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}"
|
crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}"
|
||||||
|
# Set the pull progress timeout
|
||||||
|
crio_pull_progress_timeout: "10s"
|
||||||
|
|
||||||
# Override system default for storage driver
|
# Override system default for storage driver
|
||||||
# crio_storage_driver: "overlay"
|
# crio_storage_driver: "overlay"
|
||||||
|
|||||||
@@ -2,8 +2,6 @@
|
|||||||
- name: Converge
|
- name: Converge
|
||||||
hosts: all
|
hosts: all
|
||||||
become: true
|
become: true
|
||||||
vars:
|
|
||||||
container_manager: crio
|
|
||||||
roles:
|
roles:
|
||||||
- role: kubespray_defaults
|
- role: kubespray_defaults
|
||||||
- role: container-engine/cri-o
|
- role: container-engine/cri-o
|
||||||
|
|||||||
@@ -41,6 +41,10 @@ provisioner:
|
|||||||
defaults:
|
defaults:
|
||||||
callbacks_enabled: profile_tasks
|
callbacks_enabled: profile_tasks
|
||||||
timeout: 120
|
timeout: 120
|
||||||
|
inventory:
|
||||||
|
group_vars:
|
||||||
|
k8s_cluster:
|
||||||
|
container_manager: crio
|
||||||
playbooks:
|
playbooks:
|
||||||
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
||||||
prepare: ../../../molecule/prepare.yml
|
prepare: ../../../molecule/prepare.yml
|
||||||
|
|||||||
@@ -2,7 +2,6 @@
|
|||||||
- name: Test CRI-O cri
|
- name: Test CRI-O cri
|
||||||
import_playbook: ../../../molecule/test_cri.yml
|
import_playbook: ../../../molecule/test_cri.yml
|
||||||
vars:
|
vars:
|
||||||
container_manager: crio
|
|
||||||
cri_socket: unix:///var/run/crio/crio.sock
|
cri_socket: unix:///var/run/crio/crio.sock
|
||||||
cri_name: cri-o
|
cri_name: cri-o
|
||||||
- name: Test running a container with crun
|
- name: Test running a container with crun
|
||||||
|
|||||||
@@ -348,6 +348,12 @@ signature_policy = "{{ crio_signature_policy }}"
|
|||||||
# ignore; the latter will ignore volumes entirely.
|
# ignore; the latter will ignore volumes entirely.
|
||||||
image_volumes = "mkdir"
|
image_volumes = "mkdir"
|
||||||
|
|
||||||
|
# The timeout for an image pull to make progress until the pull operation gets
|
||||||
|
# canceled. This value will be also used for calculating the pull progress interval
|
||||||
|
# to pull_progress_timeout / 10. Can be set to 0 to disable the timeout as well as
|
||||||
|
# the progress output.
|
||||||
|
pull_progress_timeout = "{{ crio_pull_progress_timeout }}"
|
||||||
|
|
||||||
# The crio.network table containers settings pertaining to the management of
|
# The crio.network table containers settings pertaining to the management of
|
||||||
# CNI plugins.
|
# CNI plugins.
|
||||||
[crio.network]
|
[crio.network]
|
||||||
|
|||||||
@@ -1,22 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Crictl | Download crictl
|
|
||||||
include_tasks: "../../../download/tasks/download_file.yml"
|
|
||||||
vars:
|
|
||||||
download: "{{ download_defaults | combine(downloads.crictl) }}"
|
|
||||||
|
|
||||||
- name: Install crictl config
|
|
||||||
template:
|
|
||||||
src: crictl.yaml.j2
|
|
||||||
dest: /etc/crictl.yaml
|
|
||||||
owner: root
|
|
||||||
mode: "0644"
|
|
||||||
|
|
||||||
- name: Copy crictl binary from download dir
|
|
||||||
copy:
|
|
||||||
src: "{{ local_release_dir }}/crictl"
|
|
||||||
dest: "{{ bin_dir }}/crictl"
|
|
||||||
mode: "0755"
|
|
||||||
remote_src: true
|
|
||||||
notify:
|
|
||||||
- Get crictl completion
|
|
||||||
- Install crictl completion
|
|
||||||
@@ -1,3 +1,22 @@
|
|||||||
---
|
---
|
||||||
- name: Install crictl
|
- name: Crictl | Download crictl
|
||||||
include_tasks: crictl.yml
|
include_tasks: "../../../download/tasks/download_file.yml"
|
||||||
|
vars:
|
||||||
|
download: "{{ download_defaults | combine(downloads.crictl) }}"
|
||||||
|
|
||||||
|
- name: Install crictl config
|
||||||
|
template:
|
||||||
|
src: crictl.yaml.j2
|
||||||
|
dest: /etc/crictl.yaml
|
||||||
|
owner: root
|
||||||
|
mode: "0644"
|
||||||
|
|
||||||
|
- name: Copy crictl binary from download dir
|
||||||
|
copy:
|
||||||
|
src: "{{ local_release_dir }}/crictl"
|
||||||
|
dest: "{{ bin_dir }}/crictl"
|
||||||
|
mode: "0755"
|
||||||
|
remote_src: true
|
||||||
|
notify:
|
||||||
|
- Get crictl completion
|
||||||
|
- Install crictl completion
|
||||||
|
|||||||
@@ -55,7 +55,7 @@
|
|||||||
register: keyserver_task_result
|
register: keyserver_task_result
|
||||||
until: keyserver_task_result is succeeded
|
until: keyserver_task_result is succeeded
|
||||||
retries: 4
|
retries: 4
|
||||||
delay: "{{ retry_stagger | d(3) }}"
|
delay: "{{ retry_stagger }}"
|
||||||
with_items: "{{ docker_repo_key_info.repo_keys }}"
|
with_items: "{{ docker_repo_key_info.repo_keys }}"
|
||||||
environment: "{{ proxy_env }}"
|
environment: "{{ proxy_env }}"
|
||||||
when: ansible_pkg_mgr == 'apt'
|
when: ansible_pkg_mgr == 'apt'
|
||||||
@@ -128,7 +128,7 @@
|
|||||||
register: docker_task_result
|
register: docker_task_result
|
||||||
until: docker_task_result is succeeded
|
until: docker_task_result is succeeded
|
||||||
retries: 4
|
retries: 4
|
||||||
delay: "{{ retry_stagger | d(3) }}"
|
delay: "{{ retry_stagger }}"
|
||||||
notify: Restart docker
|
notify: Restart docker
|
||||||
when:
|
when:
|
||||||
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
|
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
|
||||||
|
|||||||
@@ -21,6 +21,11 @@ provisioner:
|
|||||||
defaults:
|
defaults:
|
||||||
callbacks_enabled: profile_tasks
|
callbacks_enabled: profile_tasks
|
||||||
timeout: 120
|
timeout: 120
|
||||||
|
inventory:
|
||||||
|
group_vars:
|
||||||
|
k8s_cluster:
|
||||||
|
gvisor_enabled: true
|
||||||
|
container_manager: containerd
|
||||||
playbooks:
|
playbooks:
|
||||||
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
||||||
prepare: ../../../molecule/prepare.yml
|
prepare: ../../../molecule/prepare.yml
|
||||||
|
|||||||
@@ -12,11 +12,20 @@
|
|||||||
is_ostree: "{{ ostree.stat.exists }}"
|
is_ostree: "{{ ostree.stat.exists }}"
|
||||||
|
|
||||||
- name: Runc | Uninstall runc package managed by package manager
|
- name: Runc | Uninstall runc package managed by package manager
|
||||||
package:
|
|
||||||
name: "{{ runc_package_name }}"
|
|
||||||
state: absent
|
|
||||||
when:
|
when:
|
||||||
- not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
|
- not is_ostree
|
||||||
|
- ansible_distribution != "Flatcar Container Linux by Kinvolk"
|
||||||
|
- ansible_distribution != "Flatcar"
|
||||||
|
block:
|
||||||
|
- name: Runc | Remove package
|
||||||
|
package:
|
||||||
|
name: "{{ runc_package_name }}"
|
||||||
|
state: absent
|
||||||
|
- name: Runc | Remove orphaned binary
|
||||||
|
file:
|
||||||
|
path: /usr/bin/runc
|
||||||
|
state: absent
|
||||||
|
when: runc_bin_dir != "/usr/bin"
|
||||||
|
|
||||||
- name: Runc | Download runc binary
|
- name: Runc | Download runc binary
|
||||||
include_tasks: "../../../download/tasks/download_file.yml"
|
include_tasks: "../../../download/tasks/download_file.yml"
|
||||||
@@ -29,10 +38,3 @@
|
|||||||
dest: "{{ runc_bin_dir }}/runc"
|
dest: "{{ runc_bin_dir }}/runc"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
remote_src: true
|
remote_src: true
|
||||||
|
|
||||||
- name: Runc | Remove orphaned binary
|
|
||||||
file:
|
|
||||||
path: /usr/bin/runc
|
|
||||||
state: absent
|
|
||||||
when: runc_bin_dir != "/usr/bin"
|
|
||||||
ignore_errors: true # noqa ignore-errors
|
|
||||||
|
|||||||
@@ -21,6 +21,11 @@ provisioner:
|
|||||||
defaults:
|
defaults:
|
||||||
callbacks_enabled: profile_tasks
|
callbacks_enabled: profile_tasks
|
||||||
timeout: 120
|
timeout: 120
|
||||||
|
inventory:
|
||||||
|
group_vars:
|
||||||
|
k8s_cluster:
|
||||||
|
youki_enabled: true
|
||||||
|
container_manager: crio
|
||||||
playbooks:
|
playbooks:
|
||||||
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
|
||||||
prepare: ../../../molecule/prepare.yml
|
prepare: ../../../molecule/prepare.yml
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ dns_nodes_per_replica: 16
|
|||||||
dns_cores_per_replica: 256
|
dns_cores_per_replica: 256
|
||||||
dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas | int > 1 else 'false' }}"
|
dns_prevent_single_point_failure: "{{ 'true' if dns_min_replicas | int > 1 else 'false' }}"
|
||||||
enable_coredns_reverse_dns_lookups: true
|
enable_coredns_reverse_dns_lookups: true
|
||||||
|
coredns_svc_name: "coredns"
|
||||||
coredns_ordinal_suffix: ""
|
coredns_ordinal_suffix: ""
|
||||||
# dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
|
# dns_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
|
||||||
coredns_affinity:
|
coredns_affinity:
|
||||||
@@ -118,29 +119,5 @@ netchecker_agent_log_level: 5
|
|||||||
netchecker_server_log_level: 5
|
netchecker_server_log_level: 5
|
||||||
netchecker_etcd_log_level: info
|
netchecker_etcd_log_level: info
|
||||||
|
|
||||||
# Dashboard
|
|
||||||
dashboard_replicas: 1
|
|
||||||
|
|
||||||
# Namespace for dashboard
|
|
||||||
dashboard_namespace: kube-system
|
|
||||||
|
|
||||||
# Limits for dashboard
|
|
||||||
dashboard_cpu_limit: 100m
|
|
||||||
dashboard_memory_limit: 256M
|
|
||||||
dashboard_cpu_requests: 50m
|
|
||||||
dashboard_memory_requests: 64M
|
|
||||||
|
|
||||||
# Set dashboard_use_custom_certs to true if overriding dashboard_certs_secret_name with a secret that
|
|
||||||
# contains dashboard_tls_key_file and dashboard_tls_cert_file instead of using the initContainer provisioned certs
|
|
||||||
dashboard_use_custom_certs: false
|
|
||||||
dashboard_certs_secret_name: kubernetes-dashboard-certs
|
|
||||||
dashboard_tls_key_file: dashboard.key
|
|
||||||
dashboard_tls_cert_file: dashboard.crt
|
|
||||||
dashboard_master_toleration: true
|
|
||||||
|
|
||||||
# Override dashboard default settings
|
|
||||||
dashboard_token_ttl: 900
|
|
||||||
dashboard_skip_login: false
|
|
||||||
|
|
||||||
# Policy Controllers
|
# Policy Controllers
|
||||||
# policy_controller_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
|
# policy_controller_extra_tolerations: [{effect: NoSchedule, operator: "Exists"}]
|
||||||
|
|||||||
@@ -109,15 +109,3 @@
|
|||||||
- netchecker-server-clusterrolebinding.yml.j2
|
- netchecker-server-clusterrolebinding.yml.j2
|
||||||
- netchecker-server-deployment.yml.j2
|
- netchecker-server-deployment.yml.j2
|
||||||
- netchecker-server-svc.yml.j2
|
- netchecker-server-svc.yml.j2
|
||||||
|
|
||||||
- name: Kubernetes Apps | Dashboard
|
|
||||||
command:
|
|
||||||
cmd: "{{ kubectl_apply_stdin }}"
|
|
||||||
stdin: "{{ lookup('template', 'dashboard.yml.j2') }}"
|
|
||||||
delegate_to: "{{ groups['kube_control_plane'][0] }}"
|
|
||||||
run_once: true
|
|
||||||
vars:
|
|
||||||
k8s_namespace: "{{ dashboard_namespace }}"
|
|
||||||
when: dashboard_enabled
|
|
||||||
tags:
|
|
||||||
- dashboard
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
name: coredns{{ coredns_ordinal_suffix }}
|
name: {{ coredns_svc_name }}{{ coredns_ordinal_suffix }}
|
||||||
namespace: kube-system
|
namespace: kube-system
|
||||||
labels:
|
labels:
|
||||||
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
|
k8s-app: kube-dns{{ coredns_ordinal_suffix }}
|
||||||
|
|||||||
@@ -1,323 +0,0 @@
|
|||||||
# Copyright 2017 The Kubernetes Authors.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
# Configuration to deploy release version of the Dashboard UI compatible with
|
|
||||||
# Kubernetes 1.8.
|
|
||||||
#
|
|
||||||
# Example usage: kubectl create -f <this_file>
|
|
||||||
|
|
||||||
{% if k8s_namespace != 'kube-system' %}
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: {{ k8s_namespace }}
|
|
||||||
labels:
|
|
||||||
name: {{ k8s_namespace }}
|
|
||||||
{% endif %}
|
|
||||||
---
|
|
||||||
# ------------------- Dashboard Secrets ------------------- #
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard-certs
|
|
||||||
type: Opaque
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard-csrf
|
|
||||||
type: Opaque
|
|
||||||
data:
|
|
||||||
csrf: ""
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard-key-holder
|
|
||||||
type: Opaque
|
|
||||||
|
|
||||||
---
|
|
||||||
# ------------------- Dashboard ConfigMap ------------------- #
|
|
||||||
kind: ConfigMap
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard-settings
|
|
||||||
|
|
||||||
---
|
|
||||||
# ------------------- Dashboard Service Account ------------------- #
|
|
||||||
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
|
|
||||||
---
|
|
||||||
# ------------------- Dashboard Role & Role Binding ------------------- #
|
|
||||||
kind: Role
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
rules:
|
|
||||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["secrets"]
|
|
||||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
|
||||||
verbs: ["get", "update", "delete"]
|
|
||||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["configmaps"]
|
|
||||||
resourceNames: ["kubernetes-dashboard-settings"]
|
|
||||||
verbs: ["get", "update"]
|
|
||||||
# Allow Dashboard to get metrics.
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["services"]
|
|
||||||
resourceNames: ["heapster", "dashboard-metrics-scraper"]
|
|
||||||
verbs: ["proxy"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["services/proxy"]
|
|
||||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
|
||||||
verbs: ["get"]
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: RoleBinding
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: Role
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
namespace: {{ k8s_namespace }}
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
namespace: {{ k8s_namespace }}
|
|
||||||
|
|
||||||
---
|
|
||||||
# ------------------- Dashboard Deployment ------------------- #
|
|
||||||
|
|
||||||
kind: Deployment
|
|
||||||
apiVersion: apps/v1
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
spec:
|
|
||||||
replicas: {{ dashboard_replicas }}
|
|
||||||
revisionHistoryLimit: 10
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
spec:
|
|
||||||
securityContext:
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
priorityClassName: system-cluster-critical
|
|
||||||
containers:
|
|
||||||
- name: kubernetes-dashboard
|
|
||||||
image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }}
|
|
||||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
cpu: {{ dashboard_cpu_limit }}
|
|
||||||
memory: {{ dashboard_memory_limit }}
|
|
||||||
requests:
|
|
||||||
cpu: {{ dashboard_cpu_requests }}
|
|
||||||
memory: {{ dashboard_memory_requests }}
|
|
||||||
ports:
|
|
||||||
- containerPort: 8443
|
|
||||||
protocol: TCP
|
|
||||||
args:
|
|
||||||
- --namespace={{ k8s_namespace }}
|
|
||||||
{% if dashboard_use_custom_certs %}
|
|
||||||
- --tls-key-file={{ dashboard_tls_key_file }}
|
|
||||||
- --tls-cert-file={{ dashboard_tls_cert_file }}
|
|
||||||
{% else %}
|
|
||||||
- --auto-generate-certificates
|
|
||||||
{% endif %}
|
|
||||||
{% if dashboard_skip_login %}
|
|
||||||
- --enable-skip-login
|
|
||||||
{% endif %}
|
|
||||||
- --authentication-mode=token
|
|
||||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
|
||||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
|
||||||
# to it. Uncomment only if the default does not work.
|
|
||||||
# - --apiserver-host=http://my-address:port
|
|
||||||
- --token-ttl={{ dashboard_token_ttl }}
|
|
||||||
volumeMounts:
|
|
||||||
- name: kubernetes-dashboard-certs
|
|
||||||
mountPath: /certs
|
|
||||||
# Create on-disk volume to store exec logs
|
|
||||||
- mountPath: /tmp
|
|
||||||
name: tmp-volume
|
|
||||||
livenessProbe:
|
|
||||||
httpGet:
|
|
||||||
scheme: HTTPS
|
|
||||||
path: /
|
|
||||||
port: 8443
|
|
||||||
initialDelaySeconds: 30
|
|
||||||
timeoutSeconds: 30
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
readOnlyRootFilesystem: true
|
|
||||||
runAsUser: 1001
|
|
||||||
runAsGroup: 2001
|
|
||||||
volumes:
|
|
||||||
- name: kubernetes-dashboard-certs
|
|
||||||
secret:
|
|
||||||
secretName: {{ dashboard_certs_secret_name }}
|
|
||||||
- name: tmp-volume
|
|
||||||
emptyDir: {}
|
|
||||||
serviceAccountName: kubernetes-dashboard
|
|
||||||
{% if dashboard_master_toleration %}
|
|
||||||
tolerations:
|
|
||||||
- key: node-role.kubernetes.io/control-plane
|
|
||||||
effect: NoSchedule
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
---
|
|
||||||
# ------------------- Dashboard Service ------------------- #
|
|
||||||
|
|
||||||
kind: Service
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
spec:
|
|
||||||
ports:
|
|
||||||
- port: 443
|
|
||||||
targetPort: 8443
|
|
||||||
selector:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
|
|
||||||
---
|
|
||||||
# ------------------- Metrics Scraper Service Account ------------------- #
|
|
||||||
|
|
||||||
kind: ClusterRole
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-dashboard
|
|
||||||
name: kubernetes-dashboard
|
|
||||||
rules:
|
|
||||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
|
||||||
- apiGroups: ["metrics.k8s.io"]
|
|
||||||
resources: ["pods", "nodes"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
# ------------------- Metrics Scraper Service ------------------- #
|
|
||||||
kind: Service
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-metrics-scraper
|
|
||||||
name: dashboard-metrics-scraper
|
|
||||||
spec:
|
|
||||||
ports:
|
|
||||||
- port: 8000
|
|
||||||
targetPort: 8000
|
|
||||||
selector:
|
|
||||||
k8s-app: kubernetes-metrics-scraper
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
# ------------------- Metrics Scraper Deployment ------------------- #
|
|
||||||
kind: Deployment
|
|
||||||
apiVersion: apps/v1
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-metrics-scraper
|
|
||||||
name: kubernetes-metrics-scraper
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
revisionHistoryLimit: 10
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
k8s-app: kubernetes-metrics-scraper
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kubernetes-metrics-scraper
|
|
||||||
spec:
|
|
||||||
securityContext:
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
priorityClassName: system-cluster-critical
|
|
||||||
containers:
|
|
||||||
- name: kubernetes-metrics-scraper
|
|
||||||
image: {{ dashboard_metrics_scraper_repo }}:{{ dashboard_metrics_scraper_tag }}
|
|
||||||
ports:
|
|
||||||
- containerPort: 8000
|
|
||||||
protocol: TCP
|
|
||||||
livenessProbe:
|
|
||||||
httpGet:
|
|
||||||
scheme: HTTP
|
|
||||||
path: /
|
|
||||||
port: 8000
|
|
||||||
initialDelaySeconds: 30
|
|
||||||
timeoutSeconds: 30
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
readOnlyRootFilesystem: true
|
|
||||||
runAsUser: 1001
|
|
||||||
runAsGroup: 2001
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /tmp
|
|
||||||
name: tmp-volume
|
|
||||||
serviceAccountName: kubernetes-dashboard
|
|
||||||
volumes:
|
|
||||||
- name: tmp-volume
|
|
||||||
emptyDir: {}
|
|
||||||
{% if dashboard_master_toleration %}
|
|
||||||
tolerations:
|
|
||||||
- key: node-role.kubernetes.io/control-plane
|
|
||||||
effect: NoSchedule
|
|
||||||
{% endif %}
|
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
gateway_api_enabled: false
|
gateway_api_enabled: false
|
||||||
gateway_api_version: 1.2.1
|
|
||||||
|
|
||||||
# `gateway_api_channel` default is "standard".
|
# `gateway_api_channel` default is "standard".
|
||||||
# "standard" release channel includes all resources that have graduated to GA or beta, including GatewayClass, Gateway, HTTPRoute, and ReferenceGrant.
|
# "standard" release channel includes all resources that have graduated to GA or beta, including GatewayClass, Gateway, HTTPRoute, and ReferenceGrant.
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ external_openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}"
|
|||||||
## arg1: "value1"
|
## arg1: "value1"
|
||||||
## arg2: "value2"
|
## arg2: "value2"
|
||||||
external_openstack_cloud_controller_extra_args: {}
|
external_openstack_cloud_controller_extra_args: {}
|
||||||
external_openstack_cloud_controller_image_tag: "v1.32.0"
|
external_openstack_cloud_controller_image_tag: "v1.35.0"
|
||||||
external_openstack_cloud_controller_bind_address: 127.0.0.1
|
external_openstack_cloud_controller_bind_address: 127.0.0.1
|
||||||
external_openstack_cloud_controller_dns_policy: ClusterFirst
|
external_openstack_cloud_controller_dns_policy: ClusterFirst
|
||||||
|
|
||||||
|
|||||||
@@ -8,3 +8,4 @@ local_path_provisioner_is_default_storageclass: "true"
|
|||||||
local_path_provisioner_debug: false
|
local_path_provisioner_debug: false
|
||||||
local_path_provisioner_helper_image_repo: "busybox"
|
local_path_provisioner_helper_image_repo: "busybox"
|
||||||
local_path_provisioner_helper_image_tag: "latest"
|
local_path_provisioner_helper_image_tag: "latest"
|
||||||
|
local_path_provisioner_resources: {}
|
||||||
|
|||||||
@@ -35,6 +35,10 @@ spec:
|
|||||||
valueFrom:
|
valueFrom:
|
||||||
fieldRef:
|
fieldRef:
|
||||||
fieldPath: metadata.namespace
|
fieldPath: metadata.namespace
|
||||||
|
{% if local_path_provisioner_resources %}
|
||||||
|
resources:
|
||||||
|
{{ local_path_provisioner_resources | to_nice_yaml | indent(10) | trim }}
|
||||||
|
{% endif %}
|
||||||
volumes:
|
volumes:
|
||||||
- name: config-volume
|
- name: config-volume
|
||||||
configMap:
|
configMap:
|
||||||
|
|||||||
@@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
ingress_nginx_namespace: "ingress-nginx"
|
|
||||||
ingress_nginx_host_network: false
|
|
||||||
ingress_nginx_service_type: LoadBalancer
|
|
||||||
ingress_nginx_service_nodeport_http: ""
|
|
||||||
ingress_nginx_service_nodeport_https: ""
|
|
||||||
ingress_nginx_service_annotations: {}
|
|
||||||
ingress_publish_status_address: ""
|
|
||||||
ingress_nginx_publish_service: "{{ ingress_nginx_namespace }}/ingress-nginx"
|
|
||||||
ingress_nginx_nodeselector:
|
|
||||||
kubernetes.io/os: "linux"
|
|
||||||
ingress_nginx_tolerations: []
|
|
||||||
ingress_nginx_insecure_port: 80
|
|
||||||
ingress_nginx_secure_port: 443
|
|
||||||
ingress_nginx_metrics_port: 10254
|
|
||||||
ingress_nginx_configmap: {}
|
|
||||||
ingress_nginx_configmap_tcp_services: {}
|
|
||||||
ingress_nginx_configmap_udp_services: {}
|
|
||||||
ingress_nginx_extra_args: []
|
|
||||||
ingress_nginx_termination_grace_period_seconds: 300
|
|
||||||
ingress_nginx_class: nginx
|
|
||||||
ingress_nginx_without_class: true
|
|
||||||
ingress_nginx_default: false
|
|
||||||
ingress_nginx_webhook_enabled: false
|
|
||||||
ingress_nginx_webhook_job_ttl: 1800
|
|
||||||
ingress_nginx_opentelemetry_enabled: false
|
|
||||||
|
|
||||||
ingress_nginx_probe_initial_delay_seconds: 10
|
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Create addon dir
|
|
||||||
file:
|
|
||||||
path: "{{ kube_config_dir }}/addons/ingress_nginx"
|
|
||||||
state: directory
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: "0755"
|
|
||||||
when:
|
|
||||||
- inventory_hostname == groups['kube_control_plane'][0]
|
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Templates list
|
|
||||||
set_fact:
|
|
||||||
ingress_nginx_templates:
|
|
||||||
- { name: 00-namespace, file: 00-namespace.yml, type: ns }
|
|
||||||
- { name: cm-ingress-nginx, file: cm-ingress-nginx.yml, type: cm }
|
|
||||||
- { name: cm-tcp-services, file: cm-tcp-services.yml, type: cm }
|
|
||||||
- { name: cm-udp-services, file: cm-udp-services.yml, type: cm }
|
|
||||||
- { name: sa-ingress-nginx, file: sa-ingress-nginx.yml, type: sa }
|
|
||||||
- { name: clusterrole-ingress-nginx, file: clusterrole-ingress-nginx.yml, type: clusterrole }
|
|
||||||
- { name: clusterrolebinding-ingress-nginx, file: clusterrolebinding-ingress-nginx.yml, type: clusterrolebinding }
|
|
||||||
- { name: role-ingress-nginx, file: role-ingress-nginx.yml, type: role }
|
|
||||||
- { name: rolebinding-ingress-nginx, file: rolebinding-ingress-nginx.yml, type: rolebinding }
|
|
||||||
- { name: ingressclass-nginx, file: ingressclass-nginx.yml, type: ingressclass }
|
|
||||||
- { name: ds-ingress-nginx-controller, file: ds-ingress-nginx-controller.yml, type: ds }
|
|
||||||
ingress_nginx_template_for_service:
|
|
||||||
- { name: svc-ingress-nginx, file: svc-ingress-nginx.yml, type: svc }
|
|
||||||
ingress_nginx_templates_for_webhook:
|
|
||||||
- { name: admission-webhook-configuration, file: admission-webhook-configuration.yml, type: sa }
|
|
||||||
- { name: sa-admission-webhook, file: sa-admission-webhook.yml, type: sa }
|
|
||||||
- { name: clusterrole-admission-webhook, file: clusterrole-admission-webhook.yml, type: clusterrole }
|
|
||||||
- { name: clusterrolebinding-admission-webhook, file: clusterrolebinding-admission-webhook.yml, type: clusterrolebinding }
|
|
||||||
- { name: role-admission-webhook, file: role-admission-webhook.yml, type: role }
|
|
||||||
- { name: rolebinding-admission-webhook, file: rolebinding-admission-webhook.yml, type: rolebinding }
|
|
||||||
- { name: admission-webhook-job, file: admission-webhook-job.yml, type: job }
|
|
||||||
- { name: svc-ingress-nginx-controller-admission, file: svc-ingress-nginx-controller-admission.yml, type: svc }
|
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Append extra templates to NGINX Ingress Template list for service
|
|
||||||
set_fact:
|
|
||||||
ingress_nginx_templates: "{{ ingress_nginx_templates + ingress_nginx_template_for_service }}"
|
|
||||||
when: not ingress_nginx_host_network
|
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Append extra templates to NGINX Ingress Templates list for webhook
|
|
||||||
set_fact:
|
|
||||||
ingress_nginx_templates: "{{ ingress_nginx_templates + ingress_nginx_templates_for_webhook }}"
|
|
||||||
when: ingress_nginx_webhook_enabled
|
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Create manifests
|
|
||||||
template:
|
|
||||||
src: "{{ item.file }}.j2"
|
|
||||||
dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}"
|
|
||||||
mode: "0644"
|
|
||||||
with_items: "{{ ingress_nginx_templates }}"
|
|
||||||
register: ingress_nginx_manifests
|
|
||||||
when:
|
|
||||||
- inventory_hostname == groups['kube_control_plane'][0]
|
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Apply manifests
|
|
||||||
kube:
|
|
||||||
name: "{{ item.item.name }}"
|
|
||||||
namespace: "{{ ingress_nginx_namespace }}"
|
|
||||||
kubectl: "{{ bin_dir }}/kubectl"
|
|
||||||
resource: "{{ item.item.type }}"
|
|
||||||
filename: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.item.file }}"
|
|
||||||
state: "latest"
|
|
||||||
with_items: "{{ ingress_nginx_manifests.results }}"
|
|
||||||
when:
|
|
||||||
- inventory_hostname == groups['kube_control_plane'][0]
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
name: {{ ingress_nginx_namespace }}
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
apiVersion: admissionregistration.k8s.io/v1
|
|
||||||
kind: ValidatingWebhookConfiguration
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
webhooks:
|
|
||||||
- admissionReviewVersions:
|
|
||||||
- v1
|
|
||||||
clientConfig:
|
|
||||||
service:
|
|
||||||
name: ingress-nginx-controller-admission
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
path: /networking/v1/ingresses
|
|
||||||
port: 443
|
|
||||||
failurePolicy: Fail
|
|
||||||
matchPolicy: Equivalent
|
|
||||||
name: validate.nginx.ingress.kubernetes.io
|
|
||||||
rules:
|
|
||||||
- apiGroups:
|
|
||||||
- networking.k8s.io
|
|
||||||
apiVersions:
|
|
||||||
- v1
|
|
||||||
operations:
|
|
||||||
- CREATE
|
|
||||||
- UPDATE
|
|
||||||
resources:
|
|
||||||
- ingresses
|
|
||||||
sideEffects: None
|
|
||||||
@@ -1,96 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission-create
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
spec:
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission-create
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- args:
|
|
||||||
- create
|
|
||||||
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
|
|
||||||
- --namespace=$(POD_NAMESPACE)
|
|
||||||
- --secret-name=ingress-nginx-admission
|
|
||||||
env:
|
|
||||||
- name: POD_NAMESPACE
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.namespace
|
|
||||||
image: "{{ ingress_nginx_kube_webhook_certgen_image_repo }}:{{ ingress_nginx_kube_webhook_certgen_image_tag }}"
|
|
||||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
|
||||||
name: create
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
drop:
|
|
||||||
- ALL
|
|
||||||
readOnlyRootFilesystem: true
|
|
||||||
runAsGroup: 65532
|
|
||||||
runAsNonRoot: true
|
|
||||||
runAsUser: 65532
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
nodeSelector:
|
|
||||||
kubernetes.io/os: linux
|
|
||||||
restartPolicy: OnFailure
|
|
||||||
serviceAccountName: ingress-nginx-admission
|
|
||||||
ttlSecondsAfterFinished: {{ ingress_nginx_webhook_job_ttl }}
|
|
||||||
---
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission-patch
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
spec:
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission-patch
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- args:
|
|
||||||
- patch
|
|
||||||
- --webhook-name=ingress-nginx-admission
|
|
||||||
- --namespace=$(POD_NAMESPACE)
|
|
||||||
- --patch-mutating=false
|
|
||||||
- --secret-name=ingress-nginx-admission
|
|
||||||
- --patch-failure-policy=Fail
|
|
||||||
env:
|
|
||||||
- name: POD_NAMESPACE
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.namespace
|
|
||||||
image: "{{ ingress_nginx_kube_webhook_certgen_image_repo }}:{{ ingress_nginx_kube_webhook_certgen_image_tag }}"
|
|
||||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
|
||||||
name: patch
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
drop:
|
|
||||||
- ALL
|
|
||||||
readOnlyRootFilesystem: true
|
|
||||||
runAsGroup: 65532
|
|
||||||
runAsNonRoot: true
|
|
||||||
runAsUser: 65532
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
nodeSelector:
|
|
||||||
kubernetes.io/os: linux
|
|
||||||
restartPolicy: OnFailure
|
|
||||||
serviceAccountName: ingress-nginx-admission
|
|
||||||
ttlSecondsAfterFinished: {{ ingress_nginx_webhook_job_ttl }}
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
rules:
|
|
||||||
- apiGroups:
|
|
||||||
- admissionregistration.k8s.io
|
|
||||||
resources:
|
|
||||||
- validatingwebhookconfigurations
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- update
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["configmaps", "endpoints", "nodes", "pods", "secrets", "namespaces"]
|
|
||||||
verbs: ["list", "watch"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["nodes"]
|
|
||||||
verbs: ["get"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["services"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingresses"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["events"]
|
|
||||||
verbs: ["create", "patch"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingresses/status"]
|
|
||||||
verbs: ["update"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingressclasses"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: ["coordination.k8s.io"]
|
|
||||||
resources: ["leases"]
|
|
||||||
verbs: ["list", "watch"]
|
|
||||||
- apiGroups: ["discovery.k8s.io"]
|
|
||||||
resources: ["endpointslices"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: ingress-nginx
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: ingress-nginx
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
{% if ingress_nginx_configmap %}
|
|
||||||
data:
|
|
||||||
{{ ingress_nginx_configmap | to_nice_yaml | indent(2) }}
|
|
||||||
{%- endif %}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: tcp-services
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
{% if ingress_nginx_configmap_tcp_services %}
|
|
||||||
data:
|
|
||||||
{{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }}
|
|
||||||
{%- endif %}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: udp-services
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
{% if ingress_nginx_configmap_udp_services %}
|
|
||||||
data:
|
|
||||||
{{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }}
|
|
||||||
{%- endif %}
|
|
||||||
@@ -1,201 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: DaemonSet
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx-controller
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
annotations:
|
|
||||||
prometheus.io/port: "10254"
|
|
||||||
prometheus.io/scrape: "true"
|
|
||||||
spec:
|
|
||||||
serviceAccountName: ingress-nginx
|
|
||||||
terminationGracePeriodSeconds: {{ ingress_nginx_termination_grace_period_seconds }}
|
|
||||||
{% if ingress_nginx_opentelemetry_enabled %}
|
|
||||||
initContainers:
|
|
||||||
- name: opentelemetry
|
|
||||||
command:
|
|
||||||
- /init_module
|
|
||||||
image: {{ ingress_nginx_opentelemetry_image_repo }}:{{ ingress_nginx_opentelemetry_image_tag }}
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
add:
|
|
||||||
- NET_BIND_SERVICE
|
|
||||||
drop:
|
|
||||||
- ALL
|
|
||||||
readOnlyRootFilesystem: false
|
|
||||||
runAsGroup: 82
|
|
||||||
runAsNonRoot: true
|
|
||||||
runAsUser: 101
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /modules_mount
|
|
||||||
name: modules
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_host_network %}
|
|
||||||
hostNetwork: true
|
|
||||||
dnsPolicy: ClusterFirstWithHostNet
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_nodeselector %}
|
|
||||||
nodeSelector:
|
|
||||||
{{ ingress_nginx_nodeselector | to_nice_yaml | indent(width=8) }}
|
|
||||||
{%- endif %}
|
|
||||||
{% if ingress_nginx_tolerations %}
|
|
||||||
tolerations:
|
|
||||||
{{ ingress_nginx_tolerations | to_nice_yaml(indent=2) | indent(width=8) }}
|
|
||||||
{% endif %}
|
|
||||||
priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{ '' }}
|
|
||||||
containers:
|
|
||||||
- name: ingress-nginx-controller
|
|
||||||
image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}
|
|
||||||
imagePullPolicy: {{ k8s_image_pull_policy }}
|
|
||||||
lifecycle:
|
|
||||||
preStop:
|
|
||||||
exec:
|
|
||||||
command:
|
|
||||||
- /wait-shutdown
|
|
||||||
args:
|
|
||||||
- /nginx-ingress-controller
|
|
||||||
- --configmap=$(POD_NAMESPACE)/ingress-nginx
|
|
||||||
- --election-id=ingress-controller-leader-{{ ingress_nginx_class }}
|
|
||||||
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
|
|
||||||
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
|
|
||||||
- --annotations-prefix=nginx.ingress.kubernetes.io
|
|
||||||
- --ingress-class={{ ingress_nginx_class }}
|
|
||||||
{% if ingress_nginx_without_class %}
|
|
||||||
- --watch-ingress-without-class=true
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_publish_status_address != "" %}
|
|
||||||
- --publish-status-address={{ ingress_publish_status_address }}
|
|
||||||
{% elif ingress_nginx_host_network %}
|
|
||||||
- --report-node-internal-ip-address
|
|
||||||
{% elif ingress_nginx_publish_service != "" %}
|
|
||||||
- --publish-service={{ ingress_nginx_publish_service }}
|
|
||||||
{% endif %}
|
|
||||||
{% for extra_arg in ingress_nginx_extra_args %}
|
|
||||||
- {{ extra_arg }}
|
|
||||||
{% endfor %}
|
|
||||||
{% if ingress_nginx_webhook_enabled %}
|
|
||||||
- --validating-webhook=:8443
|
|
||||||
- --validating-webhook-certificate=/usr/local/certificates/cert
|
|
||||||
- --validating-webhook-key=/usr/local/certificates/key
|
|
||||||
{% endif %}
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
add:
|
|
||||||
- NET_BIND_SERVICE
|
|
||||||
drop:
|
|
||||||
- ALL
|
|
||||||
readOnlyRootFilesystem: false
|
|
||||||
runAsGroup: 82
|
|
||||||
runAsNonRoot: true
|
|
||||||
runAsUser: 101
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
env:
|
|
||||||
- name: POD_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.name
|
|
||||||
- name: POD_NAMESPACE
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.namespace
|
|
||||||
- name: LD_PRELOAD
|
|
||||||
value: /usr/local/lib/libmimalloc.so
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
containerPort: 80
|
|
||||||
hostPort: {{ ingress_nginx_insecure_port }}
|
|
||||||
- name: https
|
|
||||||
containerPort: 443
|
|
||||||
hostPort: {{ ingress_nginx_secure_port }}
|
|
||||||
- name: metrics
|
|
||||||
containerPort: 10254
|
|
||||||
{% if not ingress_nginx_host_network %}
|
|
||||||
hostPort: {{ ingress_nginx_metrics_port }}
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_configmap_tcp_services %}
|
|
||||||
{% for port in ingress_nginx_configmap_tcp_services.keys() %}
|
|
||||||
- name: tcp-port-{{ port }}
|
|
||||||
containerPort: {{ port | int }}
|
|
||||||
protocol: TCP
|
|
||||||
{% if not ingress_nginx_host_network %}
|
|
||||||
hostPort: {{ port | int }}
|
|
||||||
{% endif %}
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_configmap_udp_services %}
|
|
||||||
{% for port in ingress_nginx_configmap_udp_services.keys() %}
|
|
||||||
- name: udp-port-{{ port }}
|
|
||||||
containerPort: {{ port | int }}
|
|
||||||
protocol: UDP
|
|
||||||
{% if not ingress_nginx_host_network %}
|
|
||||||
hostPort: {{ port | int }}
|
|
||||||
{% endif %}
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_webhook_enabled %}
|
|
||||||
- name: webhook
|
|
||||||
containerPort: 8443
|
|
||||||
protocol: TCP
|
|
||||||
{% endif %}
|
|
||||||
livenessProbe:
|
|
||||||
httpGet:
|
|
||||||
path: /healthz
|
|
||||||
port: 10254
|
|
||||||
scheme: HTTP
|
|
||||||
initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }}
|
|
||||||
periodSeconds: 10
|
|
||||||
timeoutSeconds: 5
|
|
||||||
successThreshold: 1
|
|
||||||
failureThreshold: 3
|
|
||||||
readinessProbe:
|
|
||||||
httpGet:
|
|
||||||
path: /healthz
|
|
||||||
port: 10254
|
|
||||||
scheme: HTTP
|
|
||||||
initialDelaySeconds: {{ ingress_nginx_probe_initial_delay_seconds }}
|
|
||||||
periodSeconds: 10
|
|
||||||
timeoutSeconds: 5
|
|
||||||
successThreshold: 1
|
|
||||||
failureThreshold: 3
|
|
||||||
{% if ingress_nginx_webhook_enabled or ingress_nginx_opentelemetry_enabled %}
|
|
||||||
volumeMounts:
|
|
||||||
{% if ingress_nginx_webhook_enabled %}
|
|
||||||
- mountPath: /usr/local/certificates/
|
|
||||||
name: webhook-cert
|
|
||||||
readOnly: true
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_opentelemetry_enabled %}
|
|
||||||
- name: modules
|
|
||||||
mountPath: /modules_mount
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_webhook_enabled or ingress_nginx_opentelemetry_enabled %}
|
|
||||||
volumes:
|
|
||||||
{% if ingress_nginx_webhook_enabled %}
|
|
||||||
- name: webhook-cert
|
|
||||||
secret:
|
|
||||||
secretName: ingress-nginx-admission
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_opentelemetry_enabled %}
|
|
||||||
- name: modules
|
|
||||||
emptyDir: {}
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
apiVersion: networking.k8s.io/v1
|
|
||||||
kind: IngressClass
|
|
||||||
metadata:
|
|
||||||
name: {{ ingress_nginx_class }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
{% if ingress_nginx_default %}
|
|
||||||
annotations:
|
|
||||||
ingressclass.kubernetes.io/is-default-class: "true"
|
|
||||||
{% endif %}
|
|
||||||
spec:
|
|
||||||
controller: k8s.io/ingress-nginx
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: Role
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
rules:
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- secrets
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- create
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: Role
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["namespaces"]
|
|
||||||
verbs: ["get"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["configmaps", "pods", "secrets", "endpoints"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["services"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingresses"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingresses/status"]
|
|
||||||
verbs: ["update"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingressclasses"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: ["coordination.k8s.io"]
|
|
||||||
resources: ["leases"]
|
|
||||||
# Defaults to "<election-id>", defined in
|
|
||||||
# ds-ingress-nginx-controller.yml.js
|
|
||||||
# by a command-line argument.
|
|
||||||
#
|
|
||||||
# This is the correct behaviour for ingress-controller
|
|
||||||
# version 1.8.1
|
|
||||||
resourceNames: ["ingress-controller-leader-{{ ingress_nginx_class }}"]
|
|
||||||
verbs: ["get", "update"]
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["events"]
|
|
||||||
verbs: ["create", "patch"]
|
|
||||||
- apiGroups: ["coordination.k8s.io"]
|
|
||||||
resources: ["leases"]
|
|
||||||
verbs: ["create"]
|
|
||||||
- apiGroups: ["discovery.k8s.io"]
|
|
||||||
resources: ["endpointslices"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: RoleBinding
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: Role
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: RoleBinding
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: Role
|
|
||||||
name: ingress-nginx
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: ingress-nginx
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx-admission
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
name: ingress-nginx-controller-admission
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
spec:
|
|
||||||
type: ClusterIP
|
|
||||||
ports:
|
|
||||||
- appProtocol: https
|
|
||||||
name: https-webhook
|
|
||||||
port: 443
|
|
||||||
targetPort: webhook
|
|
||||||
selector:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
{% if not ingress_nginx_host_network %}
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: ingress-nginx
|
|
||||||
namespace: {{ ingress_nginx_namespace }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
{% if ingress_nginx_service_annotations %}
|
|
||||||
annotations:
|
|
||||||
{{ ingress_nginx_service_annotations | to_nice_yaml(indent=2, width=1337) | indent(width=4) }}
|
|
||||||
{% endif %}
|
|
||||||
spec:
|
|
||||||
type: {{ ingress_nginx_service_type }}
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
port: 80
|
|
||||||
targetPort: 80
|
|
||||||
protocol: TCP
|
|
||||||
{% if (ingress_nginx_service_type == 'NodePort' or ingress_nginx_service_type == 'LoadBalancer') and ingress_nginx_service_nodeport_http %}
|
|
||||||
nodePort: {{ingress_nginx_service_nodeport_http | int}}
|
|
||||||
{% endif %}
|
|
||||||
- name: https
|
|
||||||
port: 443
|
|
||||||
targetPort: 443
|
|
||||||
protocol: TCP
|
|
||||||
{% if (ingress_nginx_service_type == 'NodePort' or ingress_nginx_service_type == 'LoadBalancer') and ingress_nginx_service_nodeport_https %}
|
|
||||||
nodePort: {{ingress_nginx_service_nodeport_https | int}}
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_configmap_tcp_services %}
|
|
||||||
{% for port in ingress_nginx_configmap_tcp_services.keys() %}
|
|
||||||
- name: tcp-port-{{ port }}
|
|
||||||
port: {{ port | int }}
|
|
||||||
targetPort: {{ port | int }}
|
|
||||||
protocol: TCP
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
||||||
{% if ingress_nginx_configmap_udp_services %}
|
|
||||||
{% for port in ingress_nginx_configmap_udp_services.keys() %}
|
|
||||||
- name: udp-port-{{ port }}
|
|
||||||
port: {{ port | int }}
|
|
||||||
targetPort: {{ port | int }}
|
|
||||||
protocol: UDP
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
||||||
selector:
|
|
||||||
app.kubernetes.io/name: ingress-nginx
|
|
||||||
app.kubernetes.io/part-of: ingress-nginx
|
|
||||||
{% endif %}
|
|
||||||
@@ -1,12 +1,5 @@
|
|||||||
---
|
---
|
||||||
dependencies:
|
dependencies:
|
||||||
- role: kubernetes-apps/ingress_controller/ingress_nginx
|
|
||||||
when: ingress_nginx_enabled
|
|
||||||
tags:
|
|
||||||
- apps
|
|
||||||
- ingress-controller
|
|
||||||
- ingress-nginx
|
|
||||||
|
|
||||||
- role: kubernetes-apps/ingress_controller/cert_manager
|
- role: kubernetes-apps/ingress_controller/cert_manager
|
||||||
when: cert_manager_enabled
|
when: cert_manager_enabled
|
||||||
tags:
|
tags:
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
---
|
---
|
||||||
dependencies:
|
dependencies:
|
||||||
|
- role: kubernetes-apps/utils
|
||||||
|
|
||||||
- role: kubernetes-apps/ansible
|
- role: kubernetes-apps/ansible
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube_control_plane'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
---
|
---
|
||||||
metallb_enabled: false
|
metallb_enabled: false
|
||||||
metallb_log_level: info
|
metallb_log_level: info
|
||||||
|
metallb_namespace: "metallb-system"
|
||||||
metallb_port: "7472"
|
metallb_port: "7472"
|
||||||
metallb_memberlist_port: "7946"
|
metallb_memberlist_port: "7946"
|
||||||
metallb_speaker_enabled: "{{ metallb_enabled }}"
|
metallb_speaker_enabled: "{{ metallb_enabled }}"
|
||||||
|
|||||||
@@ -58,12 +58,6 @@ rules:
|
|||||||
verbs:
|
verbs:
|
||||||
- list
|
- list
|
||||||
- watch
|
- watch
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- nodes/proxy
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- topology.node.k8s.io
|
- topology.node.k8s.io
|
||||||
resources:
|
resources:
|
||||||
|
|||||||
@@ -43,12 +43,12 @@
|
|||||||
- { name: registry-cm, file: registry-cm.yml, type: cm }
|
- { name: registry-cm, file: registry-cm.yml, type: cm }
|
||||||
- { name: registry-rs, file: registry-rs.yml, type: rs }
|
- { name: registry-rs, file: registry-rs.yml, type: rs }
|
||||||
|
|
||||||
- name: Registry | Append nginx ingress templates to Registry Templates list when ingress enabled
|
- name: Registry | Append ingress templates to Registry Templates list when ALB ingress enabled
|
||||||
set_fact:
|
set_fact:
|
||||||
registry_templates: "{{ registry_templates + [item] }}"
|
registry_templates: "{{ registry_templates + [item] }}"
|
||||||
with_items:
|
with_items:
|
||||||
- [{ name: registry-ing, file: registry-ing.yml, type: ing }]
|
- [{ name: registry-ing, file: registry-ing.yml, type: ing }]
|
||||||
when: ingress_nginx_enabled or ingress_alb_enabled
|
when: ingress_alb_enabled
|
||||||
|
|
||||||
- name: Registry | Create manifests
|
- name: Registry | Create manifests
|
||||||
template:
|
template:
|
||||||
|
|||||||
12
roles/kubernetes-apps/utils/vars/main.yml
Normal file
12
roles/kubernetes-apps/utils/vars/main.yml
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
---
|
||||||
|
_kubectl_apply_stdin:
|
||||||
|
- "{{ kubectl }}"
|
||||||
|
- apply
|
||||||
|
- -f
|
||||||
|
- "-"
|
||||||
|
- -n
|
||||||
|
- "{{ k8s_namespace }}"
|
||||||
|
- --server-side="{{ server_side_apply | lower }}"
|
||||||
|
# TODO: switch to default SSA
|
||||||
|
server_side_apply: false
|
||||||
|
kubectl_apply_stdin: "{{ _kubectl_apply_stdin | join(' ') }}"
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user