Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2026-02-01 09:38:12 -03:30)

Compare commits: release-2. ... component_ (132 commits)
Commits (SHA1): 6d1add5859, 4d4058ee8e, f071fccc33, 70daea701a, 3e42b84e94, 868ff3cea9, 0b69a18e35, e30076016c, f4ccdb5e72, fcecaf6943, 37f7a86014, fff7f10a85, dc09298f7e, 680db0c921, 9977d4dc10, 1b6129566b, c3404c3685, fba8708486, 8dacb9cd16, df3f0a2341, 62e90b3122, 6b5cc5bdfb, a277cfdee7, bc5528f585, 2740c13c0c, 52b68bccad, 82c4c0afdf, 63a43cf6db, 666a3a9500, 28f9c126bf, d41b629be3, 851abbc2e3, 17c72367bc, d91c7d7576, 14b20ad2a2, 72cb1356ef, 51304d57e2, a0d7bef90e, a1ec88e290, c9ff62944e, 20ab9179af, 5be35c811a, ad522d4aab, 9c511069cc, ed270fcab4, 0615929727, 48c25d9ebf, 0bffcacbe7, c857252225, a0f00761ac, 3a3e5d6954, 2d6e508084, 6d850a0dc5, 6a517e165e, aaaf82f308, e80087df93, b7491b957b, 5cf8f3eefc, 1cbccf40a5, bcdd702e19, 20693afe82, 1bbcfd8dd6, 8d948f918f, 4d8d1b8aff, d80318301d, 31cce09fbc, 9a90c9d6c8, b9e1e8577f, 5d1dd83b07, b203586d6b, 88df61357b, 2edf176294, 39744146b4, 118b2dce02, 4c5eda9f1e, 2512e0c50c, 633d39448e, 4d87ac1032, 2342d0cd57, e6a5266bad, 57f7c44718, 5789dc839c, 3de6fa7220, 9a9e8814e6, 87a4f61d76, 9975b5d525, 9d06ce1a8d, bce107ce3d, 7d7a42d931, 5183679a89, b4fe577203, bde51ebddf, 381426d6d5, b3ee6d6b75, 7436d63faa, 6138c6a1a2, 6115eba3c3, 1c008d79b1, b4bbec6772, 5c6ee4852a, 8190f952c1, 3edc3d7a36, 2f3f1d7e65, 71c69ec12c, dab0947150, 5488e7d805, ca9873cfcb, 65f33c3ef0, 5eccf9ea6c, db599b3475, 47140083dc, 2d179879a0, 61b8e4ce84, 97a3776d8e, 990695de7b, 4059c699dc, e22ce15429, 452d4e63e0, d2a46b4ff8, e090c9ee26, 0d6d3f5828, b9662dbd86, f5a480fdc4, 5dce75d29b, 5acde6cfe2, c6926eb2f9, 1930ab7ed6, 3edc979384, cde7b2b022, 0d88532f3d, 1fb14b7463, a66d00a535
.ansible-lint

@@ -1,5 +1,4 @@
---
parseable: true
skip_list:
  # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules

@@ -34,6 +33,8 @@ skip_list:
  # Disable run-once check with free strategy
  # (Disabled in June 2023 after ansible upgrade; FIXME)
  - 'run-once[task]'

  - 'jinja[spacing]'
exclude_paths:
  # Generated files
  - tests/files/custom_cni/cilium.yaml
.github/workflows/auto-label-os.yml (6 changes, vendored)

@@ -13,16 +13,16 @@ jobs:
      issues: write

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd

      - name: Parse issue form
-        uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e
+        uses: stefanbuck/github-issue-parser@10dcc54158ba4c137713d9d69d70a2da63b6bda3
        id: issue-parser
        with:
          template-path: .github/ISSUE_TEMPLATE/bug-report.yaml

      - name: Set labels based on OS field
-        uses: redhat-plumbers-in-action/advanced-issue-labeler@e38e6809c5420d038eed380d49ee9a6ca7c92dbf
+        uses: redhat-plumbers-in-action/advanced-issue-labeler@b80ae64e3e156e9c111b075bfa04b295d54e8e2e
        with:
          issue-form: ${{ steps.issue-parser.outputs.jsonString }}
          section: os
@@ -13,14 +13,14 @@ jobs:
    outputs:
      branches: ${{ steps.get-branches.outputs.data }}
    steps:
-      - uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110
+      - uses: octokit/graphql-action@ddde8ebb2493e79f390e6449c725c21663a67505
        id: get-branches
        with:
          query: |
            query get_release_branches($owner:String!, $name:String!) {
              repository(owner:$owner, name:$name) {
                refs(refPrefix: "refs/heads/",
-                  first: 1, # TODO increment once we have release branch with the new checksums format
+                  first: 3,
                  query: "release-",
                  orderBy: {
                    field: ALPHABETICAL,
.github/workflows/upgrade-patch-versions.yml (6 changes, vendored)

@@ -11,7 +11,7 @@ jobs:
  update-patch-versions:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
        with:
          ref: ${{ inputs.branch }}
      - uses: actions/setup-python@v6

@@ -22,14 +22,14 @@ jobs:
      - run: update-hashes
        env:
          API_KEY: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions/cache@v4
+      - uses: actions/cache@v5
        with:
          key: pre-commit-hook-propagate
          path: |
            ~/.cache/pre-commit
      - run: pre-commit run --all-files propagate-ansible-variables
        continue-on-error: true
-      - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
+      - uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0
        with:
          commit-message: Patch versions updates
          title: Patch versions updates - ${{ inputs.branch }}
@@ -24,7 +24,7 @@ variables:
  ANSIBLE_REMOTE_USER: kubespray
  ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
  ANSIBLE_INVENTORY: /tmp/inventory
-  ANSIBLE_STDOUT_CALLBACK: "debug"
+  ANSIBLE_STDOUT_CALLBACK: "default"
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"
@@ -4,7 +4,7 @@
  interruptible: true
  script:
    - ansible-playbook tests/cloud_playbooks/create-kubevirt.yml
-      -c local -e @"tests/files/${TESTCASE}.yml"
+      -e @"tests/files/${TESTCASE}.yml"
    - ./tests/scripts/testcases_run.sh
  variables:
    ANSIBLE_TIMEOUT: "120"

@@ -43,6 +43,7 @@ pr:
  - fedora39-kube-router
  - openeuler24-calico
  - rockylinux9-cilium
+  - rockylinux10-cilium
  - ubuntu22-calico-all-in-one
  - ubuntu22-calico-all-in-one-upgrade
  - ubuntu24-calico-etcd-datastore

@@ -127,6 +128,7 @@ pr_extended:
  - debian12-docker
  - debian13-calico
  - rockylinux9-calico
+  - rockylinux10-calico
  - ubuntu22-all-in-one-docker
  - ubuntu24-all-in-one-docker
  - ubuntu24-calico-all-in-one
@@ -37,7 +37,6 @@ terraform_validate:
    - hetzner
    - vsphere
    - upcloud
-    - nifcloud

.terraform_apply:
  extends: .terraform_install

@@ -89,11 +88,10 @@ tf-elastx_cleanup:
    - ./scripts/openstack-cleanup/main.py
  allow_failure: true

-tf-elastx_ubuntu20-calico:
+tf-elastx_ubuntu24-calico:
  extends: .terraform_apply
  stage: deploy-part1
  when: on_success
  allow_failure: true
  variables:
    <<: *elastx_variables
    PROVIDER: openstack

@@ -116,5 +114,5 @@ tf-elastx_ubuntu20-calico:
    TF_VAR_az_list_node: '["sto1"]'
    TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
    TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
-    TF_VAR_image: ubuntu-20.04-server-latest
+    TF_VAR_image: ubuntu-24.04-server-latest
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
@@ -36,7 +36,7 @@ vagrant:
    policy: pull-push # TODO: change to "pull" when not on main
  stage: deploy-extended
  rules:
-    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
+    - if: $PR_LABELS =~ /.*ci-full.*/
      when: on_success
    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
      when: on_success
@@ -1,7 +1,7 @@
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v6.0.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict

@@ -15,13 +15,13 @@ repos:
      - id: trailing-whitespace

  - repo: https://github.com/adrienverge/yamllint.git
-    rev: v1.35.1
+    rev: v1.37.1
    hooks:
      - id: yamllint
        args: [--strict]

  - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: v0.10.0.1
+    rev: v0.11.0.1
    hooks:
      - id: shellcheck
        args: ["--severity=error"]

@@ -29,7 +29,7 @@ repos:
        files: "\\.sh$"

  - repo: https://github.com/ansible/ansible-lint
-    rev: v25.1.1
+    rev: v25.11.0
    hooks:
      - id: ansible-lint
        additional_dependencies:

@@ -38,7 +38,7 @@ repos:
          - distlib

  - repo: https://github.com/golangci/misspell
-    rev: v0.6.0
+    rev: v0.7.0
    hooks:
      - id: misspell
        exclude: "OWNERS_ALIASES$"
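These pinned hooks can be exercised locally with the standard pre-commit workflow; a minimal sketch (assuming Python and pip are available):

```ShellSession
pip install pre-commit      # install the hook runner
pre-commit run --all-files  # run every hook pinned above against the whole tree
```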
Dockerfile (10 changes)

@@ -1,7 +1,7 @@
# syntax=docker/dockerfile:1

-# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b

# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible

@@ -29,14 +29,14 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \

RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
    --mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
-    pip install --no-compile --no-cache-dir -r requirements.txt \
+    pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt \
    && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
-    && curl -L "https://dl.k8s.io/release/v1.33.5/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
-    && echo "$(curl -L "https://dl.k8s.io/release/v1.33.5/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
+    && curl -L "https://dl.k8s.io/release/v1.34.3/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
+    && echo "$(curl -L "https://dl.k8s.io/release/v1.34.3/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl

COPY *.yml ./
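When refreshing such a digest-pinned base image, the current digest for a tag can be looked up with buildx; an illustrative command (not taken from the diff itself):

```ShellSession
docker buildx imagetools inspect ubuntu:noble-20260113
# prints the manifest digest (sha256:...) to pin in the FROM line
```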
README.md (26 changes)

@@ -22,7 +22,7 @@ Ensure you have installed Docker then

```ShellSession
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.29.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

@@ -89,13 +89,13 @@ vagrant up
- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye, Trixie
- **Ubuntu** 22.04, 24.04
-- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8)
+- **CentOS Stream / RHEL** [9, 10](docs/operating_systems/rhel.md#rhel-8)
- **Fedora** 39, 40
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
-- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
-- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
+- **Oracle Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
+- **Alma Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
+- **Rocky Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8) (experimental in 10: see [Rocky Linux 10 notes](docs/operating_systems/rhel.md#rocky-linux-10))
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))

@@ -111,23 +111,23 @@ Note:
<!-- BEGIN ANSIBLE MANAGED BLOCK -->

- Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.33.5
-  - [etcd](https://github.com/etcd-io/etcd) 3.5.23
+  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.34.3
+  - [etcd](https://github.com/etcd-io/etcd) 3.5.26
  - [docker](https://www.docker.com/) 28.3
-  - [containerd](https://containerd.io/) 2.1.4
-  - [cri-o](http://cri-o.io/) 1.33.5 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [containerd](https://containerd.io/) 2.2.1
+  - [cri-o](http://cri-o.io/) 1.34.4 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) 1.8.0
-  - [calico](https://github.com/projectcalico/calico) 3.30.3
-  - [cilium](https://github.com/cilium/cilium) 1.18.2
+  - [calico](https://github.com/projectcalico/calico) 3.30.6
+  - [cilium](https://github.com/cilium/cilium) 1.18.6
  - [flannel](https://github.com/flannel-io/flannel) 0.27.3
  - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
  - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.2.2
-  - [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
+  - [kube-vip](https://github.com/kube-vip/kube-vip) 1.0.3
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
-  - [coredns](https://github.com/coredns/coredns) 1.12.0
+  - [coredns](https://github.com/coredns/coredns) 1.12.1
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.13.3
  - [argocd](https://argoproj.github.io/) 2.14.5
  - [helm](https://helm.sh/) 3.18.4

@@ -15,7 +15,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follows:
1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
-1. Create/Update Issue for upgradeing kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
+1. Create/Update Issue for upgrading kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)

## Major/minor releases and milestones
contrib/collection.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/bin/bash -eux
# Install collection from source assuming dependencies are present.
# Run in SemaphoreUI this bash script can install Kubespray from the repo
NAMESPACE=kubernetes_sigs
COLLECTION=kubespray
MY_VER=$(grep '^version:' galaxy.yml|cut -d: -f2|sed 's/ //')

ansible-galaxy collection build --force --output-path .
ansible-galaxy collection install --offline --force $NAMESPACE-$COLLECTION-$MY_VER.tar.gz
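Once installed this way, the collection's playbooks should be callable by their fully qualified name; a hypothetical invocation (inventory path assumed, not taken from this diff):

```ShellSession
ansible-playbook -i inventory/sample/inventory.ini kubernetes_sigs.kubespray.cluster
```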
@@ -20,7 +20,6 @@ function create_container_image_tar() {

    kubectl describe cronjobs,jobs,pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq > "${IMAGES}"
    # NOTE: etcd and pause cannot be seen as pods.
    # The pause image is used for --pod-infra-container-image option of kubelet.
    kubectl cluster-info dump | grep -E "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g >> "${IMAGES}"
  else
    echo "Getting images from file \"${IMAGES_FROM_FILE}\""
contrib/terraform/nifcloud/.gitignore (deleted, 5 lines, vendored)

@@ -1,5 +0,0 @@
*.tfstate*
.terraform.lock.hcl
.terraform

sample-inventory/inventory.ini
contrib/terraform/nifcloud/README.md (deleted)

@@ -1,138 +0,0 @@
# Kubernetes on NIFCLOUD with Terraform

Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray

## Overview

The setup looks like following

```text
                               Kubernetes cluster
                        +----------------------------+
+---------------+       |   +--------------------+   |
|               |       |   | +--------------------+ |
| API server LB +---------> | |                    | |
|               |       |   | | Control Plane/etcd | |
+---------------+       |   | | node(s)            | |
                        |   +-+                    | |
                        |     +--------------------+ |
                        |           ^                |
                        |           |                |
                        |           v                |
                        |   +--------------------+   |
                        |   | +--------------------+ |
                        |   | |                    | |
                        |   | | Worker             | |
                        |   | | node(s)            | |
                        |   +-+                    | |
                        |     +--------------------+ |
                        +----------------------------+
```

## Requirements

* Terraform 1.3.7

## Quickstart

### Export Variables

* Your NIFCLOUD credentials:

  ```bash
  export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
  export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
  ```

* The SSH KEY used to connect to the instance:
  * FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)

  ```bash
  export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
  ```

* The IP address to connect to bastion server:

  ```bash
  export TF_VAR_working_instance_ip=$(curl ifconfig.me)
  ```

### Create The Infrastructure

* Run terraform:

  ```bash
  terraform init
  terraform apply -var-file ./sample-inventory/cluster.tfvars
  ```

### Setup The Kubernetes

* Generate cluster configuration file:

  ```bash
  ./generate-inventory.sh > sample-inventory/inventory.ini
  ```

* Export Variables:

  ```bash
  BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
  API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
  CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
  export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
  ```

* Set ssh-agent"

  ```bash
  eval `ssh-agent`
  ssh-add <THE PATH TO YOUR SSH KEY>
  ```

* Run cluster.yml playbook:

  ```bash
  cd ./../../../
  ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
  ```

### Connecting to Kubernetes

* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
* Fetching kubeconfig file:

  ```bash
  mkdir -p ~/.kube
  scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
  ```

* Rewrite /etc/hosts

  ```bash
  sudo echo "${API_LB_IP} lb-apiserver.kubernetes.local" >> /etc/hosts
  ```

* Run kubectl

  ```bash
  kubectl get node
  ```

## Variables

* `region`: Region where to run the cluster
* `az`: Availability zone where to run the cluster
* `private_ip_bn`: Private ip address of bastion server
* `private_network_cidr`: Subnet of private network
* `instances_cp`: Machine to provision as Control Plane. Key of this object will be used as part of the machine' name
  * `private_ip`: private ip address of machine
* `instances_wk`: Machine to provision as Worker Node. Key of this object will be used as part of the machine' name
  * `private_ip`: private ip address of machine
* `instance_key_name`: The key name of the Key Pair to use for the instance
* `instance_type_bn`: The instance type of bastion server
* `instance_type_wk`: The instance type of worker node
* `instance_type_cp`: The instance type of control plane
* `image_name`: OS image used for the instance
* `working_instance_ip`: The IP address to connect to bastion server
* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
contrib/terraform/nifcloud/generate-inventory.sh (deleted)

@@ -1,64 +0,0 @@
#!/bin/bash

#
# Generates a inventory file based on the terraform output.
# After provisioning a cluster, simply run this command and supply the terraform state file
# Default state file is terraform.tfstate
#

set -e

TF_OUT=$(terraform output -json)

CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))

API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))

echo "[all]"
# Generate control plane hosts
i=1
for name in "${CONTROL_PLANE_NAMES[@]}"; do
    private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
    echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
    i=$(( i + 1 ))
done

# Generate worker hosts
for name in "${WORKER_NAMES[@]}"; do
    private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
    echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
done

API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))

echo ""
echo "[all:vars]"
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"


echo ""
echo "[kube_control_plane]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
    echo "${name}"
done

echo ""
echo "[etcd]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
    echo "${name}"
done

echo ""
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
    echo "${name}"
done

echo ""
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"
contrib/terraform/nifcloud/main.tf (deleted)

@@ -1,36 +0,0 @@
provider "nifcloud" {
  region = var.region
}

module "kubernetes_cluster" {
  source = "./modules/kubernetes-cluster"

  availability_zone = var.az
  prefix            = "dev"

  private_network_cidr = var.private_network_cidr

  instance_key_name = var.instance_key_name
  instances_cp      = var.instances_cp
  instances_wk      = var.instances_wk
  image_name        = var.image_name

  instance_type_bn = var.instance_type_bn
  instance_type_cp = var.instance_type_cp
  instance_type_wk = var.instance_type_wk

  private_ip_bn = var.private_ip_bn

  additional_lb_filter = [var.working_instance_ip]
}

resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    module.kubernetes_cluster.security_group_name.bastion
  ]
  type      = "IN"
  from_port = 22
  to_port   = 22
  protocol  = "TCP"
  cidr_ip   = var.working_instance_ip
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf (deleted)

@@ -1,301 +0,0 @@
#################################################
##
## Local variables
##
locals {
  # e.g. east-11 is 11
  az_num = reverse(split("-", var.availability_zone))[0]
  # e.g. east-11 is e11
  az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"

  # Port used by the protocol
  port_ssh     = 22
  port_kubectl = 6443
  port_kubelet = 10250

  # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
  port_bgp   = 179
  port_vxlan = 4789
  port_etcd  = 2379
}

#################################################
##
## General
##

# data
data "nifcloud_image" "this" {
  image_name = var.image_name
}

# private lan
resource "nifcloud_private_lan" "this" {
  private_lan_name  = "${var.prefix}lan"
  availability_zone = var.availability_zone
  cidr_block        = var.private_network_cidr
  accounting_type   = var.accounting_type
}

#################################################
##
## Bastion
##
resource "nifcloud_security_group" "bn" {
  group_name        = "${var.prefix}bn"
  description       = "${var.prefix} bastion"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "bn" {

  instance_id    = "${local.az_short_name}${var.prefix}bn01"
  security_group = nifcloud_security_group.bn.group_name
  instance_type  = var.instance_type_bn

  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = var.private_ip_bn
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}bn01"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Control Plane
##
resource "nifcloud_security_group" "cp" {
  group_name        = "${var.prefix}cp"
  description       = "${var.prefix} control plane"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "cp" {
  for_each = var.instances_cp

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.cp.group_name
  instance_type  = var.instance_type_cp
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

resource "nifcloud_load_balancer" "this" {
  load_balancer_name = "${local.az_short_name}${var.prefix}cp"
  accounting_type    = var.accounting_type
  balancing_type     = 1 // Round-Robin
  load_balancer_port = local.port_kubectl
  instance_port      = local.port_kubectl
  instances          = [for v in nifcloud_instance.cp : v.instance_id]
  filter = concat(
    [for k, v in nifcloud_instance.cp : v.public_ip],
    [for k, v in nifcloud_instance.wk : v.public_ip],
    var.additional_lb_filter,
  )
  filter_type = 1 // Allow
}

#################################################
##
## Worker
##
resource "nifcloud_security_group" "wk" {
  group_name        = "${var.prefix}wk"
  description       = "${var.prefix} worker"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "wk" {
  for_each = var.instances_wk

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.wk.group_name
  instance_type  = var.instance_type_wk
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Security Group Rule: Kubernetes
##

# ssh
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_ssh
  to_port                    = local.port_ssh
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.bn.group_name
}

# kubectl
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubectl
  to_port                    = local.port_kubectl
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# kubelet
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

#################################################
##
## Security Group Rule: calico
##

# vslan
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "vxlan_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# bgp
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "bgp_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# etcd
resource "nifcloud_security_group_rule" "etcd_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_etcd
  to_port                    = local.port_etcd
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf (deleted)

@@ -1,48 +0,0 @@
output "control_plane_lb" {
  description = "The DNS name of LB for control plane"
  value       = nifcloud_load_balancer.this.dns_name
}

output "security_group_name" {
  description = "The security group used in the cluster"
  value = {
    bastion       = nifcloud_security_group.bn.group_name,
    control_plane = nifcloud_security_group.cp.group_name,
    worker        = nifcloud_security_group.wk.group_name,
  }
}

output "private_network_id" {
  description = "The private network used in the cluster"
  value       = nifcloud_private_lan.this.id
}

output "bastion_info" {
  description = "The basion information in cluster"
  value = { (nifcloud_instance.bn.instance_id) : {
    instance_id = nifcloud_instance.bn.instance_id,
    unique_id   = nifcloud_instance.bn.unique_id,
    private_ip  = nifcloud_instance.bn.private_ip,
    public_ip   = nifcloud_instance.bn.public_ip,
  } }
}

output "worker_info" {
  description = "The worker information in cluster"
  value = { for v in nifcloud_instance.wk : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}

output "control_plane_info" {
  description = "The control plane information in cluster"
  value = { for v in nifcloud_instance.cp : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl (deleted)

@@ -1,45 +0,0 @@
#!/bin/bash

#################################################
##
## IP Address
##
configure_private_ip_address () {
  cat << EOS > /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens192:
      dhcp4: yes
      dhcp6: yes
      dhcp-identifier: mac
    ens224:
      dhcp4: no
      dhcp6: no
      addresses: [${private_ip_address}]
EOS
  netplan apply
}
configure_private_ip_address

#################################################
##
## SSH
##
configure_ssh_port () {
  sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
configure_ssh_port

#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}

#################################################
##
## Disable swap files genereated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"
contrib/terraform/nifcloud/modules/kubernetes-cluster/versions.tf (deleted)

@@ -1,9 +0,0 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = ">= 1.8.0, < 2.0.0"
    }
  }
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf (deleted)

@@ -1,81 +0,0 @@
variable "availability_zone" {
  description = "The availability zone"
  type        = string
}

variable "prefix" {
  description = "The prefix for the entire cluster"
  type        = string
  validation {
    condition     = length(var.prefix) <= 5
    error_message = "Must be a less than 5 character long."
  }
}

variable "private_network_cidr" {
  description = "The subnet of private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "private_ip_bn" {
  description = "Private IP of bastion server"
  type        = string
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of worker"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of control plane"
  type        = string
}

variable "image_name" {
  description = "The name of image"
  type        = string
}

variable "additional_lb_filter" {
  description = "Additional LB filter"
  type        = list(string)
}

variable "accounting_type" {
  type    = string
  default = "1"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be a 1 or 2."
  }
}
contrib/terraform/nifcloud/output.tf (deleted)

@@ -1,3 +0,0 @@
output "kubernetes_cluster" {
  value = module.kubernetes_cluster
}
contrib/terraform/nifcloud/sample-inventory/cluster.tfvars (deleted)

@@ -1,22 +0,0 @@
region = "jp-west-1"
az     = "west-11"

instance_key_name = "deployerkey"

instance_type_bn = "e-medium"
instance_type_cp = "e-medium"
instance_type_wk = "e-medium"

private_network_cidr = "192.168.30.0/24"
instances_cp = {
  "cp01" : { private_ip : "192.168.30.11/24" }
  "cp02" : { private_ip : "192.168.30.12/24" }
  "cp03" : { private_ip : "192.168.30.13/24" }
}
instances_wk = {
  "wk01" : { private_ip : "192.168.30.21/24" }
  "wk02" : { private_ip : "192.168.30.22/24" }
}
private_ip_bn = "192.168.30.10/24"

image_name = "Ubuntu Server 22.04 LTS"
contrib/terraform/nifcloud/sample-inventory/group_vars (deleted symlink)

@@ -1 +0,0 @@
../../../../inventory/sample/group_vars
contrib/terraform/nifcloud/versions.tf (deleted)

@@ -1,9 +0,0 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = "1.8.0"
    }
  }
}
contrib/terraform/nifcloud/variables.tf (deleted)

@@ -1,77 +0,0 @@
variable "region" {
  description = "The region"
  type        = string
}

variable "az" {
  description = "The availability zone"
  type        = string
}

variable "private_ip_bn" {
  description = "Private IP of bastion server"
  type        = string
}

variable "private_network_cidr" {
  description = "The subnet of private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of worker"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of control plane"
  type        = string
}

variable "image_name" {
  description = "The name of image"
  type        = string
}

variable "working_instance_ip" {
  description = "The IP address to connect to bastion server."
  type        = string
}

variable "accounting_type" {
  type    = string
  default = "2"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be a 1 or 2."
  }
}
@@ -281,9 +281,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`k8s_allowed_remote_ips_ipv6` | List of IPv6 CIDR allowed to initiate a SSH connection, empty by default |
|`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
-|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}]` by default |
+|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}, { "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
-|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default |
+|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, `[{ "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
@@ -271,7 +271,14 @@ variable "master_allowed_ports" {
variable "master_allowed_ports_ipv6" {
  type = list(any)

-  default = []
+  default = [
+    {
+      "protocol"         = "ipv6-icmp"
+      "port_range_min"   = 0
+      "port_range_max"   = 0
+      "remote_ip_prefix" = "::/0"
+    },
+  ]
}

variable "worker_allowed_ports" {

@@ -297,6 +304,12 @@ variable "worker_allowed_ports_ipv6" {
      "port_range_max"   = 32767
      "remote_ip_prefix" = "::/0"
    },
+    {
+      "protocol"         = "ipv6-icmp"
+      "port_range_min"   = 0
+      "port_range_max"   = 0
+      "remote_ip_prefix" = "::/0"
+    },
  ]
}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
-      version = "~>5.9.0"
+      version = "~>5.29.1"
    }
  }
  required_version = ">= 0.13"

@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
-      version = "~>5.9.0"
+      version = "~>5.29.1"
    }
  }
  required_version = ">= 0.13"
@@ -1,5 +1,13 @@
# Cilium

+## Unprivileged agent configuration
+
+By default, Cilium is installed with `securityContext.privileged: false`. You need to set the `kube_owner` variable to `root` in the inventory:
+
+```yml
+kube_owner: root
+```
+
## IP Address Management (IPAM)

IP Address Management (IPAM) is responsible for the allocation and management of IP addresses used by network endpoints (container and others) managed by Cilium. The default mode is "Cluster Scope".

@@ -237,7 +245,7 @@ cilium_operator_extra_volume_mounts:
## Choose Cilium version

```yml
-cilium_version: "1.18.2"
+cilium_version: "1.18.6"
```
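Taken together, a minimal group_vars sketch combining the two settings quoted on this page (values are the ones shown above, not additional defaults):

```yml
kube_owner: root
cilium_version: "1.18.6"
```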

## Add variable to config
@@ -32,7 +32,7 @@ add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`

* Disable nodelocaldns

-The nodelocal dns IP is not reacheable.
+The nodelocal dns IP is not reachable.

Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml`
@@ -65,9 +65,8 @@ In kubespray, the default runtime name is "runc", and it can be configured with
containerd_runc_runtime:
  name: runc
  type: "io.containerd.runc.v2"
-  engine: ""
-  root: ""
  options:
+    Root: ""
    SystemdCgroup: "false"
    BinaryName: /usr/local/bin/my-runc
  base_runtime_spec: cri-base.json
@@ -80,7 +80,7 @@ The `crio_remap_enable` configures the `/etc/subuid` and `/etc/subgid` files to
By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.

The `crio_default_capabilities` configure the default containers capabilities for the crio.
-Defaults capabilties are:
+Defaults capabilities are:

```yaml
crio_default_capabilities:
docs/_sidebar.md (1 change, generated)

@@ -6,7 +6,6 @@
* [Downloads](/docs/advanced/downloads.md)
* [Gcp-lb](/docs/advanced/gcp-lb.md)
* [Kubernetes-reliability](/docs/advanced/kubernetes-reliability.md)
-* [Mitogen](/docs/advanced/mitogen.md)
* [Netcheck](/docs/advanced/netcheck.md)
* [Ntp](/docs/advanced/ntp.md)
* [Proxy](/docs/advanced/proxy.md)
@@ -6,7 +6,7 @@
- [Create New TLS Root CA Certificate and Key](#create-new-tls-root-ca-certificate-and-key)
- [Install Cloudflare PKI/TLS `cfssl` Toolkit.](#install-cloudflare-pkitls-cfssl-toolkit)
- [Create Root Certificate Authority (CA) Configuration File](#create-root-certificate-authority-ca-configuration-file)
-- [Create Certficate Signing Request (CSR) Configuration File](#create-certficate-signing-request-csr-configuration-file)
+- [Create Certificate Signing Request (CSR) Configuration File](#create-certificate-signing-request-csr-configuration-file)
- [Create TLS Root CA Certificate and Key](#create-tls-root-ca-certificate-and-key)

Cert-Manager is a native Kubernetes certificate management controller. It can help with issuing certificates from a variety of sources, such as Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, or self signed. It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry.

@@ -134,7 +134,7 @@ $ cat > ca-config.json <<EOF
EOF
```

-#### Create Certficate Signing Request (CSR) Configuration File
+#### Create Certificate Signing Request (CSR) Configuration File

The TLS certificate `names` details can be updated to your own specific requirements.
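For orientation, the usual cfssl flow that consumes such a CA config and CSR file looks like the following sketch (the `ca-csr.json` file name is an assumption, not taken from this fragment):

```ShellSession
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# emits ca.pem (root certificate) and ca-key.pem (private key)
```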
@@ -1,4 +1,4 @@
-# GCP Load Balancers for type=LoadBalacer of Kubernetes Services
+# GCP Load Balancers for type=LoadBalancer of Kubernetes Services

> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider`. (except external cloud provider)
docs/advanced/mitogen.md (deleted)

@@ -1,30 +0,0 @@
# Mitogen

*Warning:* Mitogen support is now deprecated in kubespray due to upstream not releasing an updated version to support ansible 4.x (ansible-base 2.11.x) and above. The CI support has been stripped for mitogen and we are no longer validating any support or regressions for it. The supporting mitogen install playbook and integration documentation will be removed in a later version.

[Mitogen for Ansible](https://mitogen.networkgenomics.com/ansible_detailed.html) allow a 1.25x - 7x speedup and a CPU usage reduction of at least 2x, depending on network conditions, modules executed, and time already spent by targets on useful work. Mitogen cannot improve a module once it is executing, it can only ensure the module executes as quickly as possible.

## Install

```ShellSession
ansible-playbook contrib/mitogen/mitogen.yml
```

The above playbook sets the ansible `strategy` and `strategy_plugins` in `ansible.cfg` but you can also enable them if you use your own `ansible.cfg` by setting the environment varialbles:

```ShellSession
export ANSIBLE_STRATEGY=mitogen_linear
export ANSIBLE_STRATEGY_PLUGINS=plugins/mitogen/ansible_mitogen/plugins/strategy
```

... or `ansible.cfg` setup:

```ini
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
strategy=mitogen_linear
```

## Limitation

If you are experiencing problems, please see the [documentation](https://mitogen.networkgenomics.com/ansible_detailed.html#noteworthy-differences).
@@ -42,13 +42,10 @@ Kubespray expects users to use one of the following variables sources for setting or overriding
|----------------------------------------|------------------------------------------------------------------------------|
| inventory vars | |
| - **inventory group_vars** | most used |
-| - inventory host_vars | host specifc vars overrides, group_vars is usually more practical |
+| - inventory host_vars | host specific vars overrides, group_vars is usually more practical |
| **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` |

-[!IMPORTANT]
-Extra vars are best used to override kubespray internal variables, for instances, roles/vars/.
-Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray
-interface. Thus they can change, disappear, or break stuff unexpectedly.
+> Extra vars are best used to override kubespray internal variables, for instances, roles/vars/. Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray interface. Thus they can change, disappear, or break stuff unexpectedly.

## Ansible tags

@@ -122,7 +119,7 @@ The following tags are defined in playbooks:
| metrics_server | Configuring metrics_server |
| netchecker | Installing netchecker K8s app |
| network | Configuring networking plugins for K8s |
-| mounts | Umount kubelet dirs when reseting |
+| mounts | Umount kubelet dirs when resetting |
| multus | Network plugin multus |
| nginx | Configuring LB for kube-apiserver instances |
| node | Configuring K8s minion (compute) node role |

@@ -181,17 +178,13 @@ ansible-playbook -i inventory/sample/hosts.ini cluster.yml \

Note: use `--tags` and `--skip-tags` wisely and only if you're 100% sure what you're doing.

-## Mitogen
-
-Mitogen support is deprecated, please see [mitogen related docs](/docs/advanced/mitogen.md) for usage and reasons for deprecation.
-
## Troubleshooting Ansible issues

Having the wrong version of ansible, ansible collections or python dependencies can cause issue.
-In particular, Kubespray ship custom modules which Ansible needs to find, for which you should specify [ANSIBLE_LIBRAY](https://docs.ansible.com/ansible/latest/dev_guide/developing_locally.html#adding-a-module-or-plugin-outside-of-a-collection)
+In particular, Kubespray ship custom modules which Ansible needs to find, for which you should specify [ANSIBLE_LIBRARY](https://docs.ansible.com/ansible/latest/dev_guide/developing_locally.html#adding-a-module-or-plugin-outside-of-a-collection)

```ShellSession
-export ANSIBLE_LIBRAY=<kubespray_dir>/library`
+export ANSIBLE_LIBRARY=<kubespray_dir>/library`
```

A simple way to ensure you get all the correct version of Ansible is to use

@@ -200,11 +193,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
to access the inventory and SSH key in the container, like this:

```ShellSession
-git checkout v2.29.0
-docker pull quay.io/kubespray/kubespray:v2.29.0
+git checkout v2.30.0
+docker pull quay.io/kubespray/kubespray:v2.30.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.29.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
@@ -6,7 +6,7 @@ See [.gitlab-ci.yml](/.gitlab-ci.yml) and the included files for an overview.

## Runners

-Kubespray has 2 types of GitLab runners, both deployed on the Kubespray CI cluster (hosted on Oracle Cloud Infrastucture):
+Kubespray has 2 types of GitLab runners, both deployed on the Kubespray CI cluster (hosted on Oracle Cloud Infrastructure):

- pods: use the [gitlab-ci kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes/)
- vagrant: custom executor running in pods with access to the libvirt socket on the nodes
@@ -156,7 +156,7 @@ kube_feature_gates:
  - "NodeSwap=True"
```

-## Aditional files
+## Additional files

This section documents additional files used to complete a deployment of the kubespray CI, these files sit on the control-plane node and assume a working kubernetes cluster.
@@ -15,8 +15,8 @@ fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x
fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
flatcar4081 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
openeuler24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu24 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: |

@@ -33,8 +33,8 @@ fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@@ -51,7 +51,7 @@ fedora39 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -21,6 +21,12 @@ metallb_enabled: true
metallb_speaker_enabled: true
```

By default, MetalLB resources are deployed into the `metallb-system` namespace. You can override this namespace using a variable.

```yaml
metallb_namespace: woodenlb-system
```

By default only the MetalLB BGP speaker is allowed to run on control plane nodes. If you have a single node cluster or a cluster where control plane nodes are also worker nodes, you may need to enable tolerations for the MetalLB controller:

```yaml
@@ -35,7 +41,7 @@ metallb_config:
  effect: "NoSchedule"
```

If you'd like to set additional nodeSelector and tolerations values, you can do so in the following fasion:
If you'd like to set additional nodeSelector and tolerations values, you can do so in the following fashion:

```yaml
metallb_config:
```
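For illustration, a hedged sketch of a completed `metallb_config` override (the nesting under `controller` is an assumption based on the hunk above; verify the schema against your Kubespray version):

```yaml
metallb_config:
  controller:
    nodeselector:            # assumed key names
      kubernetes.io/os: linux
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
```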
@@ -37,4 +37,12 @@ If you have containers that are using iptables in the host network namespace (`h
you need to ensure they are using iptables-nft.
An example of how k8s does the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)

The kernel version is lower than the kubenretes 1.32 system validation, please refer to the [kernel requirements](../operations/kernel-requirements.md).
The kernel version is lower than the kubernetes 1.32 system validation, please refer to the [kernel requirements](../operations/kernel-requirements.md).

## Rocky Linux 10

(Experimental in Kubespray CI)

The official Rocky Linux 10 cloud image does not include `kernel-module-extra`. Both Kube Proxy and CNI rely on this package, and since it relates to kernel version compatibility (which may require VM reboots, etc.), we haven't found an ideal solution.

However, some users report that it doesn't affect them (minimal version). Therefore, the Kubespray CI Rocky Linux 10 image is built by Kubespray maintainers using `diskimage-builder`. For detailed methods, please refer to [the comments](https://github.com/kubernetes-sigs/kubespray/pull/12355#issuecomment-3705400093).

@@ -11,7 +11,7 @@ kubeadm_ignore_preflight_errors:

The Kernel Version Matrix:

| OS Verion | Kernel Verion | Kernel >=4.19 |
| OS Version | Kernel Version | Kernel >=4.19 |
|--- | --- | --- |
| RHEL 9 | 5.14 | :white_check_mark: |
| RHEL 8 | 4.18 | :x: |
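If you accept the risk of running below the validated kernel version, kubeadm's check can be skipped; a hedged sketch (`SystemVerification` is the kubeadm preflight check that includes kernel validation):

```yaml
# Sketch: skip kubeadm's kernel validation on e.g. RHEL 8 (kernel 4.18).
# Use at your own risk; kube-proxy and the CNI may still need >= 4.19 features.
kubeadm_ignore_preflight_errors:
  - SystemVerification
```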
@@ -31,6 +31,8 @@ That's it.

Append the new host to the inventory and run `cluster.yml`. You can NOT use `scale.yml` for that.

**Note:** When adding new control plane nodes, always append them to the end of the `kube_control_plane` group in your inventory. Adding control plane nodes in the first position is not supported and will cause the playbook to fail.
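For illustration, a sketch of a YAML inventory with the new node appended last (hostnames are hypothetical):

```yaml
kube_control_plane:
  hosts:
    node1:        # existing control plane node, keep first
    node2:        # existing control plane node
    node4:        # new control plane node, appended at the end
```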
### 2) Restart kube-system/nginx-proxy

On all hosts, restart the nginx-proxy pod. This pod is a local proxy for the apiserver. Kubespray will update its static config, but it needs to be restarted in order to reload.
@@ -2,7 +2,7 @@
namespace: kubernetes_sigs
description: Deploy a production ready Kubernetes cluster
name: kubespray
version: 2.29.0
version: 2.31.0
readme: README.md
authors:
  - The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)

@@ -38,6 +38,7 @@
loadSidebar: 'docs/_sidebar.md',
repo: 'https://github.com/kubernetes-sigs/kubespray',
auto2top: true,
noCompileLinks: ['.*\.ini'],
logo: '/logo/logo-clear.png'
}
</script>

@@ -11,15 +11,15 @@
# containerd_runc_runtime:
#   name: runc
#   type: "io.containerd.runc.v2"
#   engine: ""
#   root: ""
#   options:
#     Root: ""

# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
# - name: kata
#   type: "io.containerd.kata.v2"
#   engine: ""
#   root: ""
#   options:
#     Root: ""

# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216

@@ -22,7 +22,8 @@ local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# This is the user that owns tha cluster installation.
# This is the user that owns the cluster installation.
# Note: cilium needs to set kube_owner to root https://kubespray.io/#/docs/CNI/cilium?id=unprivileged-agent-configuration
kube_owner: kube

# This is the group that the cert creation scripts chgrp the

@@ -56,8 +56,8 @@ cilium_l2announcements: false
#
# Only effective when monitor aggregation is set to "medium" or higher.
# cilium_monitor_aggregation_flags: "all"
# Kube Proxy Replacement mode (strict/partial)
# cilium_kube_proxy_replacement: partial
# Kube Proxy Replacement mode (true/false)
# cilium_kube_proxy_replacement: false

# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
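A hedged sketch of opting in under the boolean form; this assumes a Cilium release that accepts only `true`/`false` here (the older `strict`/`partial` strings were removed in newer Cilium), so verify against the Cilium version you deploy:

```yaml
# Assumption: a Cilium release where this setting is strictly boolean.
cilium_kube_proxy_replacement: true
```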
@@ -1,5 +1,5 @@
# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
FROM ubuntu:jammy-20230308
# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)

@@ -44,11 +44,10 @@ ADD ./requirements.txt /kubespray/requirements.txt
ADD ./tests/requirements.txt /kubespray/tests/requirements.txt

RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
    && pip install --no-compile --no-cache-dir pip -U \
    && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
    && pip install --no-compile --no-cache-dir -r requirements.txt \
    && curl -L https://dl.k8s.io/release/v1.33.5/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/v1.33.5/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && pip install --break-system-packages --ignore-installed --no-compile --no-cache-dir pip -U \
    && pip install --break-system-packages --no-compile --no-cache-dir -r tests/requirements.txt \
    && curl -L https://dl.k8s.io/release/v1.34.3/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/v1.34.3/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl \
    # Install Vagrant
    && curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \

@@ -56,5 +55,5 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
    && rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
    && vagrant plugin install vagrant-libvirt \
    # Install Kubernetes collections
    && pip install --no-compile --no-cache-dir kubernetes \
    && pip install --break-system-packages --no-compile --no-cache-dir kubernetes \
    && ansible-galaxy collection install kubernetes.core
@@ -55,7 +55,7 @@
- { role: kubernetes-apps/kubelet-csr-approver, tags: kubelet-csr-approver }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
- { role: kubernetes/control-plane, tags: control-plane, upgrade_cluster_setup: true }
- { role: kubernetes/client, tags: client }
- { role: kubernetes/node-label, tags: node-label }
- { role: kubernetes/node-taint, tags: node-taint }

@@ -100,7 +100,7 @@
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray_defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
    - { role: win_nodes/kubernetes_patch, tags: ["control-plane", "win_nodes"] }

- name: Install Calico Route Reflector
  hosts: calico_rr

remove_node.yml (new file)
@@ -0,0 +1,3 @@
---
- name: Remove node
  ansible.builtin.import_playbook: playbooks/remove_node.yml

@@ -1,7 +1,7 @@
ansible==10.7.0
# Needed for community.crypto module
cryptography==46.0.2
cryptography==46.0.3
# Needed for jinja2 json_query templating
jmespath==1.0.1
jmespath==1.1.0
# Needed for ansible.utils.ipaddr
netaddr==1.3.0
@@ -9,6 +9,8 @@ platforms:
    vm_memory: 512
provisioner:
  name: ansible
  env:
    ANSIBLE_ROLES_PATH: ../../../
  config_options:
    defaults:
      callbacks_enabled: profile_tasks

@@ -9,6 +9,8 @@ platforms:
    vm_memory: 512
provisioner:
  name: ansible
  env:
    ANSIBLE_ROLES_PATH: ../../../
  config_options:
    defaults:
      callbacks_enabled: profile_tasks

@@ -37,8 +37,3 @@ override_system_hostname: true
is_fedora_coreos: false

skip_http_proxy_on_os_packages: false

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

@@ -21,6 +21,8 @@ platforms:
    vm_memory: 512
provisioner:
  name: ansible
  env:
    ANSIBLE_ROLES_PATH: ../../../
  config_options:
    defaults:
      callbacks_enabled: profile_tasks

@@ -13,10 +13,9 @@ containerd_snapshotter: "overlayfs"
containerd_runc_runtime:
  name: runc
  type: "io.containerd.runc.v2"
  engine: ""
  root: ""
  base_runtime_spec: cri-base.json
  options:
    Root: ""
    SystemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
    BinaryName: "{{ bin_dir }}/runc"

@@ -24,8 +23,8 @@ containerd_additional_runtimes: []
# Example for Kata Containers as additional runtime:
# - name: kata
#   type: "io.containerd.kata.v2"
#   engine: ""
#   root: ""
#   options:
#     Root: ""

containerd_base_runtime_spec_rlimit_nofile: 65535

@@ -36,8 +35,8 @@ containerd_default_base_runtime_spec_patch:
      hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
      soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}"

# Can help reduce disk usage
# https://github.com/containerd/containerd/discussions/6295
# Only for containerd < 2.1; discard unpacked layers to save disk space
# https://github.com/containerd/containerd/blob/release/2.1/docs/cri/config.md#image-pull-configuration-since-containerd-v21
containerd_discard_unpacked_layers: true

containerd_base_runtime_specs:

@@ -34,8 +34,6 @@
  with_items:
    - "{{ containerd_systemd_dir }}"
    - "{{ containerd_cfg_dir }}"
    - "{{ containerd_storage_dir }}"
    - "{{ containerd_state_dir }}"

- name: Containerd | Write containerd proxy drop-in
  template:

@@ -52,8 +52,6 @@ oom_score = {{ containerd_oom_score }}
{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.{{ runtime.name }}]
  runtime_type = "{{ runtime.type }}"
  runtime_engine = "{{ runtime.engine }}"
  runtime_root = "{{ runtime.root }}"
{% if runtime.base_runtime_spec is defined %}
  base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
{% endif %}
@@ -78,7 +76,9 @@ oom_score = {{ containerd_oom_score }}

[plugins."io.containerd.cri.v1.images"]
  snapshotter = "{{ containerd_snapshotter }}"
{% if containerd_discard_unpacked_layers and containerd_version is version('2.1.0', '<') %}
  discard_unpacked_layers = {{ containerd_discard_unpacked_layers | lower }}
{% endif %}
  image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}"
  [plugins."io.containerd.cri.v1.images".pinned_images]
    sandbox = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"

@@ -25,6 +25,8 @@ provisioner:
    group_vars:
      all:
        become: true
      k8s_cluster:
        container_manager: docker
playbooks:
  create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
  prepare: ../../../molecule/prepare.yml

@@ -32,6 +32,8 @@ crio_registry_auth: []
crio_seccomp_profile: ""
crio_selinux: "{{ (preinstall_selinux_state == 'enforcing') | lower }}"
crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}"
# Set the pull progress timeout
crio_pull_progress_timeout: "10s"

# Override system default for storage driver
# crio_storage_driver: "overlay"

@@ -2,8 +2,6 @@
- name: Converge
  hosts: all
  become: true
  vars:
    container_manager: crio
  roles:
    - role: kubespray_defaults
    - role: container-engine/cri-o

@@ -41,6 +41,10 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
  inventory:
    group_vars:
      k8s_cluster:
        container_manager: crio
playbooks:
  create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
  prepare: ../../../molecule/prepare.yml

@@ -2,7 +2,6 @@
- name: Test CRI-O cri
  import_playbook: ../../../molecule/test_cri.yml
  vars:
    container_manager: crio
    cri_socket: unix:///var/run/crio/crio.sock
    cri_name: cri-o
- name: Test running a container with crun
@@ -1,16 +1,16 @@
{% if crio_registry_auth is defined and crio_registry_auth|length %}
{
{% for reg in crio_registry_auth %}
"auths": {
{% for reg in crio_registry_auth %}
"{{ reg.registry }}": {
"auth": "{{ (reg.username + ':' + reg.password) | string | b64encode }}"
}
{% if not loop.last %}
},
},
{% else %}
}
}
{% endif %}
{% endfor %}
}
}
{% else %}
{}
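For context, a sketch of the variable shape this template consumes (registry and credentials are placeholders):

```yaml
crio_registry_auth:
  - registry: registry.example.com   # placeholder registry
    username: alice                  # placeholder
    password: s3cret                 # placeholder; keep real values in vault
```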
@@ -348,6 +348,12 @@ signature_policy = "{{ crio_signature_policy }}"
# ignore; the latter will ignore volumes entirely.
image_volumes = "mkdir"

# The timeout for an image pull to make progress until the pull operation gets
# canceled. This value will be also used for calculating the pull progress interval
# to pull_progress_timeout / 10. Can be set to 0 to disable the timeout as well as
# the progress output.
pull_progress_timeout = "{{ crio_pull_progress_timeout }}"

# The crio.network table containers settings pertaining to the management of
# CNI plugins.
[crio.network]

@@ -1,22 +0,0 @@
---
- name: Crictl | Download crictl
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.crictl) }}"

- name: Install crictl config
  template:
    src: crictl.yaml.j2
    dest: /etc/crictl.yaml
    owner: root
    mode: "0644"

- name: Copy crictl binary from download dir
  copy:
    src: "{{ local_release_dir }}/crictl"
    dest: "{{ bin_dir }}/crictl"
    mode: "0755"
    remote_src: true
  notify:
    - Get crictl completion
    - Install crictl completion

@@ -1,3 +1,22 @@
---
- name: Install crictl
  include_tasks: crictl.yml
- name: Crictl | Download crictl
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.crictl) }}"

- name: Install crictl config
  template:
    src: crictl.yaml.j2
    dest: /etc/crictl.yaml
    owner: root
    mode: "0644"

- name: Copy crictl binary from download dir
  copy:
    src: "{{ local_release_dir }}/crictl"
    dest: "{{ bin_dir }}/crictl"
    mode: "0755"
    remote_src: true
  notify:
    - Get crictl completion
    - Install crictl completion

@@ -21,6 +21,11 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
  inventory:
    group_vars:
      k8s_cluster:
        gvisor_enabled: true
        container_manager: containerd
playbooks:
  create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
  prepare: ../../../molecule/prepare.yml
@@ -12,11 +12,20 @@
    is_ostree: "{{ ostree.stat.exists }}"

- name: Runc | Uninstall runc package managed by package manager
  package:
    name: "{{ runc_package_name }}"
    state: absent
  when:
    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
    - not is_ostree
    - ansible_distribution != "Flatcar Container Linux by Kinvolk"
    - ansible_distribution != "Flatcar"
  block:
    - name: Runc | Remove package
      package:
        name: "{{ runc_package_name }}"
        state: absent
    - name: Runc | Remove orphaned binary
      file:
        path: /usr/bin/runc
        state: absent
      when: runc_bin_dir != "/usr/bin"

- name: Runc | Download runc binary
  include_tasks: "../../../download/tasks/download_file.yml"
@@ -29,10 +38,3 @@
    dest: "{{ runc_bin_dir }}/runc"
    mode: "0755"
    remote_src: true

- name: Runc | Remove orphaned binary
  file:
    path: /usr/bin/runc
    state: absent
  when: runc_bin_dir != "/usr/bin"
  ignore_errors: true # noqa ignore-errors

@@ -21,6 +21,11 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
  inventory:
    group_vars:
      k8s_cluster:
        youki_enabled: true
        container_manager: crio
playbooks:
  create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
  prepare: ../../../molecule/prepare.yml

@@ -5,8 +5,7 @@
    group: "{{ etcd_cert_group }}"
    state: directory
    owner: "{{ etcd_owner }}"
    mode: "{{ etcd_cert_dir_mode }}"
    recurse: true
    mode: "0700"

- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
  file:
@@ -145,15 +144,6 @@
    - ('k8s_cluster' in group_names) and
      sync_certs | default(false) and inventory_hostname not in groups['etcd']

- name: Gen_certs | check certificate permissions
  file:
    path: "{{ etcd_cert_dir }}"
    group: "{{ etcd_cert_group }}"
    state: directory
    owner: "{{ etcd_owner }}"
    mode: "{{ etcd_cert_dir_mode }}"
    recurse: true

# This is a hack around the fact kubeadm expect the same certs path on all kube_control_plane
# TODO: fix certs generation to have the same file everywhere
# OR work with kubeadm on node-specific config

@@ -32,23 +32,16 @@ DNS.{{ counter["dns"] }} = {{ hostvars[host]['etcd_access_address'] }}{{ increme
{# This will always expand to inventory_hostname, which can be a completely arbitrary name, that etcd will not know or care about, hence this line is (probably) redundant. #}
DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
{% endfor %}
{% if apiserver_loadbalancer_domain_name is defined %}
DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
{% endif %}
{% for etcd_alt_name in etcd_cert_alt_names %}
DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
{% endfor %}
{% for host in groups['etcd'] %}
{% if hostvars[host]['access_ip'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
{% endif %}
{% if hostvars[host]['access_ip6'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip6'] }}{{ increment(counter, 'ip') }}
{% endif %}
{% if ipv6_stack %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['ip6'] | default(hostvars[host]['fallback_ip6']) }}{{ increment(counter, 'ip') }}
{% endif %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['main_ip'] }}{{ increment(counter, 'ip') }}
{% for address in hostvars[host]['main_access_ips'] %}
IP.{{ counter["ip"] }} = {{ address }}{{ increment(counter, 'ip') }}
{% endfor %}
{% for address in hostvars[host]['main_ips'] %}
IP.{{ counter["ip"] }} = {{ address }}{{ increment(counter, 'ip') }}
{% endfor %}
{% endfor %}
{% for cert_alt_ip in etcd_cert_alt_ips %}
IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }}
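The rewritten SAN loop keys off the consolidated per-host address facts (`main_ip`, `main_ips`, `main_access_ips`); purely for illustration, a host might carry facts like the sketch below (all values hypothetical):

```yaml
# Hypothetical per-host values, for illustration only.
main_ip: 10.0.0.11
main_ips:
  - 10.0.0.11
  - fd00::11
main_access_ips:
  - 203.0.113.11
```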
@@ -18,7 +18,6 @@ etcd_backup_retention_count: -1
force_etcd_cert_refresh: true
etcd_config_dir: /etc/ssl/etcd
etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
etcd_cert_dir_mode: "0700"
etcd_cert_group: root
# Note: This does not set up DNS entries. It simply adds the following DNS
# entries to the certificate
@@ -117,11 +116,6 @@ etcd_retries: 4
# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer
etcd_experimental_initial_corrupt_check: true

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

# Enable distributed tracing
# https://etcd.io/docs/v3.5/op-guide/monitoring/#distributed-tracing
etcd_experimental_enable_distributed_tracing: false

@@ -1,6 +1,5 @@
---
gateway_api_enabled: false
gateway_api_version: 1.2.1

# `gateway_api_channel` default is "standard".
# "standard" release channel includes all resources that have graduated to GA or beta, including GatewayClass, Gateway, HTTPRoute, and ReferenceGrant.

@@ -27,11 +27,6 @@ vsphere_csi_aggressive_node_not_ready_timeout: 300

vsphere_csi_node_affinity: {}

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

# https://github.com/kubernetes-sigs/vsphere-csi-driver/blob/master/docs/book/features/volume_snapshot.md#how-to-enable-volume-snapshot--restore-feature-in-vsphere-csi-
# according to the above link, we can control the block-volume-snapshot parameter
vsphere_csi_block_volume_snapshot: false

@@ -8,3 +8,4 @@ local_path_provisioner_is_default_storageclass: "true"
local_path_provisioner_debug: false
local_path_provisioner_helper_image_repo: "busybox"
local_path_provisioner_helper_image_tag: "latest"
local_path_provisioner_resources: {}

@@ -35,6 +35,10 @@ spec:
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
{% if local_path_provisioner_resources %}
        resources:
          {{ local_path_provisioner_resources | to_nice_yaml | indent(10) | trim }}
{% endif %}
      volumes:
        - name: config-volume
          configMap:
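A hedged sketch of overriding the new variable, which the template above renders under `resources:`; the values are arbitrary:

```yaml
local_path_provisioner_resources:
  requests:
    cpu: 25m        # arbitrary example values
    memory: 64Mi
  limits:
    memory: 128Mi
```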
@@ -1,5 +1,7 @@
---
dependencies:
  - role: kubernetes-apps/utils

  - role: kubernetes-apps/ansible
    when:
      - inventory_hostname == groups['kube_control_plane'][0]

@@ -1,6 +1,7 @@
---
metallb_enabled: false
metallb_log_level: info
metallb_namespace: "metallb-system"
metallb_port: "7472"
metallb_memberlist_port: "7946"
metallb_speaker_enabled: "{{ metallb_enabled }}"

@@ -26,6 +26,16 @@ rules:
      verbs:
        - watch
        - list
    # Services are monitored for service LoadBalancer IP allocation
    - apiGroups: [""]
      resources:
        - services
        - services/status
      verbs:
        - get
        - list
        - update
        - watch
{% elif calico_datastore == "kdd" %}
    # Nodes are watched to monitor for deletions.
    - apiGroups: [""]
@@ -79,6 +89,7 @@ rules:
        - create
        - update
        - delete
        - watch
    # Needs access to update clusterinformations.
    - apiGroups: ["crd.projectcalico.org"]
      resources:

roles/kubernetes-apps/utils/vars/main.yml (new file)
@@ -0,0 +1,12 @@
---
_kubectl_apply_stdin:
  - "{{ kubectl }}"
  - apply
  - -f
  - "-"
  - -n
  - "{{ k8s_namespace }}"
  - --server-side="{{ server_side_apply | lower }}"
# TODO: switch to default SSA
server_side_apply: false
kubectl_apply_stdin: "{{ _kubectl_apply_stdin | join(' ') }}"
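A minimal sketch of a task that could consume the joined string (the template name is hypothetical; `stdin` is the standard `command` module parameter):

```yaml
- name: Apply a rendered manifest through kubectl stdin
  # roughly expands to: <kubectl> apply -f - -n <namespace> --server-side="false"
  command: "{{ kubectl_apply_stdin }}"
  args:
    stdin: "{{ lookup('template', 'some-manifest.yml.j2') }}"  # hypothetical template
  changed_when: false
```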
@@ -1,2 +0,0 @@
---
kubectl_apply_stdin: "{{ kubectl }} apply -f - -n {{ k8s_namespace }}"
@@ -2,6 +2,9 @@
# disable upgrade cluster
upgrade_cluster_setup: false

# Number of retries (with 5 seconds interval) to check that new control plane nodes
# are in Ready condition after joining
control_plane_node_become_ready_tries: 24
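Since the consuming task polls every 5 seconds (`delay: 5`), the default allows roughly 24 × 5 s = 120 s; a sketch of raising it:

```yaml
control_plane_node_become_ready_tries: 60   # 60 tries x 5 s interval = 300 s
```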
# By default the external API listens on all interfaces, this can be changed to
# listen on a specific address/interface.
# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost
@@ -240,6 +243,10 @@ auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"
# we can opt out from the default behavior by setting kubeadm_upgrade_auto_cert_renewal to false
kubeadm_upgrade_auto_cert_renewal: true

# Add Subject Alternative Names to the Kubernetes apiserver certificates.
# Useful if you access the API from multiple load balancers, for instance.
supplementary_addresses_in_ssl_keys: []
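For instance, extra SANs for two external load balancers (addresses are hypothetical):

```yaml
supplementary_addresses_in_ssl_keys:
  - 203.0.113.10          # hypothetical LB VIP
  - api.example.com       # hypothetical LB hostname
```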
# Bash alias of kubectl to interact with Kubernetes cluster much easier
# kubectl_alias: k

@@ -1,7 +1,7 @@
---
- name: Kubeadm | Check api is up
  uri:
    url: "https://{{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}/healthz"
    url: "https://{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}/healthz"
    validate_certs: false
  when: ('kube_control_plane' in group_names)
  register: _result

@@ -1,19 +0,0 @@
---

- name: Check which kube-control nodes are already members of the cluster
  command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
  register: kube_control_planes_raw
  ignore_errors: true
  changed_when: false

- name: Set fact joined_control_planes
  set_fact:
    joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
  delegate_to: "{{ item }}"
  loop: "{{ groups['kube_control_plane'] }}"
  when: kube_control_planes_raw is succeeded
  run_once: true

- name: Set fact first_kube_control_plane
  set_fact:
    first_kube_control_plane: "{{ joined_control_planes | default([]) | first | default(groups['kube_control_plane'] | first) }}"
@@ -11,24 +11,23 @@
  tags:
    - facts

- name: Upload certificates so they are fresh and not expired
  command: >-
    {{ bin_dir }}/kubeadm init phase
    --config {{ kube_config_dir }}/kubeadm-config.yaml
    upload-certs
    --upload-certs
  register: kubeadm_upload_cert
- name: Obtain kubeadm certificate key for joining control planes nodes
  when:
    - inventory_hostname == first_kube_control_plane
    - not kube_external_ca_mode

- name: Parse certificate key if not set
  set_fact:
    kubeadm_certificate_key: "{{ hostvars[first_kube_control_plane]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
  run_once: true
  when:
    - hostvars[first_kube_control_plane]['kubeadm_upload_cert'] is defined
    - hostvars[first_kube_control_plane]['kubeadm_upload_cert'] is not skipped
  block:
    - name: Upload certificates so they are fresh and not expired
      command: >-
        {{ bin_dir }}/kubeadm init phase
        --config {{ kube_config_dir }}/kubeadm-config.yaml
        upload-certs
        --upload-certs
      register: kubeadm_upload_cert
      delegate_to: "{{ first_kube_control_plane }}"

    - name: Parse certificate key if not set
      set_fact:
        kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"

- name: Wait for k8s apiserver
  wait_for:
@@ -99,3 +98,18 @@
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists

- name: Wait for new control plane nodes to be Ready
  when: kubeadm_already_run.stat.exists
  run_once: true
  command: >
    {{ kubectl }} get nodes --selector node-role.kubernetes.io/control-plane
    -o jsonpath-as-json="{.items[*].status.conditions[?(@.type == 'Ready')]}"
  register: control_plane_node_ready_conditions
  retries: "{{ control_plane_node_become_ready_tries }}"
  delay: 5
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  until: >
    control_plane_node_ready_conditions.stdout
    | from_json | selectattr('status', '==', 'True')
    | length == (groups['kube_control_plane'] | length)
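For intuition, the jsonpath query returns one `Ready` condition object per control plane node; a hypothetical two-node stdout (abridged) that the `until` expression parses:

```yaml
# Hypothetical registered stdout, parsed with from_json in the until clause:
# [
#   {"type": "Ready", "status": "True"},
#   {"type": "Ready", "status": "True"}
# ]
```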
@@ -25,9 +25,9 @@

- name: Kubeadm | aggregate all SANs
  set_fact:
    apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_ipv4_address + sans_ipv6_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
    apiserver_sans: "{{ _apiserver_sans | flatten | select | unique }}"
  vars:
    sans_base:
    _apiserver_sans:
      - "kubernetes"
      - "kubernetes.default"
      - "kubernetes.default.svc"
@@ -36,17 +36,17 @@
      - "localhost"
      - "127.0.0.1"
      - "::1"
    sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
    sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
    sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_access_ip') | list | select('defined') | list }}"
    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_ip') | list | select('defined') | list }}"
    sans_ipv4_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
    sans_ipv6_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv6', 'address']) | list | select('defined') | list }}"
    sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
    sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
    sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
    sans_kube_vip_address: "{{ [kube_vip_address] if kube_vip_address is defined and kube_vip_address else [] }}"
      - "{{ apiserver_loadbalancer_domain_name }}"
      - "{{ loadbalancer_apiserver.address | d('') }}"
      - "{{ supplementary_addresses_in_ssl_keys }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_access_ip') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_ip') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | select('defined') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv6', 'address']) | select('defined') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ansible_hostname') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ansible_fqdn') }}"
      - "{{ kube_override_hostname }}"
      - "{{ kube_vip_address }}"
  tags: facts
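The `flatten | select | unique` chain is what lets `_apiserver_sans` mix scalars, nested lists, and possibly-empty entries: `flatten` merges the nested lists, `select` with no test drops falsy items such as empty strings, and `unique` removes duplicates. A sketch of the effect (hypothetical values):

```yaml
# After flatten: ["kubernetes", "", "lb.example.com", "10.0.0.11", "10.0.0.11"]
# After select:  ["kubernetes", "lb.example.com", "10.0.0.11", "10.0.0.11"]
# After unique:  ["kubernetes", "lb.example.com", "10.0.0.11"]
```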
- name: Create audit-policy directory
@@ -90,7 +90,7 @@
# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
- name: Set kubeadm_config_api_fqdn define
  set_fact:
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}"
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name }}"
  when: loadbalancer_apiserver is defined

- name: Kubeadm | Create kubeadm config
@@ -179,9 +179,10 @@
    timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
    {{ bin_dir }}/kubeadm init
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors={{ kubeadm_ignore_preflight_errors | join(',') }}
    --ignore-preflight-errors={{ _ignore_errors | flatten | join(',') }}
    --skip-phases={{ kubeadm_init_phases_skip | join(',') }}
    {{ kube_external_ca_mode | ternary('', '--upload-certs') }}
    _ignore_errors: "{{ kubeadm_ignore_preflight_errors }}"
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Control plane | restart kubelet
@@ -195,6 +196,15 @@
# This retry task is separated from 1st task to show log of failure of 1st task.
- name: Kubeadm | Initialize first control plane node (retry)
  command: "{{ kubeadm_init_first_control_plane_cmd }}"
  vars:
    _errors_from_first_try:
      - 'FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml'
      - 'FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml'
      - 'FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml'
      - 'Port-10250'
    _ignore_errors:
      - "{{ kubeadm_ignore_preflight_errors }}"
      - "{{ _errors_from_first_try if 'all' not in kubeadm_ignore_preflight_errors else [] }}"
  register: kubeadm_init
  retries: 2
  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr

@@ -92,9 +92,6 @@
    - upgrade
  ignore_errors: true # noqa ignore-errors

- name: Define nodes already joined to existing cluster and first_kube_control_plane
  import_tasks: define-first-kube-control.yml

- name: Include kubeadm setup
  import_tasks: kubeadm-setup.yml

@@ -5,7 +5,6 @@ echo "## Check Expiration before renewal ##"
{{ bin_dir }}/kubeadm certs check-expiration

days_buffer=7 # set a time margin, because we should not renew at the last moment
calendar={{ auto_renew_certificates_systemd_calendar }}
next_time=$(systemctl show k8s-certs-renew.timer -p NextElapseUSecRealtime --value)

if [ "${next_time}" == "" ]; then

@@ -61,8 +61,6 @@ eviction_hard_control_plane: {}
kubelet_status_update_frequency: 10s

# kube-vip
kube_vip_version: 0.8.0

kube_vip_arp_enabled: false
kube_vip_interface:
kube_vip_services_interface:
@@ -80,7 +78,6 @@ kube_vip_bgp_peeraddress:
kube_vip_bgp_peerpass:
kube_vip_bgp_peeras: 65000
kube_vip_bgppeers:
kube_vip_address:
kube_vip_enableServicesElection: false
kube_vip_lb_enable: false
kube_vip_leasename: plndr-cp-lock

@@ -18,14 +18,7 @@
    owner: root
    mode: "0755"
    backup: true

- name: Haproxy | Get checksum from config
  stat:
    path: "{{ haproxy_config_dir }}/haproxy.cfg"
    get_attributes: false
    get_checksum: true
    get_mime: false
  register: haproxy_stat
  register: haproxy_conf

- name: Haproxy | Write static pod
  template:

@@ -18,14 +18,7 @@
    owner: root
    mode: "0755"
    backup: true

- name: Nginx-proxy | Get checksum from config
  stat:
    path: "{{ nginx_config_dir }}/nginx.conf"
    get_attributes: false
    get_checksum: true
    get_mime: false
  register: nginx_stat
  register: nginx_conf

- name: Nginx-proxy | Write static pod
  template:

@@ -32,7 +32,7 @@ frontend healthz
frontend kube_api_frontend
  bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{% if ipv6_stack -%}
  bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
  bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{% endif -%}
  mode tcp
  option tcplog

@@ -7,7 +7,7 @@ metadata:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kube-haproxy
  annotations:
    haproxy-cfg-checksum: "{{ haproxy_stat.stat.checksum }}"
    haproxy-cfg-checksum: "{{ haproxy_conf.checksum }}"
spec:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
Some files were not shown because too many files have changed in this diff.