mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2026-02-01 17:48:12 -03:30
Compare commits
444 Commits
release-2.
...
v2.21.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c4346e590f | ||
|
|
bd81c615c3 | ||
|
|
3d9fd082ff | ||
|
|
826282fe89 | ||
|
|
73774326b3 | ||
|
|
374438a3d6 | ||
|
|
fd80ef1ff1 | ||
|
|
235173bb5f | ||
|
|
1750dec254 | ||
|
|
52f52db8f3 | ||
|
|
db94812163 | ||
|
|
4a6eb7eaa2 | ||
|
|
58fe1a0ed6 | ||
|
|
c80bb0007a | ||
|
|
8a03bb1bb4 | ||
|
|
d919c58e21 | ||
|
|
19bc610f44 | ||
|
|
85a5a79ef5 | ||
|
|
c7cffb14a7 | ||
|
|
6f61f3d9cb | ||
|
|
6b4bb2a121 | ||
|
|
e288449c5d | ||
|
|
ea35021c96 | ||
|
|
754424eca7 | ||
|
|
4ad56e2772 | ||
|
|
6f1352eb53 | ||
|
|
bf8c64af08 | ||
|
|
a98ab40434 | ||
|
|
6549bb12fc | ||
|
|
1329d3f03b | ||
|
|
843e908fa4 | ||
|
|
0ff883afeb | ||
|
|
0d5bcd3e20 | ||
|
|
a8cef962e2 | ||
|
|
b50890172b | ||
|
|
ffad2152b3 | ||
|
|
6674438849 | ||
|
|
4bc5e8d912 | ||
|
|
8ca0bfffe0 | ||
|
|
48282a344f | ||
|
|
050fde6327 | ||
|
|
4d3104b334 | ||
|
|
85fa6af313 | ||
|
|
1c4db6132d | ||
|
|
744c81d451 | ||
|
|
61be93b173 | ||
|
|
406fbdb4e7 | ||
|
|
136f14dec4 | ||
|
|
ab80342750 | ||
|
|
2c2e608eac | ||
|
|
8267922a16 | ||
|
|
90719a9990 | ||
|
|
93f71df628 | ||
|
|
791064a3d9 | ||
|
|
e90f32bdee | ||
|
|
9fe89a0641 | ||
|
|
14699f5e98 | ||
|
|
2f81bfa25e | ||
|
|
438da0c8e6 | ||
|
|
25f317233c | ||
|
|
5e4d68b848 | ||
|
|
4728739597 | ||
|
|
fc0d58ff48 | ||
|
|
491e260d20 | ||
|
|
a132733b2d | ||
|
|
b377dbb96f | ||
|
|
c4d753c931 | ||
|
|
ee3b7c5da5 | ||
|
|
dcc267f6f4 | ||
|
|
ccf60fc9ca | ||
|
|
a38a3e7ddf | ||
|
|
beb4aa52ea | ||
|
|
f7d0fb9ab2 | ||
|
|
ff331f4eba | ||
|
|
94eae6a8dc | ||
|
|
f8d6b54dbb | ||
|
|
67c4f2d95e | ||
|
|
03fefa8933 | ||
|
|
c8ec77a734 | ||
|
|
4f32f94a51 | ||
|
|
3dc384a17a | ||
|
|
f1d0d1a9fe | ||
|
|
c036a7d871 | ||
|
|
6e63f3d2b4 | ||
|
|
09748e80e9 | ||
|
|
44a4f356ba | ||
|
|
a0f41bf82a | ||
|
|
5ae3e2818b | ||
|
|
1a0b81ac64 | ||
|
|
20d99886ca | ||
|
|
b9fe301036 | ||
|
|
b5844018f2 | ||
|
|
30508502d3 | ||
|
|
bca601d377 | ||
|
|
65191375b8 | ||
|
|
a534eb45ce | ||
|
|
e796f08184 | ||
|
|
ed38d8d3a1 | ||
|
|
07ad5ecfce | ||
|
|
4db5e663c3 | ||
|
|
529faeea9e | ||
|
|
47510899c7 | ||
|
|
4cd949c7e1 | ||
|
|
31d7e64073 | ||
|
|
7c1ee142dd | ||
|
|
25e86c5ca9 | ||
|
|
c41dd92007 | ||
|
|
a564d89d46 | ||
|
|
6c6a6e85da | ||
|
|
ed0acd8027 | ||
|
|
b9a690463d | ||
|
|
cbf4586c4c | ||
|
|
c3986957c4 | ||
|
|
8795cf6494 | ||
|
|
80af8a5e79 | ||
|
|
b60f65c1e8 | ||
|
|
943107115a | ||
|
|
ddbe9956e4 | ||
|
|
fdbcce3a5e | ||
|
|
f007c77641 | ||
|
|
9439487219 | ||
|
|
df6da52195 | ||
|
|
6ca89c80af | ||
|
|
7fe0b87d83 | ||
|
|
8a654b6955 | ||
|
|
5a8cf824f6 | ||
|
|
5c25b57989 | ||
|
|
5d1fe64bc8 | ||
|
|
a731e25778 | ||
|
|
0d6dc08578 | ||
|
|
40261fdf14 | ||
|
|
590b4aa240 | ||
|
|
2a696ddb34 | ||
|
|
d7f08d1b0c | ||
|
|
4aa1ef28ea | ||
|
|
58faef6ff6 | ||
|
|
34a52a7028 | ||
|
|
ce751cb89d | ||
|
|
5cf2883444 | ||
|
|
6bff338bad | ||
|
|
c78862052c | ||
|
|
1f54cef71c | ||
|
|
d00508105b | ||
|
|
c272421910 | ||
|
|
78624c5bcb | ||
|
|
c681435432 | ||
|
|
4d3f637684 | ||
|
|
5e14398af4 | ||
|
|
990f87acc8 | ||
|
|
eeb376460d | ||
|
|
ef707b3461 | ||
|
|
2af918132e | ||
|
|
b9b654714e | ||
|
|
fe399e0e0c | ||
|
|
b192053e28 | ||
|
|
a84271aa7e | ||
|
|
1901b512d2 | ||
|
|
9fdda7eca8 | ||
|
|
a68ed897f0 | ||
|
|
582ff96d19 | ||
|
|
0374a55eb3 | ||
|
|
ccbe38f78c | ||
|
|
958840da89 | ||
|
|
1530411218 | ||
|
|
e5ec0f18c0 | ||
|
|
0f44e8c812 | ||
|
|
1cc0f3c8c9 | ||
|
|
d9c39c274e | ||
|
|
c38fb866b7 | ||
|
|
5ad1d9db5e | ||
|
|
32f3d92d6b | ||
|
|
72b45eec2e | ||
|
|
23716b0eff | ||
|
|
859df84b45 | ||
|
|
131bd933a6 | ||
|
|
52904ee6ad | ||
|
|
e3339fe3d8 | ||
|
|
547ef747da | ||
|
|
63b27ea067 | ||
|
|
bc5881b70a | ||
|
|
f4b95d42a6 | ||
|
|
ef76a578a4 | ||
|
|
3b99d24ceb | ||
|
|
4701abff4c | ||
|
|
717b8daafe | ||
|
|
c346e46022 | ||
|
|
24632ae81b | ||
|
|
befde271eb | ||
|
|
d689f57c94 | ||
|
|
ad3f503c0c | ||
|
|
ae6c780af6 | ||
|
|
8b9cd3959a | ||
|
|
dffeab320e | ||
|
|
999586a110 | ||
|
|
f8d5487f8e | ||
|
|
4189008245 | ||
|
|
44115d7d7a | ||
|
|
841e2f44c0 | ||
|
|
a8e4984cf7 | ||
|
|
49196c2ec4 | ||
|
|
3646dc0bd2 | ||
|
|
694de1d67b | ||
|
|
31caab5f92 | ||
|
|
472996c8b3 | ||
|
|
d62c67a5f5 | ||
|
|
e486151aea | ||
|
|
9c407e667d | ||
|
|
18efdc2c51 | ||
|
|
6dff39344b | ||
|
|
c4de3df492 | ||
|
|
f2e11f088b | ||
|
|
782f0511b9 | ||
|
|
fa093ee609 | ||
|
|
612bcc4bb8 | ||
|
|
4ad67acedd | ||
|
|
467dc19cbd | ||
|
|
726711513f | ||
|
|
9468642269 | ||
|
|
d387d4811f | ||
|
|
1b3c2dab2e | ||
|
|
76573bf293 | ||
|
|
5d3326b93f | ||
|
|
68dac4e181 | ||
|
|
262c96ec0b | ||
|
|
2acdc33aa1 | ||
|
|
8acd33d0df | ||
|
|
a2e23c1a71 | ||
|
|
1b5cc175b9 | ||
|
|
a71da25b57 | ||
|
|
5ac614f97d | ||
|
|
b8b8b82ff4 | ||
|
|
7da3dbcb39 | ||
|
|
680293e79c | ||
|
|
023b16349e | ||
|
|
c4976437a8 | ||
|
|
97ca2f3c78 | ||
|
|
e76385e7cd | ||
|
|
7c2fb227f4 | ||
|
|
08bfa0b18f | ||
|
|
952cad8d63 | ||
|
|
5bce39abf8 | ||
|
|
fc57c0b27e | ||
|
|
dd4bc5fbfe | ||
|
|
d2a7434c67 | ||
|
|
5fa885b150 | ||
|
|
f3fb758f0c | ||
|
|
6386ec029c | ||
|
|
ad7cefa352 | ||
|
|
09d9bc910e | ||
|
|
e2f1f8d69d | ||
|
|
be2bfd867c | ||
|
|
133a7a0e1b | ||
|
|
efb47edb9f | ||
|
|
36bec19a84 | ||
|
|
6db6c8678c | ||
|
|
5603f9f374 | ||
|
|
7ebb8c3f2e | ||
|
|
acb6f243fd | ||
|
|
220f149299 | ||
|
|
1baabb3c05 | ||
|
|
617b17ad46 | ||
|
|
8af86e4c1e | ||
|
|
9dc9a670a5 | ||
|
|
b46ddf35fc | ||
|
|
de762400ad | ||
|
|
e60ece2b5e | ||
|
|
e6976a54e1 | ||
|
|
64daaf1887 | ||
|
|
1c75ec9ec1 | ||
|
|
c8a61ec98c | ||
|
|
aeeae76750 | ||
|
|
30b062fd43 | ||
|
|
8f899a1101 | ||
|
|
386c739d5b | ||
|
|
fddff783c8 | ||
|
|
bbd1161147 | ||
|
|
ab938602a9 | ||
|
|
e31890806c | ||
|
|
30c77ea4c1 | ||
|
|
175cdba9b1 | ||
|
|
ea29cd0890 | ||
|
|
68653c31c0 | ||
|
|
be5fdab3aa | ||
|
|
f4daf5856e | ||
|
|
49d869f662 | ||
|
|
b36bb9115a | ||
|
|
9ad2d24ad8 | ||
|
|
0088fe0ab7 | ||
|
|
ab93b17a7e | ||
|
|
9f1b980844 | ||
|
|
86d05ac180 | ||
|
|
bf6fcf6347 | ||
|
|
b9e4e27195 | ||
|
|
8585134db4 | ||
|
|
7e862939db | ||
|
|
0d3bd69a17 | ||
|
|
2b97b661d8 | ||
|
|
24f12b024d | ||
|
|
f7d363dc96 | ||
|
|
47050003a0 | ||
|
|
4df6e35270 | ||
|
|
307f598bc8 | ||
|
|
eb10249a75 | ||
|
|
b4318e9967 | ||
|
|
c53561c9a0 | ||
|
|
f2f9f1d377 | ||
|
|
4487a374b1 | ||
|
|
06f8368ce6 | ||
|
|
5b976a8d80 | ||
|
|
e73803c72c | ||
|
|
b3876142d2 | ||
|
|
9f11946f8a | ||
|
|
9c28f61dbd | ||
|
|
09291bbdd2 | ||
|
|
7fa6314791 | ||
|
|
65d95d767a | ||
|
|
8306adb102 | ||
|
|
4b3db07cdb | ||
|
|
c24a3a3b15 | ||
|
|
aca6be3adf | ||
|
|
9617532561 | ||
|
|
ff5e487e32 | ||
|
|
9c51ac5157 | ||
|
|
07eab539a6 | ||
|
|
a608a048ad | ||
|
|
0cfa03fa8a | ||
|
|
6525461d97 | ||
|
|
f592fa1235 | ||
|
|
2e1863af78 | ||
|
|
2a282711df | ||
|
|
91073d7379 | ||
|
|
3ce5458f32 | ||
|
|
98c194735c | ||
|
|
626ea64f66 | ||
|
|
0d32c0d92b | ||
|
|
ce04fdde72 | ||
|
|
4ed3c85a88 | ||
|
|
14063b023c | ||
|
|
3d32f0e953 | ||
|
|
d821bed2ea | ||
|
|
058e05df41 | ||
|
|
a7ba7cdcd5 | ||
|
|
c01656b1e3 | ||
|
|
5071529a74 | ||
|
|
6d543b830a | ||
|
|
e6154998fd | ||
|
|
01c6239043 | ||
|
|
4607ac2e93 | ||
|
|
9ca5632582 | ||
|
|
51195212b4 | ||
|
|
7414409aa0 | ||
|
|
adfd77f11d | ||
|
|
f3ea8cf45e | ||
|
|
3bb9542606 | ||
|
|
1d0b3829ed | ||
|
|
a5d7178bf8 | ||
|
|
cbef8ea407 | ||
|
|
2ff4ae1f08 | ||
|
|
edf7f53f76 | ||
|
|
f58816c33c | ||
|
|
1562a9c2ec | ||
|
|
6cd243f14e | ||
|
|
4b03f6c20f | ||
|
|
d0a2ba37e8 | ||
|
|
e8ccbebd6f | ||
|
|
d4de9d096f | ||
|
|
e1f06dd406 | ||
|
|
6f82cf12f5 | ||
|
|
ca8080a695 | ||
|
|
55d14090d0 | ||
|
|
da8498bb6f | ||
|
|
b33896844e | ||
|
|
ca212c08de | ||
|
|
784439dccf | ||
|
|
d818c1c6d9 | ||
|
|
b9384ad913 | ||
|
|
76b0cbcb4e | ||
|
|
6bf3306401 | ||
|
|
bf477c24d3 | ||
|
|
79f6cd774a | ||
|
|
c3c9a42502 | ||
|
|
4a92b7221a | ||
|
|
9d5d945bdb | ||
|
|
475ce05979 | ||
|
|
57d7029317 | ||
|
|
e4fe679916 | ||
|
|
123632f5ed | ||
|
|
56d83c931b | ||
|
|
a22ae6143a | ||
|
|
a1ec0571b2 | ||
|
|
2db39d4856 | ||
|
|
e7729daefc | ||
|
|
97b4d79ed5 | ||
|
|
890fad389d | ||
|
|
0c203ece2d | ||
|
|
9e7f89d2a2 | ||
|
|
24c8ba832a | ||
|
|
c2700266b0 | ||
|
|
2cd8c51a07 | ||
|
|
589823bdc1 | ||
|
|
5dc8be9aa2 | ||
|
|
fad296616c | ||
|
|
ec01b40e85 | ||
|
|
2de5c4821c | ||
|
|
9efe145688 | ||
|
|
51bc64fb35 | ||
|
|
6380483e8b | ||
|
|
ae1dcb031f | ||
|
|
9535a41187 | ||
|
|
47495c336b | ||
|
|
d69d4a8303 | ||
|
|
ab4d590547 | ||
|
|
85271fc2e5 | ||
|
|
f6159c5677 | ||
|
|
668b9b026c | ||
|
|
77de7cb785 | ||
|
|
e5d6c042a9 | ||
|
|
3ae397019c | ||
|
|
7d3e59cf2e | ||
|
|
4eb83bb7f6 | ||
|
|
1429ba9a07 | ||
|
|
889454f2bc | ||
|
|
2fba94c5e5 | ||
|
|
4726a110fc | ||
|
|
6b43d6aff2 | ||
|
|
024a3ee551 | ||
|
|
cd7381d8de | ||
|
|
f53764f949 | ||
|
|
57c3aa4560 | ||
|
|
bb530da5c2 | ||
|
|
cc6cbfbe71 | ||
|
|
6f556f5451 | ||
|
|
9074bd297b | ||
|
|
8030e6f76c | ||
|
|
27bd7fd737 | ||
|
|
77f436fa39 | ||
|
|
814760ba25 | ||
|
|
14c0f368b6 | ||
|
|
0761659a43 | ||
|
|
a4f752fb02 | ||
|
|
b2346cdaec | ||
|
|
01ca7293f5 | ||
|
|
4dfce51ded | ||
|
|
f82ed24c03 |
5
.gitignore
vendored
5
.gitignore
vendored
@@ -3,7 +3,10 @@
|
|||||||
**/vagrant_ansible_inventory
|
**/vagrant_ansible_inventory
|
||||||
*.iml
|
*.iml
|
||||||
temp
|
temp
|
||||||
|
contrib/offline/offline-files
|
||||||
|
contrib/offline/offline-files.tar.gz
|
||||||
.idea
|
.idea
|
||||||
|
.vscode
|
||||||
.tox
|
.tox
|
||||||
.cache
|
.cache
|
||||||
*.bak
|
*.bak
|
||||||
@@ -11,6 +14,7 @@ temp
|
|||||||
*.tfstate.backup
|
*.tfstate.backup
|
||||||
.terraform/
|
.terraform/
|
||||||
contrib/terraform/aws/credentials.tfvars
|
contrib/terraform/aws/credentials.tfvars
|
||||||
|
.terraform.lock.hcl
|
||||||
/ssh-bastion.conf
|
/ssh-bastion.conf
|
||||||
**/*.sw[pon]
|
**/*.sw[pon]
|
||||||
*~
|
*~
|
||||||
@@ -108,3 +112,4 @@ roles/**/molecule/**/__pycache__/
|
|||||||
|
|
||||||
# Temp location used by our scripts
|
# Temp location used by our scripts
|
||||||
scripts/tmp/
|
scripts/tmp/
|
||||||
|
tmp.md
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
---
|
---
|
||||||
stages:
|
stages:
|
||||||
|
- build
|
||||||
- unit-tests
|
- unit-tests
|
||||||
- deploy-part1
|
- deploy-part1
|
||||||
- moderator
|
- moderator
|
||||||
@@ -8,7 +9,7 @@ stages:
|
|||||||
- deploy-special
|
- deploy-special
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
KUBESPRAY_VERSION: v2.18.1
|
KUBESPRAY_VERSION: v2.20.0
|
||||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||||
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
||||||
ANSIBLE_FORCE_COLOR: "true"
|
ANSIBLE_FORCE_COLOR: "true"
|
||||||
@@ -34,7 +35,8 @@ variables:
|
|||||||
RECOVER_CONTROL_PLANE_TEST: "false"
|
RECOVER_CONTROL_PLANE_TEST: "false"
|
||||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
|
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
|
||||||
TERRAFORM_VERSION: 1.0.8
|
TERRAFORM_VERSION: 1.0.8
|
||||||
ANSIBLE_MAJOR_VERSION: "2.10"
|
ANSIBLE_MAJOR_VERSION: "2.11"
|
||||||
|
PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
|
||||||
|
|
||||||
before_script:
|
before_script:
|
||||||
- ./tests/scripts/rebase.sh
|
- ./tests/scripts/rebase.sh
|
||||||
@@ -46,7 +48,7 @@ before_script:
|
|||||||
.job: &job
|
.job: &job
|
||||||
tags:
|
tags:
|
||||||
- packet
|
- packet
|
||||||
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
|
image: $PIPELINE_IMAGE
|
||||||
artifacts:
|
artifacts:
|
||||||
when: always
|
when: always
|
||||||
paths:
|
paths:
|
||||||
@@ -76,6 +78,7 @@ ci-authorized:
|
|||||||
only: []
|
only: []
|
||||||
|
|
||||||
include:
|
include:
|
||||||
|
- .gitlab-ci/build.yml
|
||||||
- .gitlab-ci/lint.yml
|
- .gitlab-ci/lint.yml
|
||||||
- .gitlab-ci/shellcheck.yml
|
- .gitlab-ci/shellcheck.yml
|
||||||
- .gitlab-ci/terraform.yml
|
- .gitlab-ci/terraform.yml
|
||||||
|
|||||||
16
.gitlab-ci/build.yml
Normal file
16
.gitlab-ci/build.yml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
pipeline image:
|
||||||
|
stage: build
|
||||||
|
image: docker:20.10.22-cli
|
||||||
|
variables:
|
||||||
|
DOCKER_TLS_CERTDIR: ""
|
||||||
|
services:
|
||||||
|
- name: docker:20.10.22-dind
|
||||||
|
# See https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27300 for why this is required
|
||||||
|
command: ["--tls=false"]
|
||||||
|
before_script:
|
||||||
|
- echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
|
||||||
|
script:
|
||||||
|
# DOCKER_HOST is overwritten if we set it as a GitLab variable
|
||||||
|
- DOCKER_HOST=tcp://docker:2375; docker build --network host --file pipeline.Dockerfile --tag $PIPELINE_IMAGE .
|
||||||
|
- docker push $PIPELINE_IMAGE
|
||||||
@@ -68,6 +68,20 @@ markdownlint:
|
|||||||
script:
|
script:
|
||||||
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
|
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
|
||||||
|
|
||||||
|
check-readme-versions:
|
||||||
|
stage: unit-tests
|
||||||
|
tags: [light]
|
||||||
|
image: python:3
|
||||||
|
script:
|
||||||
|
- tests/scripts/check_readme_versions.sh
|
||||||
|
|
||||||
|
check-typo:
|
||||||
|
stage: unit-tests
|
||||||
|
tags: [light]
|
||||||
|
image: python:3
|
||||||
|
script:
|
||||||
|
- tests/scripts/check_typo.sh
|
||||||
|
|
||||||
ci-matrix:
|
ci-matrix:
|
||||||
stage: unit-tests
|
stage: unit-tests
|
||||||
tags: [light]
|
tags: [light]
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
tags: [c3.small.x86]
|
tags: [c3.small.x86]
|
||||||
only: [/^pr-.*$/]
|
only: [/^pr-.*$/]
|
||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
|
image: $PIPELINE_IMAGE
|
||||||
services: []
|
services: []
|
||||||
stage: deploy-part1
|
stage: deploy-part1
|
||||||
before_script:
|
before_script:
|
||||||
@@ -44,7 +44,7 @@ molecule_no_container_engines:
|
|||||||
molecule_docker:
|
molecule_docker:
|
||||||
extends: .molecule
|
extends: .molecule
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/molecule_run.sh -i container-engine/docker
|
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
molecule_containerd:
|
molecule_containerd:
|
||||||
@@ -60,13 +60,6 @@ molecule_cri-o:
|
|||||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
|
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
molecule_cri-dockerd:
|
|
||||||
extends: .molecule
|
|
||||||
stage: deploy-part2
|
|
||||||
script:
|
|
||||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
|
|
||||||
when: on_success
|
|
||||||
|
|
||||||
# Stage 3 container engines don't get as much attention so allow them to fail
|
# Stage 3 container engines don't get as much attention so allow them to fail
|
||||||
molecule_kata:
|
molecule_kata:
|
||||||
extends: .molecule
|
extends: .molecule
|
||||||
|
|||||||
@@ -31,23 +31,6 @@ packet_ubuntu20-calico-aio:
|
|||||||
variables:
|
variables:
|
||||||
RESET_CHECK: "true"
|
RESET_CHECK: "true"
|
||||||
|
|
||||||
# Exericse ansible variants during the nightly jobs
|
|
||||||
packet_ubuntu20-calico-aio-ansible-2_9:
|
|
||||||
stage: deploy-part1
|
|
||||||
extends: .packet_periodic
|
|
||||||
when: on_success
|
|
||||||
variables:
|
|
||||||
ANSIBLE_MAJOR_VERSION: "2.9"
|
|
||||||
RESET_CHECK: "true"
|
|
||||||
|
|
||||||
packet_ubuntu20-calico-aio-ansible-2_10:
|
|
||||||
stage: deploy-part1
|
|
||||||
extends: .packet_periodic
|
|
||||||
when: on_success
|
|
||||||
variables:
|
|
||||||
ANSIBLE_MAJOR_VERSION: "2.10"
|
|
||||||
RESET_CHECK: "true"
|
|
||||||
|
|
||||||
packet_ubuntu20-calico-aio-ansible-2_11:
|
packet_ubuntu20-calico-aio-ansible-2_11:
|
||||||
stage: deploy-part1
|
stage: deploy-part1
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
@@ -68,11 +51,26 @@ packet_ubuntu20-aio-docker:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
|
packet_ubuntu20-calico-aio-hardening:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
packet_ubuntu18-calico-aio:
|
packet_ubuntu18-calico-aio:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
|
packet_ubuntu22-aio-docker:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
packet_ubuntu22-calico-aio:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
packet_centos7-flannel-addons-ha:
|
packet_centos7-flannel-addons-ha:
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
@@ -153,12 +151,29 @@ packet_almalinux8-calico:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
|
packet_rockylinux8-calico:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
packet_rockylinux9-calico:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
packet_rockylinux9-cilium:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
variables:
|
||||||
|
RESET_CHECK: "true"
|
||||||
|
|
||||||
packet_almalinux8-docker:
|
packet_almalinux8-docker:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_fedora34-docker-weave:
|
packet_fedora36-docker-weave:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -216,19 +231,19 @@ packet_centos7-multus-calico:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_oracle7-canal-ha:
|
packet_centos7-canal-ha:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_fedora35-docker-calico:
|
packet_fedora36-docker-calico:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
variables:
|
variables:
|
||||||
RESET_CHECK: "true"
|
RESET_CHECK: "true"
|
||||||
|
|
||||||
packet_fedora34-calico-selinux:
|
packet_fedora35-calico-selinux:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -248,7 +263,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_fedora34-kube-ovn:
|
packet_fedora36-kube-ovn:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -256,7 +271,7 @@ packet_fedora34-kube-ovn:
|
|||||||
# ### PR JOBS PART3
|
# ### PR JOBS PART3
|
||||||
# Long jobs (45min+)
|
# Long jobs (45min+)
|
||||||
|
|
||||||
packet_centos7-docker-weave-upgrade-ha:
|
packet_centos7-weave-upgrade-ha:
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -276,7 +291,7 @@ packet_ubuntu20-calico-ha-wireguard:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_debian10-calico-upgrade:
|
packet_debian11-calico-upgrade:
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -291,7 +306,12 @@ packet_almalinux8-calico-remove-node:
|
|||||||
REMOVE_NODE_CHECK: "true"
|
REMOVE_NODE_CHECK: "true"
|
||||||
REMOVE_NODE_NAME: "instance-3"
|
REMOVE_NODE_NAME: "instance-3"
|
||||||
|
|
||||||
packet_debian10-calico-upgrade-once:
|
packet_ubuntu20-calico-etcd-kubeadm:
|
||||||
|
stage: deploy-part3
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
packet_debian11-calico-upgrade-once:
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|||||||
@@ -11,6 +11,6 @@ shellcheck:
|
|||||||
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
|
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
|
||||||
- shellcheck --version
|
- shellcheck --version
|
||||||
script:
|
script:
|
||||||
# Run shellcheck for all *.sh except contrib/
|
# Run shellcheck for all *.sh
|
||||||
- find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
|
- find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
|
||||||
except: ['triggers', 'master']
|
except: ['triggers', 'master']
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
tags: [c3.small.x86]
|
tags: [c3.small.x86]
|
||||||
only: [/^pr-.*$/]
|
only: [/^pr-.*$/]
|
||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
|
image: $PIPELINE_IMAGE
|
||||||
services: []
|
services: []
|
||||||
before_script:
|
before_script:
|
||||||
- apt-get update && apt-get install -y python3-pip
|
- apt-get update && apt-get install -y python3-pip
|
||||||
@@ -43,6 +43,7 @@ vagrant_ubuntu20-flannel:
|
|||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: on_success
|
when: on_success
|
||||||
|
allow_failure: false
|
||||||
|
|
||||||
vagrant_ubuntu16-kube-router-sep:
|
vagrant_ubuntu16-kube-router-sep:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
|
|||||||
@@ -1,2 +1,3 @@
|
|||||||
---
|
---
|
||||||
MD013: false
|
MD013: false
|
||||||
|
MD029: false
|
||||||
|
|||||||
48
.pre-commit-config.yaml
Normal file
48
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
---
|
||||||
|
repos:
|
||||||
|
- repo: https://github.com/adrienverge/yamllint.git
|
||||||
|
rev: v1.27.1
|
||||||
|
hooks:
|
||||||
|
- id: yamllint
|
||||||
|
args: [--strict]
|
||||||
|
|
||||||
|
- repo: https://github.com/markdownlint/markdownlint
|
||||||
|
rev: v0.11.0
|
||||||
|
hooks:
|
||||||
|
- id: markdownlint
|
||||||
|
args: [ -r, "~MD013,~MD029" ]
|
||||||
|
exclude: "^.git"
|
||||||
|
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
|
- id: ansible-lint
|
||||||
|
name: ansible-lint
|
||||||
|
entry: ansible-lint -v
|
||||||
|
language: python
|
||||||
|
pass_filenames: false
|
||||||
|
additional_dependencies:
|
||||||
|
- .[community]
|
||||||
|
|
||||||
|
- id: ansible-syntax-check
|
||||||
|
name: ansible-syntax-check
|
||||||
|
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
|
||||||
|
language: python
|
||||||
|
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
|
||||||
|
|
||||||
|
- id: tox-inventory-builder
|
||||||
|
name: tox-inventory-builder
|
||||||
|
entry: bash -c "cd contrib/inventory_builder && tox"
|
||||||
|
language: python
|
||||||
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: check-readme-versions
|
||||||
|
name: check-readme-versions
|
||||||
|
entry: tests/scripts/check_readme_versions.sh
|
||||||
|
language: script
|
||||||
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: ci-matrix
|
||||||
|
name: ci-matrix
|
||||||
|
entry: tests/scripts/md-table/test.sh
|
||||||
|
language: script
|
||||||
|
pass_filenames: false
|
||||||
@@ -16,7 +16,12 @@ pip install -r tests/requirements.txt
|
|||||||
|
|
||||||
#### Linting
|
#### Linting
|
||||||
|
|
||||||
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to add call these tools as part of your pre-commit hook and avoid a lot of back end forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
|
Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
pre-commit install
|
||||||
|
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
|
||||||
|
```
|
||||||
|
|
||||||
#### Molecule
|
#### Molecule
|
||||||
|
|
||||||
@@ -33,7 +38,9 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
|
|||||||
1. Submit an issue describing your proposed change to the repo in question.
|
1. Submit an issue describing your proposed change to the repo in question.
|
||||||
2. The [repo owners](OWNERS) will respond to your issue promptly.
|
2. The [repo owners](OWNERS) will respond to your issue promptly.
|
||||||
3. Fork the desired repo, develop and test your code changes.
|
3. Fork the desired repo, develop and test your code changes.
|
||||||
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
|
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
|
||||||
5. Submit a pull request.
|
5. Addess any pre-commit validation failures.
|
||||||
6. Work with the reviewers on their suggestions.
|
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
|
||||||
7. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
|
7. Submit a pull request.
|
||||||
|
8. Work with the reviewers on their suggestions.
|
||||||
|
9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
|
||||||
|
|||||||
31
Dockerfile
31
Dockerfile
@@ -1,5 +1,5 @@
|
|||||||
# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
|
# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
|
||||||
FROM ubuntu:focal-20220316
|
FROM ubuntu:focal-20220531
|
||||||
|
|
||||||
ARG ARCH=amd64
|
ARG ARCH=amd64
|
||||||
ARG TZ=Etc/UTC
|
ARG TZ=Etc/UTC
|
||||||
@@ -7,15 +7,7 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
|||||||
|
|
||||||
RUN apt update -y \
|
RUN apt update -y \
|
||||||
&& apt install -y \
|
&& apt install -y \
|
||||||
libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
|
curl python3 python3-pip sshpass \
|
||||||
ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
|
||||||
&& add-apt-repository \
|
|
||||||
"deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
|
|
||||||
$(lsb_release -cs) \
|
|
||||||
stable" \
|
|
||||||
&& apt update -y && apt-get install --no-install-recommends -y docker-ce \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Some tools like yamllint need this
|
# Some tools like yamllint need this
|
||||||
@@ -25,13 +17,20 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
|||||||
ENV LANG=C.UTF-8
|
ENV LANG=C.UTF-8
|
||||||
|
|
||||||
WORKDIR /kubespray
|
WORKDIR /kubespray
|
||||||
COPY . .
|
COPY *yml /kubespray/
|
||||||
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
|
COPY roles /kubespray/roles
|
||||||
&& /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
|
COPY inventory /kubespray/inventory
|
||||||
&& python3 -m pip install --no-cache-dir -r requirements.txt \
|
COPY library /kubespray/library
|
||||||
&& update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
COPY extra_playbooks /kubespray/extra_playbooks
|
||||||
|
|
||||||
RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
|
RUN python3 -m pip install --no-cache-dir \
|
||||||
|
ansible==5.7.1 \
|
||||||
|
ansible-core==2.12.5 \
|
||||||
|
cryptography==3.4.8 \
|
||||||
|
jinja2==2.11.3 \
|
||||||
|
netaddr==0.7.19 \
|
||||||
|
MarkupSafe==1.1.1 \
|
||||||
|
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
|
||||||
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
|
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
|
||||||
&& chmod a+x kubectl \
|
&& chmod a+x kubectl \
|
||||||
&& mv kubectl /usr/local/bin/kubectl
|
&& mv kubectl /usr/local/bin/kubectl
|
||||||
|
|||||||
@@ -8,6 +8,8 @@ aliases:
|
|||||||
- floryut
|
- floryut
|
||||||
- oomichi
|
- oomichi
|
||||||
- cristicalin
|
- cristicalin
|
||||||
|
- liupeng0518
|
||||||
|
- yankay
|
||||||
kubespray-reviewers:
|
kubespray-reviewers:
|
||||||
- holmsten
|
- holmsten
|
||||||
- bozzo
|
- bozzo
|
||||||
@@ -16,6 +18,9 @@ aliases:
|
|||||||
- jayonlau
|
- jayonlau
|
||||||
- cristicalin
|
- cristicalin
|
||||||
- liupeng0518
|
- liupeng0518
|
||||||
|
- yankay
|
||||||
|
- cyclinder
|
||||||
|
- mzaian
|
||||||
kubespray-emeritus_approvers:
|
kubespray-emeritus_approvers:
|
||||||
- riverzhang
|
- riverzhang
|
||||||
- atoms
|
- atoms
|
||||||
|
|||||||
105
README.md
105
README.md
@@ -13,7 +13,7 @@ You can get your invite [here](http://slack.k8s.io/)
|
|||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
To deploy the cluster you can use :
|
Below are several ways to use Kubespray to deploy a Kubernetes cluster.
|
||||||
|
|
||||||
### Ansible
|
### Ansible
|
||||||
|
|
||||||
@@ -41,34 +41,46 @@ cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
|||||||
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
|
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
|
Note: When Ansible is already installed via system packages on the control node,
|
||||||
As a consequence, `ansible-playbook` command will fail with:
|
Python packages installed via `sudo pip install -r requirements.txt` will go to
|
||||||
|
a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
|
||||||
|
Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
|
||||||
|
buntu). As a consequence, the `ansible-playbook` command will fail with:
|
||||||
|
|
||||||
```raw
|
```raw
|
||||||
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
|
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
|
||||||
```
|
```
|
||||||
|
|
||||||
probably pointing on a task depending on a module present in requirements.txt.
|
This likely indicates that a task depends on a module present in ``requirements.txt``.
|
||||||
|
|
||||||
One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
|
One way of addressing this is to uninstall the system Ansible package then
|
||||||
A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
|
reinstall Ansible via ``pip``, but this not always possible and one must
|
||||||
|
take care regarding package versions.
|
||||||
|
A workaround consists of setting the `ANSIBLE_LIBRARY`
|
||||||
|
and `ANSIBLE_MODULE_UTILS` environment variables respectively to
|
||||||
|
the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
|
||||||
|
installation location, which is the ``Location`` shown by running
|
||||||
|
`pip show [package]` before executing `ansible-playbook`.
|
||||||
|
|
||||||
A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
|
A simple way to ensure you get all the correct version of Ansible is to use
|
||||||
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
|
the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
|
||||||
|
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
|
||||||
|
to access the inventory and SSH key in the container, like this:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
docker pull quay.io/kubespray/kubespray:v2.19.1
|
git checkout v2.20.0
|
||||||
|
docker pull quay.io/kubespray/kubespray:v2.20.0
|
||||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||||
quay.io/kubespray/kubespray:v2.19.1 bash
|
quay.io/kubespray/kubespray:v2.20.0 bash
|
||||||
# Inside the container you may now run the kubespray playbooks:
|
# Inside the container you may now run the kubespray playbooks:
|
||||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Vagrant
|
### Vagrant
|
||||||
|
|
||||||
For Vagrant we need to install python dependencies for provisioning tasks.
|
For Vagrant we need to install Python dependencies for provisioning tasks.
|
||||||
Check if Python and pip are installed:
|
Check that ``Python`` and ``pip`` are installed:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
python -V && pip -V
|
python -V && pip -V
|
||||||
@@ -111,59 +123,77 @@ vagrant up
|
|||||||
- [Adding/replacing a node](docs/nodes.md)
|
- [Adding/replacing a node](docs/nodes.md)
|
||||||
- [Upgrades basics](docs/upgrades.md)
|
- [Upgrades basics](docs/upgrades.md)
|
||||||
- [Air-Gap installation](docs/offline-environment.md)
|
- [Air-Gap installation](docs/offline-environment.md)
|
||||||
|
- [NTP](docs/ntp.md)
|
||||||
- [Hardening](docs/hardening.md)
|
- [Hardening](docs/hardening.md)
|
||||||
|
- [Mirror](docs/mirror.md)
|
||||||
- [Roadmap](docs/roadmap.md)
|
- [Roadmap](docs/roadmap.md)
|
||||||
|
|
||||||
## Supported Linux Distributions
|
## Supported Linux Distributions
|
||||||
|
|
||||||
- **Flatcar Container Linux by Kinvolk**
|
- **Flatcar Container Linux by Kinvolk**
|
||||||
- **Debian** Bullseye, Buster, Jessie, Stretch
|
- **Debian** Bullseye, Buster, Jessie, Stretch
|
||||||
- **Ubuntu** 16.04, 18.04, 20.04
|
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
|
||||||
- **CentOS/RHEL** 7, [8](docs/centos8.md)
|
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
|
||||||
- **Fedora** 34, 35
|
- **Fedora** 35, 36
|
||||||
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
||||||
- **openSUSE** Leap 15.x/Tumbleweed
|
- **openSUSE** Leap 15.x/Tumbleweed
|
||||||
- **Oracle Linux** 7, [8](docs/centos8.md)
|
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
|
||||||
- **Alma Linux** [8](docs/centos8.md)
|
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
|
||||||
- **Rocky Linux** [8](docs/centos8.md)
|
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
|
||||||
|
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
||||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
||||||
|
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
|
||||||
|
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
|
||||||
|
|
||||||
Note: Upstart/SysV init based OS types are not supported.
|
Note: Upstart/SysV init based OS types are not supported.
|
||||||
|
|
||||||
## Supported Components
|
## Supported Components
|
||||||
|
|
||||||
- Core
|
- Core
|
||||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.7
|
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.6
|
||||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.3
|
- [etcd](https://github.com/etcd-io/etcd) v3.5.6
|
||||||
- [docker](https://www.docker.com/) v20.10 (see note)
|
- [docker](https://www.docker.com/) v20.10 (see note)
|
||||||
- [containerd](https://containerd.io/) v1.6.4
|
- [containerd](https://containerd.io/) v1.6.15
|
||||||
- [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||||
- Network Plugin
|
- Network Plugin
|
||||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
|
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
|
||||||
- [calico](https://github.com/projectcalico/calico) v3.22.3
|
- [calico](https://github.com/projectcalico/calico) v3.24.5
|
||||||
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||||
- [cilium](https://github.com/cilium/cilium) v1.11.3
|
- [cilium](https://github.com/cilium/cilium) v1.12.1
|
||||||
- [flanneld](https://github.com/flannel-io/flannel) v0.17.0
|
- [flannel](https://github.com/flannel-io/flannel) v0.20.2
|
||||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.2
|
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
|
||||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0
|
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
||||||
- [multus](https://github.com/intel/multus-cni) v3.8
|
- [multus](https://github.com/intel/multus-cni) v3.8
|
||||||
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
||||||
|
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
|
||||||
- Application
|
- Application
|
||||||
|
- [cert-manager](https://github.com/jetstack/cert-manager) v1.11.0
|
||||||
|
- [coredns](https://github.com/coredns/coredns) v1.9.3
|
||||||
|
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
|
||||||
|
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
|
||||||
|
- [argocd](https://argoproj.github.io/) v2.5.7
|
||||||
|
- [helm](https://helm.sh/) v3.10.3
|
||||||
|
- [metallb](https://metallb.universe.tf/) v0.12.1
|
||||||
|
- [registry](https://github.com/distribution/distribution) v2.8.1
|
||||||
|
- Storage Plugin
|
||||||
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
||||||
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
||||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.8.0
|
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
||||||
- [coredns](https://github.com/coredns/coredns) v1.8.6
|
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
||||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.2.1
|
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
||||||
|
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
|
||||||
|
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
|
||||||
|
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
|
||||||
|
|
||||||
## Container Runtime Notes
|
## Container Runtime Notes
|
||||||
|
|
||||||
- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
- Supported Docker versions are 18.09, 19.03 and 20.10. The *recommended* Docker version is 20.10. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
|
||||||
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- **Minimum required version of Kubernetes is v1.21**
|
- **Minimum required version of Kubernetes is v1.23**
|
||||||
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
||||||
- The target servers are configured to allow **IPv4 forwarding**.
|
- The target servers are configured to allow **IPv4 forwarding**.
|
||||||
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
||||||
@@ -174,7 +204,7 @@ Note: Upstart/SysV init based OS types are not supported.
|
|||||||
or command parameters `--become or -b` should be specified.
|
or command parameters `--become or -b` should be specified.
|
||||||
|
|
||||||
Hardware:
|
Hardware:
|
||||||
These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
|
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
|
||||||
|
|
||||||
- Master
|
- Master
|
||||||
- Memory: 1500 MB
|
- Memory: 1500 MB
|
||||||
@@ -183,7 +213,7 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa
|
|||||||
|
|
||||||
## Network Plugins
|
## Network Plugins
|
||||||
|
|
||||||
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||||
|
|
||||||
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
|
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||||
|
|
||||||
@@ -210,7 +240,7 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
|
|||||||
|
|
||||||
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
|
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
|
||||||
|
|
||||||
The choice is defined with the variable `kube_network_plugin`. There is also an
|
The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
|
||||||
option to leverage built-in cloud provider networking instead.
|
option to leverage built-in cloud provider networking instead.
|
||||||
See also [Network checker](docs/netcheck.md).
|
See also [Network checker](docs/netcheck.md).
|
||||||
|
|
||||||
@@ -231,6 +261,7 @@ See also [Network checker](docs/netcheck.md).
|
|||||||
|
|
||||||
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
|
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
|
||||||
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
|
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
|
||||||
|
- [Kubean](https://github.com/kubean-io/kubean)
|
||||||
|
|
||||||
## CI Tests
|
## CI Tests
|
||||||
|
|
||||||
|
|||||||
24
RELEASE.md
24
RELEASE.md
@@ -9,10 +9,10 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
|
|||||||
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
|
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
|
||||||
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
|
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
|
||||||
7. An approver creates a release branch in the form `release-X.Y`
|
7. An approver creates a release branch in the form `release-X.Y`
|
||||||
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
|
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
|
||||||
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
|
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
|
||||||
10. The release issue is closed
|
10. The release issue is closed
|
||||||
11. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||||
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
||||||
|
|
||||||
## Major/minor releases and milestones
|
## Major/minor releases and milestones
|
||||||
@@ -61,3 +61,23 @@ release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --
|
|||||||
|
|
||||||
If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
|
If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
|
||||||
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)
|
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)
|
||||||
|
|
||||||
|
## Container image creation
|
||||||
|
|
||||||
|
The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cd kubespray/
|
||||||
|
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
|
||||||
|
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
|
||||||
|
```
|
||||||
|
|
||||||
|
The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cd kubespray/test-infra/vagrant-docker/
|
||||||
|
./build vX.Y.Z
|
||||||
|
```
|
||||||
|
|
||||||
|
Please note that the above operation requires the permission to push container images into quay.io/kubespray/.
|
||||||
|
If you don't have the permission, please ask it on the #kubespray-dev channel.
|
||||||
|
|||||||
@@ -9,5 +9,7 @@
|
|||||||
#
|
#
|
||||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||||
# INSTRUCTIONS AT https://kubernetes.io/security/
|
# INSTRUCTIONS AT https://kubernetes.io/security/
|
||||||
atoms
|
|
||||||
mattymo
|
mattymo
|
||||||
|
floryut
|
||||||
|
oomichi
|
||||||
|
cristicalin
|
||||||
|
|||||||
5
Vagrantfile
vendored
5
Vagrantfile
vendored
@@ -28,9 +28,10 @@ SUPPORTED_OS = {
|
|||||||
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
||||||
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
||||||
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
||||||
"fedora34" => {box: "fedora/34-cloud-base", user: "vagrant"},
|
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
||||||
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
|
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
|
||||||
"opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
|
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
|
||||||
|
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
|
||||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||||
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[ssh_connection]
|
[ssh_connection]
|
||||||
pipelining=True
|
pipelining=True
|
||||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||||
[defaults]
|
[defaults]
|
||||||
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
||||||
@@ -10,11 +10,11 @@ host_key_checking=False
|
|||||||
gathering = smart
|
gathering = smart
|
||||||
fact_caching = jsonfile
|
fact_caching = jsonfile
|
||||||
fact_caching_connection = /tmp
|
fact_caching_connection = /tmp
|
||||||
fact_caching_timeout = 7200
|
fact_caching_timeout = 86400
|
||||||
stdout_callback = default
|
stdout_callback = default
|
||||||
display_skipped_hosts = no
|
display_skipped_hosts = no
|
||||||
library = ./library
|
library = ./library
|
||||||
callback_whitelist = profile_tasks,ara_default
|
callbacks_enabled = profile_tasks,ara_default
|
||||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||||
deprecation_warnings=False
|
deprecation_warnings=False
|
||||||
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
|
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
|
||||||
|
|||||||
@@ -3,32 +3,20 @@
|
|||||||
gather_facts: false
|
gather_facts: false
|
||||||
become: no
|
become: no
|
||||||
vars:
|
vars:
|
||||||
minimal_ansible_version: 2.9.0
|
minimal_ansible_version: 2.11.0
|
||||||
minimal_ansible_version_2_10: 2.10.11
|
|
||||||
maximal_ansible_version: 2.13.0
|
maximal_ansible_version: 2.13.0
|
||||||
ansible_connection: local
|
ansible_connection: local
|
||||||
tags: always
|
tags: always
|
||||||
tasks:
|
tasks:
|
||||||
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
||||||
assert:
|
assert:
|
||||||
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}"
|
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
|
||||||
that:
|
that:
|
||||||
- ansible_version.string is version(minimal_ansible_version, ">=")
|
- ansible_version.string is version(minimal_ansible_version, ">=")
|
||||||
- ansible_version.string is version(maximal_ansible_version, "<")
|
- ansible_version.string is version(maximal_ansible_version, "<")
|
||||||
tags:
|
tags:
|
||||||
- check
|
- check
|
||||||
|
|
||||||
- name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
|
|
||||||
assert:
|
|
||||||
msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
|
|
||||||
that:
|
|
||||||
- ansible_version.string is version(minimal_ansible_version_2_10, ">=")
|
|
||||||
- ansible_version.string is version(maximal_ansible_version, "<")
|
|
||||||
when:
|
|
||||||
- ansible_version.string is version('2.10.0', ">=")
|
|
||||||
tags:
|
|
||||||
- check
|
|
||||||
|
|
||||||
- name: "Check that python netaddr is installed"
|
- name: "Check that python netaddr is installed"
|
||||||
assert:
|
assert:
|
||||||
msg: "Python netaddr is not present"
|
msg: "Python netaddr is not present"
|
||||||
|
|||||||
@@ -35,7 +35,7 @@
|
|||||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
|
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
|
||||||
- { role: download, tags: download, when: "not skip_downloads" }
|
- { role: download, tags: download, when: "not skip_downloads" }
|
||||||
|
|
||||||
- hosts: etcd
|
- hosts: etcd:kube_control_plane
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
environment: "{{ proxy_disable_env }}"
|
environment: "{{ proxy_disable_env }}"
|
||||||
@@ -59,7 +59,10 @@
|
|||||||
vars:
|
vars:
|
||||||
etcd_cluster_setup: false
|
etcd_cluster_setup: false
|
||||||
etcd_events_cluster_setup: false
|
etcd_events_cluster_setup: false
|
||||||
when: etcd_deployment_type != "kubeadm"
|
when:
|
||||||
|
- etcd_deployment_type != "kubeadm"
|
||||||
|
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
|
||||||
|
- kube_network_plugin != "calico" or calico_datastore == "etcd"
|
||||||
|
|
||||||
- hosts: k8s_cluster
|
- hosts: k8s_cluster
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ pass_or_fail() {
|
|||||||
test_distro() {
|
test_distro() {
|
||||||
local distro=${1:?};shift
|
local distro=${1:?};shift
|
||||||
local extra="${*:-}"
|
local extra="${*:-}"
|
||||||
local prefix="$distro[${extra}]}"
|
local prefix="${distro[${extra}]}"
|
||||||
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
|
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
|
||||||
pass_or_fail "$prefix: dind-nodes" || return 1
|
pass_or_fail "$prefix: dind-nodes" || return 1
|
||||||
(cd ../..
|
(cd ../..
|
||||||
@@ -71,15 +71,15 @@ for spec in ${SPECS}; do
|
|||||||
echo "Loading file=${spec} ..."
|
echo "Loading file=${spec} ..."
|
||||||
. ${spec} || continue
|
. ${spec} || continue
|
||||||
: ${DISTROS:?} || continue
|
: ${DISTROS:?} || continue
|
||||||
echo "DISTROS=${DISTROS[@]}"
|
echo "DISTROS:" "${DISTROS[@]}"
|
||||||
echo "EXTRAS->"
|
echo "EXTRAS->"
|
||||||
printf " %s\n" "${EXTRAS[@]}"
|
printf " %s\n" "${EXTRAS[@]}"
|
||||||
let n=1
|
let n=1
|
||||||
for distro in ${DISTROS[@]}; do
|
for distro in "${DISTROS[@]}"; do
|
||||||
for extra in "${EXTRAS[@]:-NULL}"; do
|
for extra in "${EXTRAS[@]:-NULL}"; do
|
||||||
# Magic value to let this for run once:
|
# Magic value to let this for run once:
|
||||||
[[ ${extra} == NULL ]] && unset extra
|
[[ ${extra} == NULL ]] && unset extra
|
||||||
docker rm -f ${NODES[@]}
|
docker rm -f "${NODES[@]}"
|
||||||
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
|
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
|
||||||
{
|
{
|
||||||
info "${distro}[${extra}] START: file_out=${file_out}"
|
info "${distro}[${extra}] START: file_out=${file_out}"
|
||||||
|
|||||||
@@ -13,7 +13,7 @@
|
|||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import inventory
|
import inventory
|
||||||
from test import support
|
from io import StringIO
|
||||||
import unittest
|
import unittest
|
||||||
from unittest import mock
|
from unittest import mock
|
||||||
|
|
||||||
@@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
|
|||||||
'access_ip': '10.90.0.3'}}}})
|
'access_ip': '10.90.0.3'}}}})
|
||||||
with mock.patch('builtins.open', mock_io):
|
with mock.patch('builtins.open', mock_io):
|
||||||
with self.assertRaises(SystemExit) as cm:
|
with self.assertRaises(SystemExit) as cm:
|
||||||
with support.captured_stdout() as stdout:
|
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
|
||||||
inventory.KubesprayInventory(
|
inventory.KubesprayInventory(
|
||||||
changed_hosts=["print_hostnames"],
|
changed_hosts=["print_hostnames"],
|
||||||
config_file="file")
|
config_file="file")
|
||||||
|
|||||||
@@ -14,12 +14,16 @@ This role performs basic installation and setup of Gluster, but it does not conf
|
|||||||
|
|
||||||
Available variables are listed below, along with default values (see `defaults/main.yml`):
|
Available variables are listed below, along with default values (see `defaults/main.yml`):
|
||||||
|
|
||||||
glusterfs_default_release: ""
|
```yaml
|
||||||
|
glusterfs_default_release: ""
|
||||||
|
```
|
||||||
|
|
||||||
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
||||||
|
|
||||||
glusterfs_ppa_use: yes
|
```yaml
|
||||||
glusterfs_ppa_version: "3.5"
|
glusterfs_ppa_use: yes
|
||||||
|
glusterfs_ppa_version: "3.5"
|
||||||
|
```
|
||||||
|
|
||||||
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
|
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
|
||||||
|
|
||||||
@@ -29,9 +33,11 @@ None.
|
|||||||
|
|
||||||
## Example Playbook
|
## Example Playbook
|
||||||
|
|
||||||
|
```yaml
|
||||||
- hosts: server
|
- hosts: server
|
||||||
roles:
|
roles:
|
||||||
- geerlingguy.glusterfs
|
- geerlingguy.glusterfs
|
||||||
|
```
|
||||||
|
|
||||||
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
||||||
|
|
||||||
|
|||||||
@@ -45,3 +45,21 @@ temp
|
|||||||
|
|
||||||
In some cases you may want to update some component version, you can declare version variables in ansible inventory file or group_vars,
|
In some cases you may want to update some component version, you can declare version variables in ansible inventory file or group_vars,
|
||||||
then run `./generate_list.sh -i [inventory_file]` to update file.list and images.list.
|
then run `./generate_list.sh -i [inventory_file]` to update file.list and images.list.
|
||||||
|
|
||||||
|
## manage-offline-files.sh
|
||||||
|
|
||||||
|
This script will download all files according to `temp/files.list` and run nginx container to provide offline file download.
|
||||||
|
|
||||||
|
Step(1) generate `files.list`
|
||||||
|
|
||||||
|
```shell
|
||||||
|
./generate_list.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Step(2) download files and run nginx container
|
||||||
|
|
||||||
|
```shell
|
||||||
|
./manage-offline-files.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
when nginx container is running, it can be accessed through <http://127.0.0.1:8080/>.
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ function create_container_image_tar() {
|
|||||||
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
|
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
|
||||||
# NOTE: etcd and pause cannot be seen as pods.
|
# NOTE: etcd and pause cannot be seen as pods.
|
||||||
# The pause image is used for --pod-infra-container-image option of kubelet.
|
# The pause image is used for --pod-infra-container-image option of kubelet.
|
||||||
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
|
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
|
||||||
IMAGES="${IMAGES} ${EXT_IMAGES}"
|
IMAGES="${IMAGES} ${EXT_IMAGES}"
|
||||||
|
|
||||||
rm -f ${IMAGE_TAR_FILE}
|
rm -f ${IMAGE_TAR_FILE}
|
||||||
@@ -46,12 +46,12 @@ function create_container_image_tar() {
|
|||||||
|
|
||||||
# NOTE: Here removes the following repo parts from each image
|
# NOTE: Here removes the following repo parts from each image
|
||||||
# so that these parts will be replaced with Kubespray.
|
# so that these parts will be replaced with Kubespray.
|
||||||
# - kube_image_repo: "k8s.gcr.io"
|
# - kube_image_repo: "registry.k8s.io"
|
||||||
# - gcr_image_repo: "gcr.io"
|
# - gcr_image_repo: "gcr.io"
|
||||||
# - docker_image_repo: "docker.io"
|
# - docker_image_repo: "docker.io"
|
||||||
# - quay_image_repo: "quay.io"
|
# - quay_image_repo: "quay.io"
|
||||||
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
|
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
|
||||||
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
|
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
|
||||||
[ "${FIRST_PART}" = "gcr.io" ] ||
|
[ "${FIRST_PART}" = "gcr.io" ] ||
|
||||||
[ "${FIRST_PART}" = "docker.io" ] ||
|
[ "${FIRST_PART}" = "docker.io" ] ||
|
||||||
[ "${FIRST_PART}" = "quay.io" ] ||
|
[ "${FIRST_PART}" = "quay.io" ] ||
|
||||||
|
|||||||
44
contrib/offline/manage-offline-files.sh
Executable file
44
contrib/offline/manage-offline-files.sh
Executable file
@@ -0,0 +1,44 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
CURRENT_DIR=$( dirname "$(readlink -f "$0")" )
|
||||||
|
OFFLINE_FILES_DIR_NAME="offline-files"
|
||||||
|
OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}"
|
||||||
|
OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz"
|
||||||
|
FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"}
|
||||||
|
NGINX_PORT=8080
|
||||||
|
|
||||||
|
# download files
|
||||||
|
if [ ! -f "${FILES_LIST}" ]; then
|
||||||
|
echo "${FILES_LIST} should exist, run ./generate_list.sh first."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -rf "${OFFLINE_FILES_DIR}"
|
||||||
|
rm "${OFFLINE_FILES_ARCHIVE}"
|
||||||
|
mkdir "${OFFLINE_FILES_DIR}"
|
||||||
|
|
||||||
|
wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
|
||||||
|
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"
|
||||||
|
|
||||||
|
[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0
|
||||||
|
|
||||||
|
# run nginx container server
|
||||||
|
if command -v nerdctl 1>/dev/null 2>&1; then
|
||||||
|
runtime="nerdctl"
|
||||||
|
elif command -v podman 1>/dev/null 2>&1; then
|
||||||
|
runtime="podman"
|
||||||
|
elif command -v docker 1>/dev/null 2>&1; then
|
||||||
|
runtime="docker"
|
||||||
|
else
|
||||||
|
echo "No supported container runtime found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
sudo "${runtime}" container inspect nginx >/dev/null 2>&1
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
sudo "${runtime}" run \
|
||||||
|
--restart=always -d -p ${NGINX_PORT}:80 \
|
||||||
|
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
||||||
|
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
|
||||||
|
--name nginx nginx:alpine
|
||||||
|
fi
|
||||||
39
contrib/offline/nginx.conf
Normal file
39
contrib/offline/nginx.conf
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
user nginx;
|
||||||
|
worker_processes auto;
|
||||||
|
error_log /var/log/nginx/error.log;
|
||||||
|
pid /run/nginx.pid;
|
||||||
|
include /usr/share/nginx/modules/*.conf;
|
||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
http {
|
||||||
|
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||||
|
'$status $body_bytes_sent "$http_referer" '
|
||||||
|
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||||
|
access_log /var/log/nginx/access.log main;
|
||||||
|
sendfile on;
|
||||||
|
tcp_nopush on;
|
||||||
|
tcp_nodelay on;
|
||||||
|
keepalive_timeout 65;
|
||||||
|
types_hash_max_size 2048;
|
||||||
|
default_type application/octet-stream;
|
||||||
|
include /etc/nginx/conf.d/*.conf;
|
||||||
|
server {
|
||||||
|
listen 80 default_server;
|
||||||
|
listen [::]:80 default_server;
|
||||||
|
server_name _;
|
||||||
|
include /etc/nginx/default.d/*.conf;
|
||||||
|
location / {
|
||||||
|
root /usr/share/nginx/html/download;
|
||||||
|
autoindex on;
|
||||||
|
autoindex_exact_size off;
|
||||||
|
autoindex_localtime on;
|
||||||
|
}
|
||||||
|
error_page 404 /404.html;
|
||||||
|
location = /40x.html {
|
||||||
|
}
|
||||||
|
error_page 500 502 503 504 /50x.html;
|
||||||
|
location = /50x.html {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -36,8 +36,7 @@ terraform apply -var-file=credentials.tfvars
|
|||||||
```
|
```
|
||||||
|
|
||||||
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
|
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`
|
||||||
Ansible automatically detects bastion and changes ssh_args
|
|
||||||
|
|
||||||
```commandline
|
```commandline
|
||||||
ssh -F ./ssh-bastion.conf user@$ip
|
ssh -F ./ssh-bastion.conf user@$ip
|
||||||
|
|||||||
@@ -31,9 +31,7 @@ The setup looks like following
|
|||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
* Terraform 0.13.0 or newer
|
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
|
||||||
|
|
||||||
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
|
|
||||||
|
|
||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
|
|||||||
@@ -3,8 +3,8 @@ provider "exoscale" {}
|
|||||||
module "kubernetes" {
|
module "kubernetes" {
|
||||||
source = "./modules/kubernetes-cluster"
|
source = "./modules/kubernetes-cluster"
|
||||||
|
|
||||||
prefix = var.prefix
|
prefix = var.prefix
|
||||||
|
zone = var.zone
|
||||||
machines = var.machines
|
machines = var.machines
|
||||||
|
|
||||||
ssh_public_keys = var.ssh_public_keys
|
ssh_public_keys = var.ssh_public_keys
|
||||||
|
|||||||
@@ -75,6 +75,11 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
|
|||||||
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
|
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
|
||||||
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
|
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
|
||||||
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
|
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
|
||||||
|
* `extra_ingress_firewalls`: Additional ingress firewall rules. Key will be used as the name of the rule
|
||||||
|
* `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
|
||||||
|
* `protocol`: Protocol. Example `"tcp"`
|
||||||
|
* `ports`: List of ports, as string. Example `["53"]`
|
||||||
|
* `target_tags`: List of target tag (either the machine name or `control-plane` or `worker`). Example: `["control-plane", "worker-0"]`
|
||||||
|
|
||||||
### Optional
|
### Optional
|
||||||
|
|
||||||
|
|||||||
@@ -34,4 +34,6 @@ module "kubernetes" {
|
|||||||
api_server_whitelist = var.api_server_whitelist
|
api_server_whitelist = var.api_server_whitelist
|
||||||
nodeport_whitelist = var.nodeport_whitelist
|
nodeport_whitelist = var.nodeport_whitelist
|
||||||
ingress_whitelist = var.ingress_whitelist
|
ingress_whitelist = var.ingress_whitelist
|
||||||
|
|
||||||
|
extra_ingress_firewalls = var.extra_ingress_firewalls
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -219,7 +219,7 @@ resource "google_compute_instance" "master" {
|
|||||||
machine_type = each.value.size
|
machine_type = each.value.size
|
||||||
zone = each.value.zone
|
zone = each.value.zone
|
||||||
|
|
||||||
tags = ["master"]
|
tags = ["control-plane", "master", each.key]
|
||||||
|
|
||||||
boot_disk {
|
boot_disk {
|
||||||
initialize_params {
|
initialize_params {
|
||||||
@@ -325,7 +325,7 @@ resource "google_compute_instance" "worker" {
|
|||||||
machine_type = each.value.size
|
machine_type = each.value.size
|
||||||
zone = each.value.zone
|
zone = each.value.zone
|
||||||
|
|
||||||
tags = ["worker"]
|
tags = ["worker", each.key]
|
||||||
|
|
||||||
boot_disk {
|
boot_disk {
|
||||||
initialize_params {
|
initialize_params {
|
||||||
@@ -398,3 +398,24 @@ resource "google_compute_target_pool" "worker_lb" {
|
|||||||
name = "${var.prefix}-worker-lb-pool"
|
name = "${var.prefix}-worker-lb-pool"
|
||||||
instances = local.worker_target_list
|
instances = local.worker_target_list
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "google_compute_firewall" "extra_ingress_firewall" {
|
||||||
|
for_each = {
|
||||||
|
for name, firewall in var.extra_ingress_firewalls :
|
||||||
|
name => firewall
|
||||||
|
}
|
||||||
|
|
||||||
|
name = "${var.prefix}-${each.key}-ingress"
|
||||||
|
network = google_compute_network.main.name
|
||||||
|
|
||||||
|
priority = 100
|
||||||
|
|
||||||
|
source_ranges = each.value.source_ranges
|
||||||
|
|
||||||
|
target_tags = each.value.target_tags
|
||||||
|
|
||||||
|
allow {
|
||||||
|
protocol = each.value.protocol
|
||||||
|
ports = each.value.ports
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ variable "machines" {
|
|||||||
}))
|
}))
|
||||||
boot_disk = object({
|
boot_disk = object({
|
||||||
image_name = string
|
image_name = string
|
||||||
size = number
|
size = number
|
||||||
})
|
})
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
@@ -73,3 +73,14 @@ variable "ingress_whitelist" {
|
|||||||
variable "private_network_cidr" {
|
variable "private_network_cidr" {
|
||||||
default = "10.0.10.0/24"
|
default = "10.0.10.0/24"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "extra_ingress_firewalls" {
|
||||||
|
type = map(object({
|
||||||
|
source_ranges = set(string)
|
||||||
|
protocol = string
|
||||||
|
ports = list(string)
|
||||||
|
target_tags = set(string)
|
||||||
|
}))
|
||||||
|
|
||||||
|
default = {}
|
||||||
|
}
|
||||||
|
|||||||
@@ -95,3 +95,14 @@ variable "ingress_whitelist" {
|
|||||||
type = list(string)
|
type = list(string)
|
||||||
default = ["0.0.0.0/0"]
|
default = ["0.0.0.0/0"]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "extra_ingress_firewalls" {
|
||||||
|
type = map(object({
|
||||||
|
source_ranges = set(string)
|
||||||
|
protocol = string
|
||||||
|
ports = list(string)
|
||||||
|
target_tags = set(string)
|
||||||
|
}))
|
||||||
|
|
||||||
|
default = {}
|
||||||
|
}
|
||||||
|
|||||||
@@ -56,11 +56,24 @@ cd inventory/$CLUSTER
|
|||||||
|
|
||||||
Edit `default.tfvars` to match your requirement.
|
Edit `default.tfvars` to match your requirement.
|
||||||
|
|
||||||
|
Flatcar Container Linux instead of the basic Hetzner Images.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ../../contrib/terraform/hetzner
|
||||||
|
```
|
||||||
|
|
||||||
|
Edit `main.tf` and reactivate the module `source = "./modules/kubernetes-cluster-flatcar"`and
|
||||||
|
comment out the `#source = "./modules/kubernetes-cluster"`.
|
||||||
|
|
||||||
|
activate `ssh_private_key_path = var.ssh_private_key_path`. The VM boots into
|
||||||
|
Rescue-Mode with the selected image of the `var.machines` but installs Flatcar instead.
|
||||||
|
|
||||||
Run Terraform to create the infrastructure.
|
Run Terraform to create the infrastructure.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
terraform init ../../contrib/terraform/hetzner
|
cd ./kubespray
|
||||||
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
|
terraform -chdir=./contrib/terraform/hetzner/ init
|
||||||
|
terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars
|
||||||
```
|
```
|
||||||
|
|
||||||
You should now have a inventory file named `inventory.ini` that you can use with kubespray.
|
You should now have a inventory file named `inventory.ini` that you can use with kubespray.
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ ssh_public_keys = [
|
|||||||
"ssh-rsa I-did-not-read-the-docs 2",
|
"ssh-rsa I-did-not-read-the-docs 2",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
ssh_private_key_path = "~/.ssh/id_rsa"
|
||||||
|
|
||||||
machines = {
|
machines = {
|
||||||
"master-0" : {
|
"master-0" : {
|
||||||
"node_type" : "master",
|
"node_type" : "master",
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ provider "hcloud" {}
|
|||||||
|
|
||||||
module "kubernetes" {
|
module "kubernetes" {
|
||||||
source = "./modules/kubernetes-cluster"
|
source = "./modules/kubernetes-cluster"
|
||||||
|
#source = "./modules/kubernetes-cluster-flatcar"
|
||||||
|
|
||||||
prefix = var.prefix
|
prefix = var.prefix
|
||||||
|
|
||||||
@@ -9,6 +10,9 @@ module "kubernetes" {
|
|||||||
|
|
||||||
machines = var.machines
|
machines = var.machines
|
||||||
|
|
||||||
|
#only for flatcar
|
||||||
|
#ssh_private_key_path = var.ssh_private_key_path
|
||||||
|
|
||||||
ssh_public_keys = var.ssh_public_keys
|
ssh_public_keys = var.ssh_public_keys
|
||||||
network_zone = var.network_zone
|
network_zone = var.network_zone
|
||||||
|
|
||||||
@@ -49,4 +53,4 @@ resource "null_resource" "inventories" {
|
|||||||
triggers = {
|
triggers = {
|
||||||
template = data.template_file.inventory.rendered
|
template = data.template_file.inventory.rendered
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,202 @@
|
|||||||
|
resource "hcloud_network" "kubernetes" {
|
||||||
|
name = "${var.prefix}-network"
|
||||||
|
ip_range = var.private_network_cidr
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_network_subnet" "kubernetes" {
|
||||||
|
type = "cloud"
|
||||||
|
network_id = hcloud_network.kubernetes.id
|
||||||
|
network_zone = var.network_zone
|
||||||
|
ip_range = var.private_subnet_cidr
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_ssh_key" "first" {
|
||||||
|
name = var.prefix
|
||||||
|
public_key = var.ssh_public_keys.0
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_server" "master" {
|
||||||
|
for_each = {
|
||||||
|
for name, machine in var.machines :
|
||||||
|
name => machine
|
||||||
|
if machine.node_type == "master"
|
||||||
|
}
|
||||||
|
name = "${var.prefix}-${each.key}"
|
||||||
|
ssh_keys = [hcloud_ssh_key.first.id]
|
||||||
|
# boot into rescue OS
|
||||||
|
rescue = "linux64"
|
||||||
|
# dummy value for the OS because Flatcar is not available
|
||||||
|
image = each.value.image
|
||||||
|
server_type = each.value.size
|
||||||
|
location = var.zone
|
||||||
|
connection {
|
||||||
|
host = self.ipv4_address
|
||||||
|
timeout = "5m"
|
||||||
|
private_key = file(var.ssh_private_key_path)
|
||||||
|
}
|
||||||
|
firewall_ids = [hcloud_firewall.machine.id]
|
||||||
|
provisioner "file" {
|
||||||
|
content = data.ct_config.machine-ignitions[each.key].rendered
|
||||||
|
destination = "/root/ignition.json"
|
||||||
|
}
|
||||||
|
|
||||||
|
provisioner "remote-exec" {
|
||||||
|
inline = [
|
||||||
|
"set -ex",
|
||||||
|
"apt update",
|
||||||
|
"apt install -y gawk",
|
||||||
|
"curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/kinvolk/init/flatcar-master/bin/flatcar-install",
|
||||||
|
"chmod +x flatcar-install",
|
||||||
|
"./flatcar-install -s -i /root/ignition.json",
|
||||||
|
"shutdown -r +1",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
# optional:
|
||||||
|
provisioner "remote-exec" {
|
||||||
|
connection {
|
||||||
|
host = self.ipv4_address
|
||||||
|
timeout = "3m"
|
||||||
|
user = var.user_flatcar
|
||||||
|
}
|
||||||
|
|
||||||
|
inline = [
|
||||||
|
"sudo hostnamectl set-hostname ${self.name}",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_server_network" "master" {
|
||||||
|
for_each = hcloud_server.master
|
||||||
|
server_id = each.value.id
|
||||||
|
subnet_id = hcloud_network_subnet.kubernetes.id
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_server" "worker" {
|
||||||
|
for_each = {
|
||||||
|
for name, machine in var.machines :
|
||||||
|
name => machine
|
||||||
|
if machine.node_type == "worker"
|
||||||
|
}
|
||||||
|
name = "${var.prefix}-${each.key}"
|
||||||
|
ssh_keys = [hcloud_ssh_key.first.id]
|
||||||
|
# boot into rescue OS
|
||||||
|
rescue = "linux64"
|
||||||
|
# dummy value for the OS because Flatcar is not available
|
||||||
|
image = each.value.image
|
||||||
|
server_type = each.value.size
|
||||||
|
location = var.zone
|
||||||
|
connection {
|
||||||
|
host = self.ipv4_address
|
||||||
|
timeout = "5m"
|
||||||
|
private_key = file(var.ssh_private_key_path)
|
||||||
|
}
|
||||||
|
firewall_ids = [hcloud_firewall.machine.id]
|
||||||
|
provisioner "file" {
|
||||||
|
content = data.ct_config.machine-ignitions[each.key].rendered
|
||||||
|
destination = "/root/ignition.json"
|
||||||
|
}
|
||||||
|
|
||||||
|
provisioner "remote-exec" {
|
||||||
|
inline = [
|
||||||
|
"set -ex",
|
||||||
|
"apt update",
|
||||||
|
"apt install -y gawk",
|
||||||
|
"curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/kinvolk/init/flatcar-master/bin/flatcar-install",
|
||||||
|
"chmod +x flatcar-install",
|
||||||
|
"./flatcar-install -s -i /root/ignition.json",
|
||||||
|
"shutdown -r +1",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
# optional:
|
||||||
|
provisioner "remote-exec" {
|
||||||
|
connection {
|
||||||
|
host = self.ipv4_address
|
||||||
|
timeout = "3m"
|
||||||
|
user = var.user_flatcar
|
||||||
|
}
|
||||||
|
|
||||||
|
inline = [
|
||||||
|
"sudo hostnamectl set-hostname ${self.name}",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_server_network" "worker" {
|
||||||
|
for_each = hcloud_server.worker
|
||||||
|
server_id = each.value.id
|
||||||
|
subnet_id = hcloud_network_subnet.kubernetes.id
|
||||||
|
}
|
||||||
|
|
||||||
|
data "ct_config" "machine-ignitions" {
|
||||||
|
for_each = {
|
||||||
|
for name, machine in var.machines :
|
||||||
|
name => machine
|
||||||
|
}
|
||||||
|
content = data.template_file.machine-configs[each.key].rendered
|
||||||
|
}
|
||||||
|
|
||||||
|
data "template_file" "machine-configs" {
|
||||||
|
for_each = {
|
||||||
|
for name, machine in var.machines :
|
||||||
|
name => machine
|
||||||
|
}
|
||||||
|
template = file("${path.module}/templates/machine.yaml.tmpl")
|
||||||
|
|
||||||
|
vars = {
|
||||||
|
ssh_keys = jsonencode(var.ssh_public_keys)
|
||||||
|
user_flatcar = jsonencode(var.user_flatcar)
|
||||||
|
name = each.key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_firewall" "machine" {
|
||||||
|
name = "${var.prefix}-machine-firewall"
|
||||||
|
|
||||||
|
rule {
|
||||||
|
direction = "in"
|
||||||
|
protocol = "tcp"
|
||||||
|
port = "22"
|
||||||
|
source_ips = var.ssh_whitelist
|
||||||
|
}
|
||||||
|
|
||||||
|
rule {
|
||||||
|
direction = "in"
|
||||||
|
protocol = "tcp"
|
||||||
|
port = "6443"
|
||||||
|
source_ips = var.api_server_whitelist
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_firewall" "worker" {
|
||||||
|
name = "${var.prefix}-worker-firewall"
|
||||||
|
|
||||||
|
rule {
|
||||||
|
direction = "in"
|
||||||
|
protocol = "tcp"
|
||||||
|
port = "22"
|
||||||
|
source_ips = var.ssh_whitelist
|
||||||
|
}
|
||||||
|
|
||||||
|
rule {
|
||||||
|
direction = "in"
|
||||||
|
protocol = "tcp"
|
||||||
|
port = "80"
|
||||||
|
source_ips = var.ingress_whitelist
|
||||||
|
}
|
||||||
|
|
||||||
|
rule {
|
||||||
|
direction = "in"
|
||||||
|
protocol = "tcp"
|
||||||
|
port = "443"
|
||||||
|
source_ips = var.ingress_whitelist
|
||||||
|
}
|
||||||
|
|
||||||
|
rule {
|
||||||
|
direction = "in"
|
||||||
|
protocol = "tcp"
|
||||||
|
port = "30000-32767"
|
||||||
|
source_ips = var.nodeport_whitelist
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,27 @@
|
|||||||
|
output "master_ip_addresses" {
|
||||||
|
value = {
|
||||||
|
for key, instance in hcloud_server.master :
|
||||||
|
instance.name => {
|
||||||
|
"private_ip" = hcloud_server_network.master[key].ip
|
||||||
|
"public_ip" = hcloud_server.master[key].ipv4_address
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
output "worker_ip_addresses" {
|
||||||
|
value = {
|
||||||
|
for key, instance in hcloud_server.worker :
|
||||||
|
instance.name => {
|
||||||
|
"private_ip" = hcloud_server_network.worker[key].ip
|
||||||
|
"public_ip" = hcloud_server.worker[key].ipv4_address
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
output "cluster_private_network_cidr" {
|
||||||
|
value = var.private_subnet_cidr
|
||||||
|
}
|
||||||
|
|
||||||
|
output "network_id" {
|
||||||
|
value = hcloud_network.kubernetes.id
|
||||||
|
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
passwd:
|
||||||
|
users:
|
||||||
|
- name: ${user_flatcar}
|
||||||
|
ssh_authorized_keys: ${ssh_keys}
|
||||||
|
storage:
|
||||||
|
files:
|
||||||
|
- path: /home/core/works
|
||||||
|
filesystem: root
|
||||||
|
mode: 0755
|
||||||
|
contents:
|
||||||
|
inline: |
|
||||||
|
#!/bin/bash
|
||||||
|
set -euo pipefail
|
||||||
|
hostname="$(hostname)"
|
||||||
|
echo My name is ${name} and the hostname is $${hostname}
|
||||||
@@ -0,0 +1,60 @@
|
|||||||
|
|
||||||
|
variable "zone" {
|
||||||
|
type = string
|
||||||
|
default = "fsn1"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "prefix" {
|
||||||
|
default = "k8s"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "user_flatcar" {
|
||||||
|
type = string
|
||||||
|
default = "core"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "machines" {
|
||||||
|
type = map(object({
|
||||||
|
node_type = string
|
||||||
|
size = string
|
||||||
|
image = string
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
variable "ssh_public_keys" {
|
||||||
|
type = list(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "ssh_private_key_path" {
|
||||||
|
type = string
|
||||||
|
default = "~/.ssh/id_rsa"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "ssh_whitelist" {
|
||||||
|
type = list(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "api_server_whitelist" {
|
||||||
|
type = list(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "nodeport_whitelist" {
|
||||||
|
type = list(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "ingress_whitelist" {
|
||||||
|
type = list(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "private_network_cidr" {
|
||||||
|
default = "10.0.0.0/16"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "private_subnet_cidr" {
|
||||||
|
default = "10.0.10.0/24"
|
||||||
|
}
|
||||||
|
variable "network_zone" {
|
||||||
|
default = "eu-central"
|
||||||
|
}
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
terraform {
|
||||||
|
required_providers {
|
||||||
|
hcloud = {
|
||||||
|
source = "hetznercloud/hcloud"
|
||||||
|
}
|
||||||
|
ct = {
|
||||||
|
source = "poseidon/ct"
|
||||||
|
}
|
||||||
|
null = {
|
||||||
|
source = "hashicorp/null"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,18 +2,18 @@
|
|||||||
${connection_strings_master}
|
${connection_strings_master}
|
||||||
${connection_strings_worker}
|
${connection_strings_worker}
|
||||||
|
|
||||||
[kube-master]
|
[kube_control_plane]
|
||||||
${list_master}
|
${list_master}
|
||||||
|
|
||||||
[etcd]
|
[etcd]
|
||||||
${list_master}
|
${list_master}
|
||||||
|
|
||||||
[kube-node]
|
[kube_node]
|
||||||
${list_worker}
|
${list_worker}
|
||||||
|
|
||||||
[k8s-cluster:children]
|
[k8s_cluster:children]
|
||||||
kube-master
|
kube-master
|
||||||
kube-node
|
kube-node
|
||||||
|
|
||||||
[k8s-cluster:vars]
|
[k8s_cluster:vars]
|
||||||
network_id=${network_id}
|
network_id=${network_id}
|
||||||
|
|||||||
@@ -25,6 +25,12 @@ variable "ssh_public_keys" {
|
|||||||
type = list(string)
|
type = list(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "ssh_private_key_path" {
|
||||||
|
description = "Private SSH key which connect to the VMs."
|
||||||
|
type = string
|
||||||
|
default = "~/.ssh/id_rsa"
|
||||||
|
}
|
||||||
|
|
||||||
variable "ssh_whitelist" {
|
variable "ssh_whitelist" {
|
||||||
description = "List of IP ranges (CIDR) to whitelist for ssh"
|
description = "List of IP ranges (CIDR) to whitelist for ssh"
|
||||||
type = list(string)
|
type = list(string)
|
||||||
|
|||||||
@@ -88,7 +88,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
|||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
|
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.14 or later
|
||||||
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
|
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
|
||||||
- you already have a suitable OS image in Glance
|
- you already have a suitable OS image in Glance
|
||||||
- you already have a floating IP pool created
|
- you already have a floating IP pool created
|
||||||
@@ -270,6 +270,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
|||||||
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|
||||||
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|
||||||
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|
||||||
|
|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
|
||||||
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|
||||||
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|
||||||
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|
||||||
@@ -283,6 +284,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
|||||||
|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
|
|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
|
||||||
|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
|
|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
|
||||||
|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|
|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|
||||||
|
|`additional_server_groups` | Extra server groups to create. Set "policy" to the policy for the group, expected format is `{"new-server-group" = {"policy" = "anti-affinity"}}`, default: {} (to not create any extra groups) |
|
||||||
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|
||||||
|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default |
|
|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default |
|
||||||
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
|
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
|
||||||
@@ -291,10 +293,32 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
|||||||
|
|
||||||
##### k8s_nodes
|
##### k8s_nodes
|
||||||
|
|
||||||
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
|
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and availability zone placement.
|
||||||
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
|
To enable the use of this mode set the `number_of_k8s_nodes` and `number_of_k8s_nodes_no_floating_ip` variables to 0.
|
||||||
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
|
Then define your desired worker node configuration using the `k8s_nodes` variable.
|
||||||
using the `k8s_nodes` variable.
|
The `az`, `flavor` and `floating_ip` parameters are mandatory.
|
||||||
|
The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
k8s_nodes:
|
||||||
|
node-name:
|
||||||
|
az: string # Name of the AZ
|
||||||
|
flavor: string # Flavor ID to use
|
||||||
|
floating_ip: bool # If floating IPs should be created or not
|
||||||
|
extra_groups: string # (optional) Additional groups to add for kubespray, defaults to no groups
|
||||||
|
image_id: string # (optional) Image ID to use, defaults to var.image_id or var.image
|
||||||
|
root_volume_size_in_gb: number # (optional) Size of the block storage to use as root disk, defaults to var.node_root_volume_size_in_gb or to use volume from flavor otherwise
|
||||||
|
volume_type: string # (optional) Volume type to use, defaults to var.node_volume_type
|
||||||
|
network_id: string # (optional) Use this network_id for the node, defaults to either var.network_id or ID of var.network_name
|
||||||
|
server_group: string # (optional) Server group to add this node to. If set, this has to be one specified in additional_server_groups, defaults to use the server group specified in node_server_group_policy
|
||||||
|
cloudinit: # (optional) Options for cloud-init
|
||||||
|
extra_partitions: # List of extra partitions (other than the root partition) to setup during creation
|
||||||
|
volume_path: string # Path to the volume to create partition for (e.g. /dev/vda )
|
||||||
|
partition_path: string # Path to the partition (e.g. /dev/vda2 )
|
||||||
|
mount_path: string # Path to where the partition should be mounted
|
||||||
|
partition_start: string # Where the partition should start (e.g. 10GB ). Note, if you set the partition_start to 0 there will be no space left for the root partition
|
||||||
|
partition_end: string # Where the partition should end (e.g. 10GB or -1 for end of volume)
|
||||||
|
```
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
@@ -314,6 +338,7 @@ k8s_nodes = {
|
|||||||
"az" = "sto3"
|
"az" = "sto3"
|
||||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||||
"floating_ip" = true
|
"floating_ip" = true
|
||||||
|
"extra_groups" = "calico_rr"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -424,7 +449,7 @@ This should finish fairly quickly telling you Terraform has successfully initial
|
|||||||
|
|
||||||
You can apply cloud-init based customization for the openstack instances before provisioning your cluster.
|
You can apply cloud-init based customization for the openstack instances before provisioning your cluster.
|
||||||
One common template is used for all instances. Adjust the file shown below:
|
One common template is used for all instances. Adjust the file shown below:
|
||||||
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml`
|
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl`
|
||||||
For example, to enable openstack novnc access and ansible_user=root SSH access:
|
For example, to enable openstack novnc access and ansible_user=root SSH access:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
|
|||||||
@@ -84,6 +84,7 @@ module "compute" {
|
|||||||
supplementary_node_groups = var.supplementary_node_groups
|
supplementary_node_groups = var.supplementary_node_groups
|
||||||
master_allowed_ports = var.master_allowed_ports
|
master_allowed_ports = var.master_allowed_ports
|
||||||
worker_allowed_ports = var.worker_allowed_ports
|
worker_allowed_ports = var.worker_allowed_ports
|
||||||
|
bastion_allowed_ports = var.bastion_allowed_ports
|
||||||
use_access_ip = var.use_access_ip
|
use_access_ip = var.use_access_ip
|
||||||
master_server_group_policy = var.master_server_group_policy
|
master_server_group_policy = var.master_server_group_policy
|
||||||
node_server_group_policy = var.node_server_group_policy
|
node_server_group_policy = var.node_server_group_policy
|
||||||
@@ -96,6 +97,12 @@ module "compute" {
|
|||||||
network_router_id = module.network.router_id
|
network_router_id = module.network.router_id
|
||||||
network_id = module.network.network_id
|
network_id = module.network.network_id
|
||||||
use_existing_network = var.use_existing_network
|
use_existing_network = var.use_existing_network
|
||||||
|
private_subnet_id = module.network.subnet_id
|
||||||
|
additional_server_groups = var.additional_server_groups
|
||||||
|
|
||||||
|
depends_on = [
|
||||||
|
module.network.subnet_id
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
output "private_subnet_id" {
|
output "private_subnet_id" {
|
||||||
@@ -111,7 +118,7 @@ output "router_id" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
output "k8s_master_fips" {
|
output "k8s_master_fips" {
|
||||||
value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
|
value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address]
|
||||||
}
|
}
|
||||||
|
|
||||||
output "k8s_node_fips" {
|
output "k8s_node_fips" {
|
||||||
|
|||||||
@@ -15,8 +15,14 @@ data "openstack_images_image_v2" "image_master" {
|
|||||||
name = var.image_master == "" ? var.image : var.image_master
|
name = var.image_master == "" ? var.image : var.image_master
|
||||||
}
|
}
|
||||||
|
|
||||||
data "template_file" "cloudinit" {
|
data "cloudinit_config" "cloudinit" {
|
||||||
template = file("${path.module}/templates/cloudinit.yaml")
|
part {
|
||||||
|
content_type = "text/cloud-config"
|
||||||
|
content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
|
||||||
|
# template_file doesn't support lists
|
||||||
|
extra_partitions = ""
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
data "openstack_networking_network_v2" "k8s_network" {
|
data "openstack_networking_network_v2" "k8s_network" {
|
||||||
@@ -82,6 +88,17 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
|
|||||||
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
|
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
|
||||||
|
count = length(var.bastion_allowed_ports)
|
||||||
|
direction = "ingress"
|
||||||
|
ethertype = "IPv4"
|
||||||
|
protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
|
||||||
|
port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
|
||||||
|
port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
|
||||||
|
remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
|
||||||
|
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
|
||||||
|
}
|
||||||
|
|
||||||
resource "openstack_networking_secgroup_v2" "k8s" {
|
resource "openstack_networking_secgroup_v2" "k8s" {
|
||||||
name = "${var.cluster_name}-k8s"
|
name = "${var.cluster_name}-k8s"
|
||||||
description = "${var.cluster_name} - Kubernetes"
|
description = "${var.cluster_name} - Kubernetes"
|
||||||
@@ -156,6 +173,12 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
|
|||||||
policies = [var.etcd_server_group_policy]
|
policies = [var.etcd_server_group_policy]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_servergroup_v2" "k8s_node_additional" {
|
||||||
|
for_each = var.additional_server_groups
|
||||||
|
name = "k8s-${each.key}-srvgrp"
|
||||||
|
policies = [each.value.policy]
|
||||||
|
}
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
# master groups
|
# master groups
|
||||||
master_sec_groups = compact([
|
master_sec_groups = compact([
|
||||||
@@ -185,6 +208,29 @@ locals {
|
|||||||
image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id
|
image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id
|
||||||
# image_master uuidimage_gfs_uuid
|
# image_master uuidimage_gfs_uuid
|
||||||
image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
|
image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
|
||||||
|
|
||||||
|
k8s_nodes_settings = {
|
||||||
|
for name, node in var.k8s_nodes :
|
||||||
|
name => {
|
||||||
|
"use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb) == 0,
|
||||||
|
"image_id" = node.image_id != null ? node.image_id : local.image_to_use_node,
|
||||||
|
"volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb,
|
||||||
|
"volume_type" = node.volume_type != null ? node.volume_type : var.node_volume_type,
|
||||||
|
"network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
|
||||||
|
"server_group" = node.server_group != null ? [openstack_compute_servergroup_v2.k8s_node_additional[node.server_group].id] : (var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : [])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
k8s_masters_settings = {
|
||||||
|
for name, node in var.k8s_masters :
|
||||||
|
name => {
|
||||||
|
"use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb) == 0,
|
||||||
|
"image_id" = node.image_id != null ? node.image_id : local.image_to_use_master,
|
||||||
|
"volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb,
|
||||||
|
"volume_type" = node.volume_type != null ? node.volume_type : var.master_volume_type,
|
||||||
|
"network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_networking_port_v2" "bastion_port" {
|
resource "openstack_networking_port_v2" "bastion_port" {
|
||||||
@@ -195,6 +241,12 @@ resource "openstack_networking_port_v2" "bastion_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -207,7 +259,7 @@ resource "openstack_compute_instance_v2" "bastion" {
|
|||||||
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||||
flavor_id = var.flavor_bastion
|
flavor_id = var.flavor_bastion
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||||
@@ -245,6 +297,12 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -258,7 +316,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
|||||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_k8s_master
|
flavor_id = var.flavor_k8s_master
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
@@ -300,11 +358,17 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
|||||||
resource "openstack_networking_port_v2" "k8s_masters_port" {
|
resource "openstack_networking_port_v2" "k8s_masters_port" {
|
||||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
||||||
name = "${var.cluster_name}-k8s-${each.key}"
|
name = "${var.cluster_name}-k8s-${each.key}"
|
||||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
network_id = local.k8s_masters_settings[each.key].network_id
|
||||||
admin_state_up = "true"
|
admin_state_up = "true"
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -315,17 +379,17 @@ resource "openstack_compute_instance_v2" "k8s_masters" {
|
|||||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
||||||
name = "${var.cluster_name}-k8s-${each.key}"
|
name = "${var.cluster_name}-k8s-${each.key}"
|
||||||
availability_zone = each.value.az
|
availability_zone = each.value.az
|
||||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = local.k8s_masters_settings[each.key].use_local_disk ? local.k8s_masters_settings[each.key].image_id : null
|
||||||
flavor_id = each.value.flavor
|
flavor_id = each.value.flavor
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
for_each = !local.k8s_masters_settings[each.key].use_local_disk ? [local.k8s_masters_settings[each.key].image_id] : []
|
||||||
content {
|
content {
|
||||||
uuid = local.image_to_use_master
|
uuid = block_device.value
|
||||||
source_type = "image"
|
source_type = "image"
|
||||||
volume_size = var.master_root_volume_size_in_gb
|
volume_size = local.k8s_masters_settings[each.key].volume_size
|
||||||
volume_type = var.master_volume_type
|
volume_type = local.k8s_masters_settings[each.key].volume_type
|
||||||
boot_index = 0
|
boot_index = 0
|
||||||
destination_type = "volume"
|
destination_type = "volume"
|
||||||
delete_on_termination = true
|
delete_on_termination = true
|
||||||
@@ -351,7 +415,7 @@ resource "openstack_compute_instance_v2" "k8s_masters" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
provisioner "local-exec" {
|
provisioner "local-exec" {
|
||||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.module}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -363,6 +427,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -376,7 +446,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
|||||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_k8s_master
|
flavor_id = var.flavor_k8s_master
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
@@ -423,6 +493,12 @@ resource "openstack_networking_port_v2" "etcd_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -436,7 +512,7 @@ resource "openstack_compute_instance_v2" "etcd" {
|
|||||||
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_etcd
|
flavor_id = var.flavor_etcd
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||||
@@ -477,6 +553,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -531,6 +613,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -544,7 +632,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
|||||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||||
flavor_id = var.flavor_k8s_master
|
flavor_id = var.flavor_k8s_master
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||||
@@ -586,6 +674,12 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -599,7 +693,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
|||||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||||
flavor_id = var.flavor_k8s_node
|
flavor_id = var.flavor_k8s_node
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||||
@@ -646,6 +740,12 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -659,7 +759,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
|||||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||||
flavor_id = var.flavor_k8s_node
|
flavor_id = var.flavor_k8s_node
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||||
@@ -679,9 +779,9 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
dynamic "scheduler_hints" {
|
dynamic "scheduler_hints" {
|
||||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : []
|
||||||
content {
|
content {
|
||||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
group = scheduler_hints.value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -696,11 +796,17 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
|||||||
resource "openstack_networking_port_v2" "k8s_nodes_port" {
|
resource "openstack_networking_port_v2" "k8s_nodes_port" {
|
||||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
network_id = local.k8s_nodes_settings[each.key].network_id
|
||||||
admin_state_up = "true"
|
admin_state_up = "true"
|
||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
@@ -711,18 +817,20 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
|||||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||||
availability_zone = each.value.az
|
availability_zone = each.value.az
|
||||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
image_id = local.k8s_nodes_settings[each.key].use_local_disk ? local.k8s_nodes_settings[each.key].image_id : null
|
||||||
flavor_id = each.value.flavor
|
flavor_id = each.value.flavor
|
||||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||||
user_data = data.template_file.cloudinit.rendered
|
user_data = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
|
||||||
|
extra_partitions = each.value.cloudinit.extra_partitions
|
||||||
|
}) : data.cloudinit_config.cloudinit.rendered
|
||||||
|
|
||||||
dynamic "block_device" {
|
dynamic "block_device" {
|
||||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
for_each = !local.k8s_nodes_settings[each.key].use_local_disk ? [local.k8s_nodes_settings[each.key].image_id] : []
|
||||||
content {
|
content {
|
||||||
uuid = local.image_to_use_node
|
uuid = block_device.value
|
||||||
source_type = "image"
|
source_type = "image"
|
||||||
volume_size = var.node_root_volume_size_in_gb
|
volume_size = local.k8s_nodes_settings[each.key].volume_size
|
||||||
volume_type = var.node_volume_type
|
volume_type = local.k8s_nodes_settings[each.key].volume_type
|
||||||
boot_index = 0
|
boot_index = 0
|
||||||
destination_type = "volume"
|
destination_type = "volume"
|
||||||
delete_on_termination = true
|
delete_on_termination = true
|
||||||
@@ -734,15 +842,15 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
dynamic "scheduler_hints" {
|
dynamic "scheduler_hints" {
|
||||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
for_each = local.k8s_nodes_settings[each.key].server_group
|
||||||
content {
|
content {
|
||||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
group = scheduler_hints.value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = var.ssh_user
|
ssh_user = var.ssh_user
|
||||||
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
|
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}${each.value.extra_groups != null ? ",${each.value.extra_groups}" : ""}"
|
||||||
depends_on = var.network_router_id
|
depends_on = var.network_router_id
|
||||||
use_access_ip = var.use_access_ip
|
use_access_ip = var.use_access_ip
|
||||||
}
|
}
|
||||||
@@ -760,6 +868,12 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
|
|||||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||||
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
||||||
no_security_groups = var.port_security_enabled ? null : false
|
no_security_groups = var.port_security_enabled ? null : false
|
||||||
|
dynamic "fixed_ip" {
|
||||||
|
for_each = var.private_subnet_id == "" ? [] : [true]
|
||||||
|
content {
|
||||||
|
subnet_id = var.private_subnet_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
depends_on = [
|
depends_on = [
|
||||||
var.network_router_id
|
var.network_router_id
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
# yamllint disable rule:comments
|
|
||||||
#cloud-config
|
|
||||||
## in some cases novnc console access is required
|
|
||||||
## it requires ssh password to be set
|
|
||||||
#ssh_pwauth: yes
|
|
||||||
#chpasswd:
|
|
||||||
# list: |
|
|
||||||
# root:secret
|
|
||||||
# expire: False
|
|
||||||
|
|
||||||
## in some cases direct root ssh access via ssh key is required
|
|
||||||
#disable_root: false
|
|
||||||
|
|
||||||
## in some cases additional CA certs are required
|
|
||||||
#ca-certs:
|
|
||||||
# trusted: |
|
|
||||||
# -----BEGIN CERTIFICATE-----
|
|
||||||
@@ -0,0 +1,39 @@
|
|||||||
|
%{~ if length(extra_partitions) > 0 }
|
||||||
|
#cloud-config
|
||||||
|
bootcmd:
|
||||||
|
%{~ for idx, partition in extra_partitions }
|
||||||
|
- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, ${partition.volume_path} ]
|
||||||
|
- [ cloud-init-per, once, create-part-${idx}, parted, --script, ${partition.volume_path}, 'mkpart extended ext4 ${partition.partition_start} ${partition.partition_end}' ]
|
||||||
|
- [ cloud-init-per, once, create-fs-part-${idx}, mkfs.ext4, ${partition.partition_path} ]
|
||||||
|
%{~ endfor }
|
||||||
|
|
||||||
|
runcmd:
|
||||||
|
%{~ for idx, partition in extra_partitions }
|
||||||
|
- mkdir -p ${partition.mount_path}
|
||||||
|
- chown nobody:nogroup ${partition.mount_path}
|
||||||
|
- mount ${partition.partition_path} ${partition.mount_path}
|
||||||
|
%{~ endfor }
|
||||||
|
|
||||||
|
mounts:
|
||||||
|
%{~ for idx, partition in extra_partitions }
|
||||||
|
- [ ${partition.partition_path}, ${partition.mount_path} ]
|
||||||
|
%{~ endfor }
|
||||||
|
%{~ else ~}
|
||||||
|
# yamllint disable rule:comments
|
||||||
|
#cloud-config
|
||||||
|
## in some cases novnc console access is required
|
||||||
|
## it requires ssh password to be set
|
||||||
|
#ssh_pwauth: yes
|
||||||
|
#chpasswd:
|
||||||
|
# list: |
|
||||||
|
# root:secret
|
||||||
|
# expire: False
|
||||||
|
|
||||||
|
## in some cases direct root ssh access via ssh key is required
|
||||||
|
#disable_root: false
|
||||||
|
|
||||||
|
## in some cases additional CA certs are required
|
||||||
|
#ca-certs:
|
||||||
|
# trusted: |
|
||||||
|
# -----BEGIN CERTIFICATE-----
|
||||||
|
%{~ endif }
|
||||||
@@ -116,9 +116,48 @@ variable "k8s_allowed_egress_ips" {
|
|||||||
type = list
|
type = list
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "k8s_masters" {}
|
variable "k8s_masters" {
|
||||||
|
type = map(object({
|
||||||
|
az = string
|
||||||
|
flavor = string
|
||||||
|
floating_ip = bool
|
||||||
|
etcd = bool
|
||||||
|
image_id = optional(string)
|
||||||
|
root_volume_size_in_gb = optional(number)
|
||||||
|
volume_type = optional(string)
|
||||||
|
network_id = optional(string)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
variable "k8s_nodes" {}
|
variable "k8s_nodes" {
|
||||||
|
type = map(object({
|
||||||
|
az = string
|
||||||
|
flavor = string
|
||||||
|
floating_ip = bool
|
||||||
|
extra_groups = optional(string)
|
||||||
|
image_id = optional(string)
|
||||||
|
root_volume_size_in_gb = optional(number)
|
||||||
|
volume_type = optional(string)
|
||||||
|
network_id = optional(string)
|
||||||
|
additional_server_groups = optional(list(string))
|
||||||
|
server_group = optional(string)
|
||||||
|
cloudinit = optional(object({
|
||||||
|
extra_partitions = list(object({
|
||||||
|
volume_path = string
|
||||||
|
partition_path = string
|
||||||
|
partition_start = string
|
||||||
|
partition_end = string
|
||||||
|
mount_path = string
|
||||||
|
}))
|
||||||
|
}))
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "additional_server_groups" {
|
||||||
|
type = map(object({
|
||||||
|
policy = string
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
variable "supplementary_master_groups" {
|
variable "supplementary_master_groups" {
|
||||||
default = ""
|
default = ""
|
||||||
@@ -136,6 +175,10 @@ variable "worker_allowed_ports" {
|
|||||||
type = list
|
type = list
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "bastion_allowed_ports" {
|
||||||
|
type = list
|
||||||
|
}
|
||||||
|
|
||||||
variable "use_access_ip" {}
|
variable "use_access_ip" {}
|
||||||
|
|
||||||
variable "master_server_group_policy" {
|
variable "master_server_group_policy" {
|
||||||
@@ -185,3 +228,7 @@ variable "port_security_enabled" {
|
|||||||
variable "force_null_port_security" {
|
variable "force_null_port_security" {
|
||||||
type = bool
|
type = bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "private_subnet_id" {
|
||||||
|
type = string
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,5 +4,6 @@ terraform {
|
|||||||
source = "terraform-provider-openstack/openstack"
|
source = "terraform-provider-openstack/openstack"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
required_version = ">= 0.12.26"
|
experiments = [module_variable_optional_attrs]
|
||||||
|
required_version = ">= 0.14.0"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -257,6 +257,12 @@ variable "worker_allowed_ports" {
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "bastion_allowed_ports" {
|
||||||
|
type = list(any)
|
||||||
|
|
||||||
|
default = []
|
||||||
|
}
|
||||||
|
|
||||||
variable "use_access_ip" {
|
variable "use_access_ip" {
|
||||||
default = 1
|
default = 1
|
||||||
}
|
}
|
||||||
@@ -294,6 +300,13 @@ variable "k8s_nodes" {
|
|||||||
default = {}
|
default = {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "additional_server_groups" {
|
||||||
|
default = {}
|
||||||
|
type = map(object({
|
||||||
|
policy = string
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
variable "extra_sec_groups" {
|
variable "extra_sec_groups" {
|
||||||
default = false
|
default = false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,5 +5,6 @@ terraform {
|
|||||||
version = "~> 1.17"
|
version = "~> 1.17"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
required_version = ">= 0.12.26"
|
experiments = [module_variable_optional_attrs]
|
||||||
|
required_version = ">= 0.14.0"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -212,7 +212,7 @@ def metal_device(resource, tfvars=None):
|
|||||||
'project_id': raw_attrs['project_id'],
|
'project_id': raw_attrs['project_id'],
|
||||||
'state': raw_attrs['state'],
|
'state': raw_attrs['state'],
|
||||||
# ansible
|
# ansible
|
||||||
'ansible_ssh_host': raw_attrs['network.0.address'],
|
'ansible_host': raw_attrs['network.0.address'],
|
||||||
'ansible_ssh_user': 'root', # Use root by default in metal
|
'ansible_ssh_user': 'root', # Use root by default in metal
|
||||||
# generic
|
# generic
|
||||||
'ipv4_address': raw_attrs['network.0.address'],
|
'ipv4_address': raw_attrs['network.0.address'],
|
||||||
@@ -292,16 +292,16 @@ def openstack_host(resource, module_name):
|
|||||||
try:
|
try:
|
||||||
if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
|
if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
|
||||||
attrs.update({
|
attrs.update({
|
||||||
'ansible_ssh_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
|
'ansible_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
|
||||||
'publicly_routable': True,
|
'publicly_routable': True,
|
||||||
})
|
})
|
||||||
else:
|
else:
|
||||||
attrs.update({
|
attrs.update({
|
||||||
'ansible_ssh_host': raw_attrs['access_ip_v4'],
|
'ansible_host': raw_attrs['access_ip_v4'],
|
||||||
'publicly_routable': True,
|
'publicly_routable': True,
|
||||||
})
|
})
|
||||||
except (KeyError, ValueError):
|
except (KeyError, ValueError):
|
||||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
attrs.update({'ansible_host': '', 'publicly_routable': False})
|
||||||
|
|
||||||
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
|
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
|
||||||
|
|
||||||
@@ -349,7 +349,7 @@ def iter_host_ips(hosts, ips):
|
|||||||
'access_ip_v4': ip,
|
'access_ip_v4': ip,
|
||||||
'access_ip': ip,
|
'access_ip': ip,
|
||||||
'public_ipv4': ip,
|
'public_ipv4': ip,
|
||||||
'ansible_ssh_host': ip,
|
'ansible_host': ip,
|
||||||
})
|
})
|
||||||
|
|
||||||
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
|
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
|
||||||
@@ -389,7 +389,7 @@ def query_list(hosts):
|
|||||||
def query_hostfile(hosts):
|
def query_hostfile(hosts):
|
||||||
out = ['## begin hosts generated by terraform.py ##']
|
out = ['## begin hosts generated by terraform.py ##']
|
||||||
out.extend(
|
out.extend(
|
||||||
'{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name)
|
'{}\t{}'.format(attrs['ansible_host'].ljust(16), name)
|
||||||
for name, attrs, _ in hosts
|
for name, attrs, _ in hosts
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -112,12 +112,26 @@ terraform destroy --var-file cluster-settings.tfvars \
|
|||||||
* `size`: The size of the additional disk in GB
|
* `size`: The size of the additional disk in GB
|
||||||
* `tier`: The tier of disk to use (`maxiops` is the only one you can choose atm)
|
* `tier`: The tier of disk to use (`maxiops` is the only one you can choose atm)
|
||||||
* `firewall_enabled`: Enable firewall rules
|
* `firewall_enabled`: Enable firewall rules
|
||||||
|
* `firewall_default_deny_in`: Set the firewall to deny inbound traffic by default. Automatically adds UpCloud DNS server and NTP port allowlisting.
|
||||||
|
* `firewall_default_deny_out`: Set the firewall to deny outbound traffic by default.
|
||||||
* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access API of masters
|
* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access API of masters
|
||||||
* `start_address`: Start of address range to allow
|
* `start_address`: Start of address range to allow
|
||||||
* `end_address`: End of address range to allow
|
* `end_address`: End of address range to allow
|
||||||
* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes
|
* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes
|
||||||
* `start_address`: Start of address range to allow
|
* `start_address`: Start of address range to allow
|
||||||
* `end_address`: End of address range to allow
|
* `end_address`: End of address range to allow
|
||||||
|
* `master_allowed_ports`: List of port ranges that should be allowed to access the masters
|
||||||
|
* `protocol`: Protocol *(tcp|udp|icmp)*
|
||||||
|
* `port_range_min`: Start of port range to allow
|
||||||
|
* `port_range_max`: End of port range to allow
|
||||||
|
* `start_address`: Start of address range to allow
|
||||||
|
* `end_address`: End of address range to allow
|
||||||
|
* `worker_allowed_ports`: List of port ranges that should be allowed to access the workers
|
||||||
|
* `protocol`: Protocol *(tcp|udp|icmp)*
|
||||||
|
* `port_range_min`: Start of port range to allow
|
||||||
|
* `port_range_max`: End of port range to allow
|
||||||
|
* `start_address`: Start of address range to allow
|
||||||
|
* `end_address`: End of address range to allow
|
||||||
* `loadbalancer_enabled`: Enable managed load balancer
|
* `loadbalancer_enabled`: Enable managed load balancer
|
||||||
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
|
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
|
||||||
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends
|
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends
|
||||||
|
|||||||
@@ -95,7 +95,9 @@ machines = {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
firewall_enabled = false
|
firewall_enabled = false
|
||||||
|
firewall_default_deny_in = false
|
||||||
|
firewall_default_deny_out = false
|
||||||
|
|
||||||
master_allowed_remote_ips = [
|
master_allowed_remote_ips = [
|
||||||
{
|
{
|
||||||
@@ -111,6 +113,9 @@ k8s_allowed_remote_ips = [
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
|
master_allowed_ports = []
|
||||||
|
worker_allowed_ports = []
|
||||||
|
|
||||||
loadbalancer_enabled = false
|
loadbalancer_enabled = false
|
||||||
loadbalancer_plan = "development"
|
loadbalancer_plan = "development"
|
||||||
loadbalancers = {
|
loadbalancers = {
|
||||||
|
|||||||
@@ -24,8 +24,12 @@ module "kubernetes" {
|
|||||||
ssh_public_keys = var.ssh_public_keys
|
ssh_public_keys = var.ssh_public_keys
|
||||||
|
|
||||||
firewall_enabled = var.firewall_enabled
|
firewall_enabled = var.firewall_enabled
|
||||||
|
firewall_default_deny_in = var.firewall_default_deny_in
|
||||||
|
firewall_default_deny_out = var.firewall_default_deny_out
|
||||||
master_allowed_remote_ips = var.master_allowed_remote_ips
|
master_allowed_remote_ips = var.master_allowed_remote_ips
|
||||||
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
|
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
|
||||||
|
master_allowed_ports = var.master_allowed_ports
|
||||||
|
worker_allowed_ports = var.worker_allowed_ports
|
||||||
|
|
||||||
loadbalancer_enabled = var.loadbalancer_enabled
|
loadbalancer_enabled = var.loadbalancer_enabled
|
||||||
loadbalancer_plan = var.loadbalancer_plan
|
loadbalancer_plan = var.loadbalancer_plan
|
||||||
|
|||||||
@@ -228,6 +228,126 @@ resource "upcloud_firewall_rules" "master" {
|
|||||||
source_address_start = "0.0.0.0"
|
source_address_start = "0.0.0.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.master_allowed_ports
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "Allow access on this port"
|
||||||
|
destination_port_end = firewall_rule.value.port_range_max
|
||||||
|
destination_port_start = firewall_rule.value.port_range_min
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value.protocol
|
||||||
|
source_address_end = firewall_rule.value.end_address
|
||||||
|
source_address_start = firewall_rule.value.start_address
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "94.237.40.9"
|
||||||
|
source_address_start = "94.237.40.9"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "94.237.127.9"
|
||||||
|
source_address_start = "94.237.127.9"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "2a04:3540:53::1"
|
||||||
|
source_address_start = "2a04:3540:53::1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "2a04:3544:53::1"
|
||||||
|
source_address_start = "2a04:3544:53::1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "NTP Port"
|
||||||
|
source_port_end = "123"
|
||||||
|
source_port_start = "123"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "255.255.255.255"
|
||||||
|
source_address_start = "0.0.0.0"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "NTP Port"
|
||||||
|
source_port_end = "123"
|
||||||
|
source_port_start = "123"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
firewall_rule {
|
||||||
|
action = var.firewall_default_deny_in ? "drop" : "accept"
|
||||||
|
direction = "in"
|
||||||
|
}
|
||||||
|
|
||||||
|
firewall_rule {
|
||||||
|
action = var.firewall_default_deny_out ? "drop" : "accept"
|
||||||
|
direction = "out"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "upcloud_firewall_rules" "k8s" {
|
resource "upcloud_firewall_rules" "k8s" {
|
||||||
@@ -265,6 +385,126 @@ resource "upcloud_firewall_rules" "k8s" {
|
|||||||
source_address_start = "0.0.0.0"
|
source_address_start = "0.0.0.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.worker_allowed_ports
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "Allow access on this port"
|
||||||
|
destination_port_end = firewall_rule.value.port_range_max
|
||||||
|
destination_port_start = firewall_rule.value.port_range_min
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value.protocol
|
||||||
|
source_address_end = firewall_rule.value.end_address
|
||||||
|
source_address_start = firewall_rule.value.start_address
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "94.237.40.9"
|
||||||
|
source_address_start = "94.237.40.9"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "94.237.127.9"
|
||||||
|
source_address_start = "94.237.127.9"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "2a04:3540:53::1"
|
||||||
|
source_address_start = "2a04:3540:53::1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "UpCloud DNS"
|
||||||
|
source_port_end = "53"
|
||||||
|
source_port_start = "53"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "2a04:3544:53::1"
|
||||||
|
source_address_start = "2a04:3544:53::1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "NTP Port"
|
||||||
|
source_port_end = "123"
|
||||||
|
source_port_start = "123"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv4"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
source_address_end = "255.255.255.255"
|
||||||
|
source_address_start = "0.0.0.0"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dynamic firewall_rule {
|
||||||
|
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||||
|
|
||||||
|
content {
|
||||||
|
action = "accept"
|
||||||
|
comment = "NTP Port"
|
||||||
|
source_port_end = "123"
|
||||||
|
source_port_start = "123"
|
||||||
|
direction = "in"
|
||||||
|
family = "IPv6"
|
||||||
|
protocol = firewall_rule.value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
firewall_rule {
|
||||||
|
action = var.firewall_default_deny_in ? "drop" : "accept"
|
||||||
|
direction = "in"
|
||||||
|
}
|
||||||
|
|
||||||
|
firewall_rule {
|
||||||
|
action = var.firewall_default_deny_out ? "drop" : "accept"
|
||||||
|
direction = "out"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "upcloud_loadbalancer" "lb" {
|
resource "upcloud_loadbalancer" "lb" {
|
||||||
|
|||||||
@@ -49,6 +49,34 @@ variable "k8s_allowed_remote_ips" {
|
|||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "master_allowed_ports" {
|
||||||
|
type = list(object({
|
||||||
|
protocol = string
|
||||||
|
port_range_min = number
|
||||||
|
port_range_max = number
|
||||||
|
start_address = string
|
||||||
|
end_address = string
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "worker_allowed_ports" {
|
||||||
|
type = list(object({
|
||||||
|
protocol = string
|
||||||
|
port_range_min = number
|
||||||
|
port_range_max = number
|
||||||
|
start_address = string
|
||||||
|
end_address = string
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "firewall_default_deny_in" {
|
||||||
|
type = bool
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "firewall_default_deny_out" {
|
||||||
|
type = bool
|
||||||
|
}
|
||||||
|
|
||||||
variable "loadbalancer_enabled" {
|
variable "loadbalancer_enabled" {
|
||||||
type = bool
|
type = bool
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ terraform {
|
|||||||
required_providers {
|
required_providers {
|
||||||
upcloud = {
|
upcloud = {
|
||||||
source = "UpCloudLtd/upcloud"
|
source = "UpCloudLtd/upcloud"
|
||||||
version = "~>2.4.0"
|
version = "~>2.5.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
required_version = ">= 0.13"
|
required_version = ">= 0.13"
|
||||||
|
|||||||
@@ -95,7 +95,10 @@ machines = {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
firewall_enabled = false
|
firewall_enabled = false
|
||||||
|
firewall_default_deny_in = false
|
||||||
|
firewall_default_deny_out = false
|
||||||
|
|
||||||
|
|
||||||
master_allowed_remote_ips = [
|
master_allowed_remote_ips = [
|
||||||
{
|
{
|
||||||
@@ -111,6 +114,9 @@ k8s_allowed_remote_ips = [
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
|
master_allowed_ports = []
|
||||||
|
worker_allowed_ports = []
|
||||||
|
|
||||||
loadbalancer_enabled = false
|
loadbalancer_enabled = false
|
||||||
loadbalancer_plan = "development"
|
loadbalancer_plan = "development"
|
||||||
loadbalancers = {
|
loadbalancers = {
|
||||||
|
|||||||
@@ -79,6 +79,38 @@ variable "k8s_allowed_remote_ips" {
|
|||||||
default = []
|
default = []
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "master_allowed_ports" {
|
||||||
|
description = "List of ports to allow on masters"
|
||||||
|
type = list(object({
|
||||||
|
protocol = string
|
||||||
|
port_range_min = number
|
||||||
|
port_range_max = number
|
||||||
|
start_address = string
|
||||||
|
end_address = string
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "worker_allowed_ports" {
|
||||||
|
description = "List of ports to allow on workers"
|
||||||
|
type = list(object({
|
||||||
|
protocol = string
|
||||||
|
port_range_min = number
|
||||||
|
port_range_max = number
|
||||||
|
start_address = string
|
||||||
|
end_address = string
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "firewall_default_deny_in" {
|
||||||
|
description = "Add firewall policies that deny all inbound traffic by default"
|
||||||
|
default = false
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "firewall_default_deny_out" {
|
||||||
|
description = "Add firewall policies that deny all outbound traffic by default"
|
||||||
|
default = false
|
||||||
|
}
|
||||||
|
|
||||||
variable "loadbalancer_enabled" {
|
variable "loadbalancer_enabled" {
|
||||||
description = "Enable load balancer"
|
description = "Enable load balancer"
|
||||||
default = false
|
default = false
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ terraform {
|
|||||||
required_providers {
|
required_providers {
|
||||||
upcloud = {
|
upcloud = {
|
||||||
source = "UpCloudLtd/upcloud"
|
source = "UpCloudLtd/upcloud"
|
||||||
version = "~>2.4.0"
|
version = "~>2.5.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
required_version = ">= 0.13"
|
required_version = ">= 0.13"
|
||||||
|
|||||||
@@ -35,9 +35,7 @@ This setup assumes that the DHCP is disabled in the vSphere cluster and IP addre
|
|||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
* Terraform 0.13.0 or newer
|
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
|
||||||
|
|
||||||
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
|
|
||||||
|
|
||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
|
|||||||
@@ -23,7 +23,9 @@ variable "vsphere_datastore" {}
|
|||||||
|
|
||||||
variable "vsphere_user" {}
|
variable "vsphere_user" {}
|
||||||
|
|
||||||
variable "vsphere_password" {}
|
variable "vsphere_password" {
|
||||||
|
sensitive = true
|
||||||
|
}
|
||||||
|
|
||||||
variable "vsphere_server" {}
|
variable "vsphere_server" {}
|
||||||
|
|
||||||
|
|||||||
@@ -4,12 +4,6 @@ terraform {
|
|||||||
source = "hashicorp/vsphere"
|
source = "hashicorp/vsphere"
|
||||||
version = ">= 1.24.3"
|
version = ">= 1.24.3"
|
||||||
}
|
}
|
||||||
null = {
|
|
||||||
source = "hashicorp/null"
|
|
||||||
}
|
|
||||||
template = {
|
|
||||||
source = "hashicorp/template"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
required_version = ">= 0.13"
|
required_version = ">= 0.13"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -35,7 +35,10 @@
|
|||||||
* [OpenSUSE](docs/opensuse.md)
|
* [OpenSUSE](docs/opensuse.md)
|
||||||
* [RedHat Enterprise Linux](docs/rhel.md)
|
* [RedHat Enterprise Linux](docs/rhel.md)
|
||||||
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
|
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
|
||||||
|
* [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
|
||||||
* [Amazon Linux 2](docs/amazonlinux.md)
|
* [Amazon Linux 2](docs/amazonlinux.md)
|
||||||
|
* [UOS Linux](docs/uoslinux.md)
|
||||||
|
* [openEuler notes](docs/openeuler.md))
|
||||||
* CRI
|
* CRI
|
||||||
* [Containerd](docs/containerd.md)
|
* [Containerd](docs/containerd.md)
|
||||||
* [Docker](docs/docker.md)
|
* [Docker](docs/docker.md)
|
||||||
@@ -50,6 +53,7 @@
|
|||||||
* [DNS Stack](docs/dns-stack.md)
|
* [DNS Stack](docs/dns-stack.md)
|
||||||
* [Kubernetes reliability](docs/kubernetes-reliability.md)
|
* [Kubernetes reliability](docs/kubernetes-reliability.md)
|
||||||
* [Local Registry](docs/kubernetes-apps/registry.md)
|
* [Local Registry](docs/kubernetes-apps/registry.md)
|
||||||
|
* [NTP](docs/ntp.md)
|
||||||
* External Storage Provisioners
|
* External Storage Provisioners
|
||||||
* [RBD Provisioner](docs/kubernetes-apps/rbd_provisioner.md)
|
* [RBD Provisioner](docs/kubernetes-apps/rbd_provisioner.md)
|
||||||
* [CEPHFS Provisioner](docs/kubernetes-apps/cephfs_provisioner.md)
|
* [CEPHFS Provisioner](docs/kubernetes-apps/cephfs_provisioner.md)
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ Amazon Linux is supported with docker,containerd and cri-o runtimes.
|
|||||||
**Note:** that Amazon Linux is not currently covered in kubespray CI and
|
**Note:** that Amazon Linux is not currently covered in kubespray CI and
|
||||||
support for it is currently considered experimental.
|
support for it is currently considered experimental.
|
||||||
|
|
||||||
Amazon Linux 2, while derrived from the Redhat OS family, does not keep in
|
Amazon Linux 2, while derived from the Redhat OS family, does not keep in
|
||||||
sync with RHEL upstream like CentOS/AlmaLinux/Oracle Linux. In order to use
|
sync with RHEL upstream like CentOS/AlmaLinux/Oracle Linux. In order to use
|
||||||
Amazon Linux as the ansible host for your kubespray deployments you need to
|
Amazon Linux as the ansible host for your kubespray deployments you need to
|
||||||
manually install `python3` and deploy ansible and kubespray dependencies in
|
manually install `python3` and deploy ansible and kubespray dependencies in
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
## Installing Ansible
|
## Installing Ansible
|
||||||
|
|
||||||
Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
|
Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
|
||||||
Depending on your available python version you may be limited in chooding which ansible version to use.
|
Depending on your available python version you may be limited in choosing which ansible version to use.
|
||||||
|
|
||||||
It is recommended to deploy the ansible version used by kubespray into a python virtual environment.
|
It is recommended to deploy the ansible version used by kubespray into a python virtual environment.
|
||||||
|
|
||||||
@@ -13,7 +13,7 @@ KUBESPRAYDIR=kubespray
|
|||||||
ANSIBLE_VERSION=2.12
|
ANSIBLE_VERSION=2.12
|
||||||
virtualenv --python=$(which python3) $VENVDIR
|
virtualenv --python=$(which python3) $VENVDIR
|
||||||
source $VENVDIR/bin/activate
|
source $VENVDIR/bin/activate
|
||||||
cd $KUESPRAYDIR
|
cd $KUBESPRAYDIR
|
||||||
pip install -U -r requirements-$ANSIBLE_VERSION.txt
|
pip install -U -r requirements-$ANSIBLE_VERSION.txt
|
||||||
test -f requirements-$ANSIBLE_VERSION.yml && \
|
test -f requirements-$ANSIBLE_VERSION.yml && \
|
||||||
ansible-galaxy role install -r requirements-$ANSIBLE_VERSION.yml && \
|
ansible-galaxy role install -r requirements-$ANSIBLE_VERSION.yml && \
|
||||||
@@ -26,8 +26,6 @@ Based on the table below and the available python version for your ansible host
|
|||||||
|
|
||||||
| Ansible Version | Python Version |
|
| Ansible Version | Python Version |
|
||||||
| --------------- | -------------- |
|
| --------------- | -------------- |
|
||||||
| 2.9 | 2.7,3.5-3.8 |
|
|
||||||
| 2.10 | 2.7,3.5-3.8 |
|
|
||||||
| 2.11 | 2.7,3.5-3.9 |
|
| 2.11 | 2.7,3.5-3.9 |
|
||||||
| 2.12 | 3.8-3.10 |
|
| 2.12 | 3.8-3.10 |
|
||||||
|
|
||||||
@@ -269,7 +267,7 @@ Note: use `--tags` and `--skip-tags` wise and only if you're 100% sure what you'
|
|||||||
## Bastion host
|
## Bastion host
|
||||||
|
|
||||||
If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
|
If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
|
||||||
you can use a so called *bastion* host to connect to your nodes. To specify and use a bastion,
|
you can use a so-called _bastion_ host to connect to your nodes. To specify and use a bastion,
|
||||||
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
|
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
|
||||||
bastion host.
|
bastion host.
|
||||||
|
|
||||||
@@ -283,7 +281,7 @@ For more information about Ansible and bastion hosts, read
|
|||||||
|
|
||||||
## Mitogen
|
## Mitogen
|
||||||
|
|
||||||
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
|
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.
|
||||||
|
|
||||||
## Beyond ansible 2.9
|
## Beyond ansible 2.9
|
||||||
|
|
||||||
@@ -292,7 +290,7 @@ two projects which are now joined under the Ansible umbrella.
|
|||||||
|
|
||||||
Ansible-base (2.10.x branch) will contain just the ansible language implementation while
|
Ansible-base (2.10.x branch) will contain just the ansible language implementation while
|
||||||
ansible modules that were previously bundled into a single repository will be part of the
|
ansible modules that were previously bundled into a single repository will be part of the
|
||||||
ansible 3.x package. Pleasee see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
|
ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
|
||||||
that explains in detail the need and the evolution plan.
|
that explains in detail the need and the evolution plan.
|
||||||
|
|
||||||
**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
|
**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ Check the associated storage class (if you enabled persistent_volumes):
|
|||||||
```ShellSession
|
```ShellSession
|
||||||
$ kubectl get storageclass
|
$ kubectl get storageclass
|
||||||
NAME PROVISIONER AGE
|
NAME PROVISIONER AGE
|
||||||
ebs-sc ebs.csi.aws.com 45s
|
ebs-sc ebs.csi.aws.com 45s
|
||||||
```
|
```
|
||||||
|
|
||||||
You can run a PVC and an example Pod using this file `ebs-pod.yml`:
|
You can run a PVC and an example Pod using this file `ebs-pod.yml`:
|
||||||
@@ -71,8 +71,8 @@ You should see the PVC provisioned and bound:
|
|||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
$ kubectl get pvc
|
$ kubectl get pvc
|
||||||
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
|
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
|
||||||
ebs-claim Bound pvc-0034cb9e-1ddd-4b3f-bb9e-0b5edbf5194c 1Gi RWO ebs-sc 50s
|
ebs-claim Bound pvc-0034cb9e-1ddd-4b3f-bb9e-0b5edbf5194c 1Gi RWO ebs-sc 50s
|
||||||
```
|
```
|
||||||
|
|
||||||
And the volume mounted to the example Pod (wait until the Pod is Running):
|
And the volume mounted to the example Pod (wait until the Pod is Running):
|
||||||
|
|||||||
@@ -57,19 +57,28 @@ The name of the network security group your instances are in, can be retrieved v
|
|||||||
These will have to be generated first:
|
These will have to be generated first:
|
||||||
|
|
||||||
- Create an Azure AD Application with:
|
- Create an Azure AD Application with:
|
||||||
`az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET
|
||||||
|
```
|
||||||
|
|
||||||
Display name, identifier-uri, homepage and the password can be chosen
|
Display name, identifier-uri, homepage and the password can be chosen
|
||||||
|
|
||||||
Note the AppId in the output.
|
Note the AppId in the output.
|
||||||
|
|
||||||
- Create Service principal for the application with:
|
- Create Service principal for the application with:
|
||||||
`az ad sp create --id AppId`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad sp create --id AppId
|
||||||
|
```
|
||||||
|
|
||||||
This is the AppId from the last command
|
This is the AppId from the last command
|
||||||
|
|
||||||
- Create the role assignment with:
|
- Create the role assignment with:
|
||||||
`az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`
|
|
||||||
|
```ShellSession
|
||||||
|
az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID
|
||||||
|
```
|
||||||
|
|
||||||
azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
|
azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
|
||||||
|
|
||||||
|
|||||||
@@ -71,14 +71,27 @@ The name of the resource group that contains the route table. Defaults to `azur
|
|||||||
These will have to be generated first:
|
These will have to be generated first:
|
||||||
|
|
||||||
- Create an Azure AD Application with:
|
- Create an Azure AD Application with:
|
||||||
`az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET
|
||||||
|
```
|
||||||
|
|
||||||
display name, identifier-uri, homepage and the password can be chosen
|
display name, identifier-uri, homepage and the password can be chosen
|
||||||
Note the AppId in the output.
|
Note the AppId in the output.
|
||||||
|
|
||||||
- Create Service principal for the application with:
|
- Create Service principal for the application with:
|
||||||
`az ad sp create --id AppId`
|
|
||||||
|
```ShellSession
|
||||||
|
az ad sp create --id AppId
|
||||||
|
```
|
||||||
|
|
||||||
This is the AppId from the last command
|
This is the AppId from the last command
|
||||||
|
|
||||||
- Create the role assignment with:
|
- Create the role assignment with:
|
||||||
`az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
|
|
||||||
|
```ShellSession
|
||||||
|
az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID
|
||||||
|
```
|
||||||
|
|
||||||
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
||||||
|
|
||||||
|
|||||||
@@ -48,11 +48,13 @@ The `kubespray-defaults` role is expected to be run before this role.
|
|||||||
|
|
||||||
Remember to disable fact gathering since Python might not be present on hosts.
|
Remember to disable fact gathering since Python might not be present on hosts.
|
||||||
|
|
||||||
- hosts: all
|
```yaml
|
||||||
gather_facts: false # not all hosts might be able to run modules yet
|
- hosts: all
|
||||||
roles:
|
gather_facts: false # not all hosts might be able to run modules yet
|
||||||
- kubespray-defaults
|
roles:
|
||||||
- bootstrap-os
|
- kubespray-defaults
|
||||||
|
- bootstrap-os
|
||||||
|
```
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
|
|||||||
@@ -72,9 +72,14 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
|
|||||||
|
|
||||||
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
|
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
|
||||||
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
|
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
|
||||||
The following variables need to be set:
|
The following variables need to be set as follows:
|
||||||
`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
|
|
||||||
you'll need to edit the inventory and add a hostvar `local_as` by node.
|
```yml
|
||||||
|
peer_with_router: true # enable the peering with the datacenter's border router (default value: false).
|
||||||
|
nat_outgoing: false # (optional) NAT outgoing (default value: true).
|
||||||
|
```
|
||||||
|
|
||||||
|
And you'll need to edit the inventory and add a hostvar `local_as` by node.
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
|
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
|
||||||
@@ -124,8 +129,7 @@ You need to edit your inventory and add:
|
|||||||
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
|
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
|
||||||
`kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
|
`kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
|
||||||
group of `k8s_cluster` group.
|
group of `k8s_cluster` group.
|
||||||
* `cluster_id` by route reflector node/group (see details
|
* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))
|
||||||
[here](https://hub.docker.com/r/calico/routereflector/))
|
|
||||||
|
|
||||||
Here's an example of Kubespray inventory with standalone route reflectors:
|
Here's an example of Kubespray inventory with standalone route reflectors:
|
||||||
|
|
||||||
@@ -172,6 +176,8 @@ node5
|
|||||||
|
|
||||||
[rack0:vars]
|
[rack0:vars]
|
||||||
cluster_id="1.0.0.1"
|
cluster_id="1.0.0.1"
|
||||||
|
calico_rr_id=rr1
|
||||||
|
calico_group_id=rr1
|
||||||
```
|
```
|
||||||
|
|
||||||
The inventory above will deploy the following topology assuming that calico's
|
The inventory above will deploy the following topology assuming that calico's
|
||||||
@@ -199,6 +205,14 @@ To re-define health host please set the following variable in your inventory:
|
|||||||
calico_healthhost: "0.0.0.0"
|
calico_healthhost: "0.0.0.0"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Optional : Configure VXLAN hardware Offload
|
||||||
|
|
||||||
|
The VXLAN Offload is disabled by default. It can be configured like this to enable it:
|
||||||
|
|
||||||
|
```yml
|
||||||
|
calico_feature_detect_override: "ChecksumOffloadBroken=false" # The vxlan offload will be enabled (it may cause problems on buggy NIC drivers)
|
||||||
|
```
|
||||||
|
|
||||||
### Optional : Configure Calico Node probe timeouts
|
### Optional : Configure Calico Node probe timeouts
|
||||||
|
|
||||||
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
|
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
|
||||||
@@ -212,7 +226,7 @@ calico_node_readinessprobe_timeout: 10
|
|||||||
|
|
||||||
Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.
|
Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.
|
||||||
|
|
||||||
*IP in IP* and *VXLAN* is mutualy exclusive modes.
|
*IP in IP* and *VXLAN* are mutually exclusive modes.
|
||||||
|
|
||||||
Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
|
Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.
|
||||||
|
|
||||||
@@ -221,6 +235,8 @@ If you are running your cluster with the default calico settings and are upgradi
|
|||||||
* perform a manual migration to vxlan before upgrading kubespray (see migrating from IP in IP to VXLAN below)
|
* perform a manual migration to vxlan before upgrading kubespray (see migrating from IP in IP to VXLAN below)
|
||||||
* pin the pre-2.19 settings in your ansible inventory (see IP in IP mode settings below)
|
* pin the pre-2.19 settings in your ansible inventory (see IP in IP mode settings below)
|
||||||
|
|
||||||
|
**Note:** Vxlan in ipv6 is only supported when kernel >= 3.12. So if your kernel version < 3.12, please don't set `calico_vxlan_mode_ipv6: vxlanAlways`. For more details see [#Issue 6877](https://github.com/projectcalico/calico/issues/6877).
|
||||||
|
|
||||||
### IP in IP mode
|
### IP in IP mode
|
||||||
|
|
||||||
To configure Ip in Ip mode you need to use the bird network backend.
|
To configure Ip in Ip mode you need to use the bird network backend.
|
||||||
@@ -245,14 +261,14 @@ calico_network_backend: 'bird'
|
|||||||
|
|
||||||
If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.
|
If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.
|
||||||
|
|
||||||
Execute the following sters on one of the control plane nodes, ensure the cluster in healthy before proceeding.
|
Execute the following steps on one of the control plane nodes, ensure the cluster is healthy before proceeding.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
|
calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
|
||||||
calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
|
calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
|
||||||
```
|
```
|
||||||
|
|
||||||
**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool creaded by kubespray.
|
**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool created by kubespray.
|
||||||
|
|
||||||
Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.
|
Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.
|
||||||
|
|
||||||
@@ -315,6 +331,13 @@ calico_ipam_host_local: true
|
|||||||
|
|
||||||
Refer to Project Calico section [Using host-local IPAM](https://docs.projectcalico.org/reference/cni-plugin/configuration#using-host-local-ipam) for further information.
|
Refer to Project Calico section [Using host-local IPAM](https://docs.projectcalico.org/reference/cni-plugin/configuration#using-host-local-ipam) for further information.
|
||||||
|
|
||||||
|
### Optional : Disable CNI logging to disk
|
||||||
|
|
||||||
|
Calico CNI plugin logs to /var/log/calico/cni/cni.log and to stderr.
|
||||||
|
stderr of CNI plugins can be found in the logs of container runtime.
|
||||||
|
|
||||||
|
You can disable Calico CNI logging to disk by setting `calico_cni_log_file_path: false`.
|
||||||
|
|
||||||
## eBPF Support
|
## eBPF Support
|
||||||
|
|
||||||
Calico supports eBPF for its data plane see [an introduction to the Calico eBPF Dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/) for further information.
|
Calico supports eBPF for its data plane see [an introduction to the Calico eBPF Dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/) for further information.
|
||||||
@@ -362,7 +385,7 @@ use_localhost_as_kubeapi_loadbalancer: true
|
|||||||
|
|
||||||
### Tunneled versus Direct Server Return
|
### Tunneled versus Direct Server Return
|
||||||
|
|
||||||
By default Calico usese Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
|
By default Calico uses Tunneled service mode but it can use direct server return (DSR) in order to optimize the return path for a service.
|
||||||
|
|
||||||
To configure DSR:
|
To configure DSR:
|
||||||
|
|
||||||
@@ -388,7 +411,7 @@ Please see [Calico eBPF troubleshooting guide](https://docs.projectcalico.org/ma
|
|||||||
|
|
||||||
## Wireguard Encryption
|
## Wireguard Encryption
|
||||||
|
|
||||||
Calico supports using Wireguard for encryption. Please see the docs on [encryptiong cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
|
Calico supports using Wireguard for encryption. Please see the docs on [encrypt cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).
|
||||||
|
|
||||||
To enable wireguard support:
|
To enable wireguard support:
|
||||||
|
|
||||||
|
|||||||
@@ -2,14 +2,14 @@
|
|||||||
|
|
||||||
## CentOS 7
|
## CentOS 7
|
||||||
|
|
||||||
The maximum python version offically supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
|
The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
|
||||||
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
|
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.
|
||||||
|
|
||||||
## CentOS 8
|
## CentOS 8
|
||||||
|
|
||||||
CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
|
CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
|
||||||
The only tested configuration for now is using Calico CNI
|
The only tested configuration for now is using Calico CNI
|
||||||
You need to add `calico_iptables_backend: "NFT"` or `calico_iptables_backend: "Auto"` to your configuration.
|
You need to add `calico_iptables_backend: "NFT"` to your configuration.
|
||||||
|
|
||||||
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
|
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
|
||||||
you need to ensure they are using iptables-nft.
|
you need to ensure they are using iptables-nft.
|
||||||
|
|||||||
72
docs/cgroups.md
Normal file
72
docs/cgroups.md
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
# cgroups
|
||||||
|
|
||||||
|
To avoid contention for resources between containers or impact on the host in Kubernetes, the kubelet components rely on cgroups to limit each container’s resource usage.
|
||||||
|
|
||||||
|
## Enforcing Node Allocatable
|
||||||
|
|
||||||
|
You can use `kubelet_enforce_node_allocatable` to set node allocatable enforcement.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
|
||||||
|
kubelet_enforce_node_allocatable: "pods"
|
||||||
|
# kubelet_enforce_node_allocatable: "pods,kube-reserved"
|
||||||
|
# kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that to enforce kube-reserved or system-reserved, `kube_reserved_cgroups` or `system_reserved_cgroups` needs to be specified respectively.
|
||||||
|
|
||||||
|
Here is an example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"
|
||||||
|
|
||||||
|
# Reserve this space for kube resources
|
||||||
|
# Set to true to reserve resources for kube daemons
|
||||||
|
kube_reserved: true
|
||||||
|
kube_reserved_cgroups_for_service_slice: kube.slice
|
||||||
|
kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
|
||||||
|
kube_memory_reserved: 256Mi
|
||||||
|
kube_cpu_reserved: 100m
|
||||||
|
# kube_ephemeral_storage_reserved: 2Gi
|
||||||
|
# kube_pid_reserved: "1000"
|
||||||
|
# Reservation for master hosts
|
||||||
|
kube_master_memory_reserved: 512Mi
|
||||||
|
kube_master_cpu_reserved: 200m
|
||||||
|
# kube_master_ephemeral_storage_reserved: 2Gi
|
||||||
|
# kube_master_pid_reserved: "1000"
|
||||||
|
|
||||||
|
# Set to true to reserve resources for system daemons
|
||||||
|
system_reserved: true
|
||||||
|
system_reserved_cgroups_for_service_slice: system.slice
|
||||||
|
system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}"
|
||||||
|
system_memory_reserved: 512Mi
|
||||||
|
system_cpu_reserved: 500m
|
||||||
|
# system_ephemeral_storage_reserved: 2Gi
|
||||||
|
# system_pid_reserved: "1000"
|
||||||
|
# Reservation for master hosts
|
||||||
|
system_master_memory_reserved: 256Mi
|
||||||
|
system_master_cpu_reserved: 250m
|
||||||
|
# system_master_ephemeral_storage_reserved: 2Gi
|
||||||
|
# system_master_pid_reserved: "1000"
|
||||||
|
```
|
||||||
|
|
||||||
|
After the setup, the cgroups hierarchy is as follows:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/ (Cgroups Root)
|
||||||
|
├── kubepods.slice
|
||||||
|
│ ├── ...
|
||||||
|
│ ├── kubepods-besteffort.slice
|
||||||
|
│ ├── kubepods-burstable.slice
|
||||||
|
│ └── ...
|
||||||
|
├── kube.slice
|
||||||
|
│ ├── ...
|
||||||
|
│ ├── {{container_manager}}.service
|
||||||
|
│ ├── kubelet.service
|
||||||
|
│ └── ...
|
||||||
|
├── system.slice
|
||||||
|
│ └── ...
|
||||||
|
└── ...
|
||||||
|
```
|
||||||
|
|
||||||
|
You can learn more in the [official kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/).
|
||||||
24
docs/ci.md
24
docs/ci.md
@@ -8,17 +8,19 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
|
|||||||
|---| --- | --- | --- | --- | --- | --- | --- | --- |
|
|---| --- | --- | --- | --- | --- | --- | --- | --- |
|
||||||
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
||||||
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
|
centos7 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
|
||||||
debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||||
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
||||||
fedora34 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
|
||||||
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
|
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
|
||||||
|
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
||||||
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux9 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
|
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
|
||||||
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
|
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
|
||||||
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|
||||||
## crio
|
## crio
|
||||||
|
|
||||||
@@ -30,13 +32,15 @@ centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|||||||
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|
||||||
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|
||||||
## docker
|
## docker
|
||||||
|
|
||||||
@@ -44,14 +48,16 @@ ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
|||||||
|---| --- | --- | --- | --- | --- | --- | --- | --- |
|
|---| --- | --- | --- | --- | --- | --- | --- | --- |
|
||||||
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||||
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ cilium_operator_extra_volume_mounts:
|
|||||||
## Choose Cilium version
|
## Choose Cilium version
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
cilium_version: v1.11.3
|
cilium_version: v1.12.1
|
||||||
```
|
```
|
||||||
|
|
||||||
## Add variable to config
|
## Add variable to config
|
||||||
@@ -121,6 +121,23 @@ cilium_encryption_type: "wireguard"
|
|||||||
|
|
||||||
Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer.
|
Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer.
|
||||||
|
|
||||||
|
## Bandwidth Manager
|
||||||
|
|
||||||
|
Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
|
||||||
|
|
||||||
|
Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
|
||||||
|
In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.
|
||||||
|
|
||||||
|
Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
|
||||||
|
|
||||||
|
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/)
|
||||||
|
|
||||||
|
To use this function, set the following parameters
|
||||||
|
|
||||||
|
```yml
|
||||||
|
cilium_enable_bandwidth_manager: true
|
||||||
|
```
|
||||||
|
|
||||||
## Install Cilium Hubble
|
## Install Cilium Hubble
|
||||||
|
|
||||||
k8s-net-cilium.yml:
|
k8s-net-cilium.yml:
|
||||||
@@ -153,3 +170,32 @@ cilium_hubble_metrics:
|
|||||||
```
|
```
|
||||||
|
|
||||||
[More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics)
|
[More](https://docs.cilium.io/en/v1.9/operations/metrics/#hubble-exported-metrics)
|
||||||
|
|
||||||
|
## Upgrade considerations
|
||||||
|
|
||||||
|
### Rolling-restart timeouts
|
||||||
|
|
||||||
|
Cilium relies on the kernel's BPF support, which is extremely fast at runtime but incurs a compilation penalty on initialization and update.
|
||||||
|
|
||||||
|
As a result, the Cilium DaemonSet pods can take a significant time to start, which scales with the number of nodes and endpoints in your cluster.
|
||||||
|
|
||||||
|
As part of cluster.yml, this DaemonSet is restarted, and Kubespray's [default timeouts for this operation](../roles/network_plugin/cilium/defaults/main.yml)
|
||||||
|
are not appropriate for large clusters.
|
||||||
|
|
||||||
|
This means that you will likely want to update these timeouts to a value more in-line with your cluster's number of nodes and their respective CPU performance.
|
||||||
|
This is configured by the following values:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Configure how long to wait for the Cilium DaemonSet to be ready again
|
||||||
|
cilium_rolling_restart_wait_retries_count: 30
|
||||||
|
cilium_rolling_restart_wait_retries_delay_seconds: 10
|
||||||
|
```
|
||||||
|
|
||||||
|
The total time allowed (count * delay) should be at least `($number_of_nodes_in_cluster * $cilium_pod_start_time)` for successful rolling updates. There are no
|
||||||
|
drawbacks to making it higher and giving yourself a time buffer to accommodate transient slowdowns.
|
||||||
|
|
||||||
|
Note: To find the `$cilium_pod_start_time` for your cluster, you can simply restart a Cilium pod on a node of your choice and look at how long it takes for it
|
||||||
|
to become ready.
|
||||||
|
|
||||||
|
Note 2: The default CPU requests/limits for Cilium pods is set to a very conservative 100m:500m which will likely yield very slow startup for Cilium pods. You
|
||||||
|
probably want to significantly increase the CPU limit specifically if short bursts of CPU from Cilium are acceptable to you.
|
||||||
|
|||||||
@@ -39,4 +39,68 @@ containerd_registries:
|
|||||||
image_command_tool: crictl
|
image_command_tool: crictl
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Containerd Runtimes
|
||||||
|
|
||||||
|
Containerd supports multiple runtime configurations that can be used with
|
||||||
|
[RuntimeClass] Kubernetes feature. See [runtime classes in containerd] for the
|
||||||
|
details of containerd configuration.
|
||||||
|
|
||||||
|
In kubespray, the default runtime name is "runc", and it can be configured with the `containerd_runc_runtime` dictionary:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
containerd_runc_runtime:
|
||||||
|
name: runc
|
||||||
|
type: "io.containerd.runc.v2"
|
||||||
|
engine: ""
|
||||||
|
root: ""
|
||||||
|
options:
|
||||||
|
systemdCgroup: "false"
|
||||||
|
binaryName: /usr/local/bin/my-runc
|
||||||
|
base_runtime_spec: cri-base.json
|
||||||
|
```
|
||||||
|
|
||||||
|
Further runtimes can be configured with `containerd_additional_runtimes`, which
|
||||||
|
is a list of such dictionaries.
|
||||||
|
|
||||||
|
Default runtime can be changed by setting `containerd_default_runtime`.
|
||||||
|
|
||||||
|
#### Base runtime specs and limiting number of open files
|
||||||
|
|
||||||
|
`base_runtime_spec` key in a runtime dictionary is used to explicitly
|
||||||
|
specify a runtime spec json file. `runc` runtime has it set to `cri-base.json`,
|
||||||
|
which is generated with `ctr oci spec > /etc/containerd/cri-base.json` and
|
||||||
|
updated to include a custom setting for maximum number of file descriptors per
|
||||||
|
container.
|
||||||
|
|
||||||
|
You can change maximum number of file descriptors per container for the default
|
||||||
|
`runc` runtime by setting the `containerd_base_runtime_spec_rlimit_nofile`
|
||||||
|
variable.
|
||||||
|
|
||||||
|
You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
containerd_base_runtime_specs:
|
||||||
|
cri-spec-custom.json: |
|
||||||
|
{
|
||||||
|
"ociVersion": "1.0.2-dev",
|
||||||
|
"process": {
|
||||||
|
"user": {
|
||||||
|
"uid": 0,
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
The files in this dict will be placed in containerd config directory,
|
||||||
|
`/etc/containerd` by default. The files can then be referenced by filename in a
|
||||||
|
runtime:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
containerd_runc_runtime:
|
||||||
|
name: runc
|
||||||
|
base_runtime_spec: cri-spec-custom.json
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
[containerd]: https://containerd.io/
|
[containerd]: https://containerd.io/
|
||||||
|
[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
|
||||||
|
[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
|
||||||
|
[runtime-spec]: https://github.com/opencontainers/runtime-spec
|
||||||
|
|||||||
@@ -3,34 +3,39 @@
|
|||||||
Debian Jessie installation Notes:
|
Debian Jessie installation Notes:
|
||||||
|
|
||||||
- Add
|
- Add
|
||||||
|
|
||||||
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
|
```ini
|
||||||
|
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
|
||||||
to /etc/default/grub. Then update with
|
|
||||||
|
|
||||||
```ShellSession
|
|
||||||
sudo update-grub
|
|
||||||
sudo update-grub2
|
|
||||||
sudo reboot
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
to `/etc/default/grub`. Then update with
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
sudo update-grub
|
||||||
|
sudo update-grub2
|
||||||
|
sudo reboot
|
||||||
|
```
|
||||||
|
|
||||||
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
||||||
|
|
||||||
```apt-get -t jessie-backports install systemd```
|
```ShellSession
|
||||||
|
apt-get -t jessie-backports install systemd
|
||||||
|
```
|
||||||
|
|
||||||
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
||||||
|
|
||||||
- Add the Ansible repository and install Ansible to get a proper version
|
- Add the Ansible repository and install Ansible to get a proper version
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
sudo add-apt-repository ppa:ansible/ansible
|
sudo add-apt-repository ppa:ansible/ansible
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install ansible
|
sudo apt-get install ansible
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- Install Jinja2 and Python-Netaddr
|
- Install Jinja2 and Python-Netaddr
|
||||||
|
|
||||||
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
|
```ShellSession
|
||||||
|
sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr
|
||||||
|
```
|
||||||
|
|
||||||
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||||
|
|||||||
@@ -19,6 +19,14 @@ ndots value to be used in ``/etc/resolv.conf``
|
|||||||
It is important to note that multiple search domains combined with high ``ndots``
|
It is important to note that multiple search domains combined with high ``ndots``
|
||||||
values lead to poor performance of DNS stack, so please choose it wisely.
|
values lead to poor performance of DNS stack, so please choose it wisely.
|
||||||
|
|
||||||
|
## dns_timeout
|
||||||
|
|
||||||
|
timeout value to be used in ``/etc/resolv.conf``
|
||||||
|
|
||||||
|
## dns_attempts
|
||||||
|
|
||||||
|
attempts value to be used in ``/etc/resolv.conf``
|
||||||
|
|
||||||
### searchdomains
|
### searchdomains
|
||||||
|
|
||||||
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
||||||
@@ -26,6 +34,8 @@ Custom search domains to be added in addition to the cluster search domains (``d
|
|||||||
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
|
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
|
||||||
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
|
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
|
||||||
|
|
||||||
|
`remove_default_searchdomains: true` will remove the default cluster search domains.
|
||||||
|
|
||||||
Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
|
Please note that ``resolvconf_mode: docker_dns`` will automatically add your systems search domains as
|
||||||
additional search domains. Please take this into the accounts for the limits.
|
additional search domains. Please take this into account for these limits.
|
||||||
|
|
||||||
@@ -40,6 +50,20 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8
|
|||||||
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
|
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
|
||||||
DNS servers in early cluster deployment when no cluster DNS is available yet.
|
DNS servers in early cluster deployment when no cluster DNS is available yet.
|
||||||
|
|
||||||
|
### dns_upstream_forward_extra_opts
|
||||||
|
|
||||||
|
Whether or not upstream DNS servers come from `upstream_dns_servers` variable or /etc/resolv.conf, related forward block in coredns (and nodelocaldns) configuration can take options (see <https://coredns.io/plugins/forward/> for details).
|
||||||
|
These are configurable in the inventory as a dictionary in the `dns_upstream_forward_extra_opts` variable.
|
||||||
|
By default, no options other than the hardcoded ones are set (see `roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2` and `roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2`).
|
||||||
|
|
||||||
|
### coredns_kubernetes_extra_opts
|
||||||
|
|
||||||
|
Custom options to be added to the kubernetes coredns plugin.
|
||||||
|
|
||||||
|
### coredns_kubernetes_extra_domains
|
||||||
|
|
||||||
|
Extra domains to be forwarded to the kubernetes coredns plugin.
|
||||||
|
|
||||||
### coredns_external_zones
|
### coredns_external_zones
|
||||||
|
|
||||||
Array of optional external zones to coredns forward queries to. It's injected into
|
Array of optional external zones to coredns forward queries to. It's injected into
|
||||||
@@ -62,6 +86,13 @@ coredns_external_zones:
|
|||||||
nameservers:
|
nameservers:
|
||||||
- 192.168.0.53
|
- 192.168.0.53
|
||||||
cache: 0
|
cache: 0
|
||||||
|
- zones:
|
||||||
|
- mydomain.tld
|
||||||
|
nameservers:
|
||||||
|
- 10.233.0.3
|
||||||
|
cache: 5
|
||||||
|
rewrite:
|
||||||
|
- name stop website.tld website.namespace.svc.cluster.local
|
||||||
```
|
```
|
||||||
|
|
||||||
or as INI
|
or as INI
|
||||||
@@ -207,7 +238,7 @@ cluster service names.
|
|||||||
|
|
||||||
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).
|
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).
|
||||||
|
|
||||||
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
|
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1024-nodelocal-cache-dns/README.md).
|
||||||
|
|
||||||
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
|
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
|
||||||
|
|
||||||
@@ -236,7 +267,7 @@ See [dns_etchosts](#dns_etchosts-coredns) above.
|
|||||||
|
|
||||||
### Nodelocal DNS HA
|
### Nodelocal DNS HA
|
||||||
|
|
||||||
Under some circumstances the single POD nodelocaldns implementation may not be able to be replaced soon enough and a cluster upgrade or a nodelocaldns upgrade can cause DNS requests to time out for short intervals. If for any reason your applications cannot tollerate this behavior you can enable a redundant nodelocal DNS pod on each node:
|
Under some circumstances the single POD nodelocaldns implementation may not be able to be replaced soon enough and a cluster upgrade or a nodelocaldns upgrade can cause DNS requests to time out for short intervals. If for any reason your applications cannot tolerate this behavior you can enable a redundant nodelocal DNS pod on each node:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
enable_nodelocaldns_secondary: true
|
enable_nodelocaldns_secondary: true
|
||||||
@@ -263,7 +294,8 @@ nodelocaldns_secondary_skew_seconds: 5
|
|||||||
|
|
||||||
* the ``searchdomains`` have a limitation of a 6 names and 256 chars
|
* the ``searchdomains`` have a limitation of 6 names and 256 chars
|
||||||
length. Due to default ``svc, default.svc`` subdomains, the actual
|
length. Due to default ``svc, default.svc`` subdomains, the actual
|
||||||
limits are a 4 names and 239 chars respectively.
|
limits are a 4 names and 239 chars respectively. If `remove_default_searchdomains: true`
|
||||||
|
is set, you are back to 6 names.
|
||||||
|
|
||||||
* the ``nameservers`` have a limitation of a 3 servers, although there
|
* the ``nameservers`` have a limitation of 3 servers, although there
|
||||||
is a way to mitigate that with the ``upstream_dns_servers``,
|
is a way to mitigate that with the ``upstream_dns_servers``,
|
||||||
|
|||||||
@@ -8,13 +8,7 @@ Using the docker container manager:
|
|||||||
container_manager: docker
|
container_manager: docker
|
||||||
```
|
```
|
||||||
|
|
||||||
Using `cri-dockerd` instead of `dockershim`:
|
*Note:* `cri-dockerd` has replaced `dockershim` across supported kubernetes version in kubespray 2.20.
|
||||||
|
|
||||||
```yaml
|
|
||||||
cri_dockerd_enabled: false
|
|
||||||
```
|
|
||||||
|
|
||||||
*Note:* The `cri_dockerd_enabled: true` setting will become the default in a future kubespray release once kubespray 1.24+ is supported and `dockershim` is removed. At that point, changing this option will be deprecated and silently ignored.
|
|
||||||
|
|
||||||
Enabling the `overlay2` graph driver:
|
Enabling the `overlay2` graph driver:
|
||||||
|
|
||||||
@@ -61,7 +55,7 @@ Docker log options:
|
|||||||
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
|
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
|
||||||
```
|
```
|
||||||
|
|
||||||
Changre the docker `bin_dir`, this should not be changed unless you use a custom docker package:
|
Change the docker `bin_dir`, this should not be changed unless you use a custom docker package:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
docker_bin_dir: "/usr/bin"
|
docker_bin_dir: "/usr/bin"
|
||||||
|
|||||||
@@ -54,7 +54,7 @@ Prepare ignition and serve via http (a.e. python -m http.server )
|
|||||||
|
|
||||||
### create guest
|
### create guest
|
||||||
|
|
||||||
```shell script
|
```ShellSession
|
||||||
machine_name=myfcos1
|
machine_name=myfcos1
|
||||||
ignition_url=http://mywebserver/fcos.ign
|
ignition_url=http://mywebserver/fcos.ign
|
||||||
|
|
||||||
|
|||||||
@@ -2,6 +2,8 @@
|
|||||||
|
|
||||||
Flannel is a network fabric for containers, designed for Kubernetes
|
Flannel is a network fabric for containers, designed for Kubernetes
|
||||||
|
|
||||||
|
Supported [backends](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard): `vxlan`, `host-gw` and `wireguard`
|
||||||
|
|
||||||
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with `VXLAN` backend, while waiting on a newer Flannel version the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcase in kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
|
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with `VXLAN` backend, while waiting on a newer Flannel version the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcase in kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
|
||||||
|
|
||||||
## Verifying flannel install
|
## Verifying flannel install
|
||||||
|
|||||||
@@ -2,15 +2,19 @@
|
|||||||
|
|
||||||
Google Cloud Platform can be used for creation of Kubernetes Service Load Balancer.
|
Google Cloud Platform can be used for creation of Kubernetes Service Load Balancer.
|
||||||
|
|
||||||
This feature is able to deliver by adding parameters to kube-controller-manager and kubelet. You need specify:
|
This feature can be delivered by adding parameters to `kube-controller-manager` and `kubelet`. You need to specify:
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
--cloud-provider=gce
|
--cloud-provider=gce
|
||||||
--cloud-config=/etc/kubernetes/cloud-config
|
--cloud-config=/etc/kubernetes/cloud-config
|
||||||
|
```
|
||||||
|
|
||||||
To get working it in kubespray, you need to add tag to GCE instances and specify it in kubespray group vars and also set cloud_provider to gce. So for example, in file group_vars/all/gcp.yml:
|
To get it working in kubespray, you need to add a tag to the GCE instances, specify it in the kubespray group vars, and also set `cloud_provider` to `gce`. So for example, in file `group_vars/all/gcp.yml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
cloud_provider: gce
|
cloud_provider: gce
|
||||||
gce_node_tags: k8s-lb
|
gce_node_tags: k8s-lb
|
||||||
|
```
|
||||||
|
|
||||||
When you will setup it and create SVC in Kubernetes with type=LoadBalancer, cloud provider will create public IP and will set firewall.
|
When you set it up and create an SVC in Kubernetes with `type=LoadBalancer`, the cloud provider will create a public IP and configure the firewall.
|
||||||
Note: Cloud provider run under VM service account, so this account needs to have correct permissions to be able to create all GCP resources.
|
Note: The cloud provider runs under the VM service account, so this account needs to have the correct permissions to be able to create all GCP resources.
|
||||||
|
|||||||
@@ -36,12 +36,6 @@ The following diagram shows how traffic to the apiserver is directed.
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
Note: Kubernetes master nodes still use insecure localhost access because
|
|
||||||
there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
|
|
||||||
services. This makes backends receiving unencrypted traffic and may be a
|
|
||||||
security issue when interconnecting different nodes, or maybe not, if those
|
|
||||||
belong to the isolated management network without external access.
|
|
||||||
|
|
||||||
A user may opt to use an external loadbalancer (LB) instead. An external LB
|
A user may opt to use an external loadbalancer (LB) instead. An external LB
|
||||||
provides access for external clients, while the internal LB accepts client
|
provides access for external clients, while the internal LB accepts client
|
||||||
connections only to the localhost.
|
connections only to the localhost.
|
||||||
@@ -129,11 +123,6 @@ Kubespray has nothing to do with it, this is informational only.
|
|||||||
As you can see, the masters' internal API endpoints are always
|
As you can see, the masters' internal API endpoints are always
|
||||||
contacted via the local bind IP, which is `https://bip:sp`.
|
contacted via the local bind IP, which is `https://bip:sp`.
|
||||||
|
|
||||||
**Note** that for some cases, like healthchecks of applications deployed by
|
|
||||||
Kubespray, the masters' APIs are accessed via the insecure endpoint, which
|
|
||||||
consists of the local `kube_apiserver_insecure_bind_address` and
|
|
||||||
`kube_apiserver_insecure_port`.
|
|
||||||
|
|
||||||
## Optional configurations
|
## Optional configurations
|
||||||
|
|
||||||
### ETCD with a LB
|
### ETCD with a LB
|
||||||
|
|||||||
@@ -17,9 +17,9 @@ The **kubernetes** version should be at least `v1.23.6` to have all the most rec
|
|||||||
---
|
---
|
||||||
|
|
||||||
## kube-apiserver
|
## kube-apiserver
|
||||||
authorization_modes: ['Node','RBAC']
|
authorization_modes: ['Node', 'RBAC']
|
||||||
# AppArmor-based OS
|
# AppArmor-based OS
|
||||||
#kube_apiserver_feature_gates: ['AppArmor=true']
|
# kube_apiserver_feature_gates: ['AppArmor=true']
|
||||||
kube_apiserver_request_timeout: 120s
|
kube_apiserver_request_timeout: 120s
|
||||||
kube_apiserver_service_account_lookup: true
|
kube_apiserver_service_account_lookup: true
|
||||||
|
|
||||||
@@ -41,7 +41,18 @@ kube_encrypt_secret_data: true
|
|||||||
kube_encryption_resources: [secrets]
|
kube_encryption_resources: [secrets]
|
||||||
kube_encryption_algorithm: "secretbox"
|
kube_encryption_algorithm: "secretbox"
|
||||||
|
|
||||||
kube_apiserver_enable_admission_plugins: ['EventRateLimit,AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity']
|
kube_apiserver_enable_admission_plugins:
|
||||||
|
- EventRateLimit
|
||||||
|
- AlwaysPullImages
|
||||||
|
- ServiceAccount
|
||||||
|
- NamespaceLifecycle
|
||||||
|
- NodeRestriction
|
||||||
|
- LimitRanger
|
||||||
|
- ResourceQuota
|
||||||
|
- MutatingAdmissionWebhook
|
||||||
|
- ValidatingAdmissionWebhook
|
||||||
|
- PodNodeSelector
|
||||||
|
- PodSecurity
|
||||||
kube_apiserver_admission_control_config_file: true
|
kube_apiserver_admission_control_config_file: true
|
||||||
# EventRateLimit plugin configuration
|
# EventRateLimit plugin configuration
|
||||||
kube_apiserver_admission_event_rate_limits:
|
kube_apiserver_admission_event_rate_limits:
|
||||||
@@ -60,7 +71,7 @@ kube_profiling: false
|
|||||||
kube_controller_manager_bind_address: 127.0.0.1
|
kube_controller_manager_bind_address: 127.0.0.1
|
||||||
kube_controller_terminated_pod_gc_threshold: 50
|
kube_controller_terminated_pod_gc_threshold: 50
|
||||||
# AppArmor-based OS
|
# AppArmor-based OS
|
||||||
#kube_controller_feature_gates: ["RotateKubeletServerCertificate=true","AppArmor=true"]
|
# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
|
||||||
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]
|
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]
|
||||||
|
|
||||||
## kube-scheduler
|
## kube-scheduler
|
||||||
@@ -68,12 +79,13 @@ kube_scheduler_bind_address: 127.0.0.1
|
|||||||
kube_kubeadm_scheduler_extra_args:
|
kube_kubeadm_scheduler_extra_args:
|
||||||
profiling: false
|
profiling: false
|
||||||
# AppArmor-based OS
|
# AppArmor-based OS
|
||||||
#kube_scheduler_feature_gates: ["AppArmor=true"]
|
# kube_scheduler_feature_gates: ["AppArmor=true"]
|
||||||
|
|
||||||
## etcd
|
## etcd
|
||||||
etcd_deployment_type: kubeadm
|
etcd_deployment_type: kubeadm
|
||||||
|
|
||||||
## kubelet
|
## kubelet
|
||||||
|
kubelet_authorization_mode_webhook: true
|
||||||
kubelet_authentication_token_webhook: true
|
kubelet_authentication_token_webhook: true
|
||||||
kube_read_only_port: 0
|
kube_read_only_port: 0
|
||||||
kubelet_rotate_server_certificates: true
|
kubelet_rotate_server_certificates: true
|
||||||
@@ -82,7 +94,24 @@ kubelet_event_record_qps: 1
|
|||||||
kubelet_rotate_certificates: true
|
kubelet_rotate_certificates: true
|
||||||
kubelet_streaming_connection_idle_timeout: "5m"
|
kubelet_streaming_connection_idle_timeout: "5m"
|
||||||
kubelet_make_iptables_util_chains: true
|
kubelet_make_iptables_util_chains: true
|
||||||
kubelet_feature_gates: ["RotateKubeletServerCertificate=true"]
|
kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
|
||||||
|
kubelet_seccomp_default: true
|
||||||
|
kubelet_systemd_hardening: true
|
||||||
|
# In case you have multiple interfaces in your
|
||||||
|
# control plane nodes and you want to specify the right
|
||||||
|
# IP addresses, kubelet_secure_addresses allows you
|
||||||
|
# to specify the IP from which the kubelet
|
||||||
|
# will receive the packets.
|
||||||
|
kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
|
||||||
|
|
||||||
|
# additional configurations
|
||||||
|
kube_owner: root
|
||||||
|
kube_cert_group: root
|
||||||
|
|
||||||
|
# create a default Pod Security Configuration and deny running of insecure pods
|
||||||
|
# kube_system namespace is exempted by default
|
||||||
|
kube_pod_security_use_default: true
|
||||||
|
kube_pod_security_default_enforce: restricted
|
||||||
```
|
```
|
||||||
|
|
||||||
Let's take a deep look to the resultant **kubernetes** configuration:
|
Let's take a deep look to the resultant **kubernetes** configuration:
|
||||||
@@ -92,6 +121,8 @@ Let's take a deep look to the resultant **kubernetes** configuration:
|
|||||||
* The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
|
* The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
|
||||||
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>).
|
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself, but you need to manually approve them or at least using an operator to do this (for more details, please take a look here: <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/>).
|
||||||
* If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
|
* If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
|
||||||
|
* The `kubelet_systemd_hardening`, both with `kubelet_secure_addresses` setup a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
|
||||||
|

|
||||||
|
|
||||||
Once you have the file properly filled, you can run the **Ansible** command to start the installation:
|
Once you have the file properly filled, you can run the **Ansible** command to start the installation:
|
||||||
|
|
||||||
|
|||||||
BIN
docs/img/kubelet-hardening.png
Normal file
BIN
docs/img/kubelet-hardening.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.5 MiB |
@@ -124,7 +124,7 @@ By default NGINX `keepalive_timeout` is set to `75s`.
|
|||||||
The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) has been modified,
|
The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) has been modified,
|
||||||
in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be modified to ensure it is less than the `keepalive_timeout` the user has configured.
|
in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be modified to ensure it is less than the `keepalive_timeout` the user has configured.
|
||||||
|
|
||||||
_Please Note: An idle timeout of `3600s` is recommended when using WebSockets._
|
*Please Note: An idle timeout of `3600s` is recommended when using WebSockets.*
|
||||||
|
|
||||||
More information with regards to idle timeouts for your Load Balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html).
|
More information with regards to idle timeouts for your Load Balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html).
|
||||||
|
|
||||||
|
|||||||
@@ -6,84 +6,100 @@
|
|||||||
* List of all forked repos could be retrieved from github page of original project.
|
* List of all forked repos could be retrieved from github page of original project.
|
||||||
|
|
||||||
2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
|
2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
|
||||||
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
|
|
||||||
Git will create `.gitmodules` file in your existent ansible repo:
|
```ShellSession
|
||||||
|
git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray
|
||||||
|
```
|
||||||
|
|
||||||
|
Git will create `.gitmodules` file in your existent ansible repo:
|
||||||
|
|
||||||
```ini
|
```ini
|
||||||
[submodule "3d/kubespray"]
|
[submodule "3d/kubespray"]
|
||||||
path = 3d/kubespray
|
path = 3d/kubespray
|
||||||
url = https://github.com/YOUR_GITHUB/kubespray.git
|
url = https://github.com/YOUR_GITHUB/kubespray.git
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Configure git to show submodule status:
|
3. Configure git to show submodule status:
|
||||||
```git config --global status.submoduleSummary true```
|
|
||||||
|
```ShellSession
|
||||||
|
git config --global status.submoduleSummary true
|
||||||
|
```
|
||||||
|
|
||||||
4. Add *original* kubespray repo as upstream:
|
4. Add *original* kubespray repo as upstream:
|
||||||
```cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```
|
|
||||||
|
```ShellSession
|
||||||
|
cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git
|
||||||
|
```
|
||||||
|
|
||||||
5. Sync your master branch with upstream:
|
5. Sync your master branch with upstream:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git checkout master
|
git checkout master
|
||||||
git fetch upstream
|
git fetch upstream
|
||||||
git merge upstream/master
|
git merge upstream/master
|
||||||
git push origin master
|
git push origin master
|
||||||
```
|
```
|
||||||
|
|
||||||
6. Create a new branch which you will use in your working environment:
|
6. Create a new branch which you will use in your working environment:
|
||||||
```git checkout -b work```
|
|
||||||
|
```ShellSession
|
||||||
|
git checkout -b work
|
||||||
|
```
|
||||||
|
|
||||||
***Never*** use master branch of your repository for your commits.
|
***Never*** use master branch of your repository for your commits.
|
||||||
|
|
||||||
7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have same names as kubespray project),
|
7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existent roles if they have same names as kubespray project),
|
||||||
if you had roles in your existing ansible project before, you can add the path to those separated with `:`:
|
if you had roles in your existing ansible project before, you can add the path to those separated with `:`:
|
||||||
|
|
||||||
8. ```ini
|
```ini
|
||||||
...
|
...
|
||||||
library = ./library/:3d/kubespray/library/
|
library = ./library/:3d/kubespray/library/
|
||||||
roles_path = ./roles/:3d/kubespray/roles/
|
roles_path = ./roles/:3d/kubespray/roles/
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
9. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
|
8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
|
||||||
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
|
|
||||||
|
|
||||||
10. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
|
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
|
||||||
|
|
||||||
|
9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
```ini
|
```ini
|
||||||
...
|
...
|
||||||
#Kargo groups:
|
#Kubespray groups:
|
||||||
[kube_node:children]
|
[kube_node:children]
|
||||||
kubenode
|
kubenode
|
||||||
|
|
||||||
[k8s_cluster:children]
|
[k8s_cluster:children]
|
||||||
kubernetes
|
kubernetes
|
||||||
|
|
||||||
[etcd:children]
|
[etcd:children]
|
||||||
kubemaster
|
kubemaster
|
||||||
kubemaster-ha
|
kubemaster-ha
|
||||||
|
|
||||||
[kube_control_plane:children]
|
[kube_control_plane:children]
|
||||||
kubemaster
|
kubemaster
|
||||||
kubemaster-ha
|
kubemaster-ha
|
||||||
|
|
||||||
[kubespray:children]
|
[kubespray:children]
|
||||||
kubernetes
|
kubernetes
|
||||||
```
|
```
|
||||||
|
|
||||||
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
|
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
|
||||||
|
|
||||||
11. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
|
10. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
- name: Include kubespray tasks
|
- name: Import kubespray playbook
|
||||||
include: 3d/kubespray/cluster.yml
|
ansible.builtin.import_playbook: 3d/kubespray/cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Or you could copy separate tasks from cluster.yml into your ansible repository.
|
Or you could copy separate tasks from cluster.yml into your ansible repository.
|
||||||
|
|
||||||
12. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
|
11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
|
||||||
When you update your "work" branch you need to commit changes to ansible repo as well.
|
|
||||||
|
When you update your "work" branch you need to commit changes to ansible repo as well.
|
||||||
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
|
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
@@ -95,37 +111,78 @@ If you made useful changes or fixed a bug in existent kubespray repo, use this f
|
|||||||
2. Change working directory to git submodule directory (3d/kubespray).
|
2. Change working directory to git submodule directory (3d/kubespray).
|
||||||
|
|
||||||
3. Setup desired user.name and user.email for submodule.
|
3. Setup desired user.name and user.email for submodule.
|
||||||
If kubespray is only one submodule in your repo you could use something like:
|
|
||||||
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```
|
If kubespray is only one submodule in your repo you could use something like:
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'
|
||||||
|
```
|
||||||
|
|
||||||
4. Sync with upstream master:
|
4. Sync with upstream master:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git fetch upstream
|
git fetch upstream
|
||||||
git merge upstream/master
|
git merge upstream/master
|
||||||
git push origin master
|
git push origin master
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Create new branch for the specific fixes that you want to contribute:
|
5. Create new branch for the specific fixes that you want to contribute:
|
||||||
```git checkout -b fixes-name-date-index```
|
|
||||||
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
|
```ShellSession
|
||||||
|
git checkout -b fixes-name-date-index
|
||||||
|
```
|
||||||
|
|
||||||
|
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
|
||||||
|
|
||||||
6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
|
6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git cherry-pick <COMMIT_HASH>
|
git cherry-pick <COMMIT_HASH>
|
||||||
```
|
```
|
||||||
|
|
||||||
7. If you have several temporary-stage commits - squash them using [```git rebase -i```](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
7. If you have several temporary-stage commits - squash them using [git rebase -i](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||||
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
|
|
||||||
|
Also you could use interactive rebase
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git rebase -i HEAD~10
|
||||||
|
```
|
||||||
|
|
||||||
|
to delete commits which you don't want to contribute into original repo.
|
||||||
|
|
||||||
8. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
|
8. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
|
||||||
Check that you're on correct branch:
|
|
||||||
```git status```
|
|
||||||
And pull changes from upstream (if any):
|
|
||||||
```git pull --rebase upstream master```
|
|
||||||
|
|
||||||
9. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exist on github, git will suggest that you use something like ```git push --set-upstream origin fixes-name-date-index```.
|
Check that you're on correct branch:
|
||||||
|
|
||||||
10. Open your forked repo in a browser; on the main page you will see a proposal to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you can safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start the whole process from the beginning.
|
```ShellSession
|
||||||
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
|
git status
|
||||||
|
```
|
||||||
|
|
||||||
|
And pull changes from upstream (if any):
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git pull --rebase upstream master
|
||||||
|
```
|
||||||
|
|
||||||
|
9. Now push your changes to your **fork** repo with
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git push
|
||||||
|
```
|
||||||
|
|
||||||
|
If your branch doesn't exist on github, git will suggest that you use something like
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git push --set-upstream origin fixes-name-date-index
|
||||||
|
```
|
||||||
|
|
||||||
|
10. Open your forked repo in a browser; on the main page you will see a proposal to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you can safely delete the "fix" branch on github using
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
git push origin --delete fixes-name-date-index
|
||||||
|
git branch -D fixes-name-date-index
|
||||||
|
```
|
||||||
|
|
||||||
|
and start whole process from the beginning.
|
||||||
|
|
||||||
|
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
|
||||||
|
|||||||
@@ -2,6 +2,14 @@
|
|||||||
|
|
||||||
kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.
|
kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
You have to configure `kube_proxy_strict_arp` when the kube_proxy_mode is `ipvs` and kube-vip ARP is enabled.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kube_proxy_strict_arp: true
|
||||||
|
```
|
||||||
|
|
||||||
## Install
|
## Install
|
||||||
|
|
||||||
You have to explicitly enable the kube-vip extension:
|
You have to explicitly enable the kube-vip extension:
|
||||||
@@ -11,7 +19,7 @@ kube_vip_enabled: true
|
|||||||
```
|
```
|
||||||
|
|
||||||
You also need to enable
|
You also need to enable
|
||||||
[kube-vip as HA, Load Balancer, or both](https://kube-vip.chipzoller.dev/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
|
[kube-vip as HA, Load Balancer, or both](https://kube-vip.io/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# HA for control-plane, requires a VIP
|
# HA for control-plane, requires a VIP
|
||||||
@@ -28,16 +36,22 @@ kube_vip_services_enabled: false
|
|||||||
```
|
```
|
||||||
|
|
||||||
> Note: When using `kube-vip` as LoadBalancer for services,
|
> Note: When using `kube-vip` as LoadBalancer for services,
|
||||||
[additionnal manual steps](https://kube-vip.chipzoller.dev/docs/usage/cloud-provider/)
|
[additional manual steps](https://kube-vip.io/docs/usage/cloud-provider/)
|
||||||
are needed.
|
are needed.
|
||||||
|
|
||||||
If using [ARP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#arp) :
|
If using [local traffic policy](https://kube-vip.io/docs/usage/kubernetes-services/#external-traffic-policy-kube-vip-v050):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kube_vip_enableServicesElection: true
|
||||||
|
```
|
||||||
|
|
||||||
|
If using [ARP mode](https://kube-vip.io/docs/installation/static/#arp) :
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
kube_vip_arp_enabled: true
|
kube_vip_arp_enabled: true
|
||||||
```
|
```
|
||||||
|
|
||||||
If using [BGP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#bgp) :
|
If using [BGP mode](https://kube-vip.io/docs/installation/static/#bgp) :
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
kube_vip_bgp_enabled: true
|
kube_vip_bgp_enabled: true
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
# Local Storage Provisioner
|
# Local Static Storage Provisioner
|
||||||
|
|
||||||
The [local storage provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume)
|
The [local static storage provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner)
|
||||||
is NOT a dynamic storage provisioner as you would
|
is NOT a dynamic storage provisioner as you would
|
||||||
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
|
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
|
||||||
all mounts under the host_dir of the specified storage class.
|
all mounts under the `host_dir` of the specified storage class.
|
||||||
These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary.
|
These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
@@ -16,15 +17,18 @@ local_volume_provisioner_storage_classes:
|
|||||||
host_dir: /mnt/fast-disks
|
host_dir: /mnt/fast-disks
|
||||||
mount_dir: /mnt/fast-disks
|
mount_dir: /mnt/fast-disks
|
||||||
block_cleaner_command:
|
block_cleaner_command:
|
||||||
- "/scripts/shred.sh"
|
- "/scripts/shred.sh"
|
||||||
- "2"
|
- "2"
|
||||||
volume_mode: Filesystem
|
volume_mode: Filesystem
|
||||||
fs_type: ext4
|
fs_type: ext4
|
||||||
```
|
```
|
||||||
|
|
||||||
For each key in `local_volume_provisioner_storage_classes` a storageClass with the
|
For each key in `local_volume_provisioner_storage_classes` a "storage class" with
|
||||||
same name is created. The subkeys of each storage class are converted to camelCase and added
|
the same name is created in the entry `storageClassMap` of the ConfigMap `local-volume-provisioner`.
|
||||||
as attributes to the storageClass.
|
The subkeys of each storage class in `local_volume_provisioner_storage_classes`
|
||||||
|
are converted to camelCase and added as attributes to the storage class in the
|
||||||
|
ConfigMap.
|
||||||
|
|
||||||
The result of the above example is:
|
The result of the above example is:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
@@ -43,80 +47,85 @@ data:
|
|||||||
fsType: ext4
|
fsType: ext4
|
||||||
```
|
```
|
||||||
|
|
||||||
The default StorageClass is local-storage on /mnt/disks,
|
Additionally, a StorageClass object (`storageclasses.storage.k8s.io`) is also
|
||||||
the rest of this doc will use that path as an example.
|
created for each storage class:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ kubectl get storageclasses.storage.k8s.io
|
||||||
|
NAME PROVISIONER RECLAIMPOLICY
|
||||||
|
fast-disks kubernetes.io/no-provisioner Delete
|
||||||
|
local-storage kubernetes.io/no-provisioner Delete
|
||||||
|
```
|
||||||
|
|
||||||
|
The default StorageClass is `local-storage` on `/mnt/disks`;
|
||||||
|
the rest of this documentation will use that path as an example.
|
||||||
|
|
||||||
## Examples to create local storage volumes
|
## Examples to create local storage volumes
|
||||||
|
|
||||||
1. tmpfs method:
|
1. Using tmpfs
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
for vol in vol1 vol2 vol3; do
|
for vol in vol1 vol2 vol3; do
|
||||||
mkdir /mnt/disks/$vol
|
mkdir /mnt/disks/$vol
|
||||||
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
|
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
|
||||||
done
|
done
|
||||||
```
|
```
|
||||||
|
|
||||||
The tmpfs method is not recommended for production because the mount is not
|
The tmpfs method is not recommended for production because the mounts are not
|
||||||
persistent and data will be deleted on reboot.
|
persistent and data will be deleted on reboot.
|
||||||
|
|
||||||
1. Mount physical disks
|
1. Mount physical disks
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
mkdir /mnt/disks/ssd1
|
mkdir /mnt/disks/ssd1
|
||||||
mount /dev/vdb1 /mnt/disks/ssd1
|
mount /dev/vdb1 /mnt/disks/ssd1
|
||||||
```
|
```
|
||||||
|
|
||||||
Physical disks are recommended for production environments because they offer
|
Physical disks are recommended for production environments because they offer
|
||||||
complete isolation in terms of I/O and capacity.
|
complete isolation in terms of I/O and capacity.
|
||||||
|
|
||||||
1. Mount unpartitioned physical devices
|
1. Mount unpartitioned physical devices
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
for disk in /dev/sdc /dev/sdd /dev/sde; do
|
for disk in /dev/sdc /dev/sdd /dev/sde; do
|
||||||
ln -s $disk /mnt/disks
|
ln -s $disk /mnt/disks
|
||||||
done
|
done
|
||||||
```
|
```
|
||||||
|
|
||||||
This saves time of precreating filesystems. Note that your storageclass must have
|
This saves time of precreating filesystems. Note that your storageclass must have
|
||||||
volume_mode set to "Filesystem" and fs_type defined. If either is not set, the
|
`volume_mode` set to `"Filesystem"` and `fs_type` defined. If either is not set, the
|
||||||
disk will be added as a raw block device.
|
disk will be added as a raw block device.
|
||||||
|
|
||||||
|
1. PersistentVolumes with `volumeMode="Block"`
|
||||||
|
|
||||||
|
Just like above, you can create PersistentVolumes with volumeMode `Block`
|
||||||
|
by creating a symbolic link under discovery directory to the block device on
|
||||||
|
the node, if you set `volume_mode` to `"Block"`. This will create a volume
|
||||||
|
presented into a Pod as a block device, without any filesystem on it.
|
||||||
|
|
||||||
1. File-backed sparsefile method
|
1. File-backed sparsefile method
|
||||||
|
|
||||||
``` bash
|
```bash
|
||||||
truncate /mnt/disks/disk5 --size 2G
|
truncate /mnt/disks/disk5 --size 2G
|
||||||
mkfs.ext4 /mnt/disks/disk5
|
mkfs.ext4 /mnt/disks/disk5
|
||||||
mkdir /mnt/disks/vol5
|
mkdir /mnt/disks/vol5
|
||||||
mount /mnt/disks/disk5 /mnt/disks/vol5
|
mount /mnt/disks/disk5 /mnt/disks/vol5
|
||||||
```
|
```
|
||||||
|
|
||||||
If you have a development environment and only one disk, this is the best way
|
If you have a development environment and only one disk, this is the best way
|
||||||
to limit the quota of persistent volumes.
|
to limit the quota of persistent volumes.
|
||||||
|
|
||||||
1. Simple directories
|
1. Simple directories
|
||||||
|
|
||||||
In a development environment using `mount --bind` works also, but there is no capacity
|
In a development environment, using `mount --bind` works also, but there is no capacity
|
||||||
management.
|
management.
|
||||||
|
|
||||||
1. Block volumeMode PVs
|
|
||||||
|
|
||||||
Create a symbolic link under discovery directory to the block device on the node. To use
|
|
||||||
raw block devices in pods, volume_type should be set to "Block".
|
|
||||||
|
|
||||||
## Usage notes
|
## Usage notes
|
||||||
|
|
||||||
Beta PV.NodeAffinity field is used by default. If running against an older K8s
|
Make sure to make any mounts persist via `/etc/fstab` or with systemd mounts (for
|
||||||
version, the useAlphaAPI flag must be set in the configMap.
|
Flatcar Container Linux or Fedora CoreOS). Pods with persistent volume claims will not be
|
||||||
|
|
||||||
The volume provisioner cannot calculate volume sizes correctly, so you should
|
|
||||||
delete the daemonset pod on the relevant host after creating volumes. The pod
|
|
||||||
will be recreated and read the size correctly.
|
|
||||||
|
|
||||||
Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
|
|
||||||
Flatcar Container Linux). Pods with persistent volume claims will not be
|
|
||||||
able to start if the mounts become unavailable.
|
able to start if the mounts become unavailable.
|
||||||
|
|
||||||
## Further reading
|
## Further reading
|
||||||
|
|
||||||
Refer to the upstream docs here: <https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume>
|
Refer to the upstream docs here: <https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner>
|
||||||
|
|||||||
@@ -29,8 +29,7 @@ use Kubernetes's `PersistentVolume` abstraction. The following template is
|
|||||||
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
|
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
|
||||||
other situations:
|
other situations:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
kind: PersistentVolume
|
kind: PersistentVolume
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
metadata:
|
metadata:
|
||||||
@@ -46,7 +45,6 @@ spec:
|
|||||||
fsType: "ext4"
|
fsType: "ext4"
|
||||||
{% endif %}
|
{% endif %}
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->
|
|
||||||
|
|
||||||
If, for example, you wanted to use NFS you would just need to change the
|
If, for example, you wanted to use NFS you would just need to change the
|
||||||
`gcePersistentDisk` block to `nfs`. See
|
`gcePersistentDisk` block to `nfs`. See
|
||||||
@@ -68,8 +66,7 @@ Now that the Kubernetes cluster knows that some storage exists, you can put a
|
|||||||
claim on that storage. As with the `PersistentVolume` above, you can start
|
claim on that storage. As with the `PersistentVolume` above, you can start
|
||||||
with the `salt` template:
|
with the `salt` template:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
kind: PersistentVolumeClaim
|
kind: PersistentVolumeClaim
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
metadata:
|
metadata:
|
||||||
@@ -82,7 +79,6 @@ spec:
|
|||||||
requests:
|
requests:
|
||||||
storage: {{ pillar['cluster_registry_disk_size'] }}
|
storage: {{ pillar['cluster_registry_disk_size'] }}
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->
|
|
||||||
|
|
||||||
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
|
This tells Kubernetes that you want to use storage, and the `PersistentVolume`
|
||||||
you created before will be bound to this claim (unless you have other
|
you created before will be bound to this claim (unless you have other
|
||||||
@@ -93,8 +89,7 @@ gives you the right to use this storage until you release the claim.
|
|||||||
|
|
||||||
Now we can run a Docker registry:
|
Now we can run a Docker registry:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: ReplicationController
|
kind: ReplicationController
|
||||||
metadata:
|
metadata:
|
||||||
@@ -138,7 +133,6 @@ spec:
|
|||||||
persistentVolumeClaim:
|
persistentVolumeClaim:
|
||||||
claimName: kube-registry-pvc
|
claimName: kube-registry-pvc
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
|
|
||||||
|
|
||||||
*Note:* that if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.
|
*Note:* that if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.
|
||||||
|
|
||||||
@@ -146,8 +140,7 @@ spec:
|
|||||||
|
|
||||||
Now that we have a registry `Pod` running, we can expose it as a Service:
|
Now that we have a registry `Pod` running, we can expose it as a Service:
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
@@ -164,7 +157,6 @@ spec:
|
|||||||
port: 5000
|
port: 5000
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->
|
|
||||||
|
|
||||||
## Expose the registry on each node
|
## Expose the registry on each node
|
||||||
|
|
||||||
@@ -172,8 +164,7 @@ Now that we have a running `Service`, we need to expose it onto each Kubernetes
|
|||||||
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
|
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
|
||||||
node by creating following daemonset.
|
node by creating following daemonset.
|
||||||
|
|
||||||
<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
|
```yaml
|
||||||
``` yaml
|
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: DaemonSet
|
kind: DaemonSet
|
||||||
metadata:
|
metadata:
|
||||||
@@ -207,7 +198,6 @@ spec:
|
|||||||
containerPort: 80
|
containerPort: 80
|
||||||
hostPort: 5000
|
hostPort: 5000
|
||||||
```
|
```
|
||||||
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
|
|
||||||
|
|
||||||
When modifying replication-controller, service and daemon-set definitions, take
|
When modifying replication-controller, service and daemon-set definitions, take
|
||||||
care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.
|
care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.
|
||||||
@@ -219,7 +209,7 @@ This ensures that port 5000 on each node is directed to the registry `Service`.
|
|||||||
You should be able to verify that it is running by hitting port 5000 with a web
|
You should be able to verify that it is running by hitting port 5000 with a web
|
||||||
browser and getting a 404 error:
|
browser and getting a 404 error:
|
||||||
|
|
||||||
``` console
|
```ShellSession
|
||||||
$ curl localhost:5000
|
$ curl localhost:5000
|
||||||
404 page not found
|
404 page not found
|
||||||
```
|
```
|
||||||
@@ -229,7 +219,7 @@ $ curl localhost:5000
|
|||||||
To use an image hosted by this registry, simply say this in your `Pod`'s
|
To use an image hosted by this registry, simply say this in your `Pod`'s
|
||||||
`spec.containers[].image` field:
|
`spec.containers[].image` field:
|
||||||
|
|
||||||
``` yaml
|
```yaml
|
||||||
image: localhost:5000/user/container
|
image: localhost:5000/user/container
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -241,7 +231,7 @@ building locally and want to push to your cluster.
|
|||||||
You can use `kubectl` to set up a port-forward from your local node to a
|
You can use `kubectl` to set up a port-forward from your local node to a
|
||||||
running Pod:
|
running Pod:
|
||||||
|
|
||||||
``` console
|
```ShellSession
|
||||||
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
|
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
|
||||||
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
|
-o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
|
||||||
| grep Running | head -1 | cut -f1 -d' ')
|
| grep Running | head -1 | cut -f1 -d' ')
|
||||||
|
|||||||
11
docs/kylinlinux.md
Normal file
11
docs/kylinlinux.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# Kylin Linux
|
||||||
|
|
||||||
|
Kylin Linux is supported with docker and containerd runtimes.
|
||||||
|
|
||||||
|
**Note:** that Kylin Linux is not currently covered in kubespray CI and
|
||||||
|
support for it is currently considered experimental.
|
||||||
|
|
||||||
|
At present, only `Kylin Linux Advanced Server V10 (Sword)` has been adapted, which can support the deployment of aarch64 and x86_64 platforms.
|
||||||
|
|
||||||
|
There are no special considerations for using Kylin Linux as the target OS
|
||||||
|
for Kubespray deployments.
|
||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
|
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation.
|
||||||
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
|
It allows you to create Kubernetes services of type "LoadBalancer" in clusters that don't run on a cloud provider, and thus cannot simply hook into 3rd party products to provide load-balancers.
|
||||||
The default operationg mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
|
The default operating mode of MetalLB is in ["Layer2"](https://metallb.universe.tf/concepts/layer2/) but it can also operate in ["BGP"](https://metallb.universe.tf/concepts/bgp/) mode.
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
@@ -19,6 +19,7 @@ You have to explicitly enable the MetalLB extension and set an IP address range
|
|||||||
```yaml
|
```yaml
|
||||||
metallb_enabled: true
|
metallb_enabled: true
|
||||||
metallb_speaker_enabled: true
|
metallb_speaker_enabled: true
|
||||||
|
metallb_avoid_buggy_ips: true
|
||||||
metallb_ip_range:
|
metallb_ip_range:
|
||||||
- 10.5.0.0/16
|
- 10.5.0.0/16
|
||||||
```
|
```
|
||||||
@@ -69,16 +70,17 @@ metallb_peers:
|
|||||||
|
|
||||||
When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
|
When using calico >= 3.18 you can replace MetalLB speaker by calico Service LoadBalancer IP advertisement.
|
||||||
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
|
See [calico service IPs advertisement documentation](https://docs.projectcalico.org/archive/v3.18/networking/advertise-service-ips#advertise-service-load-balancer-ip-addresses).
|
||||||
In this scenarion you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
|
In this scenario you should disable the MetalLB speaker and configure the `calico_advertise_service_loadbalancer_ips` to match your `metallb_ip_range`
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
metallb_speaker_enabled: false
|
metallb_speaker_enabled: false
|
||||||
|
metallb_avoid_buggy_ips: true
|
||||||
metallb_ip_range:
|
metallb_ip_range:
|
||||||
- 10.5.0.0/16
|
- 10.5.0.0/16
|
||||||
calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}"
|
calico_advertise_service_loadbalancer_ips: "{{ metallb_ip_range }}"
|
||||||
```
|
```
|
||||||
|
|
||||||
If you have additional loadbalancer IP pool in `metallb_additional_address_pools`, ensure to add them to the list.
|
If you have additional loadbalancer IP pools in `metallb_additional_address_pools`, ensure you add them to the list.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
metallb_speaker_enabled: false
|
metallb_speaker_enabled: false
|
||||||
@@ -90,11 +92,13 @@ metallb_additional_address_pools:
|
|||||||
- 10.6.0.0/16
|
- 10.6.0.0/16
|
||||||
protocol: "bgp"
|
protocol: "bgp"
|
||||||
auto_assign: false
|
auto_assign: false
|
||||||
|
avoid_buggy_ips: true
|
||||||
kube_service_pool_2:
|
kube_service_pool_2:
|
||||||
ip_range:
|
ip_range:
|
||||||
- 10.10.0.0/16
|
- 10.10.0.0/16
|
||||||
protocol: "bgp"
|
protocol: "bgp"
|
||||||
auto_assign: false
|
auto_assign: false
|
||||||
|
avoid_buggy_ips: true
|
||||||
calico_advertise_service_loadbalancer_ips:
|
calico_advertise_service_loadbalancer_ips:
|
||||||
- 10.5.0.0/16
|
- 10.5.0.0/16
|
||||||
- 10.6.0.0/16
|
- 10.6.0.0/16
|
||||||
|
|||||||
66
docs/mirror.md
Normal file
66
docs/mirror.md
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# Public Download Mirror
|
||||||
|
|
||||||
|
A public mirror is useful for downloading public resources quickly in some areas of the world (such as China).
|
||||||
|
|
||||||
|
## Configuring Kubespray to use a mirror site
|
||||||
|
|
||||||
|
You can follow the [offline environment](offline-environment.md) guide to configure image/file downloads to use a public mirror site. For fast downloads in China, the configuration can look like this:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
gcr_image_repo: "gcr.m.daocloud.io"
|
||||||
|
kube_image_repo: "k8s.m.daocloud.io"
|
||||||
|
docker_image_repo: "docker.m.daocloud.io"
|
||||||
|
quay_image_repo: "quay.m.daocloud.io"
|
||||||
|
github_image_repo: "ghcr.m.daocloud.io"
|
||||||
|
|
||||||
|
files_repo: "https://files.m.daocloud.io"
|
||||||
|
```
|
||||||
|
|
||||||
|
Use mirror sites only if you trust the provider. The Kubespray team cannot verify their reliability or security.
|
||||||
|
You can replace the `m.daocloud.io` with any site you want.
|
||||||
|
|
||||||
|
## Example Usage Full Steps
|
||||||
|
|
||||||
|
You can follow the full steps below to use Kubespray with a mirror. For example:
|
||||||
|
|
||||||
|
Install Ansible according to Ansible installation guide then run the following steps:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# Copy ``inventory/sample`` as ``inventory/mycluster``
|
||||||
|
cp -rfp inventory/sample inventory/mycluster
|
||||||
|
|
||||||
|
# Update Ansible inventory file with inventory builder
|
||||||
|
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||||
|
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||||
|
|
||||||
|
# Use the download mirror
|
||||||
|
cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
|
||||||
|
sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
|
||||||
|
tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
|
||||||
|
gcr_image_repo: "gcr.m.daocloud.io"
|
||||||
|
kube_image_repo: "k8s.m.daocloud.io"
|
||||||
|
docker_image_repo: "docker.m.daocloud.io"
|
||||||
|
quay_image_repo: "quay.m.daocloud.io"
|
||||||
|
github_image_repo: "ghcr.m.daocloud.io"
|
||||||
|
files_repo: "https://files.m.daocloud.io"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Review and change parameters under ``inventory/mycluster/group_vars``
|
||||||
|
cat inventory/mycluster/group_vars/all/all.yml
|
||||||
|
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
||||||
|
|
||||||
|
# Deploy Kubespray with Ansible Playbook - run the playbook as root
|
||||||
|
# The option `--become` is required, as for example writing SSL keys in /etc/,
|
||||||
|
# installing packages and interacting with various systemd daemons.
|
||||||
|
# Without --become the playbook will fail to run!
|
||||||
|
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
The steps above are the [README.md](../README.md) steps with the "Use the download mirror" step added.
|
||||||
|
|
||||||
|
## Community-run mirror sites
|
||||||
|
|
||||||
|
DaoCloud(China)
|
||||||
|
|
||||||
|
* [image-mirror](https://github.com/DaoCloud/public-image-mirror)
|
||||||
|
* [files-mirror](https://github.com/DaoCloud/public-binary-files-mirror)
|
||||||
@@ -124,7 +124,7 @@ to
|
|||||||
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
|
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
|
||||||
If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.
|
If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.
|
||||||
|
|
||||||
### 3) Edit cluster-info configmap in kube-system namespace
|
### 3) Edit cluster-info configmap in kube-public namespace
|
||||||
|
|
||||||
`kubectl edit cm -n kube-public cluster-info`
|
`kubectl edit cm -n kube-public cluster-info`
|
||||||
|
|
||||||
|
|||||||
50
docs/ntp.md
Normal file
50
docs/ntp.md
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
# NTP synchronization
|
||||||
|
|
||||||
|
The Network Time Protocol (NTP) is a networking protocol for clock synchronization between computer systems. Time synchronization is important to Kubernetes and Etcd.
|
||||||
|
|
||||||
|
## Enable the NTP
|
||||||
|
|
||||||
|
To start the ntpd (or chrony) service and enable it at system boot, set the following variable:
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
ntp_enabled: true
|
||||||
|
```
|
||||||
|
|
||||||
|
The NTP service will be enabled and will synchronize time automatically.
|
||||||
|
|
||||||
|
## Customize the NTP configure file
|
||||||
|
|
||||||
|
In an air-gapped environment, nodes cannot reach public NTP servers over the internet. Instead, they can use a custom NTP server set through the NTP configuration file.
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
ntp_enabled: true
|
||||||
|
ntp_manage_config: true
|
||||||
|
ntp_servers:
|
||||||
|
- "0.your-ntp-server.org iburst"
|
||||||
|
- "1.your-ntp-server.org iburst"
|
||||||
|
- "2.your-ntp-server.org iburst"
|
||||||
|
- "3.your-ntp-server.org iburst"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Setting the TimeZone
|
||||||
|
|
||||||
|
The timezone can also be set via `ntp_timezone`, e.g. "Etc/UTC" or "Asia/Shanghai". If not set, the timezone is left unchanged.
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
ntp_enabled: true
|
||||||
|
ntp_timezone: Etc/UTC
|
||||||
|
```
|
||||||
|
|
||||||
|
## Advanced Configure
|
||||||
|
|
||||||
|
Enabling `tinker panic` is useful when running NTP in a VM environment to avoid clock drift on VMs. It only takes effect when `ntp_manage_config` is true.
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
ntp_tinker_panic: true
|
||||||
|
```
|
||||||
|
|
||||||
|
Force time to synchronize immediately via NTP after it is installed, which is useful on newly installed systems.
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
ntp_force_sync_immediately: true
|
||||||
|
```
|
||||||
@@ -1,12 +1,25 @@
|
|||||||
# Offline environment
|
# Offline environment
|
||||||
|
|
||||||
In case your servers don't have access to internet (for example when deploying on premises with security constraints), you need to setup:
|
In case your servers don't have access to the internet directly (for example
|
||||||
|
when deploying on premises with security constraints), you need to get the
|
||||||
|
following artifacts in advance from another environment that has access to the internet.
|
||||||
|
|
||||||
|
* Some static files (zips and binaries)
|
||||||
|
* OS packages (rpm/deb files)
|
||||||
|
* Container images used by Kubespray. Exhaustive list depends on your setup
|
||||||
|
* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
|
||||||
|
* [Optional] Helm chart files (only required if `helm_enabled=true`)
|
||||||
|
|
||||||
|
Then you need to setup the following services on your offline environment:
|
||||||
|
|
||||||
* a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
|
* a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
|
||||||
* an internal Yum/Deb repository for OS packages
|
* an internal Yum/Deb repository for OS packages
|
||||||
* an internal container image registry that need to be populated with all container images used by Kubespray. Exhaustive list depends on your setup
|
* an internal container image registry that need to be populated with all container images used by Kubespray
|
||||||
* [Optional] an internal PyPi server for kubespray python packages (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
|
* [Optional] an internal PyPi server for python packages used by Kubespray
|
||||||
* [Optional] an internal Helm registry (only required if `helm_enabled=true`)
|
* [Optional] an internal Helm registry for Helm chart files
|
||||||
|
|
||||||
|
You can get artifact lists with [generate_list.sh](/contrib/offline/generate_list.sh) script.
|
||||||
|
In addition, you can find some tools for offline deployment under [contrib/offline](/contrib/offline/README.md).
|
||||||
|
|
||||||
## Configure Inventory
|
## Configure Inventory
|
||||||
|
|
||||||
@@ -23,7 +36,7 @@ kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
|
|||||||
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
||||||
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
|
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
|
||||||
# etcd is optional if you **DON'T** use etcd_deployment=host
|
# etcd is optional if you **DON'T** use etcd_deployment=host
|
||||||
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
|
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||||
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
||||||
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||||
# If using Calico
|
# If using Calico
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user