Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2026-02-01 17:48:12 -03:30)

Compare commits: test/flatc...master (427 commits)

.ansible-lint

@@ -1,5 +1,4 @@
 ---
-parseable: true
 skip_list:
   # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
 
@@ -12,10 +11,12 @@ skip_list:
   # (Disabled in June 2021)
   - 'role-name'
 
-  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
+  # [var-naming]
   # In Kubespray we use variables that use camelCase to match their k8s counterparts
   # (Disabled in June 2021)
-  - 'var-naming'
+  - 'var-naming[pattern]'
+  # Variables names from within roles in kubespray don't need role name as a prefix
+  - 'var-naming[no-role-prefix]'
 
   # [fqcn-builtins]
   # Roles in kubespray don't need fully qualified collection names
@@ -32,6 +33,8 @@ skip_list:
   # Disable run-once check with free strategy
   # (Disabled in June 2023 after ansible upgrade; FIXME)
   - 'run-once[task]'
+
+  - 'jinja[spacing]'
 exclude_paths:
   # Generated files
   - tests/files/custom_cni/cilium.yaml

.github/ISSUE_TEMPLATE/bug-report.yaml (1 changed line)

@@ -108,7 +108,6 @@ body:
         - meta
         - multus
         - ovn4nfv
-        - weave
     validations:
       required: true
 

.github/workflows/auto-label-os.yml (6 changed lines)

@@ -13,16 +13,16 @@ jobs:
       issues: write
 
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
 
      - name: Parse issue form
-        uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e
+        uses: stefanbuck/github-issue-parser@10dcc54158ba4c137713d9d69d70a2da63b6bda3
        id: issue-parser
        with:
          template-path: .github/ISSUE_TEMPLATE/bug-report.yaml
 
      - name: Set labels based on OS field
-        uses: redhat-plumbers-in-action/advanced-issue-labeler@39087a4b30cb98d57f25f34d617a6af8163c17d9
+        uses: redhat-plumbers-in-action/advanced-issue-labeler@b80ae64e3e156e9c111b075bfa04b295d54e8e2e
        with:
          issue-form: ${{ steps.issue-parser.outputs.jsonString }}
          section: os

@@ -8,18 +8,19 @@ on:
 permissions: {}
 jobs:
   get-releases-branches:
+    if: github.repository == 'kubernetes-sigs/kubespray'
     runs-on: ubuntu-latest
     outputs:
       branches: ${{ steps.get-branches.outputs.data }}
     steps:
-      - uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110
+      - uses: octokit/graphql-action@ddde8ebb2493e79f390e6449c725c21663a67505
        id: get-branches
        with:
          query: |
            query get_release_branches($owner:String!, $name:String!) {
              repository(owner:$owner, name:$name) {
                refs(refPrefix: "refs/heads/",
-                    first: 0, # TODO increment once we have release branch with the new checksums format
+                    first: 3,
                     query: "release-",
                     orderBy: {
                       field: ALPHABETICAL,

.github/workflows/upgrade-patch-versions.yml (8 changed lines)

@@ -11,10 +11,10 @@ jobs:
   update-patch-versions:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
        with:
          ref: ${{ inputs.branch }}
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
        with:
          python-version: '3.13'
          cache: 'pip'
@@ -22,14 +22,14 @@ jobs:
      - run: update-hashes
        env:
          API_KEY: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions/cache@v4
+      - uses: actions/cache@v5
        with:
          key: pre-commit-hook-propagate
          path: |
            ~/.cache/pre-commit
      - run: pre-commit run --all-files propagate-ansible-variables
        continue-on-error: true
-      - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
+      - uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0
        with:
          commit-message: Patch versions updates
          title: Patch versions updates - ${{ inputs.branch }}

.gitlab-ci.yml

@@ -1,9 +1,9 @@
 ---
 stages:
-  - build
-  - test
-  - deploy-part1
-  - deploy-extended
+  - build # build docker image used in most other jobs
+  - test # unit tests
+  - deploy-part1 # kubespray runs - common setup
+  - deploy-extended # kubespray runs - rarer or costlier (to test) setups
 
 variables:
   FAILFASTCI_NAMESPACE: 'kargo-ci'
@@ -24,6 +24,7 @@ variables:
   ANSIBLE_REMOTE_USER: kubespray
   ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
   ANSIBLE_INVENTORY: /tmp/inventory
+  ANSIBLE_STDOUT_CALLBACK: "default"
   RESET_CHECK: "false"
   REMOVE_NODE_CHECK: "false"
   UPGRADE_TEST: "false"
@@ -31,7 +32,7 @@ variables:
   ANSIBLE_VERBOSITY: 2
   RECOVER_CONTROL_PLANE_TEST: "false"
   RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
-  TF_VERSION: 1.3.7
+  OPENTOFU_VERSION: v1.9.1
   PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
 
 before_script:
@@ -48,44 +49,14 @@ before_script:
     - cluster-dump/
   needs:
     - pipeline-image
-  variables:
-    ANSIBLE_STDOUT_CALLBACK: "debug"
 
 .job-moderated:
   extends: .job
   needs:
     - pipeline-image
-    - ci-not-authorized
     - pre-commit # lint
     - vagrant-validate # lint
 
-# For failfast, at least 1 job must be defined in .gitlab-ci.yml
-# Premoderated with manual actions
-ci-not-authorized:
-  stage: build
-  before_script: []
-  after_script: []
-  rules:
-    # LGTM or ok-to-test labels
-    - if: $PR_LABELS =~ /.*,(lgtm|approved|ok-to-test).*|^(lgtm|approved|ok-to-test).*/i
-      variables:
-        CI_OK_TO_TEST: '0'
-      when: always
-    - if: $CI_PIPELINE_SOURCE == "schedule" || $CI_PIPELINE_SOURCE == "trigger"
-      variables:
-        CI_OK_TO_TEST: '0'
-    - if: $CI_COMMIT_BRANCH == "master"
-      variables:
-        CI_OK_TO_TEST: '0'
-    - when: always
-      variables:
-        CI_OK_TO_TEST: '1'
-  script:
-    - exit $CI_OK_TO_TEST
-  tags:
-    - ffci
-  needs: []
-
 include:
   - .gitlab-ci/build.yml
   - .gitlab-ci/lint.yml

.gitlab-ci/build.yml

@@ -7,23 +7,24 @@ pipeline-image:
   tags:
     - ffci
   stage: build
-  image:
-    name: gcr.io/kaniko-project/executor:debug
-    entrypoint: ['']
+  image: moby/buildkit:rootless
   variables:
-    GODEBUG: "http2client=0"
+    BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
+    CACHE_IMAGE: $CI_REGISTRY_IMAGE/pipeline:cache
   # TODO: remove the override
   # currently rebase.sh depends on bash (not available in the kaniko image)
   # once we have a simpler rebase (which should be easy if the target branch ref is available as variable
   # we'll be able to rebase here as well hopefully
-  before_script: []
+  before_script:
+    - mkdir -p ~/.docker
+    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > ~/.docker/config.json
   script:
-    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
-    - /kaniko/executor --cache=true
-      --cache-dir=image-cache
-      --context $CI_PROJECT_DIR
-      --dockerfile $CI_PROJECT_DIR/pipeline.Dockerfile
-      --label 'git-branch'=$CI_COMMIT_REF_SLUG
-      --label 'git-tag=$CI_COMMIT_TAG'
-      --destination $PIPELINE_IMAGE
-      --log-timestamp=true
+    - |
+      buildctl-daemonless.sh build \
+        --frontend dockerfile.v0 \
+        --local context=$CI_PROJECT_DIR \
+        --local dockerfile=$CI_PROJECT_DIR \
+        --opt filename=pipeline.Dockerfile \
+        --export-cache type=registry,ref=$CACHE_IMAGE \
+        --import-cache type=registry,ref=$CACHE_IMAGE \
+        --output type=image,name=$PIPELINE_IMAGE,push=true

.gitlab-ci/kubevirt.yml

@@ -4,7 +4,7 @@
   interruptible: true
   script:
     - ansible-playbook tests/cloud_playbooks/create-kubevirt.yml
-      -c local -e @"tests/files/${TESTCASE}.yml"
+      -e @"tests/files/${TESTCASE}.yml"
     - ./tests/scripts/testcases_run.sh
   variables:
     ANSIBLE_TIMEOUT: "120"
@@ -12,10 +12,9 @@
     - ffci
   needs:
     - pipeline-image
-    - ci-not-authorized
 
 # TODO: generate testcases matrixes from the files in tests/files/
-# this is needed to avoid the need for PR rebasing when a job was added or remvoed in the target branch
+# this is needed to avoid the need for PR rebasing when a job was added or removed in the target branch
 # (currently, a removed job in the target branch breaks the tests, because the
 # pipeline definition is parsed by gitlab before the rebase.sh script)
 # CI template for PRs
@@ -27,42 +26,47 @@ pr:
       allow_failure: true
     - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
       when: on_success
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
     - when: manual
       allow_failure: true
   extends: .kubevirt
   parallel:
     matrix:
       - TESTCASE:
-          - almalinux8-calico
          - almalinux9-crio
          - almalinux9-kube-ovn
          - debian11-calico-collection
          - debian11-macvlan
          - debian12-cilium
+          - debian13-cilium
          - fedora39-kube-router
-          # FIXME: this test if broken (perma-failing)
          - openeuler24-calico
-          - opensuse15-6-calico
-          - rockylinux8-calico
          - rockylinux9-cilium
-          - ubuntu20-calico-all-in-one-hardening
-          - ubuntu20-cilium-sep
-          - ubuntu20-flannel-collection
-          - ubuntu20-kube-router-sep
-          - ubuntu20-kube-router-svc-proxy
+          - rockylinux10-cilium
          - ubuntu22-calico-all-in-one
          - ubuntu22-calico-all-in-one-upgrade
          - ubuntu24-calico-etcd-datastore
+          - ubuntu24-calico-all-in-one-hardening
+          - ubuntu24-cilium-sep
+          - ubuntu24-flannel-collection
+          - ubuntu24-kube-router-sep
+          - ubuntu24-kube-router-svc-proxy
+          - ubuntu24-ha-separate-etcd
+          - flatcar4081-calico
+          - fedora40-flannel-crio-collection-scale
 
-# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
-ubuntu20-calico-all-in-one:
+# The ubuntu24-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
+ubuntu24-calico-all-in-one:
   stage: deploy-part1
   extends: .kubevirt
   variables:
-    TESTCASE: ubuntu20-calico-all-in-one
+    TESTCASE: ubuntu24-calico-all-in-one
   rules:
     - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
       when: on_success
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
     - when: manual
       allow_failure: true
 
@@ -72,6 +76,8 @@ pr_full:
   rules:
     - if: $PR_LABELS =~ /.*ci-full.*/
       when: on_success
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
     # Else run as manual
     - when: manual
       allow_failure: true
@@ -85,10 +91,9 @@ pr_full:
          - debian12-custom-cni-helm
          - fedora39-calico-swap-selinux
          - fedora39-crio
-          - ubuntu20-all-in-one-docker
-          - ubuntu20-calico-ha-wireguard
-          - ubuntu20-flannel-ha
-          - ubuntu20-flannel-ha-once
+          - ubuntu24-calico-ha-wireguard
+          - ubuntu24-flannel-ha
+          - ubuntu24-flannel-ha-once
 
 # Need an update of the container image to use schema v2
 # update: quay.io/kubespray/vm-amazon-linux-2:latest
@@ -108,6 +113,8 @@ pr_extended:
   rules:
     - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
       when: on_success
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
     - when: manual
       allow_failure: true
   parallel:
@@ -119,21 +126,22 @@ pr_extended:
          - debian11-docker
          - debian12-calico
          - debian12-docker
-          - opensuse15-6-docker-cilium
+          - debian13-calico
          - rockylinux9-calico
-          - ubuntu20-calico-etcd-kubeadm
-          - ubuntu20-flannel
+          - rockylinux10-calico
          - ubuntu22-all-in-one-docker
          - ubuntu24-all-in-one-docker
          - ubuntu24-calico-all-in-one
+          - ubuntu24-calico-etcd-kubeadm
+          - ubuntu24-flannel
 
-# Enabled when PERIODIC_CI_ENABLED var is set
+# TODO: migrate to pr-full, fix the broken ones
 periodic:
-  only:
-    variables:
-      - $PERIODIC_CI_ENABLED
   allow_failure: true
   extends: .kubevirt
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
   parallel:
     matrix:
       - TESTCASE:
@@ -142,6 +150,6 @@ periodic:
          - debian12-cilium-svc-proxy
          - fedora39-calico-selinux
          - fedora40-docker-calico
-          - ubuntu20-calico-etcd-kubeadm-upgrade-ha
-          - ubuntu20-calico-ha-recover
-          - ubuntu20-calico-ha-recover-noquorum
+          - ubuntu24-calico-etcd-kubeadm-upgrade-ha
+          - ubuntu24-calico-ha-recover
+          - ubuntu24-calico-ha-recover-noquorum

.gitlab-ci/lint.yml

@@ -6,6 +6,7 @@ pre-commit:
   image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9'
   variables:
     PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
+    ANSIBLE_STDOUT_CALLBACK: default
   script:
     - pre-commit run --all-files --show-diff-on-failure
   cache:
@@ -23,4 +24,3 @@ vagrant-validate:
     VAGRANT_VERSION: 2.3.7
   script:
     - ./tests/scripts/vagrant-validate.sh
-  except: ['triggers', 'master']

.gitlab-ci/molecule.yml

@@ -1,17 +1,24 @@
 ---
 .molecule:
   tags: [ffci]
-  only: [/^pr-.*$/]
-  except: ['triggers']
+  rules: # run on ci-short as well
+    - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
+      when: on_success
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
+    - when: manual
+      allow_failure: true
   stage: deploy-part1
   image: $PIPELINE_IMAGE
   needs:
     - pipeline-image
-    # - ci-not-authorized
   script:
     - ./tests/scripts/molecule_run.sh
   after_script:
-    - ./tests/scripts/molecule_logs.sh
+    - rm -fr molecule_logs
+    - mkdir -p molecule_logs
+    - find ~/.cache/molecule/ \( -name '*.out' -o -name '*.err' \) -type f | xargs tar -uf molecule_logs/molecule.tar
+    - gzip molecule_logs/molecule.tar
   artifacts:
     when: always
     paths:
@@ -27,28 +34,22 @@ molecule:
          - container-engine/cri-dockerd
          - container-engine/containerd
          - container-engine/cri-o
+          - container-engine/gvisor
+          - container-engine/youki
          - adduser
          - bastion-ssh-config
-          - bootstrap-os
+          - bootstrap_os
 
-# CI template for periodic CI jobs
-# Enabled when PERIODIC_CI_ENABLED var is set
 molecule_full:
-  only:
-    variables:
-      - $PERIODIC_CI_ENABLED
   allow_failure: true
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
+    - when: manual
+      allow_failure: true
   extends: molecule
   parallel:
     matrix:
       - ROLE:
-          - container-engine/cri-dockerd
-          - container-engine/containerd
-          - container-engine/cri-o
-          - adduser
-          - bastion-ssh-config
-          - bootstrap-os
          # FIXME : tests below are perma-failing
          - container-engine/kata-containers
-          - container-engine/gvisor
-          - container-engine/youki

.gitlab-ci/terraform.yml

@@ -3,7 +3,6 @@
 .terraform_install:
   extends: .job
   needs:
-    - ci-not-authorized
     - pipeline-image
   variables:
     TF_VAR_public_key_path: "${ANSIBLE_PRIVATE_KEY_FILE}.pub"
@@ -14,18 +13,18 @@
   before_script:
     - ./tests/scripts/rebase.sh
     - mkdir -p cluster-dump $ANSIBLE_INVENTORY
-    - ./tests/scripts/terraform_install.sh
+    - ./tests/scripts/opentofu_install.sh
     - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
     - ln -rs -t $ANSIBLE_INVENTORY contrib/terraform/$PROVIDER/hosts
-    - terraform -chdir="contrib/terraform/$PROVIDER" init
+    - tofu -chdir="contrib/terraform/$PROVIDER" init
 
 terraform_validate:
   extends: .terraform_install
   tags: [ffci]
   only: ['master', /^pr-.*$/]
   script:
-    - terraform -chdir="contrib/terraform/$PROVIDER" validate
-    - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
+    - tofu -chdir="contrib/terraform/$PROVIDER" validate
+    - tofu -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
   stage: test
   needs:
     - pipeline-image
@@ -33,13 +32,11 @@ terraform_validate:
     matrix:
       - PROVIDER:
          - openstack
-          - equinix
          - aws
          - exoscale
          - hetzner
          - vsphere
          - upcloud
-          - nifcloud
 
 .terraform_apply:
   extends: .terraform_install
@@ -60,11 +57,11 @@ terraform_validate:
     - mkdir -p contrib/terraform/$PROVIDER/group_vars
     # Random subnet to avoid routing conflicts
     - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
-    - terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1
+    - tofu -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1
     - tests/scripts/testcases_run.sh
   after_script:
     # Cleanup regardless of exit code
-    - terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve
+    - tofu -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve
 
 # Elastx is generously donating resources for Kubespray on Openstack CI
 # Contacts: @gix @bl0m1
@@ -91,11 +88,10 @@ tf-elastx_cleanup:
     - ./scripts/openstack-cleanup/main.py
   allow_failure: true
 
-tf-elastx_ubuntu20-calico:
+tf-elastx_ubuntu24-calico:
   extends: .terraform_apply
   stage: deploy-part1
   when: on_success
-  allow_failure: true
   variables:
     <<: *elastx_variables
     PROVIDER: openstack
@@ -118,5 +114,5 @@ tf-elastx_ubuntu24-calico:
     TF_VAR_az_list_node: '["sto1"]'
     TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
     TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
-    TF_VAR_image: ubuntu-20.04-server-latest
+    TF_VAR_image: ubuntu-24.04-server-latest
     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
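
The validate steps above can be reproduced outside CI once OpenTofu is installed — a minimal sketch from a kubespray checkout, using openstack as an example PROVIDER:

```ShellSession
# Sketch: run the same checks as the terraform_validate job locally.
# Assumes the tofu binary is on PATH and the working directory is the repo root.
PROVIDER=openstack
tofu -chdir="contrib/terraform/$PROVIDER" init
tofu -chdir="contrib/terraform/$PROVIDER" validate
tofu -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
```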

.gitlab-ci/vagrant.yml

@@ -1,8 +1,6 @@
 ---
 vagrant:
   extends: .job-moderated
-  needs:
-    - ci-not-authorized
   variables:
     CI_PLATFORM: "vagrant"
     SSH_USER: "vagrant"
@@ -13,8 +11,6 @@ vagrant:
     VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
     PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
   tags: [ffci-vm-large]
-  # only: [/^pr-.*$/]
-  # except: ['triggers']
   image: quay.io/kubespray/vm-kubespray-ci:v13
   services: []
   before_script:
@@ -40,8 +36,12 @@ vagrant:
       policy: pull-push # TODO: change to "pull" when not on main
   stage: deploy-extended
   rules:
-    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
+    - if: $PR_LABELS =~ /.*ci-full.*/
      when: on_success
+    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
+      when: on_success
+    - when: manual
+      allow_failure: true
   parallel:
     matrix:
       - TESTCASE:

.pre-commit-config.yaml

@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v6.0.0
     hooks:
       - id: check-added-large-files
       - id: check-case-conflict
@@ -15,13 +15,13 @@ repos:
      - id: trailing-whitespace
 
  - repo: https://github.com/adrienverge/yamllint.git
-    rev: v1.35.1
+    rev: v1.37.1
    hooks:
      - id: yamllint
        args: [--strict]
 
  - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: v0.10.0.1
+    rev: v0.11.0.1
    hooks:
      - id: shellcheck
        args: ["--severity=error"]
@@ -29,7 +29,7 @@ repos:
        files: "\\.sh$"
 
  - repo: https://github.com/ansible/ansible-lint
-    rev: v25.1.1
+    rev: v25.11.0
    hooks:
      - id: ansible-lint
        additional_dependencies:
@@ -38,7 +38,7 @@ repos:
          - distlib
 
  - repo: https://github.com/golangci/misspell
-    rev: v0.6.0
+    rev: v0.7.0
    hooks:
      - id: misspell
        exclude: "OWNERS_ALIASES$"
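
These hook bumps can be exercised locally the same way the CI lint job runs them — a minimal sketch, assuming pre-commit is installed in your environment:

```ShellSession
# Sketch: run all hooks from .pre-commit-config.yaml against the whole tree,
# mirroring the pre-commit job in .gitlab-ci/lint.yml.
pre-commit run --all-files --show-diff-on-failure
```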

CONTRIBUTING.md

@@ -40,7 +40,7 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
 2. The [repo owners](OWNERS) will respond to your issue promptly.
 3. Fork the desired repo, develop and test your code changes.
 4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
-5. Addess any pre-commit validation failures.
+5. Address any pre-commit validation failures.
 6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
 7. Submit a pull request.
 8. Work with the reviewers on their suggestions.

Dockerfile (10 changed lines)

@@ -1,7 +1,7 @@
 # syntax=docker/dockerfile:1
 
-# Use imutable image tags rather than mutable tags (like ubuntu:22.04)
-FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
+# Use immutable image tags rather than mutable tags (like ubuntu:24.04)
+FROM ubuntu:noble-20260113@sha256:cd1dba651b3080c3686ecf4e3c4220f026b521fb76978881737d24f200828b2b
 
 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
@@ -29,14 +29,14 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
 
 RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
     --mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
-    pip install --no-compile --no-cache-dir -r requirements.txt \
+    pip install --break-system-packages --no-compile --no-cache-dir -r requirements.txt \
     && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
 
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 
 RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
-    && curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
-    && echo "$(curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
+    && curl -L "https://dl.k8s.io/release/v1.34.3/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
+    && echo "$(curl -L "https://dl.k8s.io/release/v1.34.3/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
     && chmod a+x /usr/local/bin/kubectl
 
 COPY *.yml ./

OWNERS_ALIASES

@@ -1,17 +1,13 @@
 aliases:
   kubespray-approvers:
-    - cristicalin
-    - floryut
-    - liupeng0518
-    - mzaian
-    - oomichi
-    - yankay
     - ant31
+    - mzaian
+    - tico88612
     - vannten
+    - yankay
   kubespray-reviewers:
     - cyclinder
     - erikjiang
-    - mrfreezeex
     - mzaian
     - tico88612
     - vannten
@@ -19,8 +15,12 @@ aliases:
   kubespray-emeritus_approvers:
     - atoms
     - chadswen
+    - cristicalin
+    - floryut
+    - liupeng0518
     - luckysb
     - mattymo
     - miouge1
+    - oomichi
     - riverzhang
     - woopstar

README.md (48 changed lines)

@@ -22,7 +22,7 @@ Ensure you have installed Docker then
 ```ShellSession
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.27.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -87,15 +87,15 @@ vagrant up
 ## Supported Linux Distributions
 
 - **Flatcar Container Linux by Kinvolk**
-- **Debian** Bookworm, Bullseye
-- **Ubuntu** 20.04, 22.04, 24.04
-- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8)
+- **Debian** Bookworm, Bullseye, Trixie
+- **Ubuntu** 22.04, 24.04
+- **CentOS Stream / RHEL** [9, 10](docs/operating_systems/rhel.md#rhel-8)
 - **Fedora** 39, 40
 - **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
-- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
-- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
+- **Oracle Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
+- **Alma Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
+- **Rocky Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8) (experimental in 10: see [Rocky Linux 10 notes](docs/operating_systems/rhel.md#rocky-linux-10))
 - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
 - **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
@@ -111,27 +111,26 @@ Note:
 <!-- BEGIN ANSIBLE MANAGED BLOCK -->
 
 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.32.3
-  - [etcd](https://github.com/etcd-io/etcd) 3.5.16
-  - [docker](https://www.docker.com/) 28.0
-  - [containerd](https://containerd.io/) 2.0.3
-  - [cri-o](http://cri-o.io/) 1.32.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.34.3
+  - [etcd](https://github.com/etcd-io/etcd) 3.5.26
+  - [docker](https://www.docker.com/) 28.3
+  - [containerd](https://containerd.io/) 2.2.1
+  - [cri-o](http://cri-o.io/) 1.34.4 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1
-  - [calico](https://github.com/projectcalico/calico) 3.29.2
-  - [cilium](https://github.com/cilium/cilium) 1.15.9
-  - [flannel](https://github.com/flannel-io/flannel) 0.22.0
+  - [cni-plugins](https://github.com/containernetworking/plugins) 1.8.0
+  - [calico](https://github.com/projectcalico/calico) 3.30.6
+  - [cilium](https://github.com/cilium/cilium) 1.18.6
+  - [flannel](https://github.com/flannel-io/flannel) 0.27.3
   - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
   - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
-  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.1.0
-  - [weave](https://github.com/rajch/weave) 2.8.7
-  - [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
+  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.2.2
+  - [kube-vip](https://github.com/kube-vip/kube-vip) 1.0.3
 - Application
   - [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
-  - [coredns](https://github.com/coredns/coredns) 1.11.3
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1
+  - [coredns](https://github.com/coredns/coredns) 1.12.1
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.13.3
   - [argocd](https://argoproj.github.io/) 2.14.5
-  - [helm](https://helm.sh/) 3.16.4
+  - [helm](https://helm.sh/) 3.18.4
   - [metallb](https://metallb.universe.tf/) 0.13.9
   - [registry](https://github.com/distribution/distribution) 2.8.1
 - Storage Plugin
@@ -139,7 +138,7 @@ Note:
   - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) 1.10.0
   - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) 1.30.0
   - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) 1.9.2
-  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24
+  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.32
   - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0
   - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4
@@ -183,9 +182,6 @@ You can choose among ten network plugins. (default: `calico`, except Vagrant use
 - [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
 
-- [weave](docs/CNI/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
-  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
-
 - [kube-ovn](docs/CNI/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
 
 - [kube-router](docs/CNI/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational

RELEASE.md

@@ -15,7 +15,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
 1. The release issue is closed
 1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
 1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
-1. Create/Update Issue for upgradeing kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
+1. Create/Update Issue for upgrading kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
 
 ## Major/minor releases and milestones
 

Vagrantfile (29 changed lines)

@@ -4,6 +4,8 @@
 # For help on using kubespray with vagrant, check out docs/developers/vagrant.md
 
 require 'fileutils'
+require 'ipaddr'
+require 'socket'
 
 Vagrant.require_version ">= 2.0.0"
 
@@ -99,6 +101,33 @@ $extra_vars ||= {}
 
 host_vars = {}
 
+def collect_networks(subnet, subnet_ipv6)
+  Socket.getifaddrs.filter_map do |iface|
+    next unless iface&.netmask&.ip_address && iface.addr
+
+    is_ipv6 = iface.addr.ipv6?
+    ip = IPAddr.new(iface.addr.ip_address.split('%').first)
+    ip_test = is_ipv6 ? IPAddr.new("#{subnet_ipv6}::0") : IPAddr.new("#{subnet}.0")
+
+    prefix = IPAddr.new(iface.netmask.ip_address).to_i.to_s(2).count('1')
+    network = ip.mask(prefix)
+
+    [IPAddr.new("#{network}/#{prefix}"), ip_test]
+  end
+end
+
+def subnet_in_use?(network_ips)
+  network_ips.any? { |net, test_ip| net.include?(test_ip) && test_ip != net }
+end
+
+network_ips = collect_networks($subnet, $subnet_ipv6)
+
+if subnet_in_use?(network_ips)
+  puts "Invalid subnet provided, subnet is already in use: #{$subnet}.0"
+  puts "Subnets in use: #{network_ips.inspect}"
+  exit 1
+end
+
 # throw error if os is not supported
 if ! SUPPORTED_OS.key?($os)
   puts "Unsupported OS: #{$os}"

ansible.cfg

@@ -15,7 +15,7 @@ timeout = 300
 stdout_callback = default
 display_skipped_hosts = no
 library = ./library
-callbacks_enabled = profile_tasks,ara_default
+callbacks_enabled = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
|
contrib/collection.sh (new executable file)

@@ -0,0 +1,9 @@
+#!/bin/bash -eux
+# Install the collection from source, assuming dependencies are present.
+# Run in SemaphoreUI, this script can install Kubespray from the repo.
+NAMESPACE=kubernetes_sigs
+COLLECTION=kubespray
+MY_VER=$(grep '^version:' galaxy.yml | cut -d: -f2 | sed 's/ //')
+
+ansible-galaxy collection build --force --output-path .
+ansible-galaxy collection install --offline --force $NAMESPACE-$COLLECTION-$MY_VER.tar.gz
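A usage sketch for the new helper, run from the repository root (`ansible-galaxy` must be on the PATH):

```ShellSession
./contrib/collection.sh
# Confirm the collection is now visible to Ansible:
ansible-galaxy collection list | grep kubespray
```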
@@ -31,7 +31,7 @@ manage-offline-container-images.sh register

 ## generate_list.sh

-This script generates the list of downloaded files and the list of container images from the `roles/kubespray-defaults/defaults/main/download.yml` file.
+This script generates the list of downloaded files and the list of container images from the `roles/kubespray_defaults/defaults/main/download.yml` file.

 Running this script executes the `generate_list.yml` playbook in the kubespray root directory and generates four files:
 all downloaded file URLs in files.list, all container images in images.list, and the jinja2 templates in *.template.
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
 TEMP_DIR="${CURRENT_DIR}/temp"
 REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

-: ${DOWNLOAD_YML:="roles/kubespray-defaults/defaults/main/download.yml"}
+: ${DOWNLOAD_YML:="roles/kubespray_defaults/defaults/main/download.yml"}

 mkdir -p ${TEMP_DIR}

@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
   | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

 # add kube-* images to images list template
-# Those container images are downloaded by kubeadm, so roles/kubespray-defaults/defaults/main/download.yml
+# Those container images are downloaded by kubeadm, so roles/kubespray_defaults/defaults/main/download.yml
 # doesn't contain them. That is why they need to be added to the
 # list separately.
 KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
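Because the script only sets a default with `: ${DOWNLOAD_YML:=...}`, the renamed path can be overridden per-run without editing the script (the alternate file below is a hypothetical example):

```ShellSession
DOWNLOAD_YML=my/custom/download.yml ./generate_list.sh
```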
@@ -5,7 +5,7 @@

 roles:
   # Just load default variables from roles.
-  - role: kubespray-defaults
+  - role: kubespray_defaults
     when: false
   - role: download
     when: false
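When applying the same rename in a fork, stragglers are easy to find with a recursive grep (a sketch; adjust the search roots to taste):

```ShellSession
grep -rIl 'kubespray-defaults' roles/ contrib/ playbooks/
```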
@@ -20,7 +20,6 @@ function create_container_image_tar() {

     kubectl describe cronjobs,jobs,pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq > "${IMAGES}"
     # NOTE: etcd and pause cannot be seen as pods.
-    # The pause image is used for --pod-infra-container-image option of kubelet.
     kubectl cluster-info dump | grep -E "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g >> "${IMAGES}"
   else
     echo "Getting images from file \"${IMAGES_FROM_FILE}\""

@@ -36,7 +35,7 @@ function create_container_image_tar() {
   mkdir ${IMAGE_DIR}
   cd ${IMAGE_DIR}

-  sudo ${runtime} pull registry:latest
+  sudo --preserve-env=http_proxy,https_proxy,no_proxy ${runtime} pull registry:latest
   sudo ${runtime} save -o registry-latest.tar registry:latest

   while read -r image

@@ -45,7 +44,7 @@ function create_container_image_tar() {
     set +e
     for step in $(seq 1 ${RETRY_COUNT})
     do
-      sudo ${runtime} pull ${image}
+      sudo --preserve-env=http_proxy,https_proxy,no_proxy ${runtime} pull ${image}
       if [ $? -eq 0 ]; then
         break
       fi

@@ -127,7 +126,7 @@ function register_container_images() {

   tar -zxvf ${IMAGE_TAR_FILE}

-  if [ "${create_registry}" ]; then
+  if ${create_registry}; then
     sudo ${runtime} load -i ${IMAGE_DIR}/registry-latest.tar
     set +e

@@ -148,7 +147,7 @@ function register_container_images() {
     if [ "${org_image}" == "ID:" ]; then
       org_image=$(echo "${load_image}" | awk '{print $4}')
     fi
-    image_id=$(sudo ${runtime} image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
+    image_id=$(sudo ${runtime} image inspect --format "{{.Id}}" "${org_image}")
     if [ -z "${file_name}" ]; then
       echo "Failed to get file_name for line ${line}"
       exit 1
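The new `image_id` line asks the runtime to print the field directly instead of scraping pretty-printed JSON; a quick comparison (works with docker and nerdctl; `registry:latest` is just an example image):

```ShellSession
# Fragile: depends on the exact JSON layout emitted by `inspect`.
sudo docker image inspect registry:latest | grep '"Id":' | awk -F: '{print $3}' | sed 's/",//'
# Robust: Go-template the field out of the runtime itself.
sudo docker image inspect --format "{{.Id}}" registry:latest
```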
@@ -41,7 +41,7 @@ fi

 sudo "${runtime}" container inspect nginx >/dev/null 2>&1
 if [ $? -ne 0 ]; then
-  sudo "${runtime}" run \
+  sudo --preserve-env=http_proxy,https_proxy,no_proxy "${runtime}" run \
     --restart=always -d -p ${NGINX_PORT}:80 \
     --volume "${OFFLINE_FILES_DIR}":/usr/share/nginx/html/download \
     --volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
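The `--preserve-env` additions matter because `sudo` strips the caller's environment by default, so proxy settings never reach the container runtime; a minimal demonstration (the proxy URL is a placeholder):

```ShellSession
export https_proxy=http://proxy.example.local:3128
sudo env | grep https_proxy                             # no output: sudo drops it
sudo --preserve-env=https_proxy env | grep https_proxy  # variable survives
```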
@@ -1,5 +1,11 @@
 terraform {
   required_version = ">= 0.12.0"
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 5.0"
+    }
+  }
 }

 provider "aws" {
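After adding or tightening a provider constraint like this, existing working directories need their dependency lock refreshed; the standard incantation (run in the module directory) is:

```ShellSession
terraform init -upgrade
```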
@@ -1,246 +0,0 @@ (file removed; previous contents below)

# Kubernetes on Equinix Metal with Terraform

Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
[Equinix Metal](https://metal.equinix.com) ([formerly Packet](https://blog.equinix.com/blog/2020/10/06/equinix-metal-metal-and-more/)).

## Status

This will install a Kubernetes cluster on Equinix Metal. It should work in all locations and on most server types.

## Approach

The terraform configuration inspects variables found in
[variables.tf](variables.tf) to create resources in your Equinix Metal project.
There is a [python script](../terraform.py) that reads the generated `.tfstate`
file to generate a dynamic inventory that is consumed by [cluster.yml](../../../cluster.yml)
to actually install Kubernetes with Kubespray.

### Kubernetes Nodes

You can create many different kubernetes topologies by setting the number of
different classes of hosts.

- Master nodes with etcd: `number_of_k8s_masters` variable
- Master nodes without etcd: `number_of_k8s_masters_no_etcd` variable
- Standalone etcd hosts: `number_of_etcd` variable
- Kubernetes worker nodes: `number_of_k8s_nodes` variable

Note that the Ansible script will report an invalid configuration if you wind up
with an *even number* of etcd instances, since that is not a valid configuration. This
restriction includes standalone etcd nodes that are deployed in a cluster along with
master nodes with etcd replicas. As an example, if you have three master nodes with
etcd replicas and three standalone etcd nodes, the script will fail since there are
now six total etcd replicas.

## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- [Install Ansible dependencies](/docs/ansible/ansible.md#installing-ansible)
- Account with Equinix Metal
- An SSH key pair

## SSH Key Setup

An SSH keypair is required so Ansible can access the newly provisioned nodes (Equinix Metal hosts). By default, the public SSH key defined in cluster.tfvars (~/.ssh/id_rsa.pub) will be installed in authorized_keys on the newly provisioned nodes. Terraform uploads this public key and then distributes it to all the nodes. If you have already set this public key in Equinix Metal (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank, to prevent the duplicate key from being uploaded, which would cause an error.

If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command:

```ShellSession
ssh-keygen -f ~/.ssh/id_rsa
```

## Terraform

Terraform will be used to provision all of the Equinix Metal resources with base software as appropriate.

### Configuration

#### Inventory files

Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):

```ShellSession
cp -LRp contrib/terraform/equinix/sample-inventory inventory/$CLUSTER
cd inventory/$CLUSTER
ln -s ../../contrib/terraform/equinix/hosts
```

This will be the base for subsequent Terraform commands.

#### Equinix Metal API access

Your Equinix Metal API key must be available in the `METAL_AUTH_TOKEN` environment variable.
This key is typically stored outside of the code repo since it is considered secret.
If someone gets this key, they can start up and shut down hosts in your project!

For more information on how to generate an API key or find your project ID, please see
[Accounts Index](https://metal.equinix.com/developers/docs/accounts/).

The Equinix Metal Project ID associated with the key will be set later in `cluster.tfvars`.

For more information about the API, please see [Equinix Metal API](https://metal.equinix.com/developers/api/).

For more information about terraform provider authentication, please see [the equinix provider documentation](https://registry.terraform.io/providers/equinix/equinix/latest/docs).

Example:

```ShellSession
export METAL_AUTH_TOKEN="Example-API-Token"
```

Note that to deploy several clusters within the same project you need to use [terraform workspaces](https://www.terraform.io/docs/state/workspaces.html#using-workspaces).

#### Cluster variables

The construction of the cluster is driven by values found in
[variables.tf](variables.tf).

For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

The `cluster_name` is used to set a tag on each server deployed as part of this cluster.
This helps when identifying which hosts are associated with each cluster.

While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:

- cluster_name = the name of the inventory directory created above as $CLUSTER
- equinix_metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above

#### Enable localhost access

Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling
`kubeconfig_localhost: true` in the Kubespray configuration.

Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`, comment the following line back in, and change it from `false` to `true`:
`# kubeconfig_localhost: false`
becomes:
`kubeconfig_localhost: true`

Once the Kubespray playbooks are run, a Kubernetes configuration file will be written to the local host at `inventory/$CLUSTER/artifacts/admin.conf`.

#### Terraform state files

In the cluster's inventory folder, the following files might be created (either by Terraform
or manually). To prevent you from pushing them accidentally, they are listed in a
`.gitignore` file in the `contrib/terraform/equinix` directory:

- `.terraform`
- `.tfvars`
- `.tfstate`
- `.tfstate.backup`
- `.lock.hcl`

You can still add them manually if you want to.

### Initialization

Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:

```ShellSession
cd inventory/$CLUSTER
terraform -chdir=../../contrib/terraform/equinix init -var-file=cluster.tfvars
```

This should finish fairly quickly, telling you Terraform has successfully initialized and loaded the necessary modules.

### Provisioning cluster

You can apply the Terraform configuration to your cluster with the following commands
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
terraform -chdir=../../contrib/terraform/equinix apply -var-file=cluster.tfvars
export ANSIBLE_HOST_KEY_CHECKING=False
ansible-playbook -i hosts ../../cluster.yml
```

### Destroying cluster

You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
terraform -chdir=../../contrib/terraform/equinix destroy -var-file=cluster.tfvars
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

- Remove SSH keys for the destroyed cluster from your `~/.ssh/known_hosts` file
- Clean up any temporary cache files: `rm /tmp/$CLUSTER-*`

### Debugging

You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` before running the Terraform command.
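For example, to capture a full debug trace of an apply run:

```ShellSession
export TF_LOG=DEBUG
terraform -chdir=../../contrib/terraform/equinix apply -var-file=cluster.tfvars
```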
## Ansible

### Node access

#### SSH

Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

```ShellSession
eval $(ssh-agent -s)
ssh-add ~/.ssh/id_rsa
```

If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).

#### Test access

Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.

```ShellSession
$ ansible -i inventory/$CLUSTER/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-etcd-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

If it fails, try to connect manually via SSH. It could be something as simple as a stale host key.

### Deploy Kubernetes

```ShellSession
ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```

This will take some time as there are many tasks to run.

## Kubernetes

### Set up kubectl

- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost.
- Verify that kubectl runs correctly

```ShellSession
kubectl version
```

- Verify that the Kubernetes configuration file has been copied over

```ShellSession
cat inventory/$CLUSTER/artifacts/admin.conf
```

- Verify that all the nodes are running correctly.

```ShellSession
kubectl version
kubectl --kubeconfig=inventory/$CLUSTER/artifacts/admin.conf get nodes
```

## What's next

Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
@@ -1 +0,0 @@ (removed symlink)
-../terraform.py
@@ -1,57 +0,0 @@ (file removed; previous contents below)

resource "equinix_metal_ssh_key" "k8s" {
  count      = var.public_key_path != "" ? 1 : 0
  name       = "kubernetes-${var.cluster_name}"
  public_key = chomp(file(var.public_key_path))
}

resource "equinix_metal_device" "k8s_master" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_k8s_masters
  hostname         = "${var.cluster_name}-k8s-master-${count.index + 1}"
  plan             = var.plan_k8s_masters
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
}

resource "equinix_metal_device" "k8s_master_no_etcd" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_k8s_masters_no_etcd
  hostname         = "${var.cluster_name}-k8s-master-${count.index + 1}"
  plan             = var.plan_k8s_masters_no_etcd
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
}

resource "equinix_metal_device" "k8s_etcd" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_etcd
  hostname         = "${var.cluster_name}-etcd-${count.index + 1}"
  plan             = var.plan_etcd
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "etcd"]
}

resource "equinix_metal_device" "k8s_node" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_k8s_nodes
  hostname         = "${var.cluster_name}-k8s-node-${count.index + 1}"
  plan             = var.plan_k8s_nodes
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
}
@@ -1,15 +0,0 @@ (file removed; previous contents below)

output "k8s_masters" {
  value = equinix_metal_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
  value = equinix_metal_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
  value = equinix_metal_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
  value = equinix_metal_device.k8s_node.*.access_public_ipv4
}
@@ -1,17 +0,0 @@ (file removed; previous contents below)

terraform {
  required_version = ">= 1.0.0"

  provider_meta "equinix" {
    module_name = "kubespray"
  }
  required_providers {
    equinix = {
      source  = "equinix/equinix"
      version = "1.24.0"
    }
  }
}

# Configure the Equinix Metal Provider
provider "equinix" {
}
@@ -1,35 +0,0 @@ (file removed; previous contents below)

# your Kubernetes cluster name here
cluster_name = "mycluster"

# Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
equinix_metal_project_id = "Example-Project-Id"

# The public SSH key to be uploaded into authorized_keys on the provisioned bare metal Equinix Metal nodes.
# Leave this value blank if the public key is already set up in the Equinix Metal project,
# since Terraform will complain if the key already exists in Equinix Metal.
public_key_path = "~/.ssh/id_rsa.pub"

# Equinix metro (interconnected bare metal across the global metros)
metro = "da"

# operating_system
operating_system = "ubuntu_22_04"

# standalone etcds
number_of_etcd = 0

plan_etcd = "t1.small.x86"

# masters
number_of_k8s_masters = 1

number_of_k8s_masters_no_etcd = 0

plan_k8s_masters = "t1.small.x86"

plan_k8s_masters_no_etcd = "t1.small.x86"

# nodes
number_of_k8s_nodes = 2

plan_k8s_nodes = "t1.small.x86"
@@ -1 +0,0 @@ (removed symlink)
-../../../../inventory/sample/group_vars
@@ -1,56 +0,0 @@ (file removed; previous contents below)

variable "cluster_name" {
  default = "kubespray"
}

variable "equinix_metal_project_id" {
  description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/"
}

variable "operating_system" {
  default = "ubuntu_22_04"
}

variable "public_key_path" {
  description = "The path of the ssh pub key"
  default     = "~/.ssh/id_rsa.pub"
}

variable "billing_cycle" {
  default = "hourly"
}

variable "metro" {
  default = "da"
}

variable "plan_k8s_masters" {
  default = "c3.small.x86"
}

variable "plan_k8s_masters_no_etcd" {
  default = "c3.small.x86"
}

variable "plan_etcd" {
  default = "c3.small.x86"
}

variable "plan_k8s_nodes" {
  default = "c3.medium.x86"
}

variable "number_of_k8s_masters" {
  default = 1
}

variable "number_of_k8s_masters_no_etcd" {
  default = 0
}

variable "number_of_etcd" {
  default = 0
}

variable "number_of_k8s_nodes" {
  default = 1
}
contrib/terraform/nifcloud/.gitignore (vendored, file removed)

@@ -1,5 +0,0 @@
-*.tfstate*
-.terraform.lock.hcl
-.terraform
-
-sample-inventory/inventory.ini
@@ -1,138 +0,0 @@ (file removed; previous contents below)

# Kubernetes on NIFCLOUD with Terraform

Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray

## Overview

The setup looks like the following:

```text
                          Kubernetes cluster
                       +----------------------------+
 +---------------+     |   +--------------------+   |
 |               |     |   | +--------------------+ |
 | API server LB +---------> |                    | |
 |               |     |   | | Control Plane/etcd | |
 +---------------+     |   | | node(s)            | |
                       |   +-+                    | |
                       |     +--------------------+ |
                       |              ^             |
                       |              |             |
                       |              v             |
                       |   +--------------------+   |
                       |   | +--------------------+ |
                       |   | |                    | |
                       |   | | Worker             | |
                       |   | | node(s)            | |
                       |   +-+                    | |
                       |     +--------------------+ |
                       +----------------------------+
```

## Requirements

* Terraform 1.3.7

## Quickstart

### Export Variables

* Your NIFCLOUD credentials:

  ```bash
  export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
  export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
  ```

* The SSH key used to connect to the instances:
  * FYI: [Cloud Help (SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)

  ```bash
  export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
  ```

* The IP address allowed to connect to the bastion server:

  ```bash
  export TF_VAR_working_instance_ip=$(curl ifconfig.me)
  ```

### Create The Infrastructure

* Run terraform:

  ```bash
  terraform init
  terraform apply -var-file ./sample-inventory/cluster.tfvars
  ```

### Setup The Kubernetes

* Generate the cluster configuration file:

  ```bash
  ./generate-inventory.sh > sample-inventory/inventory.ini
  ```

* Export variables:

  ```bash
  BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
  API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
  CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
  export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
  ```

* Set up ssh-agent:

  ```bash
  eval `ssh-agent`
  ssh-add <THE PATH TO YOUR SSH KEY>
  ```

* Run the cluster.yml playbook:

  ```bash
  cd ./../../../
  ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
  ```

### Connecting to Kubernetes

* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
* Fetch the kubeconfig file:

  ```bash
  mkdir -p ~/.kube
  scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
  ```

* Rewrite /etc/hosts:

  ```bash
  echo "${API_LB_IP} lb-apiserver.kubernetes.local" | sudo tee -a /etc/hosts
  ```

* Run kubectl:

  ```bash
  kubectl get node
  ```

## Variables

* `region`: Region where to run the cluster
* `az`: Availability zone where to run the cluster
* `private_ip_bn`: Private IP address of the bastion server
* `private_network_cidr`: Subnet of the private network
* `instances_cp`: Machines to provision as control plane nodes. The key of each object is used as part of the machine's name
  * `private_ip`: Private IP address of the machine
* `instances_wk`: Machines to provision as worker nodes. The key of each object is used as part of the machine's name
  * `private_ip`: Private IP address of the machine
* `instance_key_name`: The key name of the key pair to use for the instances
* `instance_type_bn`: The instance type of the bastion server
* `instance_type_wk`: The instance type of the worker nodes
* `instance_type_cp`: The instance type of the control plane nodes
* `image_name`: OS image used for the instances
* `working_instance_ip`: The IP address allowed to connect to the bastion server
* `accounting_type`: Accounting type (1: monthly, 2: pay per use)
@@ -1,64 +0,0 @@ (file removed; previous contents below)

#!/bin/bash

#
# Generates an inventory file based on the terraform output.
# After provisioning a cluster, simply run this command and supply the terraform state file.
# The default state file is terraform.tfstate.
#

set -e

TF_OUT=$(terraform output -json)

CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))

API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))

echo "[all]"
# Generate control plane hosts
i=1
for name in "${CONTROL_PLANE_NAMES[@]}"; do
  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
  i=$(( i + 1 ))
done

# Generate worker hosts
for name in "${WORKER_NAMES[@]}"; do
  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
done

API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))

echo ""
echo "[all:vars]"
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"


echo ""
echo "[kube_control_plane]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[etcd]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"
@@ -1,36 +0,0 @@ (file removed; previous contents below)

provider "nifcloud" {
  region = var.region
}

module "kubernetes_cluster" {
  source = "./modules/kubernetes-cluster"

  availability_zone = var.az
  prefix            = "dev"

  private_network_cidr = var.private_network_cidr

  instance_key_name = var.instance_key_name
  instances_cp      = var.instances_cp
  instances_wk      = var.instances_wk
  image_name        = var.image_name

  instance_type_bn = var.instance_type_bn
  instance_type_cp = var.instance_type_cp
  instance_type_wk = var.instance_type_wk

  private_ip_bn = var.private_ip_bn

  additional_lb_filter = [var.working_instance_ip]
}

resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    module.kubernetes_cluster.security_group_name.bastion
  ]
  type      = "IN"
  from_port = 22
  to_port   = 22
  protocol  = "TCP"
  cidr_ip   = var.working_instance_ip
}
@@ -1,301 +0,0 @@ (file removed; previous contents below)

#################################################
##
## Local variables
##
locals {
  # e.g. east-11 is 11
  az_num = reverse(split("-", var.availability_zone))[0]
  # e.g. east-11 is e11
  az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"

  # Port used by the protocol
  port_ssh     = 22
  port_kubectl = 6443
  port_kubelet = 10250

  # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
  port_bgp   = 179
  port_vxlan = 4789
  port_etcd  = 2379
}

#################################################
##
## General
##

# data
data "nifcloud_image" "this" {
  image_name = var.image_name
}

# private lan
resource "nifcloud_private_lan" "this" {
  private_lan_name  = "${var.prefix}lan"
  availability_zone = var.availability_zone
  cidr_block        = var.private_network_cidr
  accounting_type   = var.accounting_type
}

#################################################
##
## Bastion
##
resource "nifcloud_security_group" "bn" {
  group_name        = "${var.prefix}bn"
  description       = "${var.prefix} bastion"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "bn" {

  instance_id    = "${local.az_short_name}${var.prefix}bn01"
  security_group = nifcloud_security_group.bn.group_name
  instance_type  = var.instance_type_bn

  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = var.private_ip_bn
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}bn01"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Control Plane
##
resource "nifcloud_security_group" "cp" {
  group_name        = "${var.prefix}cp"
  description       = "${var.prefix} control plane"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "cp" {
  for_each = var.instances_cp

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.cp.group_name
  instance_type  = var.instance_type_cp
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

resource "nifcloud_load_balancer" "this" {
  load_balancer_name = "${local.az_short_name}${var.prefix}cp"
  accounting_type    = var.accounting_type
  balancing_type     = 1 // Round-Robin
  load_balancer_port = local.port_kubectl
  instance_port      = local.port_kubectl
  instances          = [for v in nifcloud_instance.cp : v.instance_id]
  filter = concat(
    [for k, v in nifcloud_instance.cp : v.public_ip],
    [for k, v in nifcloud_instance.wk : v.public_ip],
    var.additional_lb_filter,
  )
  filter_type = 1 // Allow
}

#################################################
##
## Worker
##
resource "nifcloud_security_group" "wk" {
  group_name        = "${var.prefix}wk"
  description       = "${var.prefix} worker"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "wk" {
  for_each = var.instances_wk

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.wk.group_name
  instance_type  = var.instance_type_wk
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Security Group Rule: Kubernetes
##

# ssh
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_ssh
  to_port                    = local.port_ssh
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.bn.group_name
}

# kubectl
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubectl
  to_port                    = local.port_kubectl
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# kubelet
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

#################################################
##
## Security Group Rule: calico
##

# vxlan
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "vxlan_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# bgp
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "bgp_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# etcd
resource "nifcloud_security_group_rule" "etcd_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_etcd
  to_port                    = local.port_etcd
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}
@@ -1,48 +0,0 @@ (file removed; previous contents below)

output "control_plane_lb" {
  description = "The DNS name of the LB for the control plane"
  value       = nifcloud_load_balancer.this.dns_name
}

output "security_group_name" {
  description = "The security groups used in the cluster"
  value = {
    bastion       = nifcloud_security_group.bn.group_name,
    control_plane = nifcloud_security_group.cp.group_name,
    worker        = nifcloud_security_group.wk.group_name,
  }
}

output "private_network_id" {
  description = "The private network used in the cluster"
  value       = nifcloud_private_lan.this.id
}

output "bastion_info" {
  description = "The bastion information in the cluster"
  value = { (nifcloud_instance.bn.instance_id) : {
    instance_id = nifcloud_instance.bn.instance_id,
    unique_id   = nifcloud_instance.bn.unique_id,
    private_ip  = nifcloud_instance.bn.private_ip,
    public_ip   = nifcloud_instance.bn.public_ip,
  } }
}

output "worker_info" {
  description = "The worker information in the cluster"
  value = { for v in nifcloud_instance.wk : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}

output "control_plane_info" {
  description = "The control plane information in the cluster"
  value = { for v in nifcloud_instance.cp : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}
@@ -1,45 +0,0 @@ (file removed; previous contents below)

#!/bin/bash

#################################################
##
## IP Address
##
configure_private_ip_address () {
  cat << EOS > /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens192:
      dhcp4: yes
      dhcp6: yes
      dhcp-identifier: mac
    ens224:
      dhcp4: no
      dhcp6: no
      addresses: [${private_ip_address}]
EOS
  netplan apply
}
configure_private_ip_address

#################################################
##
## SSH
##
configure_ssh_port () {
  sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
configure_ssh_port

#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}

#################################################
##
## Disable swap files generated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"
@@ -1,9 +0,0 @@ (file removed; previous contents below)

terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = ">= 1.8.0, < 2.0.0"
    }
  }
}
@@ -1,81 +0,0 @@ (file removed; previous contents below)

variable "availability_zone" {
  description = "The availability zone"
  type        = string
}

variable "prefix" {
  description = "The prefix for the entire cluster"
  type        = string
  validation {
    condition     = length(var.prefix) <= 5
    error_message = "Must be 5 characters or less."
  }
}

variable "private_network_cidr" {
  description = "The subnet of the private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "private_ip_bn" {
  description = "Private IP of the bastion server"
  type        = string
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the key pair to use for the instances"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of the bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of the workers"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of the control plane"
  type        = string
}

variable "image_name" {
  description = "The name of the image"
  type        = string
}

variable "additional_lb_filter" {
  description = "Additional LB filter"
  type        = list(string)
}

variable "accounting_type" {
  type    = string
  default = "1"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
@@ -1,3 +0,0 @@ (file removed; previous contents below)

output "kubernetes_cluster" {
  value = module.kubernetes_cluster
}
@@ -1,22 +0,0 @@ (file removed; previous contents below)

region = "jp-west-1"
az     = "west-11"

instance_key_name = "deployerkey"

instance_type_bn = "e-medium"
instance_type_cp = "e-medium"
instance_type_wk = "e-medium"

private_network_cidr = "192.168.30.0/24"
instances_cp = {
  "cp01" : { private_ip : "192.168.30.11/24" }
  "cp02" : { private_ip : "192.168.30.12/24" }
  "cp03" : { private_ip : "192.168.30.13/24" }
}
instances_wk = {
  "wk01" : { private_ip : "192.168.30.21/24" }
  "wk02" : { private_ip : "192.168.30.22/24" }
}
private_ip_bn = "192.168.30.10/24"

image_name = "Ubuntu Server 22.04 LTS"
@@ -1 +0,0 @@ (removed symlink)
-../../../../inventory/sample/group_vars
@@ -1,9 +0,0 @@ (file removed; previous contents below)

terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = "1.8.0"
    }
  }
}
@@ -1,77 +0,0 @@ (file removed; previous contents below)

variable "region" {
  description = "The region"
  type        = string
}

variable "az" {
  description = "The availability zone"
  type        = string
}

variable "private_ip_bn" {
  description = "Private IP of the bastion server"
  type        = string
}

variable "private_network_cidr" {
  description = "The subnet of the private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the key pair to use for the instances"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of the bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of the workers"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of the control plane"
  type        = string
}

variable "image_name" {
  description = "The name of the image"
  type        = string
}

variable "working_instance_ip" {
  description = "The IP address allowed to connect to the bastion server."
  type        = string
}

variable "accounting_type" {
  type    = string
  default = "2"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
@@ -281,9 +281,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`k8s_allowed_remote_ips_ipv6` | List of IPv6 CIDR allowed to initiate a SSH connection, empty by default |
 |`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
-|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}]` by default |
+|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}, { "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
 |`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
-|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default |
+|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, `[{ "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
 |`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
 |`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
 |`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
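Where the table above gives an "expected format", the matching `inventory/$CLUSTER/cluster.tfvars` entry is written in that same syntax; a minimal sketch using the table's own example values (the `0.0.0.0/0` prefix is illustrative, not a recommendation):

```ini
# inventory/$CLUSTER/cluster.tfvars -- illustrative override only
master_allowed_ports = [
  { "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0" },
]
```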
@@ -624,7 +624,7 @@ Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:
 - **calico** requires [configuring OpenStack Neutron ports](/docs/cloud_controllers/openstack.md) to allow service and pod subnets
 
 ```yml
-# Choose network plugin (calico, weave or flannel)
+# Choose network plugin (calico or flannel)
 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: flannel
 ```
@@ -271,7 +271,14 @@ variable "master_allowed_ports" {
 variable "master_allowed_ports_ipv6" {
   type = list(any)
 
-  default = []
+  default = [
+    {
+      "protocol"         = "ipv6-icmp"
+      "port_range_min"   = 0
+      "port_range_max"   = 0
+      "remote_ip_prefix" = "::/0"
+    },
+  ]
 }
 
 variable "worker_allowed_ports" {
@@ -297,6 +304,12 @@ variable "worker_allowed_ports_ipv6" {
       "port_range_max" = 32767
       "remote_ip_prefix" = "::/0"
     },
+    {
+      "protocol"         = "ipv6-icmp"
+      "port_range_min"   = 0
+      "port_range_max"   = 0
+      "remote_ip_prefix" = "::/0"
+    },
   ]
 }
 
@@ -3,7 +3,7 @@ terraform {
   required_providers {
     upcloud = {
       source  = "UpCloudLtd/upcloud"
-      version = "~>5.9.0"
+      version = "~>5.29.1"
     }
   }
   required_version = ">= 0.13"
@@ -3,7 +3,7 @@ terraform {
   required_providers {
     upcloud = {
       source  = "UpCloudLtd/upcloud"
-      version = "~>5.9.0"
+      version = "~>5.29.1"
    }
  }
  required_version = ">= 0.13"
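Both hunks bump the same provider constraint. On an existing checkout, Terraform keeps the old provider pinned in `.terraform.lock.hcl`, so the usual follow-up (standard Terraform workflow, not something these diffs run for you) is:

```ShellSession
terraform init -upgrade   # re-resolve providers so ~>5.29.1 can be selected
terraform plan            # review provider-driven changes before applying
```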
@@ -180,7 +180,7 @@ calico_group_id=rr1
 The inventory above will deploy the following topology assuming that calico's
 `global_as_num` is set to `65400`:
 
-![Image](figures/kubespray-calico-rr.png?raw=true)
+![Image](docs/figures/kubespray-calico-rr.png?raw=true)
 
 ### Optional : Define default endpoint to host action
 
@@ -1,5 +1,13 @@
 # Cilium
 
+## Unprivileged agent configuration
+
+By default, Cilium is installed with `securityContext.privileged: false`. You need to set the `kube_owner` variable to `root` in the inventory:
+
+```yml
+kube_owner: root
+```
+
 ## IP Address Management (IPAM)
 
 IP Address Management (IPAM) is responsible for the allocation and management of IP addresses used by network endpoints (container and others) managed by Cilium. The default mode is "Cluster Scope".
@@ -54,6 +62,10 @@ cilium_loadbalancer_ip_pools:
   - name: "blue-pool"
     cidrs:
       - "10.0.10.0/24"
+    ranges:
+      - start: "20.0.20.100"
+        stop: "20.0.20.200"
+      - start: "1.2.3.4"
 ```
 
 For further information, check [LB IPAM documentation](https://docs.cilium.io/en/stable/network/lb-ipam/)
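Pools like `blue-pool` only take effect once a Service of `type: LoadBalancer` requests an address; a minimal sketch (the name and selector are illustrative, and by default Cilium LB IPAM allocates from any pool whose selector matches):

```yml
apiVersion: v1
kind: Service
metadata:
  name: demo-lb            # illustrative name
spec:
  type: LoadBalancer       # Cilium LB IPAM assigns the external IP from a matching pool
  selector:
    app: demo
  ports:
    - port: 80
      targetPort: 8080
```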
@@ -233,7 +245,7 @@ cilium_operator_extra_volume_mounts:
 ## Choose Cilium version
 
 ```yml
-cilium_version: "1.15.9"
+cilium_version: "1.18.6"
 ```
 
 ## Add variable to config
@@ -32,7 +32,7 @@ add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
 
 * Disable nodelocaldns
 
-The nodelocal dns IP is not reacheable.
+The nodelocal dns IP is not reachable.
 
 Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml`
 
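The toggle referred to here is `enable_nodelocaldns` (assuming that is still the current variable name in `k8s_cluster.yml`):

```yml
# sample/group_vars/k8s_cluster/k8s_cluster.yml
enable_nodelocaldns: false
```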
@@ -1,79 +0,0 @@
-# Weave
-
-Weave 2.0.1 is supported by kubespray
-
-Weave uses [**consensus**](https://www.weave.works/docs/net/latest/ipam/##consensus) mode (default mode) and [**seed**](https://www.weave.works/docs/net/latest/ipam/#seed) mode.
-
-`Consensus` mode is best to use on static size cluster and `seed` mode is best to use on dynamic size cluster
-
-Weave encryption is supported for all communication
-
-* To use Weave encryption, specify a strong password (if no password, no encryption)
-
-```ShellSession
-# In file ./inventory/sample/group_vars/k8s_cluster.yml
-weave_password: EnterPasswordHere
-```
-
-This password is used to set an environment variable inside weave container.
-
-Weave is deployed by kubespray using a daemonSet
-
-* Check the status of Weave containers
-
-```ShellSession
-# From client
-kubectl -n kube-system get pods | grep weave
-# output
-weave-net-50wd2 2/2 Running 0 2m
-weave-net-js9rb 2/2 Running 0 2m
-```
-
-There must be as many pods as nodes (here kubernetes have 2 nodes so there are 2 weave pods).
-
-* Check status of weave (connection,encryption ...) for each node
-
-```ShellSession
-# On nodes
-curl http://127.0.0.1:6784/status
-# output on node1
-Version: 2.0.1 (up to date; next check at 2017/08/01 13:51:34)
-
-Service: router
-Protocol: weave 1..2
-Name: fa:16:3e:b3:d6:b2(node1)
-Encryption: enabled
-PeerDiscovery: enabled
-Targets: 2
-Connections: 2 (1 established, 1 failed)
-Peers: 2 (with 2 established connections)
-TrustedSubnets: none
-
-Service: ipam
-Status: ready
-Range: 10.233.64.0/18
-DefaultSubnet: 10.233.64.0/18
-```
-
-* Check parameters of weave for each node
-
-```ShellSession
-# On nodes
-ps -aux | grep weaver
-# output on node1 (here its use seed mode)
-root 8559 0.2 3.0 365280 62700 ? Sl 08:25 0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19
-```
-
-## Consensus mode (default mode)
-
-This mode is best to use on static size cluster
-
-### Seed mode
-
-This mode is best to use on dynamic size cluster
-
-The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters deployment.
-
-* Switch from consensus mode to seed/Observation mode
-
-See [weave ipam documentation](https://www.weave.works/docs/net/latest/tasks/ipam/ipam/) and use `weave_extra_args` to enable.
@@ -65,11 +65,10 @@ In kubespray, the default runtime name is "runc", and it can be configured with
 containerd_runc_runtime:
   name: runc
   type: "io.containerd.runc.v2"
-  engine: ""
-  root: ""
   options:
-    systemdCgroup: "false"
-    binaryName: /usr/local/bin/my-runc
+    Root: ""
+    SystemdCgroup: "false"
+    BinaryName: /usr/local/bin/my-runc
   base_runtime_spec: cri-base.json
 ```
 
@@ -149,3 +148,11 @@ following configuration:
 ```yaml
 nri_enabled: true
 ```
+
+### Optional : Static Binary
+
+To ensure compatibility with older distributions (such as Debian 11), you can use a static containerd binary. By default, this is static binary if the system's glibc version is less than 2.34; otherwise, it is the default binary.
+
+```yaml
+containerd_static_binary: true
+```
@@ -80,7 +80,7 @@ The `crio_remap_enable` configures the `/etc/subuid` and `/etc/subgid` files to
 By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.
 
 The `crio_default_capabilities` configure the default containers capabilities for the crio.
-Defaults capabilties are:
+Defaults capabilities are:
 
 ```yaml
 crio_default_capabilities:
docs/_sidebar.md (3 changes, generated)
@@ -6,7 +6,6 @@
 * [Downloads](/docs/advanced/downloads.md)
 * [Gcp-lb](/docs/advanced/gcp-lb.md)
 * [Kubernetes-reliability](/docs/advanced/kubernetes-reliability.md)
-* [Mitogen](/docs/advanced/mitogen.md)
 * [Netcheck](/docs/advanced/netcheck.md)
 * [Ntp](/docs/advanced/ntp.md)
 * [Proxy](/docs/advanced/proxy.md)
@@ -23,7 +22,6 @@
 * [Aws](/docs/cloud_providers/aws.md)
 * [Azure](/docs/cloud_providers/azure.md)
 * [Cloud](/docs/cloud_providers/cloud.md)
-* [Equinix-metal](/docs/cloud_providers/equinix-metal.md)
 * CNI
 * [Calico](/docs/CNI/calico.md)
 * [Cilium](/docs/CNI/cilium.md)
@@ -33,7 +31,6 @@
 * [Kube-router](/docs/CNI/kube-router.md)
 * [Macvlan](/docs/CNI/macvlan.md)
 * [Multus](/docs/CNI/multus.md)
-* [Weave](/docs/CNI/weave.md)
 * CRI
 * [Containerd](/docs/CRI/containerd.md)
 * [Cri-o](/docs/CRI/cri-o.md)
@@ -9,7 +9,6 @@ The following table shows the impact of the CPU architecture on compatible featu
 | kube_network_plugin | amd64 | arm64 | amd64 + arm64 |
 |---------------------|-------|-------|---------------|
 | Calico | Y | Y | Y |
-| Weave | Y | Y | Y |
 | Flannel | Y | N | N |
 | Canal | Y | N | N |
 | Cilium | Y | Y | N |
@@ -6,7 +6,7 @@
 - [Create New TLS Root CA Certificate and Key](#create-new-tls-root-ca-certificate-and-key)
 - [Install Cloudflare PKI/TLS `cfssl` Toolkit.](#install-cloudflare-pkitls-cfssl-toolkit)
 - [Create Root Certificate Authority (CA) Configuration File](#create-root-certificate-authority-ca-configuration-file)
-- [Create Certficate Signing Request (CSR) Configuration File](#create-certficate-signing-request-csr-configuration-file)
+- [Create Certificate Signing Request (CSR) Configuration File](#create-certificate-signing-request-csr-configuration-file)
 - [Create TLS Root CA Certificate and Key](#create-tls-root-ca-certificate-and-key)
 
 Cert-Manager is a native Kubernetes certificate management controller. It can help with issuing certificates from a variety of sources, such as Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, or self signed. It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry.
@@ -134,7 +134,7 @@ $ cat > ca-config.json <<EOF
 EOF
 ```
 
-#### Create Certficate Signing Request (CSR) Configuration File
+#### Create Certificate Signing Request (CSR) Configuration File
 
 The TLS certificate `names` details can be updated to your own specific requirements.
 
@@ -1,4 +1,4 @@
-# GCP Load Balancers for type=LoadBalacer of Kubernetes Services
+# GCP Load Balancers for type=LoadBalancer of Kubernetes Services
 
 > **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider`. (except external cloud provider)
 
@@ -1,30 +0,0 @@
-# Mitogen
-
-*Warning:* Mitogen support is now deprecated in kubespray due to upstream not releasing an updated version to support ansible 4.x (ansible-base 2.11.x) and above. The CI support has been stripped for mitogen and we are no longer validating any support or regressions for it. The supporting mitogen install playbook and integration documentation will be removed in a later version.
-
-[Mitogen for Ansible](https://mitogen.networkgenomics.com/ansible_detailed.html) allow a 1.25x - 7x speedup and a CPU usage reduction of at least 2x, depending on network conditions, modules executed, and time already spent by targets on useful work. Mitogen cannot improve a module once it is executing, it can only ensure the module executes as quickly as possible.
-
-## Install
-
-```ShellSession
-ansible-playbook contrib/mitogen/mitogen.yml
-```
-
-The above playbook sets the ansible `strategy` and `strategy_plugins` in `ansible.cfg` but you can also enable them if you use your own `ansible.cfg` by setting the environment varialbles:
-
-```ShellSession
-export ANSIBLE_STRATEGY=mitogen_linear
-export ANSIBLE_STRATEGY_PLUGINS=plugins/mitogen/ansible_mitogen/plugins/strategy
-```
-
-... or `ansible.cfg` setup:
-
-```ini
-[defaults]
-strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
-strategy=mitogen_linear
-```
-
-## Limitation
-
-If you are experiencing problems, please see the [documentation](https://mitogen.networkgenomics.com/ansible_detailed.html#noteworthy-differences).
@@ -1,6 +1,6 @@
 # Setting up Environment Proxy
 
-If you set http and https proxy, all nodes and loadbalancer will be excluded from proxy with generating no_proxy variable in `roles/kubespray-defaults/tasks/no_proxy.yml`, if you have additional resources for exclude add them to `additional_no_proxy` variable. If you want fully override your `no_proxy` setting, then fill in just `no_proxy` and no nodes or loadbalancer addresses will be added to no_proxy.
+If you set http and https proxy, all nodes and loadbalancer will be excluded from proxy with generating no_proxy variable in `roles/kubespray_defaults/tasks/no_proxy.yml`, if you have additional resources for exclude add them to `additional_no_proxy` variable. If you want fully override your `no_proxy` setting, then fill in just `no_proxy` and no nodes or loadbalancer addresses will be added to no_proxy.
 
 ## Set proxy for http and https
 
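The variables this section describes are set in `group_vars/all/all.yml`; a sketch with illustrative values (the proxy endpoint and extra exclusions are placeholders, only `additional_no_proxy` is named by the hunk above):

```yml
# group_vars/all/all.yml -- illustrative values
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
additional_no_proxy: ".internal.example.com,192.168.0.0/16"
```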
@@ -13,7 +13,7 @@ KUBESPRAYDIR=kubespray
 python3 -m venv $VENVDIR
 source $VENVDIR/bin/activate
 cd $KUBESPRAYDIR
-pip install -U -r requirements.txt
+pip install -r requirements.txt
 ```
 
 In case you have a similar message when installing the requirements:
@@ -32,7 +32,7 @@ Based on the table below and the available python version for your ansible host
 
 | Ansible Version | Python Version |
 |-----------------|----------------|
-| >= 2.16.4 | 3.10-3.12 |
+| >= 2.17.3 | 3.10-3.12 |
 
 ## Customize Ansible vars
 
@@ -42,13 +42,10 @@ Kubespray expects users to use one of the following variables sources for settin
 |----------------------------------------|------------------------------------------------------------------------------|
 | inventory vars | |
 | - **inventory group_vars** | most used |
-| - inventory host_vars | host specifc vars overrides, group_vars is usually more practical |
+| - inventory host_vars | host specific vars overrides, group_vars is usually more practical |
 | **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` |
 
-[!IMPORTANT]
-Extra vars are best used to override kubespray internal variables, for instances, roles/vars/.
-Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray
-interface. Thus they can change, disappear, or break stuff unexpectedly.
+> Extra vars are best used to override kubespray internal variables, for instances, roles/vars/. Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray interface. Thus they can change, disappear, or break stuff unexpectedly.
 
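As the table notes, extra vars always win precedence; passing an override file looks like this (the inventory path and file name are illustrative):

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yaml -e @overrides.yml cluster.yml
```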
 ## Ansible tags
 
@@ -62,7 +59,7 @@ The following tags are defined in playbooks:
 | aws-ebs-csi-driver | Configuring csi driver: aws-ebs |
 | azure-csi-driver | Configuring csi driver: azure |
 | bastion | Setup ssh config for bastion |
-| bootstrap-os | Anything related to host OS configuration |
+| bootstrap_os | Anything related to host OS configuration |
 | calico | Network plugin Calico |
 | calico_rr | Configuring Calico route reflector |
 | cert-manager | Configuring certificate manager for K8s |
@@ -118,12 +115,11 @@ The following tags are defined in playbooks:
 | local-path-provisioner | Configure External provisioner: local-path |
 | local-volume-provisioner | Configure External provisioner: local-volume |
 | macvlan | Network plugin macvlan |
-| master (DEPRECATED) | Deprecated - see `control-plane` |
 | metallb | Installing and configuring metallb |
 | metrics_server | Configuring metrics_server |
 | netchecker | Installing netchecker K8s app |
 | network | Configuring networking plugins for K8s |
-| mounts | Umount kubelet dirs when reseting |
+| mounts | Umount kubelet dirs when resetting |
 | multus | Network plugin multus |
 | nginx | Configuring LB for kube-apiserver instances |
 | node | Configuring K8s minion (compute) node role |
@@ -153,21 +149,16 @@ The following tags are defined in playbooks:
 | upgrade | Upgrading, f.e. container images/binaries |
 | upload | Distributing images/binaries across hosts |
 | vsphere-csi-driver | Configuring csi driver: vsphere |
-| weave | Network plugin Weave |
 | win_nodes | Running windows specific tasks |
 | youki | Configuring youki runtime |
 
-Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
-tags found in the codebase. New tags will be listed with the empty "Used for"
-field.
-
 ## Example commands
 
 Example command to filter and apply only DNS configuration tasks and skip
 everything else related to host OS configuration and downloading images of containers:
 
 ```ShellSession
-ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,facts --skip-tags=download,bootstrap-os
+ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,facts --skip-tags=download,bootstrap_os
 ```
 
 And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
@@ -187,17 +178,13 @@ ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
 
 Note: use `--tags` and `--skip-tags` wisely and only if you're 100% sure what you're doing.
 
-## Mitogen
-
-Mitogen support is deprecated, please see [mitogen related docs](/docs/advanced/mitogen.md) for usage and reasons for deprecation.
-
 ## Troubleshooting Ansible issues
 
 Having the wrong version of ansible, ansible collections or python dependencies can cause issue.
-In particular, Kubespray ship custom modules which Ansible needs to find, for which you should specify [ANSIBLE_LIBRAY](https://docs.ansible.com/ansible/latest/dev_guide/developing_locally.html#adding-a-module-or-plugin-outside-of-a-collection)
+In particular, Kubespray ship custom modules which Ansible needs to find, for which you should specify [ANSIBLE_LIBRARY](https://docs.ansible.com/ansible/latest/dev_guide/developing_locally.html#adding-a-module-or-plugin-outside-of-a-collection)
 
 ```ShellSession
-export ANSIBLE_LIBRAY=<kubespray_dir>/library`
+export ANSIBLE_LIBRARY=<kubespray_dir>/library`
 ```
 
 A simple way to ensure you get all the correct version of Ansible is to use
@@ -206,11 +193,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
 to access the inventory and SSH key in the container, like this:
 
 ```ShellSession
-git checkout v2.27.0
-docker pull quay.io/kubespray/kubespray:v2.27.0
+git checkout v2.30.0
+docker pull quay.io/kubespray/kubespray:v2.30.0
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.27.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -2,14 +2,13 @@
 
 Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html).
 
-## Requirements
-
-- An inventory file with the appropriate host groups. See the [README](../README.md#usage).
-- A `group_vars` directory. These group variables **need** to match the appropriate variable names under `inventory/local/group_vars`. See the [README](../README.md#usage).
-
 ## Usage
 
-1. Add Kubespray to your requirements.yml file
+1. Set up an inventory with the appropriate host groups and required group vars.
+   See also the documentation on [kubespray inventories](./inventory.md) and the
+   general ["Getting started" documentation](../getting_started/getting-started.md#building-your-own-inventory).
+
+2. Add Kubespray to your requirements.yml file
 
 ```yaml
 collections:
@@ -18,20 +17,20 @@ Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/a
     version: master # use the appropriate tag or branch for the version you need
 ```
 
-2. Install your collection
+3. Install your collection
 
 ```ShellSession
 ansible-galaxy install -r requirements.yml
 ```
 
-3. Create a playbook to install your Kubernetes cluster
+4. Create a playbook to install your Kubernetes cluster
 
 ```yaml
 - name: Install Kubernetes
   ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
 ```
 
-4. Update INVENTORY and PLAYBOOK so that they point to your inventory file and the playbook you created above, and then install Kubespray
+5. Update INVENTORY and PLAYBOOK so that they point to your inventory file and the playbook you created above, and then install Kubespray
 
 ```ShellSession
 ansible-playbook -i INVENTORY --become --become-user=root PLAYBOOK
@@ -103,13 +103,13 @@ following default cluster parameters:
 
 * *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
 
-* *kube_service_subnets* - All service subnets separated by commas (default is a mix of ``kube_service_addresses`` and ``kube_service_addresses_ipv6`` depending on ``ipv4_stack`` and ``ipv6_stacke`` options),
+* *kube_service_subnets* - All service subnets separated by commas (default is a mix of ``kube_service_addresses`` and ``kube_service_addresses_ipv6`` depending on ``ipv4_stack`` and ``ipv6_stack`` options),
   for example ``10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116`` for dual stack(ipv4_stack/ipv6_stack set to `true`).
   It is not recommended to change this variable directly.
 
 * *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
 
-* *kube_pods_subnets* - All pods subnets separated by commas (default is a mix of ``kube_pods_subnet`` and ``kube_pod_subnet_ipv6`` depending on ``ipv4_stack`` and ``ipv6_stacke`` options),
+* *kube_pods_subnets* - All pods subnets separated by commas (default is a mix of ``kube_pods_subnet`` and ``kube_pod_subnet_ipv6`` depending on ``ipv4_stack`` and ``ipv6_stack`` options),
   for example ``10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112`` for dual stack(ipv4_stack/ipv6_stack set to `true`).
   It is not recommended to change this variable directly.
 
@@ -180,7 +180,7 @@ and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.
 
 IPv4 stack enable by *ipv4_stack* is set to ``true``, by default.
 IPv6 stack enable by *ipv6_stack* is set to ``false`` by default.
-This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
+This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray_defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
 Set both variables to ``true`` for Dual Stack mode.
 IPv4 has higher priority in Dual Stack mode(e.g. in variables `main_ip`, `main_access_ip` and other).
 You can also make IPv6 only clusters with ``false`` in *ipv4_stack*.
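Putting the two toggles described above together, a Dual Stack inventory sketch:

```yml
# inventory group_vars, e.g. k8s_cluster.yml
ipv4_stack: true   # default
ipv6_stack: true   # both true = Dual Stack; ipv4_stack: false gives an IPv6-only cluster
```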
@@ -73,6 +73,7 @@ The cloud provider is configured to have Octavia by default in Kubespray.
 external_openstack_lbaas_method: ROUND_ROBIN
 external_openstack_lbaas_provider: amphora
 external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
+external_openstack_lbaas_member_subnet_id: "Neutron subnet ID on which to create the members of the load balancer"
 external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
 external_openstack_lbaas_manage_security_groups: false
 external_openstack_lbaas_create_monitor: false
@@ -1,100 +0,0 @@
-# Equinix Metal
-
-Kubespray provides support for bare metal deployments using the [Equinix Metal](http://metal.equinix.com).
-Deploying upon bare metal allows Kubernetes to run at locations where an existing public or private cloud might not exist such
-as cell tower, edge collocated installations. The deployment mechanism used by Kubespray for Equinix Metal is similar to that used for
-AWS and OpenStack clouds (notably using Terraform to deploy the infrastructure). Terraform uses the Equinix Metal provider plugin
-to provision and configure hosts which are then used by the Kubespray Ansible playbooks. The Ansible inventory is generated
-dynamically from the Terraform state file.
-
-## Local Host Configuration
-
-To perform this installation, you will need a localhost to run Terraform/Ansible (laptop, VM, etc) and an account with Equinix Metal.
-In this example, we are provisioning a m1.large CentOS7 OpenStack VM as the localhost for the Kubernetes installation.
-You'll need Ansible, Git, and PIP.
-
-```bash
-sudo yum install epel-release
-sudo yum install ansible
-sudo yum install git
-sudo yum install python-pip
-```
-
-## Playbook SSH Key
-
-An SSH key is needed by Kubespray/Ansible to run the playbooks.
-This key is installed into the bare metal hosts during the Terraform deployment.
-You can generate a key new key or use an existing one.
-
-```bash
-ssh-keygen -f ~/.ssh/id_rsa
-```
-
-## Install Terraform
-
-Terraform is required to deploy the bare metal infrastructure. The steps below are for installing on CentOS 7.
-[More terraform installation options are available.](https://learn.hashicorp.com/terraform/getting-started/install.html)
-
-Grab the latest version of Terraform and install it.
-
-```bash
-echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_linux_amd64.zip"
-sudo yum install unzip
-sudo unzip terraform_0.14.10_linux_amd64.zip -d /usr/local/bin/
-```
-
-## Download Kubespray
-
-Pull over Kubespray and setup any required libraries.
-
-```bash
-git clone https://github.com/kubernetes-sigs/kubespray
-cd kubespray
-```
-
-## Install Ansible
-
-Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
-
-## Cluster Definition
-
-In this example, a new cluster called "alpha" will be created.
-
-```bash
-cp -LRp contrib/terraform/packet/sample-inventory inventory/alpha
-cd inventory/alpha/
-ln -s ../../contrib/terraform/packet/hosts
-```
-
-Details about the cluster, such as the name, as well as the authentication tokens and project ID
-for Equinix Metal need to be defined. To find these values see [Equinix Metal API Accounts](https://metal.equinix.com/developers/docs/accounts/).
-
-```bash
-vi cluster.tfvars
-```
-
-* cluster_name = alpha
-* packet_project_id = ABCDEFGHIJKLMNOPQRSTUVWXYZ123456
-* public_key_path = 12345678-90AB-CDEF-GHIJ-KLMNOPQRSTUV
-
-## Deploy Bare Metal Hosts
-
-Initializing Terraform will pull down any necessary plugins/providers.
-
-```bash
-terraform init ../../contrib/terraform/packet/
-```
-
-Run Terraform to deploy the hardware.
-
-```bash
-terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
-```
-
-## Run Kubespray Playbooks
-
-With the bare metal infrastructure deployed, Kubespray can now install Kubernetes and setup the cluster.
-
-```bash
-ansible-playbook --become -i inventory/alpha/hosts cluster.yml
-```
@@ -2,19 +2,14 @@
 
 ## Pipeline
 
-1. build: build a docker image to be used in the pipeline
-2. unit-tests: fast jobs for fast feedback (linting, etc...)
-3. deploy-part1: small number of jobs to test if the PR works with default settings
-4. deploy-extended: slow jobs testing different platforms, OS, settings, CNI, etc...
-5. deploy-extended: very slow jobs (upgrades, etc...)
+See [.gitlab-ci.yml](/.gitlab-ci.yml) and the included files for an overview.
 
 ## Runners
 
-Kubespray has 3 types of GitLab runners:
+Kubespray has 2 types of GitLab runners, both deployed on the Kubespray CI cluster (hosted on Oracle Cloud Infrastructure):
 
-- packet runners: used for E2E jobs (usually long), running on Equinix Metal (ex-packet), on kubevirt managed VMs
-- light runners: used for short lived jobs, running on Equinix Metal (ex-packet), as managed pods
-- auto scaling runners (managed via docker-machine on Equinix Metal): used for on-demand resources, see [GitLab docs](https://docs.gitlab.com/runner/configuration/autoscale.html) for more info
+- pods: use the [gitlab-ci kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes/)
+- vagrant: custom executor running in pods with access to the libvirt socket on the nodes
 
 ## Vagrant
 
@@ -22,18 +17,17 @@ Vagrant jobs are using the [quay.io/kubespray/vagrant](/test-infra/vagrant-docke
 
 ## CI Variables
 
-In CI we have a set of overrides we use to ensure greater success of our CI jobs and avoid throttling by various APIs we depend on. See:
+In CI we have a [set of extra vars](/test/common_vars.yml) we use to ensure greater success of our CI jobs and avoid throttling by various APIs we depend on.
 
-- [Docker mirrors](/tests/common/_docker_hub_registry_mirror.yml)
-- [Test settings](/tests/common/_kubespray_test_settings.yml)
+## CI clusters
 
-## CI Environment
+DISCLAIMER: The following information is not fully up to date, in particular, the CI cluster is now on Oracle Cloud Infrastcture, not Equinix.
 
-The CI packet and light runners are deployed on a kubernetes cluster on Equinix Metal. The cluster is deployed with kubespray itself and maintained by the kubespray maintainers.
+The cluster is deployed with kubespray itself and maintained by the kubespray maintainers.
 
 The following files are used for that inventory:
 
-### cluster.tfvars
+### cluster.tfvars (OBSOLETE: this section is no longer accurate)
 
 ```ini
 # your Kubernetes cluster name here
@@ -162,22 +156,10 @@ kube_feature_gates:
   - "NodeSwap=True"
 ```
 
-## Aditional files
+## Additional files
 
 This section documents additional files used to complete a deployment of the kubespray CI, these files sit on the control-plane node and assume a working kubernetes cluster.
 
-### /root/nscleanup.sh
-
-```bash
-#!/bin/bash
-
-kubectl=/usr/local/bin/kubectl
-
-$kubectl get ns | grep -P "(\d.+-\d.+)" | awk 'match($3,/[0-9]+d/) {print $1}' | xargs -r $kubectl delete ns
-$kubectl get ns | grep -P "(\d.+-\d.+)" | awk 'match($3,/[3-9]+h/) {print $1}' | xargs -r $kubectl delete ns
-$kubectl get ns | grep Terminating | awk '{print $1}' | xargs -i $kubectl delete vmi/instance-1 vmi/instance-0 vmi/instance-2 -n {} --force --grace-period=0 &
-```
-
 ### /root/path-calico.sh
 
 ```bash
@@ -6,55 +6,52 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
 
 | OS / CNI | calico | cilium | custom_cni | flannel | kube-ovn | kube-router | macvlan |
 |---| --- | --- | --- | --- | --- | --- | --- |
-almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 almalinux9 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
 amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
 debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: |
+debian13 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
 fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+flatcar4081 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 openeuler24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
-opensuse15 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
-rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
-ubuntu20 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: |
 ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
+ubuntu24 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: |
 
 ## crio
 
 | OS / CNI | calico | cilium | custom_cni | flannel | kube-ovn | kube-router | macvlan |
 |---| --- | --- | --- | --- | --- | --- | --- |
-almalinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 almalinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+debian13 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
+flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-opensuse15 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 
 ## docker
 
 | OS / CNI | calico | cilium | custom_cni | flannel | kube-ovn | kube-router | macvlan |
 |---| --- | --- | --- | --- | --- | --- | --- |
-almalinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 almalinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
+debian13 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora39 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
+flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-opensuse15 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
+rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -7,7 +7,7 @@ The kube-scheduler binary includes a list of plugins:
 - [CapacityScheduling](https://github.com/kubernetes-sigs/scheduler-plugins/tree/master/pkg/capacityscheduling) [Beta]
 - [CoScheduling](https://github.com/kubernetes-sigs/scheduler-plugins/tree/master/pkg/coscheduling) [Beta]
 - [NodeResources](https://github.com/kubernetes-sigs/scheduler-plugins/tree/master/pkg/noderesources) [Beta]
-- [NodeResouceTopology](https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/pkg/noderesourcetopology/README.md) [Beta]
+- [NodeResourceTopology](https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/pkg/noderesourcetopology/README.md) [Beta]
 - [PreemptionToleration](https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/pkg/preemptiontoleration/README.md) [Alpha]
 - [Trimaran](https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/pkg/trimaran/README.md) [Alpha]
 - [NetworkAware](https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/pkg/networkaware/README.md) [Sample]
@@ -61,12 +61,12 @@ gcloud compute networks subnets create kubernetes \
 #### Firewall Rules
 
 Create a firewall rule that allows internal communication across all protocols.
-It is important to note that the vxlan protocol has to be allowed in order for
+It is important to note that the vxlan (udp) protocol has to be allowed in order for
 the calico (see later) networking plugin to work.
 
 ```ShellSession
 gcloud compute firewall-rules create kubernetes-the-kubespray-way-allow-internal \
-  --allow tcp,udp,icmp,vxlan \
+  --allow tcp,udp,icmp \
   --network kubernetes-the-kubespray-way \
   --source-ranges 10.240.0.0/24
 ```
@@ -88,7 +88,7 @@ cluster.
 
 ### Compute Instances
 
-The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 18.04.
+The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 24.04.
 Each compute instance will be provisioned with a fixed private IP address and
 a public IP address (that can be fixed - see [guide](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address)).
 Using fixed public IP addresses has the advantage that our cluster node
@@ -103,7 +103,7 @@ for i in 0 1 2; do
     --async \
     --boot-disk-size 200GB \
     --can-ip-forward \
-    --image-family ubuntu-1804-lts \
+    --image-family ubuntu-2404-lts-amd64 \
     --image-project ubuntu-os-cloud \
     --machine-type e2-standard-2 \
     --private-network-ip 10.240.0.1${i} \
@@ -124,7 +124,7 @@ for i in 0 1 2; do
     --async \
     --boot-disk-size 200GB \
     --can-ip-forward \
-    --image-family ubuntu-1804-lts \
+    --image-family ubuntu-2404-lts-amd64 \
    --image-project ubuntu-os-cloud \
    --machine-type e2-standard-2 \
    --private-network-ip 10.240.0.2${i} \
@@ -35,7 +35,7 @@ kubectl create clusterrolebinding cluster-admin-binding \
 The following **Mandatory Command** is required for all deployments except for AWS. See below for the AWS version.

 ```console
-kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.0/deploy/static/provider/cloud/deploy.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.13.3/deploy/static/provider/cloud/deploy.yaml
 ```

 ### Provider Specific Steps

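A minimal way to confirm the new controller version rolled out (namespace and deployment name are the upstream ingress-nginx defaults):

```ShellSession
# Wait for the controller Deployment created by deploy.yaml to become ready.
kubectl -n ingress-nginx rollout status deployment/ingress-nginx-controller
```
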
@@ -21,6 +21,12 @@ metallb_enabled: true
 metallb_speaker_enabled: true
 ```

+By default, MetalLB resources are deployed into the `metallb-system` namespace. You can override this namespace using a variable.
+
+```yaml
+metallb_namespace: woodenlb-system
+```
+
 By default only the MetalLB BGP speaker is allowed to run on control plane nodes. If you have a single node cluster or a cluster where control plane nodes are also worker nodes, you may need to enable tolerations for the MetalLB controller:

 ```yaml

@@ -35,7 +41,7 @@ metallb_config:
     effect: "NoSchedule"
 ```

-If you'd like to set additional nodeSelector and tolerations values, you can do so in the following fasion:
+If you'd like to set additional nodeSelector and tolerations values, you can do so in the following fashion:

 ```yaml
 metallb_config:

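A minimal check that the namespace override took effect (a sketch; the `app=metallb` label is the upstream MetalLB default and is an assumption here):

```ShellSession
# Pods should appear in the overridden namespace instead of metallb-system.
kubectl -n woodenlb-system get pods -l app=metallb
```
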
@@ -1,4 +1,4 @@
-# bootstrap-os
+# bootstrap_os

 Bootstrap an Ansible host to be able to run Ansible modules.

@@ -48,8 +48,8 @@ Remember to disable fact gathering since Python might not be present on hosts.
 - hosts: all
   gather_facts: false # not all hosts might be able to run modules yet
   roles:
-    - kubespray-defaults
-    - bootstrap-os
+    - kubespray_defaults
+    - bootstrap_os
 ```

 ## License

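To run the play above, something like the following works (a sketch; `bootstrap.yml` is a hypothetical file holding that play, and the inventory path is an example):

```ShellSession
# Python may be missing on the targets, so the play itself disables fact
# gathering; -b escalates privileges for package installation.
ansible-playbook -i inventory/sample/hosts.ini -b bootstrap.yml
```
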
@@ -37,4 +37,12 @@ If you have containers that are using iptables in the host network namespace (`h
 you need to ensure they are using iptables-nft.
 An example of how k8s does the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)

-The kernel version is lower than the kubenretes 1.32 system validation, please refer to the [kernel requirements](../operations/kernel-requirements.md).
+If the kernel version is lower than what the kubernetes 1.32 system validation expects, please refer to the [kernel requirements](../operations/kernel-requirements.md).
+
+## Rocky Linux 10
+
+(Experimental in Kubespray CI)
+
+The official Rocky Linux 10 cloud image does not include `kernel-module-extra`. Both Kube Proxy and CNI rely on this package, and since it relates to kernel version compatibility (which may require VM reboots, etc.), we haven't found an ideal solution.
+
+However, some users report that it doesn't affect them (minimal version). Therefore, the Kubespray CI Rocky Linux 10 image is built by Kubespray maintainers using `diskimage-builder`. For detailed methods, please refer to [the comments](https://github.com/kubernetes-sigs/kubespray/pull/12355#issuecomment-3705400093).

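A quick way to see which backend the `iptables` binary on a host uses (the version string ends in `(nf_tables)` or `(legacy)`):

```ShellSession
# Example output: iptables v1.8.9 (nf_tables)
iptables --version
```
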
@@ -30,11 +30,6 @@ kube_memory_reserved: 256Mi
 kube_cpu_reserved: 100m
 # kube_ephemeral_storage_reserved: 2Gi
 # kube_pid_reserved: "1000"
-# Reservation for master hosts
-kube_master_memory_reserved: 512Mi
-kube_master_cpu_reserved: 200m
-# kube_master_ephemeral_storage_reserved: 2Gi
-# kube_master_pid_reserved: "1000"

 # Set to true to reserve resources for system daemons
 system_reserved: true

@@ -44,11 +39,6 @@ system_memory_reserved: 512Mi
 system_cpu_reserved: 500m
 # system_ephemeral_storage_reserved: 2Gi
 # system_pid_reserved: "1000"
-# Reservation for master hosts
-system_master_memory_reserved: 256Mi
-system_master_cpu_reserved: 250m
-# system_master_ephemeral_storage_reserved: 2Gi
-# system_master_pid_reserved: "1000"
 ```

 After the setup, the cgroups hierarchy is as follows:

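To confirm the reservations took effect, compare a node's Capacity and Allocatable (a sketch; `<node-name>` is a placeholder):

```ShellSession
# Allocatable = Capacity minus kube/system reservations and eviction
# thresholds, so the reserved amounts show up as the difference.
kubectl describe node <node-name> | grep -A 7 -E "Capacity|Allocatable"
```
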
@@ -18,8 +18,6 @@ The **kubernetes** version should be at least `v1.23.6` to have all the most rec

 ## kube-apiserver
 authorization_modes: ['Node', 'RBAC']
-# AppArmor-based OS
-# kube_apiserver_feature_gates: ['AppArmor=true']
 kube_apiserver_request_timeout: 120s
 kube_apiserver_service_account_lookup: true

@@ -77,17 +75,17 @@ remove_anonymous_access: true
 ## kube-controller-manager
 kube_controller_manager_bind_address: 127.0.0.1
 kube_controller_terminated_pod_gc_threshold: 50
-# AppArmor-based OS
-# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
 kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]

 ## kube-scheduler
 kube_scheduler_bind_address: 127.0.0.1
-# AppArmor-based OS
-# kube_scheduler_feature_gates: ["AppArmor=true"]

 ## etcd
-etcd_deployment_type: kubeadm
+# Running etcd (on dedicated hosts) outside the Kubernetes cluster is the most secure deployment option,
+# as it isolates etcd from the cluster's CNI network and removes direct pod-level attack vectors.
+# This approach prevents RBAC misconfigurations that potentially compromise etcd,
+# creating an additional security boundary that protects the cluster's critical state store.
+etcd_deployment_type: host

 ## kubelet
 kubelet_authorization_mode_webhook: true

@@ -102,6 +100,8 @@ kubelet_make_iptables_util_chains: true
 kubelet_feature_gates: ["RotateKubeletServerCertificate=true"]
 kubelet_seccomp_default: true
 kubelet_systemd_hardening: true
+# To disable kubelet's staticPodPath (for nodes that don't use static pods like worker nodes)
+kubelet_static_pod_path: ""
 # In case you have multiple interfaces in your
 # control plane nodes and you want to specify the right
 # IP addresses, kubelet_secure_addresses allows you

@@ -126,9 +126,8 @@ Let's take a deep look to the resultant **kubernetes** configuration:
 * The `encryption-provider-config` provides encryption at rest. This means that the `kube-apiserver` encrypts data before it reaches `etcd`, so the data is completely unreadable from `etcd` (in case an attacker is able to exploit it).
 * The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This can be used as an alternative to the `tlsCertFile` and `tlsPrivateKeyFile` parameters, and additionally it generates certificates automatically. By default the CSRs are approved automatically via [kubelet-csr-approver](https://github.com/postfinance/kubelet-csr-approver). You can customize approval configuration by modifying Helm values via `kubelet_csr_approver_values`.
 See <https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/> for more information on the subject.
-* If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
 * The `kubelet_systemd_hardening`, together with `kubelet_secure_addresses`, sets up a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
 

 Once you have the file properly filled, you can run the **Ansible** command to start the installation:

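Since serving-certificate CSRs are approved automatically by kubelet-csr-approver, a quick post-install check is to list them (a sketch; run from any host with cluster-admin access):

```ShellSession
# Each kubelet files a kubernetes.io/kubelet-serving CSR; they should all
# show up as Approved,Issued.
kubectl get csr
```
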
@@ -11,7 +11,7 @@ kubeadm_ignore_preflight_errors:

 The Kernel Version Matrix:

-| OS Verion | Kernel Verion | Kernel >=4.19 |
+| OS Version | Kernel Version | Kernel >=4.19 |
 |--- | --- | --- |
 | RHEL 9 | 5.14 | :white_check_mark: |
 | RHEL 8 | 4.18 | :x: |

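To check a host against the matrix above:

```ShellSession
# Print the running kernel release, e.g. 5.14.0-362.el9.x86_64 on RHEL 9.
uname -r
```
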
@@ -2,58 +2,6 @@

 Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)

-## Limitation: Removal of first kube_control_plane and etcd-master
-
-Currently you can't remove the first node in your kube_control_plane and etcd-master list. If you still want to remove this node you have to:
-
-### 1) Change order of current control planes
-
-Modify the order of your control plane list by pushing your first entry to any other position. E.g. if you want to remove `node-1` of the following example:
-
-```yaml
-  children:
-    kube_control_plane:
-      hosts:
-        node-1:
-        node-2:
-        node-3:
-    kube_node:
-      hosts:
-        node-1:
-        node-2:
-        node-3:
-    etcd:
-      hosts:
-        node-1:
-        node-2:
-        node-3:
-```
-
-change your inventory to:
-
-```yaml
-  children:
-    kube_control_plane:
-      hosts:
-        node-2:
-        node-3:
-        node-1:
-    kube_node:
-      hosts:
-        node-2:
-        node-3:
-        node-1:
-    etcd:
-      hosts:
-        node-2:
-        node-3:
-        node-1:
-```
-
-## 2) Upgrade the cluster
-
-run `upgrade-cluster.yml` or `cluster.yml`. Now you are good to go on with the removal.
-
 ## Adding/replacing a worker node

 This should be the easiest.

@@ -83,6 +31,8 @@ That's it.

 Append the new host to the inventory and run `cluster.yml`. You can NOT use `scale.yml` for that.

+**Note:** When adding new control plane nodes, always append them to the end of the `kube_control_plane` group in your inventory. Adding control plane nodes in the first position is not supported and will cause the playbook to fail.
+
 ### 2) Restart kube-system/nginx-proxy

 On all hosts, restart the nginx-proxy pod. This pod is a local proxy for the apiserver. Kubespray will update its static config, but it needs to be restarted in order to reload.

@@ -100,40 +50,74 @@ crictl ps | grep nginx-proxy | awk '{print $1}' | xargs crictl stop
 With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=NODE_NAME` to the playbook to limit the execution to the node being removed.
 If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.

-## Replacing a first control plane node
+## Adding/Removal of first `kube_control_plane` and etcd-master

-### 1) Change control plane nodes order in inventory
+Currently you can't remove the first node in your `kube_control_plane` and etcd-master list. If you still want to remove this node you have to:

-from
+### 1) Change order of current control planes

-```ini
-[kube_control_plane]
-node-1
-node-2
-node-3
+Modify the order of your control plane list by pushing your first entry to any other position. E.g. if you want to remove `node-1` of the following example:
+
+```yaml
+all:
+  hosts:
+  children:
+    kube_control_plane:
+      hosts:
+        node-1:
+        node-2:
+        node-3:
+    kube_node:
+      hosts:
+        node-1:
+        node-2:
+        node-3:
+    etcd:
+      hosts:
+        node-1:
+        node-2:
+        node-3:
 ```

-to
+change your inventory to:

-```ini
-[kube_control_plane]
-node-2
-node-3
-node-1
+```yaml
+all:
+  hosts:
+  children:
+    kube_control_plane:
+      hosts:
+        node-2:
+        node-3:
+        node-1:
+    kube_node:
+      hosts:
+        node-2:
+        node-3:
+        node-1:
+    etcd:
+      hosts:
+        node-2:
+        node-3:
+        node-1:
 ```

-### 2) Remove old first control plane node from cluster
+### 2) Upgrade the cluster
+
+run `upgrade-cluster.yml` or `cluster.yml`. Now you are good to go on with the removal.
+
+### 3) Remove old first control plane node from cluster

 With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=node-1` to the playbook to limit the execution to the node being removed.
 If the node you want to remove is not online, you should add `reset_nodes=false` and `allow_ungraceful_removal=true` to your extra-vars.

-### 3) Edit cluster-info configmap in kube-public namespace
+### 4) Edit cluster-info configmap in kube-public namespace

 `kubectl edit cm -n kube-public cluster-info`

 Replace the IP of the old kube_control_plane node with the IP of a live kube_control_plane node (`server` field). Also, update the `certificate-authority-data` field if you changed certs.

-### 4) Add new control plane node
+### 5) Add new control plane node

 Update inventory (if needed)

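Putting the removal step together as a command line (a sketch; the inventory path is an example, the playbook name and flags come from the text above):

```ShellSession
# Remove node-1 once it is no longer first in kube_control_plane/etcd.
ansible-playbook -i inventory/mycluster/hosts.yaml -b remove-node.yml -e node=node-1
```
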
@@ -75,17 +75,17 @@ quay_image_repo: "{{ registry_host }}"
 github_image_repo: "{{ registry_host }}"

 local_path_provisioner_helper_image_repo: "{{ registry_host }}/busybox"
-kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
-kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
-kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
+kubeadm_download_url: "{{ files_repo }}/kubernetes/v{{ kube_version }}/kubeadm"
+kubectl_download_url: "{{ files_repo }}/kubernetes/v{{ kube_version }}/kubectl"
+kubelet_download_url: "{{ files_repo }}/kubernetes/v{{ kube_version }}/kubelet"
 # etcd is optional if you **DON'T** use etcd_deployment=host
-etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
-cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
-crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
+etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-v{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
+cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-v{{ cni_version }}.tgz"
+crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-v{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
 # If using Calico
-calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
+calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/v{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
 # If using Calico with kdd
-calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"
+calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/v{{ calico_version }}.tar.gz"
 # Containerd
 containerd_download_url: "{{ files_repo }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
 runc_download_url: "{{ files_repo }}/runc.{{ image_arch }}"

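A quick way to verify the mirror serves the re-versioned paths (a sketch; `https://files.example.com` stands in for `files_repo` and `1.32.2` for `kube_version`):

```ShellSession
# -f fails on HTTP errors, -I fetches headers only; note the new leading
# "v" in the path segment.
curl -fI https://files.example.com/kubernetes/v1.32.2/kubeadm
```
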
@@ -136,7 +136,7 @@ If you use the settings like the one above, you'll need to define in your invent

 * `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images as
 the ones defined
-in [kubesprays-defaults's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/kubespray-defaults/defaults/main/download.yml)
+in [kubespray-defaults's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/kubespray_defaults/defaults/main/download.yml)
 , you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the
 same repository path and you won't have to override anything else.
 * `registry_addr`: Container image registry, but only the [domain or ip]:[port] part.

@@ -13,9 +13,7 @@ versions. Here are all version vars for each component:
 * etcd_version
 * calico_version
 * calico_cni_version
-* weave_version
 * flannel_version
-* kubedns_version

 > **Warning**
 > [Attempting to upgrade from an older release straight to the latest release is unsupported and likely to break something](https://github.com/kubernetes-sigs/kubespray/issues/3849#issuecomment-451386515)

@@ -84,7 +82,7 @@ If you don't want to upgrade all nodes in one run, you can use `--limit` [patter
 Before using `--limit` run playbook `facts.yml` without the limit to refresh facts cache for all nodes:

 ```ShellSession
-ansible-playbook facts.yml -b -i inventory/sample/hosts.ini
+ansible-playbook playbooks/facts.yml -b -i inventory/sample/hosts.ini
 ```

 After this upgrade control plane and etcd groups [#5147](https://github.com/kubernetes-sigs/kubespray/issues/5147):

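After the facts refresh, a limited upgrade run might look like this (a sketch; the limit pattern and inventory path are examples):

```ShellSession
# Upgrade only the control plane and etcd groups first.
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini --limit "kube_control_plane:etcd"
```
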
@@ -357,7 +355,7 @@ follows:
 * Containerd
 * etcd
 * kubelet and kube-proxy
-* network_plugin (such as Calico or Weave)
+* network_plugin (such as Calico)
 * kube-apiserver, kube-scheduler, and kube-controller-manager
 * Add-ons (such as KubeDNS)

@@ -12,7 +12,7 @@
   hosts: kube_control_plane[0]
   tasks:
     - name: Include kubespray-default variables
-      include_vars: ../roles/kubespray-defaults/defaults/main/main.yml
+      include_vars: ../roles/kubespray_defaults/defaults/main/main.yml
     - name: Copy get_cinder_pvs.sh to first control plane node
       copy:
         src: get_cinder_pvs.sh

@@ -14,7 +14,7 @@
   hosts: localhost
   gather_facts: false
   roles:
-    - { role: kubespray-defaults}
+    - { role: kubespray_defaults}
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

 - name: Bootstrap hosts OS for Ansible

@@ -22,18 +22,18 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
   vars:
-    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
-    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
+    # Need to disable pipelining for bootstrap_os as some systems have requiretty in sudoers set, which makes pipelining
+    # fail. bootstrap_os fixes this on these systems, so in later plays it can be enabled.
     ansible_ssh_pipelining: false
   roles:
-    - { role: kubespray-defaults}
-    - { role: bootstrap-os, tags: bootstrap-os}
+    - { role: kubespray_defaults}
+    - { role: bootstrap_os, tags: bootstrap_os}

 - name: Preinstall
   hosts: k8s_cluster:etcd:calico_rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kubespray-defaults}
+    - { role: kubespray_defaults}
     - { role: kubernetes/preinstall, tags: preinstall }

 - name: Handle upgrades to control plane components first to maintain backwards compat.

@@ -41,7 +41,7 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:
-    - { role: kubespray-defaults}
+    - { role: kubespray_defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
    - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }

@@ -54,8 +54,8 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:
-    - { role: kubespray-defaults}
+    - { role: kubespray_defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-    - { role: kubespray-defaults}
+    - { role: kubespray_defaults}

@@ -2,7 +2,7 @@
 namespace: kubernetes_sigs
 description: Deploy a production ready Kubernetes cluster
 name: kubespray
-version: 2.28.0
+version: 2.31.0
 readme: README.md
 authors:
   - The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)

@@ -38,6 +38,7 @@
       loadSidebar: 'docs/_sidebar.md',
       repo: 'https://github.com/kubernetes-sigs/kubespray',
       auto2top: true,
+      noCompileLinks: ['.*\.ini'],
       logo: '/logo/logo-clear.png'
     }
 </script>

@@ -57,7 +57,7 @@ loadbalancer_apiserver_healthcheck_port: 8081
 # https_proxy: ""
 # https_proxy_cert_file: ""

-## Refer to roles/kubespray-defaults/defaults/main/main.yml before modifying no_proxy
+## Refer to roles/kubespray_defaults/defaults/main/main.yml before modifying no_proxy
 # no_proxy: ""

 ## Some problems may occur when downloading files over https proxy due to ansible bug

@@ -115,6 +115,9 @@ no_proxy_exclude_workers: false
 # sysctl_file_path to add sysctl conf to
 # sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

+# ignore sysctl errors about unknown keys
+# sysctl_ignore_unknown_keys: false
+
 ## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
 kube_webhook_token_auth: false
 kube_webhook_token_auth_url_skip_tls_verify: false

@@ -11,15 +11,15 @@
 # containerd_runc_runtime:
 #   name: runc
 #   type: "io.containerd.runc.v2"
-#   engine: ""
-#   root: ""
+#   options:
+#     Root: ""

 # containerd_additional_runtimes:
 # Example for Kata Containers as additional runtime:
 #   - name: kata
 #     type: "io.containerd.kata.v2"
-#     engine: ""
-#     root: ""
+#     options:
+#       Root: ""

 # containerd_grpc_max_recv_message_size: 16777216
 # containerd_grpc_max_send_message_size: 16777216

@@ -50,6 +50,8 @@
 #   - host: https://registry-1.docker.io
 #     capabilities: ["pull", "resolve"]
 #     skip_verify: false
+#     header:
+#       Authorization: "Basic XXX"

 # containerd_max_container_log_line_size: 16384

@@ -43,7 +43,6 @@
 #   ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
 ## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
 # oci_use_instance_principals: false
-# oci_cloud_controller_version: 0.6.0
 ## If you would like to control OCI query rate limits for the controller
 # oci_rate_limit:
 #   rate_limit_qps_read:

@@ -18,9 +18,9 @@
 # quay_image_repo: "{{ registry_host }}"

 ## Kubernetes components
-# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
-# kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
-# kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
+# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
+# kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
+# kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"

 ## Two options - Override entire repository or override only a single binary.

@@ -33,24 +33,24 @@

 ## [Optional] 2 - Override a specific binary
 ## CNI Plugins
-# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
+# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/v{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-v{{ cni_version }}.tgz"

 ## cri-tools
-# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
+# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/crictl-v{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"

 ## [Optional] etcd: only if you use etcd_deployment=host
-# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
+# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/v{{ etcd_version }}/etcd-v{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"

 # [Optional] Calico: If using Calico network plugin
-# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
+# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/v{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
 # [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
-# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
+# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/v{{ calico_version }}.tar.gz"

 # [Optional] Cilium: If using Cilium network plugin
-# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
+# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/v{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"

 # [Optional] helm: only if you set helm_enabled: true
-# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
+# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-v{{ helm_version }}-linux-{{ image_arch }}.tar.gz"

 # [Optional] crun: only if you set crun_enabled: true
 # crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"

@@ -62,13 +62,13 @@
 # cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"

 # [Optional] runc: if you set container_manager to containerd or crio
-# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
+# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.{{ image_arch }}"

 # [Optional] cri-o: only if you set container_manager: crio
 # crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
 # crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
-# crio_download_url: "{{ files_repo }}/storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz"
-# skopeo_download_url: "{{ files_repo }}/github.com/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"
+# crio_download_url: "{{ files_repo }}/storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.v{{ crio_version }}.tar.gz"
+# skopeo_download_url: "{{ files_repo }}/github.com/lework/skopeo-binary/releases/download/v{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"

 # [Optional] containerd: only if you set container_runtime: containerd
 # containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"

@@ -1,5 +1,4 @@
 ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
-# openstack_blockstorage_version: "v1/v2/auto (default)"
 # openstack_blockstorage_ignore_volume_az: yes
 ## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
 # openstack_lbaas_enabled: True

@@ -7,26 +7,6 @@
 # external_vsphere_datacenter: "DATACENTER_name"
 # external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"

-## Vsphere version where located VMs
-# external_vsphere_version: "6.7u3"
-
-## Tags for the external vSphere Cloud Provider images
-## registry.k8s.io/cloud-pv-vsphere/cloud-provider-vsphere
-# external_vsphere_cloud_controller_image_tag: "v1.31.0"
-## registry.k8s.io/csi-vsphere/syncer
-# vsphere_syncer_image_tag: "v3.3.1"
-## registry.k8s.io/sig-storage/csi-attacher
-# vsphere_csi_attacher_image_tag: "v3.4.0"
-## registry.k8s.io/csi-vsphere/driver
-# vsphere_csi_controller: "v3.3.1"
-## registry.k8s.io/sig-storage/livenessprobe
-# vsphere_csi_liveness_probe_image_tag: "v2.6.0"
-## registry.k8s.io/sig-storage/csi-provisioner
-# vsphere_csi_provisioner_image_tag: "v3.1.0"
-## registry.k8s.io/sig-storage/csi-resizer
-## makes sense only for vSphere version >=7.0
-# vsphere_csi_resizer_tag: "v1.3.0"

 ## To use vSphere CSI plugin to provision volumes set this value to true
 # vsphere_csi_enabled: true
 # vsphere_csi_controller_replicas: 1

@@ -1,38 +0,0 @@
----
-## Etcd auto compaction retention for mvcc key value store in hour
-# etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-# etcd_metrics: basic
-
-## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
-## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
-## This value is only relevant when deploying etcd with `etcd_deployment_type: docker`
-# etcd_memory_limit: "512M"
-
-## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
-## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
-## etcd documentation for more information.
-# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it.
-# etcd_quota_backend_bytes: "2147483648"
-
-# Maximum client request size in bytes the server will accept.
-# etcd is designed to handle small key value pairs typical for metadata.
-# Larger requests will work, but may increase the latency of other requests
-# etcd_max_request_bytes: "1572864"
-
-### ETCD: disable peer client cert authentication.
-# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
-# etcd_peer_client_auth: true
-
-## Enable distributed tracing
-## To enable this experimental feature, set the etcd_experimental_enable_distributed_tracing: true, along with the
-## etcd_experimental_distributed_tracing_sample_rate to choose how many samples to collect per million spans,
-## the default sampling rate is 0 https://etcd.io/docs/v3.5/op-guide/monitoring/#distributed-tracing
-# etcd_experimental_enable_distributed_tracing: false
-# etcd_experimental_distributed_tracing_sample_rate: 100
-# etcd_experimental_distributed_tracing_address: "localhost:4317"
-# etcd_experimental_distributed_tracing_service_name: etcd
-
-## The interval for etcd watch progress notify events
-# etcd_experimental_watch_progress_notify_interval: 5s

@@ -29,7 +29,6 @@ local_path_provisioner_enabled: false
 # local_path_provisioner_claim_root: /opt/local-path-provisioner/
 # local_path_provisioner_debug: false
 # local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner"
-# local_path_provisioner_image_tag: "v0.0.24"
 # local_path_provisioner_helper_image_repo: "busybox"
 # local_path_provisioner_helper_image_tag: "latest"

@@ -67,7 +66,6 @@ local_volume_provisioner_enabled: false

 # Gateway API CRDs
 gateway_api_enabled: false
-# gateway_api_experimental_channel: false

 # Nginx ingress controller deployment
 ingress_nginx_enabled: false

@@ -149,7 +147,6 @@ cert_manager_enabled: false
 metallb_enabled: false
 metallb_speaker_enabled: "{{ metallb_enabled }}"
 metallb_namespace: "metallb-system"
-# metallb_version: 0.13.9
 # metallb_protocol: "layer2"
 # metallb_port: "7472"
 # metallb_memberlist_port: "7946"

@@ -211,7 +208,6 @@ metallb_namespace: "metallb-system"
 #   - pool2

 argocd_enabled: false
-# argocd_version: 2.14.5
 # argocd_namespace: argocd
 # Default password:
 #   - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli

@@ -239,6 +235,7 @@ kube_vip_enabled: false
 # kube_vip_cp_detect: false
 # kube_vip_leasename: plndr-cp-lock
 # kube_vip_enable_node_labeling: false
+# kube_vip_lb_fwdmethod: local

 # Node Feature Discovery
 node_feature_discovery_enabled: false

@@ -16,16 +16,14 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"

 kube_api_anonymous_auth: true

-## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: 1.32.2
-
 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)
 local_release_dir: "/tmp/releases"
 # Random shifts for retrying failed ops like pushing/downloading
 retry_stagger: 5

-# This is the user that owns tha cluster installation.
+# This is the user that owns the cluster installation.
+# Note: cilium needs to set kube_owner to root https://kubespray.io/#/docs/CNI/cilium?id=unprivileged-agent-configuration
 kube_owner: kube

 # This is the group that the cert creation scripts chgrp the

@@ -65,7 +63,7 @@ credentials_dir: "{{ inventory_dir }}/credentials"
 # kube_webhook_authorization_url: https://...
 # kube_webhook_authorization_url_skip_tls_verify: false

-# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin)
+# Choose network plugin (cilium, calico, kube-ovn or flannel. Use cni for generic cni plugin)
 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: calico

@@ -349,7 +347,7 @@ event_ttl_duration: "1h0m0s"
 ## Automatically renew K8S control plane certificates on first Monday of each month
 auto_renew_certificates: false
 # First Monday of each month
-# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
+# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"

 kubeadm_patches_dir: "{{ kube_config_dir }}/patches"
 kubeadm_patches: []

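Whether renewal is automated or done by hand, certificate lifetimes can be inspected on a control plane node:

```ShellSession
# Lists each control plane certificate with its expiry date and residual time.
kubeadm certs check-expiration
```
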
@@ -25,15 +25,9 @@ calico_pool_blocksize: 26
 # add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
 # calico_pool_cidr: 1.2.3.4/5

-# add default ippool CIDR to CNI config
-# calico_cni_pool: true
-
 # Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
 # calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112

-# Add default IPV6 IPPool CIDR to CNI config
-# calico_cni_pool_ipv6: true
-
 # Global as_num (/calico/bgp/v1/global/as_num)
 # global_as_num: "64512"

@@ -1,6 +1,4 @@
 ---
-# cilium_version: "1.15.9"
-
 # Log-level
 # cilium_debug: false

@@ -58,8 +56,8 @@ cilium_l2announcements: false
 #
 # Only effective when monitor aggregation is set to "medium" or higher.
 # cilium_monitor_aggregation_flags: "all"
-# Kube Proxy Replacement mode (strict/partial)
-# cilium_kube_proxy_replacement: partial
+# Kube Proxy Replacement mode (true/false)
+# cilium_kube_proxy_replacement: false

 # If upgrading from Cilium < 1.5, you may want to override some of these options
 # to prevent service disruptions. See also:

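With the cilium CLI installed, you can confirm whether kube-proxy replacement is actually active (a sketch; the exact output format varies across cilium versions):

```ShellSession
# Look for the KubeProxyReplacement line in the status summary.
cilium status | grep -i kubeproxyreplacement
```
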
@@ -177,6 +175,10 @@ cilium_l2announcements: false
 ### Buffer size of the channel to receive monitor events.
 # cilium_hubble_event_queue_size: 50

+# Override the DNS suffix that Hubble-Relay uses to resolve its peer service.
+# It defaults to the inventory's `dns_domain`.
+# cilium_hubble_peer_service_cluster_domain: "{{ dns_domain }}"
+
 # IP address management mode for v1.9+.
 # https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
 # cilium_ipam_mode: kubernetes

@@ -255,6 +257,10 @@ cilium_l2announcements: false
 #   - name: "blue-pool"
 #     cidrs:
 #       - "10.0.10.0/24"
+#     ranges:
+#       - start: "20.0.20.100"
+#         stop: "20.0.20.200"
+#       - start: "1.2.3.4"

 # -- Configure BGP Instances (New bgpv2 API v1.16+)
 # cilium_bgp_cluster_configs:

@@ -378,3 +384,7 @@ cilium_l2announcements: false
 #       resourceNames:
 #         - toto
 # cilium_clusterrole_rules_operator_extra_vars: []
+
+# Cilium extra values, use any values from cilium Helm Chart
+# ref: https://docs.cilium.io/en/stable/helm-reference/
+# cilium_extra_values: {}

@@ -45,7 +45,7 @@
 # custom_cni_chart_repository_name: cilium
 # custom_cni_chart_repository_url: https://helm.cilium.io
 # custom_cni_chart_ref: cilium/cilium
-# custom_cni_chart_version: 1.14.3
+# custom_cni_chart_version: <chart version> (e.g.: 1.14.3)
 # custom_cni_chart_values:
 #   cluster:
 #     name: "cilium-demo"

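To pick a concrete value for `<chart version>`, the repository configured above can be queried with helm (a sketch; the repo URL comes from the settings above):

```ShellSession
# Add the repo from custom_cni_chart_repository_url and list chart versions.
helm repo add cilium https://helm.cilium.io
helm search repo cilium/cilium --versions | head
```
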
Some files were not shown because too many files have changed in this diff.