Mirror of https://github.com/kubernetes-sigs/kubespray.git, synced 2026-02-01 09:38:12 -03:30

Compare commits: 683 commits, release-2. ... release-2.
[683 commit rows omitted: only the SHA1 column survived extraction (first b75ee0b111, last 7db76f8809); the Author, Date, and message columns were lost.]
.ansible-lint

@@ -18,3 +18,13 @@ skip_list:
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'

  # [role-name] "meta/main.yml" Role name role-name does not match ``^[a-z][a-z0-9_]+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
  - 'role-name'

  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'
9 .gitignore vendored

@@ -99,3 +99,12 @@ target/
# virtualenv
venv/
ENV/

# molecule
roles/**/molecule/**/__pycache__/

# macOS
.DS_Store

# Temp location used by our scripts
scripts/tmp/
.gitlab-ci.yml

@@ -8,7 +8,7 @@ stages:
  - deploy-special

variables:
  KUBESPRAY_VERSION: v2.15.1
  KUBESPRAY_VERSION: v2.18.1
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"
@@ -16,6 +16,7 @@ variables:
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CONTAINER_ENGINE: docker
@@ -26,18 +27,20 @@ variables:
  ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
  TERRAFORM_14_VERSION: 0.14.10
  TERRAFORM_13_VERSION: 0.13.6
  TERRAFORM_VERSION: 1.0.8
  ANSIBLE_MAJOR_VERSION: "2.10"

before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - python -m pip install -r tests/requirements.txt
  - python -m pip uninstall -y ansible ansible-base ansible-core
  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
  - mkdir -p /.ssh

.job: &job
@@ -51,6 +54,7 @@ before_script:

.testcases: &testcases
  <<: *job
  retry: 1
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh
@@ -77,3 +81,4 @@ include:
  - .gitlab-ci/terraform.yml
  - .gitlab-ci/packet.yml
  - .gitlab-ci/vagrant.yml
  - .gitlab-ci/molecule.yml
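The new `before_script` lines above first uninstall any preinstalled Ansible and then install from a per-version pin file selected by `ANSIBLE_MAJOR_VERSION`. A minimal sketch of reproducing that selection locally, assuming the `tests/requirements-<version>.txt` files exist exactly as referenced above:

```ShellSession
# Pick the Ansible line the same way the CI does (the 2.10 default comes
# from the ANSIBLE_MAJOR_VERSION variable above)
export ANSIBLE_MAJOR_VERSION=2.10
python -m pip uninstall -y ansible ansible-base ansible-core
python -m pip install -r "tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt"
```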
@@ -14,7 +14,7 @@ vagrant-validate:
  stage: unit-tests
  tags: [light]
  variables:
    VAGRANT_VERSION: 2.2.15
    VAGRANT_VERSION: 2.2.19
  script:
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']
@@ -23,9 +23,8 @@ ansible-lint:
  extends: .job
  stage: unit-tests
  tags: [light]
  # lint every yml/yaml file that looks like it contains Ansible plays
  script: |-
    grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
  script:
    - ansible-lint -v
  except: ['triggers', 'master']

syntax-check:
@@ -53,6 +52,7 @@ tox-inventory-builder:
    - ./tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
  script:
    - pip3 install tox
93 .gitlab-ci/molecule.yml Normal file

@@ -0,0 +1,93 @@
---

.molecule:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
  after_script:
    - chronic ./tests/scripts/molecule_logs.sh
  artifacts:
    when: always
    paths:
      - molecule_logs/

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
  extends: .molecule

molecule_full:
  extends: .molecule_periodic

molecule_no_container_engines:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -e container-engine
  when: on_success

molecule_docker:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/docker
  when: on_success

molecule_containerd:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
  when: on_success

molecule_cri-o:
  extends: .molecule
  stage: deploy-part2
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
  when: on_success

molecule_cri-dockerd:
  extends: .molecule
  stage: deploy-part2
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
  when: on_success

# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
  when: on_success

molecule_gvisor:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
  when: on_success

molecule_youki:
  extends: .molecule
  stage: deploy-part3
  allow_failure: true
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/youki
  when: on_success
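Judging only from the job definitions above, `molecule_run.sh` appears to accept `-i` to include a single role path and `-e` to exclude one; that flag semantics is an inference from the job names and arguments, not something this diff confirms. A local sketch under that assumption:

```ShellSession
# Run only the containerd scenario (flag meaning inferred from the jobs above)
./tests/scripts/molecule_run.sh -i container-engine/containerd

# Run everything except the container-engine scenarios
./tests/scripts/molecule_run.sh -e container-engine
```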
.gitlab-ci/packet.yml

@@ -2,6 +2,7 @@
.packet:
  extends: .testcases
  variables:
    ANSIBLE_TIMEOUT: "120"
    CI_PLATFORM: packet
    SSH_USER: kubespray
  tags:
@@ -22,27 +23,62 @@
  allow_failure: true
  extends: .packet

packet_ubuntu18-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success

# Future AIO job
# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success
  variables:
    RESET_CHECK: "true"

# Exercise ansible variants during the nightly jobs
packet_ubuntu20-calico-aio-ansible-2_9:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.9"
    RESET_CHECK: "true"

packet_ubuntu20-calico-aio-ansible-2_10:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.10"
    RESET_CHECK: "true"

packet_ubuntu20-calico-aio-ansible-2_11:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.11"
    RESET_CHECK: "true"

# ### PR JOBS PART2

packet_centos7-flannel-containerd-addons-ha:
packet_ubuntu18-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu20-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu18-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_centos7-flannel-addons-ha:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_centos8-crio:
packet_almalinux8-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success
@@ -51,10 +87,13 @@ packet_ubuntu18-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual
  variables:
    MITOGEN_ENABLE: "true"

packet_ubuntu16-canal-kubeadm-ha:
packet_fedora35-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

packet_ubuntu16-canal-ha:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
@@ -69,27 +108,30 @@ packet_ubuntu16-flannel-ha:
  extends: .packet_pr
  when: manual

packet_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian10-cilium-svc-proxy:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_debian10-containerd:
packet_debian10-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian10-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian11-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2
@@ -101,17 +143,22 @@ packet_centos7-calico-ha-once-localhost:
  services:
    - docker:19.03.9-dind

packet_centos8-kube-ovn:
packet_almalinux8-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_centos8-calico:
packet_almalinux8-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_fedora32-weave:
packet_almalinux8-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_fedora34-docker-weave:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
@@ -121,14 +168,14 @@ packet_opensuse-canal:
  extends: .packet_periodic
  when: on_success

packet_ubuntu18-ovn4nfv:
packet_opensuse-docker-cilium:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
  extends: .packet_pr
  when: manual

# ### MANUAL JOBS

packet_ubuntu16-weave-sep:
packet_ubuntu16-docker-weave-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -138,12 +185,18 @@ packet_ubuntu18-cilium-sep:
  extends: .packet_pr
  when: manual

packet_ubuntu18-flannel-containerd-ha:
packet_ubuntu18-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_ubuntu18-flannel-containerd-ha-once:
packet_ubuntu18-flannel-ha-once:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -158,11 +211,6 @@ packet_centos7-calico-ha:
  extends: .packet_pr
  when: manual

packet_centos7-kube-router:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_centos7-multus-calico:
  stage: deploy-part2
  extends: .packet_pr
@@ -173,19 +221,34 @@ packet_oracle7-canal-ha:
  extends: .packet_pr
  when: manual

packet_fedora33-calico:
packet_fedora35-docker-calico:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"
    RESET_CHECK: "true"

packet_fedora34-calico-selinux:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_fedora35-calico-swap-selinux:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_amazon-linux-2-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora32-kube-ovn-containerd:
packet_almalinux8-calico-nodelocaldns-secondary:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora34-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
@@ -193,29 +256,47 @@ packet_fedora32-kube-ovn-containerd:
# ### PR JOBS PART3
# Long jobs (45min+)

packet_centos7-weave-upgrade-ha:
packet_centos7-docker-weave-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic
    MITOGEN_ENABLE: "false"

packet_debian9-calico-upgrade:
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic

# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian10-calico-upgrade:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_debian9-calico-upgrade-once:
packet_almalinux8-calico-remove-node:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    REMOVE_NODE_CHECK: "true"
    REMOVE_NODE_NAME: "instance-3"

packet_debian10-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3
.gitlab-ci/terraform.yml

@@ -12,13 +12,13 @@
  # Prepare inventory
  - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
  - ln -s contrib/terraform/$PROVIDER/hosts
  - terraform init contrib/terraform/$PROVIDER
  - terraform -chdir="contrib/terraform/$PROVIDER" init
  # Copy SSH keypair
  - mkdir -p ~/.ssh
  - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
  - chmod 400 ~/.ssh/id_rsa
  - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
  - mkdir -p group_vars
  - mkdir -p contrib/terraform/$PROVIDER/group_vars
  # Random subnet to avoid routing conflicts
  - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"

@@ -28,8 +28,8 @@
  tags: [light]
  only: ['master', /^pr-.*$/]
  script:
    - terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER
    - terraform -chdir="contrib/terraform/$PROVIDER" validate
    - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
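The move to `-chdir` above tracks the modern Terraform CLI: passing the configuration directory as a positional argument was deprecated around the 0.14/0.15 releases and is gone in 1.x (a from-memory claim, worth verifying against the Terraform changelog), so with `TF_VERSION` now pointing at `TERRAFORM_VERSION` (1.0.8) the flag form is required. A before/after sketch, using the aws provider directory as an example:

```ShellSession
# Old form (Terraform <= 0.13): directory as a positional argument
terraform init contrib/terraform/aws
terraform validate -var-file=cluster.tfvars contrib/terraform/aws

# New form (Terraform 1.x): -chdir switches the working directory first
terraform -chdir=contrib/terraform/aws init
terraform -chdir=contrib/terraform/aws validate
```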

.terraform_apply:
  extends: .terraform_install
@@ -53,92 +53,51 @@
  # Cleanup regardless of exit code
  - chronic ./tests/scripts/testcases_cleanup.sh

tf-0.13.x-validate-openstack:
tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_13_VERSION
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.13.x-validate-packet:
tf-validate-metal:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_13_VERSION
    PROVIDER: packet
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: metal
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.13.x-validate-aws:
tf-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_13_VERSION
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.13.x-validate-exoscale:
tf-validate-exoscale:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_13_VERSION
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

tf-0.13.x-validate-vsphere:
tf-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_13_VERSION
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: vsphere
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.13.x-validate-upcloud:
tf-validate-upcloud:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_13_VERSION
    PROVIDER: upcloud
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_14_VERSION
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_14_VERSION
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_14_VERSION
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-exoscale:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_14_VERSION
    PROVIDER: exoscale

tf-0.14.x-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_14_VERSION
    PROVIDER: vsphere
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-upcloud:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_14_VERSION
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: upcloud
    CLUSTER: $CI_COMMIT_REF_NAME

# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_14_VERSION
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
@@ -152,7 +111,7 @@ tf-0.14.x-validate-upcloud:
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_14_VERSION
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
@@ -187,10 +146,6 @@ tf-0.14.x-validate-upcloud:
    OS_INTERFACE: public
    OS_IDENTITY_API_VERSION: "3"
    TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
    # Since ELASTX is in Stockholm, Mitogen helps with latency
    MITOGEN_ENABLE: "false"
    # Mitogen doesn't support interpreter discovery yet
    ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"

tf-elastx_cleanup:
  stage: unit-tests
@@ -207,9 +162,10 @@ tf-elastx_ubuntu18-calico:
  extends: .terraform_apply
  stage: deploy-part3
  when: on_success
  allow_failure: true
  variables:
    <<: *elastx_variables
    TF_VERSION: $TERRAFORM_14_VERSION
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
@@ -235,44 +191,45 @@ tf-elastx_ubuntu18-calico:
    TF_VAR_image: ubuntu-18.04-server-latest
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out

tf-ovh_cleanup:
  stage: unit-tests
  tags: [light]
  image: python
  environment: ovh
  variables:
    <<: *ovh_variables
  before_script:
    - pip install -r scripts/openstack-cleanup/requirements.txt
  script:
    - ./scripts/openstack-cleanup/main.py
# tf-ovh_cleanup:
#   stage: unit-tests
#   tags: [light]
#   image: python
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#   before_script:
#     - pip install -r scripts/openstack-cleanup/requirements.txt
#   script:
#     - ./scripts/openstack-cleanup/main.py

tf-ovh_ubuntu18-calico:
  extends: .terraform_apply
  when: on_success
  environment: ovh
  variables:
    <<: *ovh_variables
    TF_VERSION: $TERRAFORM_14_VERSION
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_image: "Ubuntu 18.04"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
# tf-ovh_ubuntu18-calico:
#   extends: .terraform_apply
#   when: on_success
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
#     SSH_USER: ubuntu
#     TF_VAR_number_of_k8s_masters: "0"
#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
#     TF_VAR_number_of_etcd: "0"
#     TF_VAR_number_of_k8s_nodes: "0"
#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
#     TF_VAR_number_of_bastions: "0"
#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
#     TF_VAR_use_neutron: "0"
#     TF_VAR_floatingip_pool: "Ext-Net"
#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_image: "Ubuntu 18.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
.gitlab-ci/vagrant.yml

@@ -1,21 +1,5 @@
---

molecule_tests:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh

.vagrant:
  extends: .testcases
  variables:
@@ -31,12 +15,14 @@ molecule_tests:
  before_script:
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/testcases_run.sh
  after_script:
    - chronic ./tests/scripts/testcases_cleanup.sh
  allow_failure: true

vagrant_ubuntu18-calico-dual-stack:
  stage: deploy-part2
@@ -57,3 +43,24 @@ vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .vagrant
  when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_fedora35-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_centos7-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: manual
CONTRIBUTING.md

@@ -6,11 +6,17 @@

It is recommended to use a filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

To install development dependencies you can use `pip install -r tests/requirements.txt`
To install development dependencies you can set up a python virtual env with the necessary dependencies:

```ShellSession
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
```

#### Linting

Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to call these tools as part of your pre-commit hook to avoid a lot of back and forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
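A minimal sketch of the pre-commit hook suggested above; the hook path is standard git, but the exact commands are just one possible choice:

```ShellSession
# Install a hook that runs the same linters as CI before every commit
cat > .git/hooks/pre-commit <<'EOF'
#!/bin/sh
# Abort the commit if either linter reports findings
set -e
yamllint .
ansible-lint
EOF
chmod +x .git/hooks/pre-commit
```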

#### Molecule

@@ -29,3 +35,5 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
3. Fork the desired repo, develop and test your code changes.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
6. Work with the reviewers on their suggestions.
7. Ensure to rebase to the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before the final merge of your contribution.
29 Dockerfile

@@ -1,30 +1,37 @@
# Use immutable image tags rather than mutable tags (like ubuntu:18.04)
FROM ubuntu:bionic-20200807
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220316

ARG ARCH=amd64
ARG TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt update -y \
    && apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
    ca-certificates curl gnupg2 software-properties-common python3-pip rsync \
    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
    && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
    && rm -rf /var/lib/apt/lists/*

# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8

WORKDIR /kubespray
COPY . .
RUN /usr/bin/python3 -m pip install pip -U \
    && /usr/bin/python3 -m pip install -r tests/requirements.txt \
    && python3 -m pip install -r requirements.txt \
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
    && python3 -m pip install --no-cache-dir -r requirements.txt \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1

RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
    && chmod a+x kubectl \
    && mv kubectl /usr/local/bin/kubectl

# Some tools like yamllint need this
ENV LANG=C.UTF-8
4 Makefile

@@ -1,5 +1,7 @@
mitogen:
	ansible-playbook -c local mitogen.yml -vv
	@echo Mitogen support is deprecated.
	@echo Please run the following command manually:
	@echo ansible-playbook -c local mitogen.yml -vv
clean:
	rm -rf dist/
	rm *.retry
OWNERS_ALIASES

@@ -4,15 +4,20 @@ aliases:
    - chadswen
    - mirwan
    - miouge1
    - woopstar
    - luckysb
    - floryut
    - oomichi
    - cristicalin
  kubespray-reviewers:
    - holmsten
    - bozzo
    - eppo
    - oomichi
    - jayonlau
    - cristicalin
    - liupeng0518
  kubespray-emeritus_approvers:
    - riverzhang
    - atoms
    - ant31
    - woopstar
75 README.md

@@ -5,7 +5,7 @@
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)

- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Packet](docs/packet.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
@@ -19,10 +19,10 @@ To deploy the cluster you can use :

#### Usage

```ShellSession
# Install dependencies from ``requirements.txt``
sudo pip3 install -r requirements.txt
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
then run the following steps:

```ShellSession
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
@@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:

```ShellSession
docker pull quay.io/kubespray/kubespray:v2.15.1
docker pull quay.io/kubespray/kubespray:v2.19.1
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.15.1 bash
  quay.io/kubespray/kubespray:v2.19.1 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
@@ -75,10 +75,11 @@ python -V && pip -V
```

If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
Install the necessary requirements

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
then run the following step:

```ShellSession
sudo pip install -r requirements.txt
vagrant up
```

@@ -105,64 +106,64 @@ vagrant up
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Packet Host](docs/packet.md)
- [Equinix Metal](docs/equinix-metal.md)
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [Hardening](docs/hardening.md)
- [Roadmap](docs/roadmap.md)

## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Buster, Jessie, Stretch, Wheezy
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04
- **CentOS/RHEL** 7, [8](docs/centos8.md)
- **Fedora** 32, 33
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **Fedora** 34, 35
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8](docs/centos8.md)
- **Alma Linux** [8](docs/centos8.md)
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md)
- **Rocky Linux** [8](docs/centos8.md)
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))

Note: Upstart/SysV init based OS types are not supported.

## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.20.7
  - [etcd](https://github.com/coreos/etcd) v3.4.13
  - [docker](https://www.docker.com/) v19.03 (see note)
  - [containerd](https://containerd.io/) v1.4.4
  - [cri-o](http://cri-o.io/) v1.20 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.7
  - [etcd](https://github.com/etcd-io/etcd) v3.5.3
  - [docker](https://www.docker.com/) v20.10 (see note)
  - [containerd](https://containerd.io/) v1.6.4
  - [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v0.9.1
  - [calico](https://github.com/projectcalico/calico) v3.17.4
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
  - [calico](https://github.com/projectcalico/calico) v3.22.3
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.8.9
  - [flanneld](https://github.com/coreos/flannel) v0.13.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.6.2
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.2.2
  - [multus](https://github.com/intel/multus-cni) v3.7.0
  - [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
  - [cilium](https://github.com/cilium/cilium) v1.11.3
  - [flanneld](https://github.com/flannel-io/flannel) v0.17.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.2
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0
  - [multus](https://github.com/intel/multus-cni) v3.8
  - [weave](https://github.com/weaveworks/weave) v2.8.1
- Application
  - [ambassador](https://github.com/datawire/ambassador): v1.5
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
  - [coredns](https://github.com/coredns/coredns) v1.7.0
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.43.0
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.8.0
  - [coredns](https://github.com/coredns/coredns) v1.8.6
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.2.1

## Container Runtime Notes

- The list of available docker versions is 18.09, 19.03 and 20.10. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin.
- The list of available docker versions is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin; a hedged pin sketch follows this list.
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

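A hedged sketch of the version pinning suggested in the docker note above; the package names are the usual upstream ones (docker-ce, docker-ce-cli, containerd.io), but check what is actually installed on your hosts first:

```ShellSession
# Debian/Ubuntu: hold the installed Docker packages at their current version
sudo apt-mark hold docker-ce docker-ce-cli containerd.io

# RHEL/CentOS: the equivalent via the yum versionlock plugin
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add docker-ce
```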
## Requirements

- **Minimum required version of Kubernetes is v1.19**
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is experimentally supported for now**
- **Minimum required version of Kubernetes is v1.21**
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
@@ -195,8 +196,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

@@ -217,8 +216,6 @@ See also [Network checker](docs/netcheck.md).

## Ingress Plugins

- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.

- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.

- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
@@ -239,6 +236,6 @@ See also [Network checker](docs/netcheck.md).

[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Packet](https://www.packet.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).

See the [test matrix](docs/test_cases.md) for details.
33
RELEASE.md
33
RELEASE.md
@@ -2,17 +2,18 @@

The Kubespray Project is released on an as-needed basis. The process is as follows:

1. An issue is proposing a new release with a changelog since the last release
1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
6. An approver creates a release branch in the form `release-X.Y`
7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
9. The release issue is closed
10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
7. An approver creates a release branch in the form `release-X.Y`
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
10. The release issue is closed
11. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`

## Major/minor releases and milestones

@@ -46,3 +47,17 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.

## Release note creation

You can create a release note with:

```shell
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```

If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
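As a sketch of the labeling step (assuming the GitHub CLI `gh` is available; the PR number below is a placeholder):

```shell
# Label a PR that showed up under "### Uncategorized" (placeholder PR number),
# then re-run the release-notes invocation shown above
gh pr edit 1234 --add-label "kind/feature"
```
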
25
Vagrantfile
vendored
@@ -26,9 +26,11 @@ SUPPORTED_OS = {
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos8" => {box: "centos/8", user: "vagrant"},
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
"fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"},
"opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
"fedora34" => {box: "fedora/34-cloud-base", user: "vagrant"},
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
@@ -53,9 +55,9 @@ $subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= false
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "True"
$download_force_cache ||= "False"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
@@ -68,9 +70,12 @@ $kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= false
$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""

$playbook ||= "cluster.yml"

@@ -167,7 +172,7 @@ Vagrant.configure("2") do |config|
# always make /dev/sd{a/b/c} so that CI can ensure that
# virtualbox and libvirt will have the same devices to use for OSDs
(1..$kube_node_instances_with_disks_number).each do |d|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
end
end
end
@@ -238,9 +243,11 @@ Vagrant.configure("2") do |config|
}

# Only execute the Ansible provisioner once, when all the machines are up and ready.
# And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh
if i == $num_instances
node.vm.provision "ansible" do |ansible|
ansible.playbook = $playbook
ansible.verbose = $ansible_verbosity
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
if File.exist?($ansible_inventory_path)
ansible.inventory_path = $ansible_inventory_path
@@ -250,7 +257,9 @@ Vagrant.configure("2") do |config|
ansible.host_key_checking = false
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
ansible.host_vars = host_vars
#ansible.tags = ['download']
if $ansible_tags != ""
ansible.tags = [$ansible_tags]
end
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],

@@ -3,7 +3,6 @@ pipelining=True
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore

@@ -15,7 +14,7 @@ fact_caching_timeout = 7200
stdout_callback = default
display_skipped_hosts = no
library = ./library
callback_whitelist = profile_tasks
callback_whitelist = profile_tasks,ara_default
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg

@@ -4,8 +4,10 @@
become: no
vars:
minimal_ansible_version: 2.9.0
maximal_ansible_version: 2.11.0
minimal_ansible_version_2_10: 2.10.11
maximal_ansible_version: 2.13.0
ansible_connection: local
tags: always
tasks:
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
assert:
@@ -16,6 +18,17 @@
tags:
- check

- name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
assert:
msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
that:
- ansible_version.string is version(minimal_ansible_version_2_10, ">=")
- ansible_version.string is version(maximal_ansible_version, "<")
when:
- ansible_version.string is version('2.10.0', ">=")
tags:
- check

- name: "Check that python netaddr is installed"
assert:
msg: "Python netaddr is not present"

18
cluster.yml
@@ -32,7 +32,7 @@
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }

- hosts: etcd
@@ -46,7 +46,7 @@
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: not etcd_kubeadm_enabled| default(false)
when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
gather_facts: False
@@ -59,7 +59,7 @@
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when: not etcd_kubeadm_enabled| default(false)
when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
gather_facts: False
@@ -86,8 +86,8 @@
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: network_plugin, tags: network }
- { role: kubernetes/node-label, tags: node-label }
- { role: network_plugin, tags: network }

- hosts: calico_rr
gather_facts: False
@@ -116,16 +116,10 @@
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps, tags: apps }

- hosts: k8s_cluster
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"

@@ -69,7 +69,7 @@ class SearchEC2Tags(object):

hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host


hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
print(json.dumps(hosts, sort_keys=True, indent=2))

@@ -47,6 +47,10 @@ If you need to delete all resources from a resource group, simply call:

**WARNING** this really deletes everything from your resource group, including everything that you created in it later!

## Installing Ansible and the dependencies

Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
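For instance (a sketch, mirroring the command used further below; it assumes you run it from the kubespray checkout):

```shell
# Installs Ansible plus the python dependencies (jinja2, netaddr, ...) pinned by kubespray
sudo pip3 install -r requirements.txt
```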

## Generating an inventory for kubespray

After you have applied the templates, you can generate an inventory with this call:
@@ -59,6 +63,5 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:

```shell
cd kubespray-root-dir
sudo pip3 install -r requirements.txt
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```

@@ -12,3 +12,4 @@
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644

@@ -22,8 +22,10 @@
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644

- name: Generate Load Balancer variables
template:
src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
mode: 0644

@@ -8,11 +8,13 @@
path: "{{ base_dir }}"
state: directory
recurse: true
mode: 0755

- name: Store json files in base_dir
template:
src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}"
mode: 0644
with_items:
- network.json
- storage.json

@@ -35,6 +35,7 @@
path-exclude=/usr/share/doc/*
path-include=/usr/share/doc/*/copyright
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
mode: 0644
when:
- ansible_os_family == 'Debian'

@@ -63,6 +64,7 @@
copy:
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
dest: "/etc/sudoers.d/{{ distro_user }}"
mode: 0640

- name: Add my pubkey to "{{ distro_user }}" user authorized keys
authorized_key:

@@ -48,7 +48,7 @@ ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
'calico_rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
'load']
'load', 'add']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
@@ -82,22 +82,43 @@ class KubesprayInventory(object):
def __init__(self, changed_hosts=None, config_file=None):
self.config_file = config_file
self.yaml_config = {}
if self.config_file:
loadPreviousConfig = False
printHostnames = False
# See whether there are any commands to process
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
if changed_hosts[0] == "add":
loadPreviousConfig = True
changed_hosts = changed_hosts[1:]
elif changed_hosts[0] == "print_hostnames":
loadPreviousConfig = True
printHostnames = True
else:
self.parse_command(changed_hosts[0], changed_hosts[1:])
sys.exit(0)

# If the user wants to remove a node, we need to load the config anyway
if changed_hosts and changed_hosts[0][0] == "-":
loadPreviousConfig = True

if self.config_file and loadPreviousConfig: # Load previous YAML file
try:
self.hosts_file = open(config_file, 'r')
self.yaml_config = yaml.load_all(self.hosts_file)
except OSError:
pass
self.yaml_config = yaml.load(self.hosts_file)
except OSError as e:
# I am assuming we are catching "cannot open file" exceptions
print(e)
sys.exit(1)

if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
self.parse_command(changed_hosts[0], changed_hosts[1:])
if printHostnames:
self.print_hostnames()
sys.exit(0)

self.ensure_required_groups(ROLES)

if changed_hosts:
changed_hosts = self.range2ips(changed_hosts)
self.hosts = self.build_hostnames(changed_hosts)
self.hosts = self.build_hostnames(changed_hosts,
loadPreviousConfig)
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
self.set_all(self.hosts)
self.set_k8s_cluster()
@@ -158,17 +179,29 @@ class KubesprayInventory(object):
except IndexError:
raise ValueError("Host name must end in an integer")

def build_hostnames(self, changed_hosts):
# Keeps already specified hosts,
# and adds or removes the hosts provided as an argument
def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
existing_hosts = OrderedDict()
highest_host_id = 0
try:
for host in self.yaml_config['all']['hosts']:
existing_hosts[host] = self.yaml_config['all']['hosts'][host]
host_id = self.get_host_id(host)
if host_id > highest_host_id:
highest_host_id = host_id
except Exception:
pass
# Load already existing hosts from the YAML
if loadPreviousConfig:
try:
for host in self.yaml_config['all']['hosts']:
# Read configuration of an existing host
hostConfig = self.yaml_config['all']['hosts'][host]
existing_hosts[host] = hostConfig
# If the existing host seems
# to have been created automatically, detect its ID
if host.startswith(HOST_PREFIX):
host_id = self.get_host_id(host)
if host_id > highest_host_id:
highest_host_id = host_id
except Exception as e:
# I am assuming we are catching automatically
# created hosts without IDs
print(e)
sys.exit(1)

# FIXME(mattymo): Fix condition where delete then add reuses highest id
next_host_id = highest_host_id + 1
@@ -176,6 +209,7 @@ class KubesprayInventory(object):

all_hosts = existing_hosts.copy()
for host in changed_hosts:
# Delete the host from config the hostname/IP has a "-" prefix
if host[0] == "-":
realhost = host[1:]
if self.exists_hostname(all_hosts, realhost):
@@ -184,6 +218,8 @@ class KubesprayInventory(object):
elif self.exists_ip(all_hosts, realhost):
self.debug("Marked {0} for deletion.".format(realhost))
self.delete_host_by_ip(all_hosts, realhost)
# Host/Argument starts with a digit,
# then we assume its an IP address
elif host[0].isdigit():
if ',' in host:
ip, access_ip = host.split(',')
@@ -203,11 +239,15 @@ class KubesprayInventory(object):
next_host = subprocess.check_output(cmd, shell=True)
next_host = next_host.strip().decode('ascii')
else:
# Generates a hostname because we have only an IP address
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
next_host_id += 1
# Uses automatically generated node name
# in case we dont provide it.
all_hosts[next_host] = {'ansible_host': access_ip,
'ip': ip,
'access_ip': access_ip}
# Host/Argument starts with a letter, then we assume its a hostname
elif host[0].isalpha():
if ',' in host:
try:
@@ -226,6 +266,7 @@ class KubesprayInventory(object):
'access_ip': access_ip}
return all_hosts

# Expand IP ranges into individual addresses
def range2ips(self, hosts):
reworked_hosts = []

@@ -394,9 +435,11 @@ help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
add - Adds specified hosts into an already existing inventory

Advanced usage:
Add another host after initial creation: inventory.py 10.10.1.5
Create new or overwrite old inventory file: inventory.py 10.10.1.5
Add another host after initial creation: inventory.py add 10.10.1.6
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
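For instance, a sketch of the new `add` subcommand against an existing inventory (the inventory path and IPs are placeholders; `CONFIG_FILE` is the environment variable the script reads):

```shell
# First run creates/overwrites the inventory for three nodes
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
# "add" loads the previous config and appends a node instead of overwriting
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py add 10.10.1.6
```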
@@ -430,6 +473,7 @@ def main(argv=None):
if not argv:
argv = sys.argv[1:]
KubesprayInventory(argv, CONFIG_FILE)
return 0


if __name__ == "__main__":

@@ -13,6 +13,7 @@
# under the License.

import inventory
from test import support
import unittest
from unittest import mock

@@ -26,6 +27,28 @@ if path not in sys.path:
import inventory  # noqa


class TestInventoryPrintHostnames(unittest.TestCase):

@mock.patch('ruamel.yaml.YAML.load')
def test_print_hostnames(self, load_mock):
mock_io = mock.mock_open(read_data='')
load_mock.return_value = OrderedDict({'all': {'hosts': {
'node1': {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'},
'node2': {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'}}}})
with mock.patch('builtins.open', mock_io):
with self.assertRaises(SystemExit) as cm:
with support.captured_stdout() as stdout:
inventory.KubesprayInventory(
changed_hosts=["print_hostnames"],
config_file="file")
self.assertEqual("node1 node2\n", stdout.getvalue())
self.assertEqual(cm.exception.code, 0)


class TestInventory(unittest.TestCase):
@mock.patch('inventory.sys')
def setUp(self, sys_mock):
@@ -67,23 +90,14 @@ class TestInventory(unittest.TestCase):
self.assertRaisesRegex(ValueError, "Host name must end in an",
self.inv.get_host_id, hostname)

def test_build_hostnames_add_one(self):
changed_hosts = ['10.90.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_add_duplicate(self):
changed_hosts = ['10.90.0.2']
expected = OrderedDict([('node1',
expected = OrderedDict([('node3',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
self.inv.yaml_config['all']['hosts'] = expected
result = self.inv.build_hostnames(changed_hosts)
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

def test_build_hostnames_add_two(self):
@@ -99,6 +113,30 @@ class TestInventory(unittest.TestCase):
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_add_three(self):
changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
expected = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'}),
('node3', {'ansible_host': '10.90.0.4',
'ip': '10.90.0.4',
'access_ip': '10.90.0.4'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_add_one(self):
changed_hosts = ['10.90.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_delete_first(self):
changed_hosts = ['-10.90.0.2']
existing_hosts = OrderedDict([
@@ -113,7 +151,24 @@ class TestInventory(unittest.TestCase):
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.build_hostnames(changed_hosts)
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

def test_build_hostnames_delete_by_hostname(self):
changed_hosts = ['-node1']
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
self.inv.yaml_config['all']['hosts'] = existing_hosts
expected = OrderedDict([
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

def test_exists_hostname_positive(self):
@@ -313,7 +368,7 @@ class TestInventory(unittest.TestCase):
self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
self.inv.range2ips, host_range)

def test_build_hostnames_different_ips_add_one(self):
def test_build_hostnames_create_with_one_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
@@ -322,17 +377,7 @@ class TestInventory(unittest.TestCase):
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_different_ips_add_duplicate(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
self.inv.yaml_config['all']['hosts'] = expected
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_different_ips_add_two(self):
def test_build_hostnames_create_with_two_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
expected = OrderedDict([
('node1', {'ansible_host': '192.168.0.2',
@@ -341,6 +386,210 @@ class TestInventory(unittest.TestCase):
('node2', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'})])
self.inv.yaml_config['all']['hosts'] = OrderedDict()
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_create_with_three_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2',
'10.90.0.3,192.168.0.3',
'10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node1', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node2', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node3', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_overwrite_one_with_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = OrderedDict([('node5',
{'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_overwrite_three_with_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = OrderedDict([
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)

def test_build_hostnames_different_ips_add_duplicate(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node3',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = expected
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

def test_build_hostnames_add_two_different_ips_into_one_existing(self):
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])

existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

def test_build_hostnames_add_two_different_ips_into_two_existing(self):
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])

existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

def test_build_hostnames_add_two_different_ips_into_three_existing(self):
changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'}),
('node6', {'ansible_host': '192.168.0.6',
'ip': '10.90.0.6',
'access_ip': '192.168.0.6'})])

existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

# Add two IP addresses into a config that has
# three already defined IP addresses. One of the IP addresses
# is a duplicate.
def test_build_hostnames_add_two_duplicate_one_overlap(self):
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])

existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

# Add two duplicate IP addresses into a config that has
# three already defined IP addresses
def test_build_hostnames_add_two_duplicate_two_overlap(self):
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])

existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

@@ -1,7 +1,7 @@
---

- name: Install required packages
yum:
package:
name: "{{ item }}"
state: present
with_items:

@@ -28,7 +28,7 @@
sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: yes

@@ -37,7 +37,7 @@
name: "{{ item }}"
state: present
value: 0
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
sysctl_file: "{{ sysctl_file_path }}"
reload: yes
with_items:
- net.bridge.bridge-nf-call-arptables

@@ -11,6 +11,7 @@
state: directory
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
mode: 0700

- name: Configure sudo for deployment user
copy:

@@ -5,14 +5,15 @@
- hosts: localhost
strategy: linear
vars:
mitogen_version: 0.2.9
mitogen_url: https://github.com/dw/mitogen/archive/v{{ mitogen_version }}.tar.gz
mitogen_version: 0.3.2
mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz
ansible_connection: local
tasks:
- name: Create mitogen plugin dir
file:
path: "{{ item }}"
state: directory
mode: 0755
become: false
loop:
- "{{ playbook_dir }}/plugins/mitogen"
@@ -37,6 +38,12 @@
- name: add strategy to ansible.cfg
ini_file:
path: ansible.cfg
section: defaults
option: strategy
value: mitogen_linear
mode: 0644
section: "{{ item.section | d('defaults') }}"
option: "{{ item.option }}"
value: "{{ item.value }}"
with_items:
- option: strategy
value: mitogen_linear
- option: strategy_plugins
value: plugins/mitogen/ansible_mitogen/plugins/strategy
@@ -11,8 +11,8 @@
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

# [kube_control_plane]
# node1

@@ -1,10 +1,10 @@
---
- name: Install Prerequisites
yum: name={{ item }} state=present
package: name={{ item }} state=present
with_items:
- "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
yum: name={{ item }} state=present
package: name={{ item }} state=present
with_items:
- glusterfs-client

@@ -9,7 +9,7 @@
when: ansible_os_family == "Debian"

- name: install xfs RedHat
yum: name=xfsprogs state=present
package: name=xfsprogs state=present
when: ansible_os_family == "RedHat"

# Format external volumes in xfs
@@ -82,6 +82,7 @@
template:
dest: "{{ gluster_mount_dir }}/.test-file.txt"
src: test-file.txt
mode: 0644
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs

@@ -1,11 +1,11 @@
---
- name: Install Prerequisites
yum: name={{ item }} state=present
package: name={{ item }} state=present
with_items:
- "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
yum: name={{ item }} state=present
package: name={{ item }} state=present
with_items:
- glusterfs-server
- glusterfs-client

@@ -1,5 +0,0 @@
---
- hosts: all

roles:
- role_under_test
@@ -3,6 +3,7 @@
template:
src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
mode: 0644
with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

@@ -2,6 +2,13 @@ all:
vars:
heketi_admin_key: "11elfeinhundertundelf"
heketi_user_key: "!!einseinseins"
glusterfs_daemonset:
readiness_probe:
timeout_seconds: 3
initial_delay_seconds: 3
liveness_probe:
timeout_seconds: 3
initial_delay_seconds: 10
children:
k8s_cluster:
vars:

@@ -11,7 +11,7 @@

- name: "Install glusterfs mount utils (RedHat)"
become: true
yum:
package:
name: "glusterfs-fuse"
state: "present"
when: "ansible_os_family == 'RedHat'"

@@ -1,7 +1,10 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
become: true
template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
template:
src: "heketi-bootstrap.json.j2"
dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
mode: 0640
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
kube:

@@ -10,6 +10,7 @@
template:
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
mode: 0644
- name: "Copy topology configuration into container."
changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"

@@ -1,6 +1,9 @@
---
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
template:
src: "glusterfs-daemonset.json.j2"
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
mode: 0644
become: true
register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@@ -27,7 +30,10 @@
delay: 5

- name: "Kubernetes Apps | Lay Down Heketi Service Account"
template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
template:
src: "heketi-service-account.json.j2"
dest: "{{ kube_config_dir }}/heketi-service-account.json"
mode: 0644
become: true
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account"

@@ -4,6 +4,7 @@
template:
src: "heketi-deployment.json.j2"
dest: "{{ kube_config_dir }}/heketi-deployment.json"
mode: 0644
register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi"

@@ -5,7 +5,7 @@
changed_when: false

- name: "Kubernetes Apps | Deploy cluster role binding."
when: "clusterrolebinding_state.stdout == \"\""
when: "clusterrolebinding_state.stdout | length == 0"
command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

- name: Get clusterrolebindings again
@@ -15,7 +15,7 @@

- name: Make sure that clusterrolebindings are present now
assert:
that: "clusterrolebinding_state.stdout != \"\""
that: "clusterrolebinding_state.stdout | length > 0"
msg: "Cluster role binding is not present."

- name: Get the heketi-config-secret secret
@@ -28,9 +28,10 @@
template:
src: "heketi.json.j2"
dest: "{{ kube_config_dir }}/heketi.json"
mode: 0644

- name: "Deploy Heketi config secret"
when: "secret_state.stdout == \"\""
when: "secret_state.stdout | length == 0"
command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

- name: Get the heketi-config-secret secret again
@@ -40,5 +41,5 @@

- name: Make sure the heketi-config-secret secret exists now
assert:
that: "secret_state.stdout != \"\""
that: "secret_state.stdout | length > 0"
msg: "Heketi config secret is not present."

@@ -2,7 +2,10 @@
- name: "Kubernetes Apps | Lay Down Heketi Storage"
become: true
vars: { nodes: "{{ groups['heketi-node'] }}" }
template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
template:
src: "heketi-storage.json.j2"
dest: "{{ kube_config_dir }}/heketi-storage.json"
mode: 0644
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage"
kube:

@@ -16,6 +16,7 @@
template:
src: "storageclass.yml.j2"
dest: "{{ kube_config_dir }}/storageclass.yml"
mode: 0644
register: "rendering"
- name: "Kubernetes Apps | Install and configure Storage Class"
kube:

@@ -10,6 +10,7 @@
template:
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
mode: 0644
- name: "Copy topology configuration into container." # noqa 503
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"

@@ -73,8 +73,8 @@
"privileged": true
},
"readinessProbe": {
"timeoutSeconds": 3,
"initialDelaySeconds": 3,
"timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
"initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
"exec": {
"command": [
"/bin/bash",
@@ -84,8 +84,8 @@
}
},
"livenessProbe": {
"timeoutSeconds": 3,
"initialDelaySeconds": 10,
"timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
"initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
"exec": {
"command": [
"/bin/bash",

@@ -1,7 +1,7 @@
---
- name: "Install lvm utils (RedHat)"
become: true
yum:
package:
name: "lvm2"
state: "present"
when: "ansible_os_family == 'RedHat'"
@@ -19,7 +19,7 @@
become: true
shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
register: "volume_groups"
ignore_errors: true
ignore_errors: true # noqa ignore-errors
changed_when: false

- name: "Remove volume groups." # noqa 301
@@ -35,11 +35,11 @@
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
command: "pvremove {{ disk_volume_device_1 }} --yes"
ignore_errors: true
ignore_errors: true # noqa ignore-errors

- name: "Remove lvm utils (RedHat)"
become: true
yum:
package:
name: "lvm2"
state: "absent"
when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"

@@ -1,51 +1,51 @@
---
- name: "Remove storage class." # noqa 301
- name: Remove storage class. # noqa 301
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
ignore_errors: true
- name: "Tear down heketi." # noqa 301
ignore_errors: true # noqa ignore-errors
- name: Tear down heketi. # noqa 301
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
ignore_errors: true
- name: "Tear down heketi." # noqa 301
ignore_errors: true # noqa ignore-errors
- name: Tear down heketi. # noqa 301
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
ignore_errors: true
- name: "Tear down bootstrap."
ignore_errors: true # noqa ignore-errors
- name: Tear down bootstrap.
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
- name: "Ensure there is nothing left over." # noqa 301
- name: Ensure there is nothing left over. # noqa 301
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: "Ensure there is nothing left over." # noqa 301
- name: Ensure there is nothing left over. # noqa 301
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: "Tear down glusterfs." # noqa 301
- name: Tear down glusterfs. # noqa 301
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
ignore_errors: true
- name: "Remove heketi storage service." # noqa 301
ignore_errors: true # noqa ignore-errors
- name: Remove heketi storage service. # noqa 301
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
ignore_errors: true
- name: "Remove heketi gluster role binding" # noqa 301
ignore_errors: true # noqa ignore-errors
- name: Remove heketi gluster role binding # noqa 301
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
ignore_errors: true
- name: "Remove heketi config secret" # noqa 301
ignore_errors: true # noqa ignore-errors
- name: Remove heketi config secret # noqa 301
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
ignore_errors: true
- name: "Remove heketi db backup" # noqa 301
ignore_errors: true # noqa ignore-errors
- name: Remove heketi db backup # noqa 301
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
ignore_errors: true
- name: "Remove heketi service account" # noqa 301
ignore_errors: true # noqa ignore-errors
- name: Remove heketi service account # noqa 301
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
ignore_errors: true
- name: "Get secrets"
ignore_errors: true # noqa ignore-errors
- name: Get secrets
command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
register: "secrets"
changed_when: false
- name: "Remove heketi storage secret"
- name: Remove heketi storage secret
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
when: "storage_query is defined"
ignore_errors: true
ignore_errors: true # noqa ignore-errors

@@ -9,7 +9,8 @@ This script has two features:
(2) Deploy local container registry and register the container images to the registry.

Step(1) should be done online site as a preparation, then we bring the gotten images
to the target offline environment.
to the target offline environment. If images are from a private registry,
you need to set the `PRIVATE_REGISTRY` environment variable.
Then we will run step(2) for registering the images to local registry.
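For example, a sketch (the registry address is a placeholder):

```shell
# Strip the private registry prefix while creating the image tarball in step (1)
PRIVATE_REGISTRY=registry.example.com:5000 ./manage-offline-container-images.sh create
```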

Step(1) can be operated with:
@@ -28,16 +29,19 @@ manage-offline-container-images.sh register

This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.

Run this script will generates three files, all downloaded files url in files.list, all container images in images.list, all component version in generate.sh.
Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files:
all downloaded file URLs in files.list, all container images in images.list, and jinja2 templates in *.template.

```shell
bash generate_list.sh
./generate_list.sh
tree temp
temp
├── files.list
├── generate.sh
└── images.list
0 directories, 3 files
├── files.list.template
├── images.list
└── images.list.template
0 directories, 5 files
```

In some cases you may want to update some component version, you can edit `generate.sh` file, then run `bash generate.sh | grep 'https' > files.list` to update file.list or run `bash generate.sh | grep -v 'https'> images.list` to update images.list.
In some cases you may want to update some component version; you can declare version variables in the ansible inventory file or group_vars,
then run `./generate_list.sh -i [inventory_file]` to update file.list and images.list.
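As a sketch (the variable, its value, and the paths are placeholders for whichever component you pin):

```shell
# Pin a component version via group_vars, then regenerate both lists
echo 'etcd_version: v3.5.1' >> inventory/sample/group_vars/all/offline.yml
./generate_list.sh -i inventory/sample/inventory.ini
```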
56
contrib/offline/generate_list.sh
Normal file → Executable file
@@ -5,53 +5,29 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
|
||||
TEMP_DIR="${CURRENT_DIR}/temp"
|
||||
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
||||
|
||||
: ${IMAGE_ARCH:="amd64"}
|
||||
: ${ANSIBLE_SYSTEM:="linux"}
|
||||
: ${ANSIBLE_ARCHITECTURE:="x86_64"}
|
||||
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
|
||||
: ${KUBE_VERSION_YAML:="roles/kubespray-defaults/defaults/main.yaml"}
|
||||
|
||||
mkdir -p ${TEMP_DIR}
|
||||
|
||||
# ARCH used in convert {%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%} to {{arch}}
if [ "${IMAGE_ARCH}" != "amd64" ]; then ARCH="${IMAGE_ARCH}"; fi

cat > ${TEMP_DIR}/generate.sh << EOF
arch=${ARCH}
image_arch=${IMAGE_ARCH}
ansible_system=${ANSIBLE_SYSTEM}
ansible_architecture=${ANSIBLE_ARCHITECTURE}
EOF

# generate all component version by $DOWNLOAD_YML
grep 'kube_version:' ${REPO_ROOT_DIR}/${KUBE_VERSION_YAML} \
    | sed 's/: /=/g' >> ${TEMP_DIR}/generate.sh
grep '_version:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
sed -i 's/kube_major_version=.*/kube_major_version=${kube_version%.*}/g' ${TEMP_DIR}/generate.sh
sed -i 's/crictl_version=.*/crictl_version=${kube_version%.*}.0/g' ${TEMP_DIR}/generate.sh

# generate all download files url
# generate all download files url template
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed 's/: /=/g;s/ //g;s/{{/${/g;s/}}/}/g;s/|lower//g;s/^.*_url=/echo /g' >> ${TEMP_DIR}/generate.sh
    | sed 's/^.*_url: //g;s/\"//g' > ${TEMP_DIR}/files.list.template

# generate all images list
grep -E '_repo:|_tag:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed "s#{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}#{{arch}}#g" \
    | sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
# generate all images list template
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' | sed 's/{{/${/g;s/}}/}/g' \
    | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/^/echo /g' >> ${TEMP_DIR}/generate.sh
    | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \
    | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

# special handling for https://github.com/kubernetes-sigs/kubespray/pull/7570
sed -i 's#^coredns_image_repo=.*#coredns_image_repo=${kube_image_repo}$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n /coredns/coredns;else echo -n /coredns; fi)#' ${TEMP_DIR}/generate.sh
sed -i 's#^coredns_image_tag=.*#coredns_image_tag=$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n ${coredns_version};else echo -n ${coredns_version/v/}; fi)#' ${TEMP_DIR}/generate.sh

# add kube-* images to images list
# add kube-* images to images list template
# Those container images are downloaded by kubeadm, so roles/download/defaults/main.yml
# doesn't contain them. That is why they need to be added to the list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
echo "${KUBE_IMAGES}" | tr ' ' '\n' | xargs -L1 -I {} \
    echo 'echo ${kube_image_repo}/{}:${kube_version}' >> ${TEMP_DIR}/generate.sh
for i in $KUBE_IMAGES; do
    echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
done

# print files.list and images.list
bash ${TEMP_DIR}/generate.sh | grep 'https' | sort > ${TEMP_DIR}/files.list
bash ${TEMP_DIR}/generate.sh | grep -v 'https' | sort > ${TEMP_DIR}/images.list
# run ansible to expand templates
/bin/cp ${CURRENT_DIR}/generate_list.yml ${REPO_ROOT_DIR}

(cd ${REPO_ROOT_DIR} && ansible-playbook $* generate_list.yml && /bin/rm generate_list.yml) || exit 1
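The sed chains above turn Jinja2 `{{ var }}` references in the downloads file into shell `${var}` expansions, so that running the generated script echoes fully resolved URLs. A minimal sketch of that trick, using a hypothetical one-entry downloads file (the file path, variable names, and versions below are illustrative, not taken from the repo):

```bash
# Hypothetical one-entry downloads file; the real input is the repo's download defaults.
cat > /tmp/downloads.yml << 'YAML'
crictl_download_url: "https://example.com/{{ crictl_version }}/crictl-{{ image_arch }}.tar.gz"
YAML

{
  echo 'crictl_version=v1.22.0'   # in the real script these come from grep '_version:'
  echo 'image_arch=amd64'
  # same substitution as above: '{{ var }}' -> '${var}', '..._url: ' -> 'echo '
  grep 'download_url:' /tmp/downloads.yml \
    | sed 's/: /=/g;s/ //g;s/{{/${/g;s/}}/}/g;s/^.*_url=/echo /g'
} > /tmp/generate.sh

bash /tmp/generate.sh
# -> https://example.com/v1.22.0/crictl-amd64.tar.gz
```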
19 contrib/offline/generate_list.yml (Normal file)
@@ -0,0 +1,19 @@
---
- hosts: localhost
  become: no

  roles:
    # Just load default variables from roles.
    - role: kubespray-defaults
      when: false
    - role: download
      when: false

  tasks:
    # Generate files.list and images.list files from templates.
    - template:
        src: ./contrib/offline/temp/{{ item }}.list.template
        dest: ./contrib/offline/temp/{{ item }}.list
      with_items:
        - files
        - images
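Taken together, the shell script and this playbook are usually driven in one shot. A hedged usage sketch, assuming the wrapper shown above lives at contrib/offline/generate_list.sh (that path is not named in this hunk) and that extra CLI arguments pass straight through `$*` to ansible-playbook:

```bash
cd contrib/offline
bash generate_list.sh -e kube_version=v1.23.0   # extra args go to ansible-playbook
ls temp/
# expected: files.list  files.list.template  generate.sh  images.list  images.list.template
```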
@@ -54,7 +54,8 @@ function create_container_image_tar() {
        if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
           [ "${FIRST_PART}" = "gcr.io" ] ||
           [ "${FIRST_PART}" = "docker.io" ] ||
           [ "${FIRST_PART}" = "quay.io" ]; then
           [ "${FIRST_PART}" = "quay.io" ] ||
           [ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
            image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
        fi
        echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
@@ -100,15 +101,35 @@ function register_container_images() {

    tar -zxvf ${IMAGE_TAR_FILE}
    sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
    sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
    set +e

    sudo docker container inspect registry >/dev/null 2>&1
    if [ $? -ne 0 ]; then
        sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
    fi
    set -e

    while read -r line; do
        file_name=$(echo ${line} | awk '{print $1}')
        org_image=$(echo ${line} | awk '{print $2}')
        new_image="${LOCALHOST_NAME}:5000/${org_image}"
        image_id=$(tar -tf ${IMAGE_DIR}/${file_name} | grep "\.json" | grep -v manifest.json | sed s/"\.json"//)
        raw_image=$(echo ${line} | awk '{print $2}')
        new_image="${LOCALHOST_NAME}:5000/${raw_image}"
        org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
        image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
        if [ -z "${file_name}" ]; then
            echo "Failed to get file_name for line ${line}"
            exit 1
        fi
        if [ -z "${raw_image}" ]; then
            echo "Failed to get raw_image for line ${line}"
            exit 1
        fi
        if [ -z "${org_image}" ]; then
            echo "Failed to get org_image for line ${line}"
            exit 1
        fi
        if [ -z "${image_id}" ]; then
            echo "Failed to get image_id for file ${file_name}"
            exit 1
        fi
        sudo docker load -i ${IMAGE_DIR}/${file_name}
        sudo docker tag ${image_id} ${new_image}
        sudo docker push ${new_image}
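After the loop pushes every image, the local registry can be sanity-checked via the registry HTTP API. A small hedged example, assuming the default port 5000 used above; the image name/tag is illustrative only:

```bash
# List the repositories the loop pushed into the local registry.
curl -s http://localhost:5000/v2/_catalog

# Pull one image back through the registry to confirm the tag/push round trip.
sudo docker pull localhost:5000/coredns/coredns:v1.8.6   # name and tag are illustrative
```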
@@ -132,7 +153,8 @@ else
    echo "(2) Deploy local container registry and register the container images to the registry."
    echo ""
    echo "Step(1) should be done online site as a preparation, then we bring"
    echo "the gotten images to the target offline environment."
    echo "the gotten images to the target offline environment. if images are from"
    echo "a private registry, you need to set PRIVATE_REGISTRY environment variable."
    echo "Then we will run step(2) for registering the images to local registry."
    echo ""
    echo "${IMAGE_TAR_FILE} is created to contain your container images."
@@ -20,4 +20,4 @@
        "'ufw.service' in services"

  when:
    - disable_service_firewall
    - disable_service_firewall is defined and disable_service_firewall
@@ -9,8 +9,8 @@ Summary: Ansible modules for installing Kubernetes

Group: System Environment/Libraries
License: ASL 2.0
Url: https://github.com/kubernetes-incubator/kubespray
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
Url: https://github.com/kubernetes-sigs/kubespray
Source0: https://github.com/kubernetes-sigs/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz

BuildArch: noarch
BuildRequires: git
1 contrib/terraform/aws/.gitignore (vendored)
@@ -1,2 +1,3 @@
*.tfstate*
.terraform.lock.hcl
.terraform
@@ -20,20 +20,20 @@ module "aws-vpc" {
|
||||
|
||||
aws_cluster_name = var.aws_cluster_name
|
||||
aws_vpc_cidr_block = var.aws_vpc_cidr_block
|
||||
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
|
||||
aws_avail_zones = data.aws_availability_zones.available.names
|
||||
aws_cidr_subnets_private = var.aws_cidr_subnets_private
|
||||
aws_cidr_subnets_public = var.aws_cidr_subnets_public
|
||||
default_tags = var.default_tags
|
||||
}
|
||||
|
||||
module "aws-elb" {
|
||||
source = "./modules/elb"
|
||||
module "aws-nlb" {
|
||||
source = "./modules/nlb"
|
||||
|
||||
aws_cluster_name = var.aws_cluster_name
|
||||
aws_vpc_id = module.aws-vpc.aws_vpc_id
|
||||
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
|
||||
aws_avail_zones = data.aws_availability_zones.available.names
|
||||
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
|
||||
aws_elb_api_port = var.aws_elb_api_port
|
||||
aws_nlb_api_port = var.aws_nlb_api_port
|
||||
k8s_secure_api_port = var.k8s_secure_api_port
|
||||
default_tags = var.default_tags
|
||||
}
|
||||
@@ -52,9 +52,8 @@ module "aws-iam" {
|
||||
resource "aws_instance" "bastion-server" {
|
||||
ami = data.aws_ami.distro.id
|
||||
instance_type = var.aws_bastion_size
|
||||
count = length(var.aws_cidr_subnets_public)
|
||||
count = var.aws_bastion_num
|
||||
associate_public_ip_address = true
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
@@ -79,11 +78,14 @@ resource "aws_instance" "k8s-master" {
|
||||
|
||||
count = var.aws_kube_master_num
|
||||
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
root_block_device {
|
||||
volume_size = var.aws_kube_master_disk_size
|
||||
}
|
||||
|
||||
iam_instance_profile = module.aws-iam.kube_control_plane-profile
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
@@ -94,10 +96,10 @@ resource "aws_instance" "k8s-master" {
|
||||
}))
|
||||
}
|
||||
|
||||
resource "aws_elb_attachment" "attach_master_nodes" {
|
||||
count = var.aws_kube_master_num
|
||||
elb = module.aws-elb.aws_elb_api_id
|
||||
instance = element(aws_instance.k8s-master.*.id, count.index)
|
||||
resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" {
|
||||
count = var.aws_kube_master_num
|
||||
target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn
|
||||
target_id = element(aws_instance.k8s-master.*.private_ip, count.index)
|
||||
}
|
||||
|
||||
resource "aws_instance" "k8s-etcd" {
|
||||
@@ -106,11 +108,14 @@ resource "aws_instance" "k8s-etcd" {
|
||||
|
||||
count = var.aws_etcd_num
|
||||
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
root_block_device {
|
||||
volume_size = var.aws_etcd_disk_size
|
||||
}
|
||||
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
@@ -126,11 +131,14 @@ resource "aws_instance" "k8s-worker" {
|
||||
|
||||
count = var.aws_kube_worker_num
|
||||
|
||||
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
root_block_device {
|
||||
volume_size = var.aws_kube_worker_disk_size
|
||||
}
|
||||
|
||||
iam_instance_profile = module.aws-iam.kube-worker-profile
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
@@ -152,11 +160,11 @@ data "template_file" "inventory" {
|
||||
public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
|
||||
connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
|
||||
connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
|
||||
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
|
||||
list_master = join("\n", aws_instance.k8s-master.*.private_dns)
|
||||
list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
|
||||
list_etcd = join("\n", aws_instance.k8s-etcd.*.private_dns)
|
||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
|
||||
list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)))
|
||||
nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\""
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
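Because the NLB target group registers control-plane nodes by private IP rather than by instance ID, a quick post-apply check is to ask AWS for target health. A hedged sketch with the AWS CLI (the target group name follows the `kubernetes-nlb-tg-<cluster>` pattern from the nlb module below; the cluster name here is illustrative):

```bash
# Look up the target group created by the nlb module, then inspect target health.
TG_ARN=$(aws elbv2 describe-target-groups --names kubernetes-nlb-tg-mycluster \
  --query 'TargetGroups[0].TargetGroupArn' --output text)
aws elbv2 describe-target-health --target-group-arn "${TG_ARN}" \
  --query 'TargetHealthDescriptions[].[Target.Id,TargetHealth.State]' --output table
```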
@@ -1,57 +0,0 @@
resource "aws_security_group" "aws-elb" {
  name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  vpc_id = var.aws_vpc_id

  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  }))
}

resource "aws_security_group_rule" "aws-allow-api-access" {
  type = "ingress"
  from_port = var.aws_elb_api_port
  to_port = var.k8s_secure_api_port
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = aws_security_group.aws-elb.id
}

resource "aws_security_group_rule" "aws-allow-api-egress" {
  type = "egress"
  from_port = 0
  to_port = 65535
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = aws_security_group.aws-elb.id
}

# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
  name = "kubernetes-elb-${var.aws_cluster_name}"
  subnets = var.aws_subnet_ids_public
  security_groups = [aws_security_group.aws-elb.id]

  listener {
    instance_port = var.k8s_secure_api_port
    instance_protocol = "tcp"
    lb_port = var.aws_elb_api_port
    lb_protocol = "tcp"
  }

  health_check {
    healthy_threshold = 2
    unhealthy_threshold = 2
    timeout = 3
    target = "HTTPS:${var.k8s_secure_api_port}/healthz"
    interval = 30
  }

  cross_zone_load_balancing = true
  idle_timeout = 400
  connection_draining = true
  connection_draining_timeout = 400

  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-elb-api"
  }))
}
@@ -1,7 +0,0 @@
output "aws_elb_api_id" {
  value = aws_elb.aws-elb-api.id
}

output "aws_elb_api_fqdn" {
  value = aws_elb.aws-elb-api.dns_name
}
41 contrib/terraform/aws/modules/nlb/main.tf (Normal file)
@@ -0,0 +1,41 @@
# Create a new AWS NLB for K8S API
resource "aws_lb" "aws-nlb-api" {
  name = "kubernetes-nlb-${var.aws_cluster_name}"
  load_balancer_type = "network"
  subnets = length(var.aws_subnet_ids_public) <= length(var.aws_avail_zones) ? var.aws_subnet_ids_public : slice(var.aws_subnet_ids_public, 0, length(var.aws_avail_zones))
  idle_timeout = 400
  enable_cross_zone_load_balancing = true

  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-nlb-api"
  }))
}

# Create a new AWS NLB Instance Target Group
resource "aws_lb_target_group" "aws-nlb-api-tg" {
  name = "kubernetes-nlb-tg-${var.aws_cluster_name}"
  port = var.k8s_secure_api_port
  protocol = "TCP"
  target_type = "ip"
  vpc_id = var.aws_vpc_id

  health_check {
    healthy_threshold = 2
    unhealthy_threshold = 2
    interval = 30
    protocol = "HTTPS"
    path = "/healthz"
  }
}

# Create a new AWS NLB Listener listen to target group
resource "aws_lb_listener" "aws-nlb-api-listener" {
  load_balancer_arn = aws_lb.aws-nlb-api.arn
  port = var.aws_nlb_api_port
  protocol = "TCP"

  default_action {
    type = "forward"
    target_group_arn = aws_lb_target_group.aws-nlb-api-tg.arn
  }
}
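The `subnets` expression above caps the subnet list at the number of availability zones, since an NLB accepts at most one subnet per AZ. The slice behaviour is easy to check interactively (values below are illustrative):

```bash
# terraform console evaluates pure expressions from stdin, even in an empty directory.
echo 'slice(["subnet-a", "subnet-b", "subnet-c"], 0, 2)' | terraform console
# -> ["subnet-a", "subnet-b"]
```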
11 contrib/terraform/aws/modules/nlb/outputs.tf (Normal file)
@@ -0,0 +1,11 @@
output "aws_nlb_api_id" {
  value = aws_lb.aws-nlb-api.id
}

output "aws_nlb_api_fqdn" {
  value = aws_lb.aws-nlb-api.dns_name
}

output "aws_nlb_api_tg_arn" {
  value = aws_lb_target_group.aws-nlb-api-tg.arn
}
@@ -6,8 +6,8 @@ variable "aws_vpc_id" {
  description = "AWS VPC ID"
}

variable "aws_elb_api_port" {
  description = "Port for AWS ELB"
variable "aws_nlb_api_port" {
  description = "Port for AWS NLB"
}

variable "k8s_secure_api_port" {
@@ -25,13 +25,14 @@ resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
||||
|
||||
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||
vpc_id = aws_vpc.cluster-vpc.id
|
||||
count = length(var.aws_avail_zones)
|
||||
availability_zone = element(var.aws_avail_zones, count.index)
|
||||
count = length(var.aws_cidr_subnets_public)
|
||||
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
|
||||
cidr_block = element(var.aws_cidr_subnets_public, count.index)
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -43,12 +44,14 @@ resource "aws_nat_gateway" "cluster-nat-gateway" {
|
||||
|
||||
resource "aws_subnet" "cluster-vpc-subnets-private" {
|
||||
vpc_id = aws_vpc.cluster-vpc.id
|
||||
count = length(var.aws_avail_zones)
|
||||
availability_zone = element(var.aws_avail_zones, count.index)
|
||||
count = length(var.aws_cidr_subnets_private)
|
||||
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
|
||||
cidr_block = element(var.aws_cidr_subnets_private, count.index)
|
||||
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}))
|
||||
}
|
||||
|
||||
|
||||
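With `count.index % length(var.aws_avail_zones)`, the subnet count is decoupled from the AZ count: subnets are dealt out round-robin across the zones. A tiny bash illustration of the same arithmetic:

```bash
azs=(eu-central-1a eu-central-1b)
for i in 0 1 2 3; do
  echo "subnet $i -> ${azs[$((i % ${#azs[@]}))]}"
done
# subnet 0 -> eu-central-1a
# subnet 1 -> eu-central-1b
# subnet 2 -> eu-central-1a
# subnet 3 -> eu-central-1b
```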
@@ -11,11 +11,11 @@ output "workers" {
|
||||
}
|
||||
|
||||
output "etcd" {
|
||||
value = join("\n", aws_instance.k8s-etcd.*.private_ip)
|
||||
value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
|
||||
}
|
||||
|
||||
output "aws_elb_api_fqdn" {
|
||||
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
||||
output "aws_nlb_api_fqdn" {
|
||||
value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}"
|
||||
}
|
||||
|
||||
output "inventory" {
|
||||
|
||||
@@ -9,6 +9,8 @@ aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]

#Bastion Host
aws_bastion_num = 1

aws_bastion_size = "t2.medium"

#Kubernetes Cluster
@@ -17,22 +19,26 @@ aws_kube_master_num = 3

aws_kube_master_size = "t2.medium"

aws_kube_master_disk_size = 50

aws_etcd_num = 3

aws_etcd_size = "t2.medium"

aws_etcd_disk_size = 50

aws_kube_worker_num = 4

aws_kube_worker_size = "t2.medium"

#Settings AWS ELB
aws_kube_worker_disk_size = 50

aws_elb_api_port = 6443
#Settings AWS NLB

aws_nlb_api_port = 6443

k8s_secure_api_port = 6443

kube_insecure_apiserver_address = "0.0.0.0"

default_tags = {
#  Env = "devtest"
#  Product = "kubernetes"
}
@@ -10,19 +10,18 @@ ${public_ip_address_bastion}
[kube_control_plane]
${list_master}


[kube_node]
${list_node}


[etcd]
${list_etcd}

[calico_rr]

[k8s_cluster:children]
kube_node
kube_control_plane

calico_rr

[k8s_cluster:vars]
${elb_api_fqdn}
${nlb_api_fqdn}
@@ -6,26 +6,34 @@ aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]

#Bastion Host
aws_bastion_size = "t2.medium"
# single AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/20"]
#aws_cidr_subnets_public = ["10.250.224.0/20"]

# 3+ AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"]
#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"]

#Bastion Host
aws_bastion_num = 1
aws_bastion_size = "t3.small"

#Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t3.medium"
aws_kube_master_disk_size = 50

aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
aws_etcd_num = 0
aws_etcd_size = "t3.medium"
aws_etcd_disk_size = 50

aws_etcd_num = 3
aws_etcd_size = "t2.medium"

aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
aws_kube_worker_num = 4
aws_kube_worker_size = "t3.medium"
aws_kube_worker_disk_size = 50

#Settings AWS ELB

aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443

default_tags = {
#  Env = "devtest"
@@ -8,25 +8,26 @@ aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"]

#Bastion Host
aws_bastion_ami = "ami-5900cc36"
aws_bastion_size = "t2.small"

aws_bastion_num = 1
aws_bastion_size = "t3.small"

#Kubernetes Cluster

aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
aws_kube_master_size = "t3.medium"
aws_kube_master_disk_size = 50

aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_etcd_size = "t3.medium"
aws_etcd_disk_size = 50

aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"

aws_cluster_ami = "ami-903df7ff"
aws_kube_worker_size = "t3.medium"
aws_kube_worker_disk_size = 50

#Settings AWS ELB

aws_elb_api_port = 6443
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = 0.0.0.0

default_tags = { }

inventory_file = "../../../inventory/hosts"
@@ -25,7 +25,7 @@ data "aws_ami" "distro" {
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
|
||||
values = ["debian-10-amd64-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
@@ -33,7 +33,7 @@ data "aws_ami" "distro" {
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["099720109477"] # Canonical
|
||||
owners = ["136693071363"] # Debian-10
|
||||
}
|
||||
|
||||
//AWS VPC Variables
|
||||
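Switching the AMI filter from Canonical's Ubuntu images to the Debian-10 owner account means the data source now resolves against Debian's published images. You can reproduce the lookup by hand with the AWS CLI (region/profile flags omitted):

```bash
# Find the newest Debian 10 amd64 HVM image, mirroring the data source filters above.
aws ec2 describe-images --owners 136693071363 \
  --filters 'Name=name,Values=debian-10-amd64-*' 'Name=virtualization-type,Values=hvm' \
  --query 'sort_by(Images, &CreationDate)[-1].{Id:ImageId,Name:Name}'
```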
@@ -63,10 +63,18 @@ variable "aws_bastion_size" {
|
||||
* The number should be divisable by the number of used
|
||||
* AWS Availability Zones without an remainder.
|
||||
*/
|
||||
variable "aws_bastion_num" {
|
||||
description = "Number of Bastion Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_master_num" {
|
||||
description = "Number of Kubernetes Master Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_master_disk_size" {
|
||||
description = "Disk size for Kubernetes Master Nodes (in GiB)"
|
||||
}
|
||||
|
||||
variable "aws_kube_master_size" {
|
||||
description = "Instance size of Kube Master Nodes"
|
||||
}
|
||||
@@ -75,6 +83,10 @@ variable "aws_etcd_num" {
|
||||
description = "Number of etcd Nodes"
|
||||
}
|
||||
|
||||
variable "aws_etcd_disk_size" {
|
||||
description = "Disk size for etcd Nodes (in GiB)"
|
||||
}
|
||||
|
||||
variable "aws_etcd_size" {
|
||||
description = "Instance size of etcd Nodes"
|
||||
}
|
||||
@@ -83,16 +95,20 @@ variable "aws_kube_worker_num" {
|
||||
description = "Number of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
variable "aws_kube_worker_disk_size" {
|
||||
description = "Disk size for Kubernetes Worker Nodes (in GiB)"
|
||||
}
|
||||
|
||||
variable "aws_kube_worker_size" {
|
||||
description = "Instance size of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
/*
|
||||
* AWS ELB Settings
|
||||
* AWS NLB Settings
|
||||
*
|
||||
*/
|
||||
variable "aws_elb_api_port" {
|
||||
description = "Port for AWS ELB"
|
||||
variable "aws_nlb_api_port" {
|
||||
description = "Port for AWS NLB"
|
||||
}
|
||||
|
||||
variable "k8s_secure_api_port" {
|
||||
|
||||
@@ -74,14 +74,23 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
* `master_sa_email`: Service account email to use for the master nodes *(Defaults to `""`, auto generate one)*
* `master_sa_scopes`: Service account scopes to use for the master nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `master_sa_email`: Service account email to use for the control plane nodes *(Defaults to `""`, auto generate one)*
* `master_sa_scopes`: Service account scopes to use for the control plane nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `master_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
  for the control plane nodes *(Defaults to `false`)*
* `master_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
  for extra disks added on the control plane nodes *(Defaults to `"pd-ssd"`)*
* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)*
* `worker_sa_scopes`: Service account scopes to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `worker_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
  for the worker nodes *(Defaults to `false`)*
* `worker_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
  for extra disks added on the worker nodes *(Defaults to `"pd-ssd"`)*

An example variables file can be found in `tfvars.json`.

@@ -1,8 +1,16 @@
terraform {
  required_providers {
    google = {
      source = "hashicorp/google"
      version = "~> 4.0"
    }
  }
}

provider "google" {
  credentials = file(var.keyfile_location)
  region = var.region
  project = var.gcp_project_id
  version = "~> 3.48"
}

module "kubernetes" {
@@ -13,12 +21,17 @@ module "kubernetes" {
  machines = var.machines
  ssh_pub_key = var.ssh_pub_key

  master_sa_email = var.master_sa_email
  master_sa_scopes = var.master_sa_scopes
  worker_sa_email = var.worker_sa_email
  worker_sa_scopes = var.worker_sa_scopes
  master_sa_email = var.master_sa_email
  master_sa_scopes = var.master_sa_scopes
  master_preemptible = var.master_preemptible
  master_additional_disk_type = var.master_additional_disk_type
  worker_sa_email = var.worker_sa_email
  worker_sa_scopes = var.worker_sa_scopes
  worker_preemptible = var.worker_preemptible
  worker_additional_disk_type = var.worker_additional_disk_type

  ssh_whitelist = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist = var.nodeport_whitelist
  ingress_whitelist = var.ingress_whitelist
}
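Pinning the google provider through `required_providers` (and dropping the deprecated in-provider `version` argument) means existing workspaces need a provider upgrade on their next init. A sketch of the usual sequence:

```bash
terraform init -upgrade   # re-resolve providers against the ~> 4.0 constraint
terraform providers       # confirm which provider versions were selected
terraform plan            # review anything the provider major bump changes
```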
@@ -5,6 +5,8 @@

resource "google_compute_network" "main" {
  name = "${var.prefix}-network"

  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "main" {
@@ -20,6 +22,8 @@ resource "google_compute_firewall" "deny_all" {

  priority = 1000

  source_ranges = ["0.0.0.0/0"]

  deny {
    protocol = "all"
  }
@@ -39,6 +43,8 @@ resource "google_compute_firewall" "allow_internal" {
}

resource "google_compute_firewall" "ssh" {
  count = length(var.ssh_whitelist) > 0 ? 1 : 0

  name = "${var.prefix}-ssh-firewall"
  network = google_compute_network.main.name

@@ -53,6 +59,8 @@ resource "google_compute_firewall" "ssh" {
}

resource "google_compute_firewall" "api_server" {
  count = length(var.api_server_whitelist) > 0 ? 1 : 0

  name = "${var.prefix}-api-server-firewall"
  network = google_compute_network.main.name

@@ -67,6 +75,8 @@ resource "google_compute_firewall" "api_server" {
}

resource "google_compute_firewall" "nodeport" {
  count = length(var.nodeport_whitelist) > 0 ? 1 : 0

  name = "${var.prefix}-nodeport-firewall"
  network = google_compute_network.main.name

@@ -81,11 +91,15 @@ resource "google_compute_firewall" "nodeport" {
}

resource "google_compute_firewall" "ingress_http" {
  count = length(var.ingress_whitelist) > 0 ? 1 : 0

  name = "${var.prefix}-http-ingress-firewall"
  network = google_compute_network.main.name

  priority = 100

  source_ranges = var.ingress_whitelist

  allow {
    protocol = "tcp"
    ports = ["80"]
@@ -93,11 +107,15 @@ resource "google_compute_firewall" "ingress_http" {
}

resource "google_compute_firewall" "ingress_https" {
  count = length(var.ingress_whitelist) > 0 ? 1 : 0

  name = "${var.prefix}-https-ingress-firewall"
  network = google_compute_network.main.name

  priority = 100

  source_ranges = var.ingress_whitelist

  allow {
    protocol = "tcp"
    ports = ["443"]
@@ -173,7 +191,7 @@ resource "google_compute_disk" "master" {
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
type = "pd-ssd"
|
||||
type = var.master_additional_disk_type
|
||||
zone = each.value.machine.zone
|
||||
size = each.value.disk_size
|
||||
|
||||
@@ -229,19 +247,28 @@ resource "google_compute_instance" "master" {
|
||||
|
||||
# Since we use google_compute_attached_disk we need to ignore this
|
||||
lifecycle {
|
||||
ignore_changes = ["attached_disk"]
|
||||
ignore_changes = [attached_disk]
|
||||
}
|
||||
|
||||
scheduling {
|
||||
preemptible = var.master_preemptible
|
||||
automatic_restart = !var.master_preemptible
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_forwarding_rule" "master_lb" {
|
||||
count = length(var.api_server_whitelist) > 0 ? 1 : 0
|
||||
|
||||
name = "${var.prefix}-master-lb-forward-rule"
|
||||
|
||||
port_range = "6443"
|
||||
|
||||
target = google_compute_target_pool.master_lb.id
|
||||
target = google_compute_target_pool.master_lb[count.index].id
|
||||
}
|
||||
|
||||
resource "google_compute_target_pool" "master_lb" {
|
||||
count = length(var.api_server_whitelist) > 0 ? 1 : 0
|
||||
|
||||
name = "${var.prefix}-master-lb-pool"
|
||||
instances = local.master_target_list
|
||||
}
|
||||
@@ -258,7 +285,7 @@ resource "google_compute_disk" "worker" {
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
type = "pd-ssd"
|
||||
type = var.worker_additional_disk_type
|
||||
zone = each.value.machine.zone
|
||||
size = each.value.disk_size
|
||||
|
||||
@@ -326,35 +353,48 @@ resource "google_compute_instance" "worker" {
|
||||
|
||||
# Since we use google_compute_attached_disk we need to ignore this
|
||||
lifecycle {
|
||||
ignore_changes = ["attached_disk"]
|
||||
ignore_changes = [attached_disk]
|
||||
}
|
||||
|
||||
scheduling {
|
||||
preemptible = var.worker_preemptible
|
||||
automatic_restart = !var.worker_preemptible
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_address" "worker_lb" {
|
||||
count = length(var.ingress_whitelist) > 0 ? 1 : 0
|
||||
|
||||
name = "${var.prefix}-worker-lb-address"
|
||||
address_type = "EXTERNAL"
|
||||
region = var.region
|
||||
}
|
||||
|
||||
resource "google_compute_forwarding_rule" "worker_http_lb" {
|
||||
count = length(var.ingress_whitelist) > 0 ? 1 : 0
|
||||
|
||||
name = "${var.prefix}-worker-http-lb-forward-rule"
|
||||
|
||||
ip_address = google_compute_address.worker_lb.address
|
||||
ip_address = google_compute_address.worker_lb[count.index].address
|
||||
port_range = "80"
|
||||
|
||||
target = google_compute_target_pool.worker_lb.id
|
||||
target = google_compute_target_pool.worker_lb[count.index].id
|
||||
}
|
||||
|
||||
resource "google_compute_forwarding_rule" "worker_https_lb" {
|
||||
count = length(var.ingress_whitelist) > 0 ? 1 : 0
|
||||
|
||||
name = "${var.prefix}-worker-https-lb-forward-rule"
|
||||
|
||||
ip_address = google_compute_address.worker_lb.address
|
||||
ip_address = google_compute_address.worker_lb[count.index].address
|
||||
port_range = "443"
|
||||
|
||||
target = google_compute_target_pool.worker_lb.id
|
||||
target = google_compute_target_pool.worker_lb[count.index].id
|
||||
}
|
||||
|
||||
resource "google_compute_target_pool" "worker_lb" {
|
||||
count = length(var.ingress_whitelist) > 0 ? 1 : 0
|
||||
|
||||
name = "${var.prefix}-worker-lb-pool"
|
||||
instances = local.worker_target_list
|
||||
}
|
||||
|
||||
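With the `count = length(...) > 0 ? 1 : 0` guard, each load-balancer object becomes an indexed resource that may not exist at all. After an apply you can see the effect in state; the addresses below are what such a run would typically show, assuming non-empty whitelists:

```bash
terraform state list | grep -E 'master_lb|worker_lb'
# google_compute_forwarding_rule.master_lb[0]
# google_compute_target_pool.master_lb[0]
# google_compute_address.worker_lb[0]
# google_compute_target_pool.worker_lb[0]
```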
@@ -19,9 +19,9 @@ output "worker_ip_addresses" {
|
||||
}
|
||||
|
||||
output "ingress_controller_lb_ip_address" {
|
||||
value = google_compute_address.worker_lb.address
|
||||
value = length(var.ingress_whitelist) > 0 ? google_compute_address.worker_lb.0.address : ""
|
||||
}
|
||||
|
||||
output "control_plane_lb_ip_address" {
|
||||
value = google_compute_forwarding_rule.master_lb.ip_address
|
||||
value = length(var.api_server_whitelist) > 0 ? google_compute_forwarding_rule.master_lb.0.ip_address : ""
|
||||
}
|
||||
|
||||
@@ -27,6 +27,14 @@ variable "master_sa_scopes" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "master_preemptible" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "master_additional_disk_type" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "worker_sa_email" {
|
||||
type = string
|
||||
}
|
||||
@@ -35,6 +43,14 @@ variable "worker_sa_scopes" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "worker_preemptible" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "worker_additional_disk_type" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "ssh_pub_key" {}
|
||||
|
||||
variable "ssh_whitelist" {
|
||||
@@ -49,6 +65,11 @@ variable "nodeport_whitelist" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "ingress_whitelist" {
|
||||
type = list(string)
|
||||
default = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
variable "private_network_cidr" {
|
||||
default = "10.0.10.0/24"
|
||||
}
|
||||
|
||||
@@ -16,6 +16,9 @@
  "nodeport_whitelist": [
    "1.2.3.4/32"
  ],
  "ingress_whitelist": [
    "0.0.0.0/0"
  ],

  "machines": {
    "master-0": {
@@ -24,7 +27,7 @@
      "zone": "us-central1-a",
      "additional_disks": {},
      "boot_disk": {
        "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
        "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
        "size": 50
      }
    },
@@ -38,7 +41,7 @@
      }
    },
    "boot_disk": {
      "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
      "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
      "size": 50
    }
  },
@@ -52,7 +55,7 @@
      }
    },
    "boot_disk": {
      "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
      "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
      "size": 50
    }
  }
@@ -44,6 +44,16 @@ variable "master_sa_scopes" {
|
||||
default = ["https://www.googleapis.com/auth/cloud-platform"]
|
||||
}
|
||||
|
||||
variable "master_preemptible" {
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "master_additional_disk_type" {
|
||||
type = string
|
||||
default = "pd-ssd"
|
||||
}
|
||||
|
||||
variable "worker_sa_email" {
|
||||
type = string
|
||||
default = ""
|
||||
@@ -54,6 +64,16 @@ variable "worker_sa_scopes" {
|
||||
default = ["https://www.googleapis.com/auth/cloud-platform"]
|
||||
}
|
||||
|
||||
variable "worker_preemptible" {
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "worker_additional_disk_type" {
|
||||
type = string
|
||||
default = "pd-ssd"
|
||||
}
|
||||
|
||||
variable ssh_pub_key {
|
||||
description = "Path to public SSH key file which is injected into the VMs."
|
||||
type = string
|
||||
@@ -70,3 +90,8 @@ variable api_server_whitelist {
|
||||
variable nodeport_whitelist {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "ingress_whitelist" {
|
||||
type = list(string)
|
||||
default = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
108 contrib/terraform/hetzner/README.md (Normal file)
@@ -0,0 +1,108 @@
# Kubernetes on Hetzner with Terraform

Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray.

## Overview

The setup looks like the following:

```text
       Kubernetes cluster
+--------------------------+
|   +--------------+       |
|   | +--------------+     |
| --> |              |     |
|   | | Master/etcd  |     |
|   | | node(s)      |     |
|   +-+              |     |
|     +--------------+     |
|            ^             |
|            |             |
|            v             |
|   +--------------+       |
|   | +--------------+     |
| --> |              |     |
|   | | Worker       |     |
|   | | node(s)      |     |
|   +-+              |     |
|     +--------------+     |
+--------------------------+
```

The nodes use a private network for node-to-node communication and a public interface for all external communication.

## Requirements

* Terraform 0.14.0 or newer

## Quickstart

NOTE: Assumes you are at the root of the kubespray repo.

For authentication in your cluster you can use the environment variables.

```bash
export HCLOUD_TOKEN=api-token
```

Copy the cluster configuration file.

```bash
CLUSTER=my-hetzner-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```

Edit `default.tfvars` to match your requirements.

Run Terraform to create the infrastructure.

```bash
terraform init ../../contrib/terraform/hetzner
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray.

It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:

```bash
ansible -i inventory.ini -m ping all
```

You can set up Kubernetes with kubespray using the generated inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Cloud controller

For better integration with the cloud you can install the [hcloud cloud controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and [CSI driver](https://github.com/hetznercloud/csi-driver).

Please read the instructions in both repos on how to install them.

## Teardown

You can teardown your infrastructure using the following Terraform command:

```bash
terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner
```

## Variables

* `prefix`: Prefix to add to all resources; if set to "", no prefix is set
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `network_zone`: The network zone where the cluster is running
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `size`: Size of the VM
  * `image`: The image to use for the VM
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to kubernetes workers on ports 80 and 443
44 contrib/terraform/hetzner/default.tfvars (Normal file)
@@ -0,0 +1,44 @@
prefix = "default"
zone = "hel1"
network_zone = "eu-central"
inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ingress_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
52 contrib/terraform/hetzner/main.tf (Normal file)
@@ -0,0 +1,52 @@
provider "hcloud" {}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix = var.prefix

  zone = var.zone

  machines = var.machines

  ssh_public_keys = var.ssh_public_keys
  network_zone = var.network_zone

  ssh_whitelist = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist = var.nodeport_whitelist
  ingress_whitelist = var.ingress_whitelist
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip_addresses),
      values(module.kubernetes.master_ip_addresses).*.public_ip,
      values(module.kubernetes.master_ip_addresses).*.private_ip,
      range(1, length(module.kubernetes.master_ip_addresses) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
      keys(module.kubernetes.worker_ip_addresses),
      values(module.kubernetes.worker_ip_addresses).*.public_ip,
      values(module.kubernetes.worker_ip_addresses).*.private_ip))
    list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
    list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
    network_id = module.kubernetes.network_id
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}
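The inventory is written by the `local-exec` provisioner and only rewritten when the rendered template changes. To force a rewrite without touching any servers, one option is to taint the null resource; a hedged sketch:

```bash
terraform taint null_resource.inventories
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
```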
122 contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf (Normal file)
@@ -0,0 +1,122 @@
resource "hcloud_network" "kubernetes" {
  name = "${var.prefix}-network"
  ip_range = var.private_network_cidr
}

resource "hcloud_network_subnet" "kubernetes" {
  type = "cloud"
  network_id = hcloud_network.kubernetes.id
  network_zone = var.network_zone
  ip_range = var.private_subnet_cidr
}

resource "hcloud_server" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  name = "${var.prefix}-${each.key}"
  image = each.value.image
  server_type = each.value.size
  location = var.zone

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      ssh_public_keys = var.ssh_public_keys
    }
  )

  firewall_ids = [hcloud_firewall.master.id]
}

resource "hcloud_server_network" "master" {
  for_each = hcloud_server.master

  server_id = each.value.id

  subnet_id = hcloud_network_subnet.kubernetes.id
}

resource "hcloud_server" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  name = "${var.prefix}-${each.key}"
  image = each.value.image
  server_type = each.value.size
  location = var.zone

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      ssh_public_keys = var.ssh_public_keys
    }
  )

  firewall_ids = [hcloud_firewall.worker.id]

}

resource "hcloud_server_network" "worker" {
  for_each = hcloud_server.worker

  server_id = each.value.id

  subnet_id = hcloud_network_subnet.kubernetes.id
}

resource "hcloud_firewall" "master" {
  name = "${var.prefix}-master-firewall"

  rule {
    direction = "in"
    protocol = "tcp"
    port = "22"
    source_ips = var.ssh_whitelist
  }

  rule {
    direction = "in"
    protocol = "tcp"
    port = "6443"
    source_ips = var.api_server_whitelist
  }
}

resource "hcloud_firewall" "worker" {
  name = "${var.prefix}-worker-firewall"

  rule {
    direction = "in"
    protocol = "tcp"
    port = "22"
    source_ips = var.ssh_whitelist
  }

  rule {
    direction = "in"
    protocol = "tcp"
    port = "80"
    source_ips = var.ingress_whitelist
  }

  rule {
    direction = "in"
    protocol = "tcp"
    port = "443"
    source_ips = var.ingress_whitelist
  }

  rule {
    direction = "in"
    protocol = "tcp"
    port = "30000-32767"
    source_ips = var.nodeport_whitelist
  }
}
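Once applied, the servers, private network, and the two firewalls above can be cross-checked with the `hcloud` CLI (assumes `HCLOUD_TOKEN` is exported, as in the README):

```bash
hcloud server list
hcloud network list
hcloud firewall list
```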
@@ -0,0 +1,27 @@
output "master_ip_addresses" {
  value = {
    for key, instance in hcloud_server.master :
    instance.name => {
      "private_ip" = hcloud_server_network.master[key].ip
      "public_ip" = hcloud_server.master[key].ipv4_address
    }
  }
}

output "worker_ip_addresses" {
  value = {
    for key, instance in hcloud_server.worker :
    instance.name => {
      "private_ip" = hcloud_server_network.worker[key].ip
      "public_ip" = hcloud_server.worker[key].ipv4_address
    }
  }
}

output "cluster_private_network_cidr" {
  value = var.private_subnet_cidr
}

output "network_id" {
  value = hcloud_network.kubernetes.id
}
@@ -0,0 +1,17 @@
#cloud-config

users:
  - default
  - name: ubuntu
    shell: /bin/bash
    sudo: "ALL=(ALL) NOPASSWD:ALL"
    ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
      - ${ssh_public_key}
%{ endfor ~}

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}
@@ -0,0 +1,44 @@
variable "zone" {
  type = string
}

variable "prefix" {}

variable "machines" {
  type = map(object({
    node_type = string
    size = string
    image = string
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "ingress_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "10.0.0.0/16"
}

variable "private_subnet_cidr" {
  default = "10.0.10.0/24"
}

variable "network_zone" {
  default = "eu-central"
}
@@ -0,0 +1,9 @@
terraform {
  required_providers {
    hcloud = {
      source = "hetznercloud/hcloud"
      version = "1.31.1"
    }
  }
  required_version = ">= 0.14"
}
7 contrib/terraform/hetzner/output.tf (Normal file)
@@ -0,0 +1,7 @@
output "master_ips" {
  value = module.kubernetes.master_ip_addresses
}

output "worker_ips" {
  value = module.kubernetes.worker_ip_addresses
}
19 contrib/terraform/hetzner/templates/inventory.tpl (Normal file)
@@ -0,0 +1,19 @@
[all]
${connection_strings_master}
${connection_strings_worker}

[kube-master]
${list_master}

[etcd]
${list_master}

[kube-node]
${list_worker}

[k8s-cluster:children]
kube-master
kube-node

[k8s-cluster:vars]
network_id=${network_id}
50 contrib/terraform/hetzner/variables.tf (Normal file)
@@ -0,0 +1,50 @@
variable "zone" {
  description = "The zone where to run the cluster"
}

variable "network_zone" {
  description = "The network zone where the cluster is running"
  default = "eu-central"
}

variable "prefix" {
  description = "Prefix for resource names"
  default = "default"
}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    size = string
    image = string
  }))
}

variable "ssh_public_keys" {
  description = "Public SSH keys which are injected into the VMs."
  type = list(string)
}

variable "ssh_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for ssh"
  type = list(string)
}

variable "api_server_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
  type = list(string)
}

variable "nodeport_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
  type = list(string)
}

variable "ingress_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for HTTP"
  type = list(string)
}

variable "inventory_file" {
  description = "Where to store the generated inventory file"
}
15 contrib/terraform/hetzner/versions.tf (Normal file)
@@ -0,0 +1,15 @@
terraform {
  required_providers {
    hcloud = {
      source = "hetznercloud/hcloud"
      version = "1.31.1"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.14"
}
@@ -1,16 +1,16 @@
|
||||
# Kubernetes on Packet with Terraform
|
||||
# Kubernetes on Equinix Metal with Terraform
|
||||
|
||||
Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
|
||||
[Packet](https://www.packet.com).
|
||||
[Equinix Metal](https://metal.equinix.com) ([formerly Packet](https://blog.equinix.com/blog/2020/10/06/equinix-metal-metal-and-more/)).
|
||||
|
||||
## Status
|
||||
|
||||
This will install a Kubernetes cluster on Packet bare metal. It should work in all locations and on most server types.
|
||||
This will install a Kubernetes cluster on Equinix Metal. It should work in all locations and on most server types.
|
||||
|
||||
## Approach
|
||||
|
||||
The terraform configuration inspects variables found in
|
||||
[variables.tf](variables.tf) to create resources in your Packet project.
|
||||
[variables.tf](variables.tf) to create resources in your Equinix Metal project.
|
||||
There is a [python script](../terraform.py) that reads the generated`.tfstate`
|
||||
file to generate a dynamic inventory that is consumed by [cluster.yml](../../..//cluster.yml)
|
||||
to actually install Kubernetes with Kubespray.
|
||||
@@ -35,13 +35,13 @@ now six total etcd replicas.
|
||||
## Requirements
|
||||
|
||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
||||
- Install dependencies: `sudo pip install -r requirements.txt`
|
||||
- Account with Packet Host
|
||||
- [Install Ansible dependencies](/docs/ansible.md#installing-ansible)
|
||||
- Account with Equinix Metal
|
||||
- An SSH key pair
|
||||
|
||||
## SSH Key Setup
|
||||
|
||||
An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tfvars will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank to prevent the duplicate key from being uploaded which will cause an error.
|
||||
An SSH keypair is required so Ansible can access the newly provisioned nodes (Equinix Metal hosts). By default, the public SSH key defined in cluster.tfvars will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Equinix Metal (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank to prevent the duplicate key from being uploaded which will cause an error.
|
||||
|
||||
If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command:
|
||||
|
||||
@@ -51,7 +51,7 @@ ssh-keygen -f ~/.ssh/id_rsa
|
||||
|
||||
## Terraform
|
||||
|
||||
Terraform will be used to provision all of the Packet resources with base software as appropriate.
|
||||
Terraform will be used to provision all of the Equinix Metal resources with base software as appropriate.
|
||||
|
||||
### Configuration
|
||||
|
||||
@@ -60,25 +60,25 @@ Terraform will be used to provision all of the Packet resources with base softwa
|
||||
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
|
||||
|
||||
```ShellSession
|
||||
cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
|
||||
cp -LRp contrib/terraform/metal/sample-inventory inventory/$CLUSTER
|
||||
cd inventory/$CLUSTER
|
||||
ln -s ../../contrib/terraform/packet/hosts
|
||||
ln -s ../../contrib/terraform/metal/hosts
|
||||
```
|
||||
|
||||
This will be the base for subsequent Terraform commands.
|
||||
|
||||
#### Packet API access
|
||||
#### Equinix Metal API access
|
||||
|
||||
Your Packet API key must be available in the `PACKET_AUTH_TOKEN` environment variable.
|
||||
Your Equinix Metal API key must be available in the `PACKET_AUTH_TOKEN` environment variable.
|
||||
This key is typically stored outside of the code repo since it is considered secret.
|
||||
If someone gets this key, they can startup/shutdown hosts in your project!
|
||||
|
||||
For more information on how to generate an API key or find your project ID, please see
|
||||
[API Integrations](https://support.packet.com/kb/articles/api-integrations)
|
||||
[Accounts Index](https://metal.equinix.com/developers/docs/accounts/).
|
||||
|
||||
The Packet Project ID associated with the key will be set later in cluster.tfvars.
|
||||
The Equinix Metal Project ID associated with the key will be set later in `cluster.tfvars`.
|
||||
|
||||
For more information about the API, please see [Packet API](https://www.packet.com/developers/api/)
|
||||
For more information about the API, please see [Equinix Metal API](https://metal.equinix.com/developers/api/).
|
||||
|
||||
Example:
|
||||
|
||||
@@ -101,7 +101,7 @@ This helps when identifying which hosts are associated with each cluster.
While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:

- cluster_name = the name of the inventory directory created above as $CLUSTER
-- packet_project_id = the Packet Project ID associated with the Packet API token above
+- metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above

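For instance, the corresponding lines in `cluster.tfvars` might read as follows (both values are illustrative placeholders):

```ShellSession
cluster_name     = "mycluster"
metal_project_id = "aaaabbbb-cccc-dddd-eeee-ffff00001111"
```
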
#### Enable localhost access

@@ -119,7 +119,7 @@ Once the Kubespray playbooks are run, a Kubernetes configuration file will be wr

In the cluster's inventory folder, the following files might be created (either by Terraform
or manually); to prevent you from pushing them accidentally, they are listed in a
-`.gitignore` file in the `terraform/packet` directory:
+`.gitignore` file in the `terraform/metal` directory:

- `.terraform`
- `.tfvars`
@@ -135,7 +135,7 @@ plugins. This is accomplished as follows:

```ShellSession
cd inventory/$CLUSTER
-terraform init ../../contrib/terraform/packet
+terraform init ../../contrib/terraform/metal
```

This should finish fairly quickly, telling you Terraform has successfully initialized and loaded the necessary modules.

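If you want a syntax check before provisioning, Terraform can validate the configuration first (a sketch; the `-chdir` form assumes Terraform 0.14 or newer):

```ShellSession
terraform -chdir="../../contrib/terraform/metal" validate
```
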
@@ -146,7 +146,7 @@ You can apply the Terraform configuration to your cluster with the following com
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
-terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
+terraform apply -var-file=cluster.tfvars ../../contrib/terraform/metal
export ANSIBLE_HOST_KEY_CHECKING=False
ansible-playbook -i hosts ../../cluster.yml
```

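Once the playbook finishes, a quick sanity check can confirm that the nodes registered. This sketch assumes `kubeconfig_localhost` was enabled so the admin kubeconfig was written to the inventory's `artifacts/` directory:

```ShellSession
kubectl --kubeconfig artifacts/admin.conf get nodes -o wide
```
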
@@ -156,7 +156,7 @@ ansible-playbook -i hosts ../../cluster.yml
You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
-terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
+terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/metal
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

@@ -1,16 +1,15 @@
-# Configure the Packet Provider
-provider "packet" {
-  version = "~> 2.0"
+# Configure the Equinix Metal Provider
+provider "metal" {
}

-resource "packet_ssh_key" "k8s" {
+resource "metal_ssh_key" "k8s" {
  count      = var.public_key_path != "" ? 1 : 0
  name       = "kubernetes-${var.cluster_name}"
  public_key = chomp(file(var.public_key_path))
}

-resource "packet_device" "k8s_master" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_master" {
+  depends_on = [metal_ssh_key.k8s]

  count    = var.number_of_k8s_masters
  hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
@@ -18,12 +17,12 @@ resource "packet_device" "k8s_master" {
  facilities       = [var.facility]
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
}

-resource "packet_device" "k8s_master_no_etcd" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_master_no_etcd" {
+  depends_on = [metal_ssh_key.k8s]

  count    = var.number_of_k8s_masters_no_etcd
  hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
@@ -31,12 +30,12 @@ resource "packet_device" "k8s_master_no_etcd" {
  facilities       = [var.facility]
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
}

-resource "packet_device" "k8s_etcd" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_etcd" {
+  depends_on = [metal_ssh_key.k8s]

  count    = var.number_of_etcd
  hostname = "${var.cluster_name}-etcd-${count.index + 1}"
@@ -44,12 +43,12 @@ resource "packet_device" "k8s_etcd" {
  facilities       = [var.facility]
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
  tags             = ["cluster-${var.cluster_name}", "etcd"]
}

-resource "packet_device" "k8s_node" {
-  depends_on = [packet_ssh_key.k8s]
+resource "metal_device" "k8s_node" {
+  depends_on = [metal_ssh_key.k8s]

  count    = var.number_of_k8s_nodes
  hostname = "${var.cluster_name}-k8s-node-${count.index + 1}"
@@ -57,7 +56,7 @@ resource "packet_device" "k8s_node" {
  facilities       = [var.facility]
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
-  project_id       = var.packet_project_id
+  project_id       = var.metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
}
contrib/terraform/metal/output.tf (new file, 16 lines)
@@ -0,0 +1,16 @@
+output "k8s_masters" {
+  value = metal_device.k8s_master.*.access_public_ipv4
+}
+
+output "k8s_masters_no_etc" {
+  value = metal_device.k8s_master_no_etcd.*.access_public_ipv4
+}
+
+output "k8s_etcds" {
+  value = metal_device.k8s_etcd.*.access_public_ipv4
+}
+
+output "k8s_nodes" {
+  value = metal_device.k8s_node.*.access_public_ipv4
+}
@@ -1,12 +1,12 @@
# your Kubernetes cluster name here
cluster_name = "mycluster"

-# Your Packet project ID. See https://support.packet.com/kb/articles/api-integrations
-packet_project_id = "Example-API-Token"
+# Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
+metal_project_id = "Example-API-Token"

-# The public SSH key to be uploaded into authorized_keys in bare metal Packet nodes provisioned
-# leave this value blank if the public key is already setup in the Packet project
-# Terraform will complain if the public key is setup in Packet
+# The public SSH key to be uploaded into authorized_keys on the bare metal Equinix Metal nodes provisioned
+# leave this value blank if the public key is already set up in the Equinix Metal project
+# Terraform will complain if the public key is already set up in Equinix Metal
public_key_path = "~/.ssh/id_rsa.pub"

# cluster location
@@ -2,12 +2,12 @@ variable "cluster_name" {
  default = "kubespray"
}

-variable "packet_project_id" {
-  description = "Your Packet project ID. See https://support.packet.com/kb/articles/api-integrations"
+variable "metal_project_id" {
+  description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/"
}

variable "operating_system" {
-  default = "ubuntu_16_04"
+  default = "ubuntu_20_04"
}

variable "public_key_path" {
@@ -24,23 +24,23 @@ variable "facility" {
}

variable "plan_k8s_masters" {
-  default = "c2.medium.x86"
+  default = "c3.small.x86"
}

variable "plan_k8s_masters_no_etcd" {
-  default = "c2.medium.x86"
+  default = "c3.small.x86"
}

variable "plan_etcd" {
-  default = "c2.medium.x86"
+  default = "c3.small.x86"
}

variable "plan_k8s_nodes" {
-  default = "c2.medium.x86"
+  default = "c3.medium.x86"
}

variable "number_of_k8s_masters" {
-  default = 0
+  default = 1
}

variable "number_of_k8s_masters_no_etcd" {
@@ -52,6 +52,6 @@ variable "number_of_etcd" {
}

variable "number_of_k8s_nodes" {
-  default = 0
+  default = 1
}
@@ -2,8 +2,8 @@
terraform {
  required_version = ">= 0.12"
  required_providers {
-    packet = {
-      source = "terraform-providers/packet"
+    metal = {
+      source = "equinix/metal"
    }
  }
}
@@ -17,9 +17,10 @@ most modern installs of OpenStack that support the basic services.
- [ELASTX](https://elastx.se/)
- [EnterCloudSuite](https://www.entercloudsuite.com/)
- [FugaCloud](https://fuga.cloud/)
-- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
+- [Open Telekom Cloud](https://cloud.telekom.de/)
- [OVH](https://www.ovh.com/)
- [Rackspace](https://www.rackspace.com/)
- [Safespring](https://www.safespring.com)
- [Ultimum](https://ultimum.io/)
- [VexxHost](https://vexxhost.com/)
- [Zetta](https://www.zetta.io/)
@@ -247,10 +248,12 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example the first compute resource will be named `example-kubernetes-1`. |
|`az_list` | List of Availability Zones available in your OpenStack cluster. |
|`network_name` | The name to be given to the internal network that will be generated |
|`use_existing_network` | Use an existing network with the name of `network_name`. `false` by default |
|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
|`dns_nameservers` | An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. |
|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to the bastion node instead of creating new random floating IPs. |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your OpenStack installation; you can get available flavor IDs through `openstack flavor list` |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
@@ -270,15 +273,21 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`k8s_allowed_remote_ips` | List of CIDRs allowed to initiate an SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
|`node_volume_type` | Volume type of the root volume for nodes, 'Default' by default |
|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
|`use_server_group` | Create and use OpenStack nova servergroups, default: false |
|`master_server_group_policy` | Enable and use OpenStack nova servergroups for masters with the set policy, default: "" (disabled) |
|`node_server_group_policy` | Enable and use OpenStack nova servergroups for nodes with the set policy, default: "" (disabled) |
|`etcd_server_group_policy` | Enable and use OpenStack nova servergroups for etcd with the set policy, default: "" (disabled) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0, private IPs will be used instead. Default value is 1. |
|`port_security_enabled` | Allows disabling port security by setting this to `false`. `true` by default |
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
|`k8s_nodes` | Map containing worker node definitions, see explanation below |
|`k8s_masters` | Map containing master node definitions, see the explanation for `k8s_nodes` and `sample-inventory/cluster.tfvars` |

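For instance, a `cluster.tfvars` fragment exercising a few of these variables might look like this (all values illustrative):

```ShellSession
use_existing_network       = false
port_security_enabled      = true
master_server_group_policy = "anti-affinity"
node_server_group_policy   = "soft-anti-affinity"
```
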
##### k8s_nodes
@@ -406,18 +415,39 @@ plugins. This is accomplished as follows:

```ShellSession
cd inventory/$CLUSTER
-terraform init ../../contrib/terraform/openstack
+terraform -chdir="../../contrib/terraform/openstack" init
```

This should finish fairly quickly, telling you Terraform has successfully initialized and loaded the necessary modules.

### Customizing with cloud-init

You can apply cloud-init based customization for the OpenStack instances before provisioning your cluster.
One common template is used for all instances. Adjust the file shown below:
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml`
For example, to enable OpenStack novnc access and ansible_user=root SSH access:

```ShellSession
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
ssh_pwauth: yes
chpasswd:
  list: |
    root:secret
  expire: False

## in some cases direct root ssh access via ssh key is required
disable_root: false
```

### Provisioning cluster

You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
-terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
+terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
```

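Once the apply completes, a quick reachability check against the generated inventory can catch SSH problems early (a sketch; it assumes your private key is loaded in the SSH agent):

```ShellSession
ansible -i hosts -m ping all
```
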
If you chose to create a bastion host, this script will create
@@ -432,7 +462,7 @@ pick it up automatically.
You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
-terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
+terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

@@ -1,14 +1,15 @@
module "network" {
  source = "./modules/network"

-  external_net       = var.external_net
-  network_name       = var.network_name
-  subnet_cidr        = var.subnet_cidr
-  cluster_name       = var.cluster_name
-  dns_nameservers    = var.dns_nameservers
-  network_dns_domain = var.network_dns_domain
-  use_neutron        = var.use_neutron
-  router_id          = var.router_id
+  external_net          = var.external_net
+  network_name          = var.network_name
+  subnet_cidr           = var.subnet_cidr
+  cluster_name          = var.cluster_name
+  dns_nameservers       = var.dns_nameservers
+  network_dns_domain    = var.network_dns_domain
+  use_neutron           = var.use_neutron
+  port_security_enabled = var.port_security_enabled
+  router_id             = var.router_id
}

module "ips" {
@@ -23,7 +24,9 @@ module "ips" {
  network_name            = var.network_name
  router_id               = module.network.router_id
  k8s_nodes               = var.k8s_nodes
+  k8s_masters             = var.k8s_masters
+  k8s_master_fips         = var.k8s_master_fips
  bastion_fips            = var.bastion_fips
  router_internal_port_id = module.network.router_internal_port_id
}

@@ -42,6 +45,7 @@ module "compute" {
  number_of_bastions                 = var.number_of_bastions
  number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip
  number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip
+  k8s_masters                        = var.k8s_masters
  k8s_nodes                          = var.k8s_nodes
  bastion_root_volume_size_in_gb     = var.bastion_root_volume_size_in_gb
  etcd_root_volume_size_in_gb        = var.etcd_root_volume_size_in_gb
@@ -50,6 +54,7 @@ module "compute" {
  gfs_root_volume_size_in_gb         = var.gfs_root_volume_size_in_gb
  gfs_volume_size_in_gb              = var.gfs_volume_size_in_gb
  master_volume_type                 = var.master_volume_type
+  node_volume_type                   = var.node_volume_type
  public_key_path                    = var.public_key_path
  image                              = var.image
  image_uuid                         = var.image_uuid
@@ -67,6 +72,7 @@ module "compute" {
  flavor_bastion                     = var.flavor_bastion
  k8s_master_fips                    = module.ips.k8s_master_fips
  k8s_master_no_etcd_fips            = module.ips.k8s_master_no_etcd_fips
+  k8s_masters_fips                   = module.ips.k8s_masters_fips
  k8s_node_fips                      = module.ips.k8s_node_fips
  k8s_nodes_fips                     = module.ips.k8s_nodes_fips
  bastion_fips                       = module.ips.bastion_fips
@@ -78,13 +84,18 @@ module "compute" {
  supplementary_node_groups          = var.supplementary_node_groups
  master_allowed_ports               = var.master_allowed_ports
  worker_allowed_ports               = var.worker_allowed_ports
  wait_for_floatingip                = var.wait_for_floatingip
  use_access_ip                      = var.use_access_ip
-  use_server_groups                  = var.use_server_groups
+  master_server_group_policy         = var.master_server_group_policy
+  node_server_group_policy           = var.node_server_group_policy
+  etcd_server_group_policy           = var.etcd_server_group_policy
  extra_sec_groups                   = var.extra_sec_groups
  extra_sec_groups_name              = var.extra_sec_groups_name

-  network_id                         = module.network.router_id
+  group_vars_path                    = var.group_vars_path
+  port_security_enabled              = var.port_security_enabled
+  force_null_port_security           = var.force_null_port_security
+  network_router_id                  = module.network.router_id
+  network_id                         = module.network.network_id
+  use_existing_network               = var.use_existing_network
}

output "private_subnet_id" {
@@ -15,6 +15,15 @@ data "openstack_images_image_v2" "image_master" {
  name = var.image_master == "" ? var.image : var.image_master
}

data "template_file" "cloudinit" {
  template = file("${path.module}/templates/cloudinit.yaml")
}

data "openstack_networking_network_v2" "k8s_network" {
  count = var.use_existing_network ? 1 : 0
  name  = var.network_name
}

resource "openstack_compute_keypair_v2" "k8s" {
  name       = "kubernetes-${var.cluster_name}"
  public_key = chomp(file(var.public_key_path))
@@ -130,36 +139,45 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
}
resource "openstack_compute_servergroup_v2" "k8s_master" {
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
count = var.master_server_group_policy != "" ? 1 : 0
|
||||
name = "k8s-master-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
policies = [var.master_server_group_policy]
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_node" {
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
count = var.node_server_group_policy != "" ? 1 : 0
|
||||
name = "k8s-node-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
policies = [var.node_server_group_policy]
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_etcd" {
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
count = var.etcd_server_group_policy != "" ? 1 : 0
|
||||
name = "k8s-etcd-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
policies = [var.etcd_server_group_policy]
|
||||
}
|
||||
|
||||
locals {
|
||||
# master groups
|
||||
master_sec_groups = compact([
|
||||
openstack_networking_secgroup_v2.k8s_master.name,
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
|
||||
openstack_networking_secgroup_v2.k8s_master.id,
|
||||
openstack_networking_secgroup_v2.k8s.id,
|
||||
var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].id : "",
|
||||
])
|
||||
# worker groups
|
||||
worker_sec_groups = compact([
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
openstack_networking_secgroup_v2.worker.name,
|
||||
var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "",
|
||||
openstack_networking_secgroup_v2.k8s.id,
|
||||
openstack_networking_secgroup_v2.worker.id,
|
||||
var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].id : "",
|
||||
])
|
||||
# bastion groups
|
||||
bastion_sec_groups = compact(concat([
|
||||
openstack_networking_secgroup_v2.k8s.id,
|
||||
openstack_networking_secgroup_v2.bastion[0].id,
|
||||
]))
|
||||
# etcd groups
|
||||
etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
|
||||
# glusterfs groups
|
||||
gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
|
||||
|
||||
# Image uuid
|
||||
image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
|
||||
@@ -169,12 +187,27 @@ locals {
|
||||
image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "bastion_port" {
|
||||
count = var.number_of_bastions
|
||||
name = "${var.cluster_name}-bastion-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion-${count.index + 1}"
|
||||
count = var.number_of_bastions
|
||||
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_bastion
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@@ -189,25 +222,35 @@ resource "openstack_compute_instance_v2" "bastion" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name,
|
||||
element(openstack_networking_secgroup_v2.bastion.*.name, count.index),
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "bastion"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${var.bastion_fips[0]}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_port" {
|
||||
count = var.number_of_k8s_masters
|
||||
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters
|
||||
@@ -215,6 +258,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_k8s_master
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
|
||||
dynamic "block_device" {
|
||||
@@ -231,13 +275,11 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@@ -246,15 +288,87 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_masters_port" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
||||
name = "${var.cluster_name}-k8s-${each.key}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
||||
name = "${var.cluster_name}-k8s-${each.key}"
|
||||
availability_zone = each.value.az
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = each.value.flavor
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||
content {
|
||||
uuid = local.image_to_use_master
|
||||
source_type = "image"
|
||||
volume_size = var.master_root_volume_size_in_gb
|
||||
volume_type = var.master_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
port = openstack_networking_port_v2.k8s_masters_port[each.key].id
|
||||
}
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}"
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
@@ -262,6 +376,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_k8s_master
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
|
||||
dynamic "block_device" {
|
||||
@@ -278,13 +393,11 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@@ -293,15 +406,29 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "etcd_port" {
|
||||
count = var.number_of_etcd
|
||||
name = "${var.cluster_name}-etcd-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index + 1}"
|
||||
count = var.number_of_etcd
|
||||
@@ -309,6 +436,7 @@ resource "openstack_compute_instance_v2" "etcd" {
|
||||
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_etcd
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||
@@ -323,13 +451,11 @@ resource "openstack_compute_instance_v2" "etcd" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
|
||||
for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_etcd[0].id
|
||||
}
|
||||
@@ -338,11 +464,25 @@ resource "openstack_compute_instance_v2" "etcd" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
|
||||
count = var.number_of_k8s_masters_no_floating_ip
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters_no_floating_ip
|
||||
@@ -365,13 +505,11 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@@ -380,11 +518,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" {
|
||||
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
|
||||
@@ -392,6 +544,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_k8s_master
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||
@@ -407,13 +560,11 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@@ -422,11 +573,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_node_port" {
|
||||
count = var.number_of_k8s_nodes
|
||||
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||
count = var.number_of_k8s_nodes
|
||||
@@ -434,6 +599,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_k8s_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@@ -441,6 +607,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
volume_type = var.node_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
@@ -448,13 +615,12 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@@ -463,15 +629,29 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
@@ -479,6 +659,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_k8s_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@@ -486,6 +667,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
volume_type = var.node_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
@@ -493,13 +675,11 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@@ -508,11 +688,25 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_nodes_port" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
@@ -520,6 +714,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = each.value.flavor
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@@ -527,6 +722,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
volume_type = var.node_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
@@ -534,13 +730,11 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@@ -549,15 +743,29 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no_floating.yml%{else}true%{endif}"
|
||||
command = "%{if each.value.floating_ip}sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
@@ -579,13 +787,11 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@@ -594,44 +800,46 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user_gfs
|
||||
kubespray_groups = "gfs-cluster,network-storage,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "bastion" {
|
||||
count = var.number_of_bastions
|
||||
floating_ip = var.bastion_fips[count.index]
|
||||
instance_id = element(openstack_compute_instance_v2.bastion.*.id, count.index)
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_master" {
|
||||
count = var.number_of_k8s_masters
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_master.*.id, count.index)
|
||||
floating_ip = var.k8s_master_fips[count.index]
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)
|
||||
floating_ip = var.k8s_master_no_etcd_fips[count.index]
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
|
||||
floating_ip = var.k8s_masters_fips[each.key].address
|
||||
port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
|
||||
floating_ip = var.k8s_master_no_etcd_fips[count.index]
|
||||
port_id = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_node" {
|
||||
count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0
|
||||
floating_ip = var.k8s_node_fips[count.index]
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_node[*].id, count.index)
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
|
||||
floating_ip = var.k8s_nodes_fips[each.key].address
|
||||
instance_id = openstack_compute_instance_v2.k8s_nodes[each.key].id
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
Some files were not shown because too many files have changed in this diff.